| hash | date | author | commit_message | is_merge | git_diff | type | masked_commit_message |
|---|---|---|---|---|---|---|---|
a4642b55e9b374ccd974b662e7b17a2389c3dcbd
|
2024-09-23 22:22:40
|
Paul Rogers
|
fix: Update AWS storage timeout error for Go 1.23 behavior (#14226)
| false
|
diff --git a/pkg/storage/chunk/client/aws/s3_storage_client.go b/pkg/storage/chunk/client/aws/s3_storage_client.go
index 2b5458af6af52..a7489bf847706 100644
--- a/pkg/storage/chunk/client/aws/s3_storage_client.go
+++ b/pkg/storage/chunk/client/aws/s3_storage_client.go
@@ -550,7 +550,9 @@ func (a *S3ObjectClient) IsStorageTimeoutErr(err error) bool {
// TODO(dannyk): move these out to be generic
// context errors are all client-side
if isContextErr(err) {
- return false
+ // Go 1.23 changed the type of the error returned by the http client when a timeout occurs
+ // while waiting for headers. This is a server side timeout.
+ return strings.Contains(err.Error(), "Client.Timeout")
}
// connection misconfiguration, or writing on a closed connection
|
fix
|
Update AWS storage timeout error for Go 1.23 behavior (#14226)
|
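The diff above hinges on a Go 1.23 behavior change: the `net/http` client's header-wait timeout now surfaces as a context error whose message contains "Client.Timeout", so it must be treated as a server-side timeout rather than a client cancellation. Below is a minimal runnable sketch of that classification; `isStorageTimeout` and `isContextErr` are simplified stand-ins for the helpers in the diff, not Loki's full `IsStorageTimeoutErr`.

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"strings"
)

// isContextErr mirrors the helper assumed by the diff: it reports whether
// err wraps a context cancellation or deadline error.
func isContextErr(err error) bool {
	return errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled)
}

// isStorageTimeout sketches the fixed check. Since Go 1.23 the http client's
// await-headers timeout is a context error mentioning "Client.Timeout", which
// is a server-side timeout; plain cancellations remain client-side.
func isStorageTimeout(err error) bool {
	if isContextErr(err) {
		return strings.Contains(err.Error(), "Client.Timeout")
	}
	return false // the real implementation falls through to other checks
}

func main() {
	err := fmt.Errorf("Get \"http://s3\": context deadline exceeded (Client.Timeout exceeded while awaiting headers): %w", context.DeadlineExceeded)
	fmt.Println(isStorageTimeout(err))              // true: server-side timeout
	fmt.Println(isStorageTimeout(context.Canceled)) // false: client-side cancel
}
```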
62c5c5c64182736f65ec9c903e0789986b264425
|
2024-07-26 19:52:38
|
George Robinson
|
fix: add missing flush op timeout (#13679)
| false
|
diff --git a/docs/sources/shared/configuration.md b/docs/sources/shared/configuration.md
index 0e1687d06cc78..a7774c34c3ce6 100644
--- a/docs/sources/shared/configuration.md
+++ b/docs/sources/shared/configuration.md
@@ -358,7 +358,7 @@ ingester_rf1:
# The timeout for an individual flush. Will be retried up to
# `flush-op-backoff-retries` times.
# CLI flag: -ingester-rf1.flush-op-timeout
- [flush_op_timeout: <duration> | default = 10m]
+ [flush_op_timeout: <duration> | default = 10s]
# Forget about ingesters having heartbeat timestamps older than
# `ring.kvstore.heartbeat_timeout`. This is equivalent to clicking on the
diff --git a/pkg/ingester-rf1/flush.go b/pkg/ingester-rf1/flush.go
index aa22166d4fd3e..55601337d350f 100644
--- a/pkg/ingester-rf1/flush.go
+++ b/pkg/ingester-rf1/flush.go
@@ -96,8 +96,10 @@ func (i *Ingester) flush(l log.Logger, j int, it *wal.PendingSegment) error {
}
func (i *Ingester) flushSegment(ctx context.Context, j int, w *wal.SegmentWriter) error {
- start := time.Now()
+ ctx, cancelFunc := context.WithTimeout(ctx, i.cfg.FlushOpTimeout)
+ defer cancelFunc()
+ start := time.Now()
i.metrics.flushesTotal.Add(1)
defer func() { i.metrics.flushDuration.Observe(time.Since(start).Seconds()) }()
diff --git a/pkg/ingester-rf1/ingester.go b/pkg/ingester-rf1/ingester.go
index 0b5a6c5fd724a..8ee0d0e8928b3 100644
--- a/pkg/ingester-rf1/ingester.go
+++ b/pkg/ingester-rf1/ingester.go
@@ -110,7 +110,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
f.DurationVar(&cfg.FlushOpBackoff.MinBackoff, "ingester-rf1.flush-op-backoff-min-period", 100*time.Millisecond, "Minimum backoff period when a flush fails. Each concurrent flush has its own backoff, see `ingester.concurrent-flushes`.")
f.DurationVar(&cfg.FlushOpBackoff.MaxBackoff, "ingester-rf1.flush-op-backoff-max-period", time.Minute, "Maximum backoff period when a flush fails. Each concurrent flush has its own backoff, see `ingester.concurrent-flushes`.")
f.IntVar(&cfg.FlushOpBackoff.MaxRetries, "ingester-rf1.flush-op-backoff-retries", 10, "Maximum retries for failed flushes.")
- f.DurationVar(&cfg.FlushOpTimeout, "ingester-rf1.flush-op-timeout", 10*time.Minute, "The timeout for an individual flush. Will be retried up to `flush-op-backoff-retries` times.")
+ f.DurationVar(&cfg.FlushOpTimeout, "ingester-rf1.flush-op-timeout", 10*time.Second, "The timeout for an individual flush. Will be retried up to `flush-op-backoff-retries` times.")
f.DurationVar(&cfg.MaxSegmentAge, "ingester-rf1.max-segment-age", 500*time.Millisecond, "The maximum age of a segment before it should be flushed. Increasing this value allows more time for a segment to grow to max-segment-size, but may increase latency if the write volume is too small.")
f.IntVar(&cfg.MaxSegmentSize, "ingester-rf1.max-segment-size", 8*1024*1024, "The maximum size of a segment before it should be flushed. It is not a strict limit, and segments can exceed the maximum size when individual appends are larger than the remaining capacity.")
f.IntVar(&cfg.MaxSegments, "ingester-rf1.max-segments", 10, "The maximum number of segments to buffer in-memory. Increasing this value allows for large bursts of writes to be buffered in memory, but may increase latency if the write volume exceeds the rate at which segments can be flushed.")
|
fix
|
add missing flush op timeout (#13679)
|
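The fix wraps each flush in a context with the configured timeout, so a single slow flush is abandoned and retried instead of hanging. A minimal sketch of that pattern follows; `flushSegment` and its `store` callback are illustrative stand-ins, and the 10s value matches the new `-ingester-rf1.flush-op-timeout` default.

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// flushSegment derives a child context bounded by the flush timeout and
// defers its cancel, mirroring the change in flush.go above.
func flushSegment(ctx context.Context, flushOpTimeout time.Duration, store func(context.Context) error) error {
	ctx, cancel := context.WithTimeout(ctx, flushOpTimeout)
	defer cancel()
	return store(ctx)
}

func main() {
	slowStore := func(ctx context.Context) error {
		select {
		case <-time.After(time.Second): // pretend the object store is slow
			return nil
		case <-ctx.Done():
			return ctx.Err()
		}
	}
	err := flushSegment(context.Background(), 100*time.Millisecond, slowStore)
	fmt.Println(errors.Is(err, context.DeadlineExceeded)) // true: flush timed out, eligible for retry
}
```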
9077c7548670a5c197101bad5a8cfc2521ed3c25
|
2024-03-21 00:13:02
|
Trevor Whitney
|
ci: CVE scanning and bump release pipeline (#12031)
| false
|
diff --git a/.github/workflows/snyk-pr-comment.yml b/.github/workflows/snyk-pr-comment.yml
deleted file mode 100644
index c54e9c55c3b58..0000000000000
--- a/.github/workflows/snyk-pr-comment.yml
+++ /dev/null
@@ -1,51 +0,0 @@
-name: PR Vulnerability Scan
-on: pull_request
-
-permissions:
- pull-requests: write
- issues: write
-
-jobs:
- snyk:
- name: Snyk Scan
- runs-on: ubuntu-latest
- if: ${{ !github.event.pull_request.head.repo.fork }}
- steps:
- - name: Checkout code
- uses: actions/checkout@master
- - name: Run Snyk to check for vulnerabilities
- uses: snyk/actions/golang@master
- continue-on-error: true # To make sure that PR comment is made
- env:
- SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }}
- with:
- command: test
- args: --severity-threshold=high --json-file-output=snyk.json
-
- - name: Prepare Snyk message
- run: |
- echo "Snyk scan found the following vulnerabilities:" > snyk.txt
-
- - name: Format Snyk Message
- uses: sergeysova/jq-action@v2
- continue-on-error: true
- with:
- cmd: jq -r '.vulnerabilities[] | "* **\(.severity)** - [\(.identifiers.CVE[0])] \(.title) in `\(.moduleName)` v\(.version). Fixed in \(.fixedIn)"' snyk.json >> snyk.txt
-
- - name: Determine whether to comment
- continue-on-error: true
- id: should-comment
- run: |
- if [[ $(wc -l < snyk.txt) -gt 1 ]]; then
- echo "\nTo see more details on these vulnerabilities, and how/where to fix them, please run `make scan-vulnerabilities` on your branch. If these were not introduced by your PR, please considering fixing them in `main` via a subsequent PR. Thanks!" >> snyk.txt
- exit 0;
- fi
-
- exit 1
-
- - name: Comment on PR with Snyk scan results
- uses: mshick/add-pr-comment@v2
- if: ${{ steps.should-comment.outcome == 'success' }}
- with:
- message-id: snyk-${{ github.event.number }}
- message-path: snyk.txt
diff --git a/.github/workflows/snyk.yml b/.github/workflows/snyk.yml
index 1c4c8a3616789..2239756cb67c8 100644
--- a/.github/workflows/snyk.yml
+++ b/.github/workflows/snyk.yml
@@ -1,11 +1,12 @@
name: Snyk Monitor Scanning
on:
release:
- types: [published]
+ types: [published]
push:
branches:
- - 'main'
+ - 'main'
- 'master'
+ - 'release-[0-9]+.[0-9]+.x'
workflow_dispatch:
jobs:
diff --git a/.github/workflows/trivy-pr-comment.yml b/.github/workflows/trivy-pr-comment.yml
deleted file mode 100644
index ca69cb1b3ba7d..0000000000000
--- a/.github/workflows/trivy-pr-comment.yml
+++ /dev/null
@@ -1,67 +0,0 @@
-name: PR Vulnerability Scan
-on: pull_request_target
-
-permissions:
- pull-requests: write
- issues: write
-
-jobs:
- trivy:
- name: Trivy Scan
- runs-on: ubuntu-20.04
- steps:
- - name: Checkout code
- uses: actions/checkout@v3
-
- - name: Build Loki Image
- run: |
- IMAGE_TAG="$(./tools/image-tag)"
- make loki-image
- echo "IMAGE_TAG=${IMAGE_TAG}" >> $GITHUB_ENV
-
- - name: Run Trivy image scanner
- uses: aquasecurity/trivy-action@master
- with:
- image-ref: "docker.io/grafana/loki:${{ env.IMAGE_TAG }}"
- format: "json"
- output: "trivy-image.json"
- severity: "CRITICAL,HIGH"
-
- - name: Run Trivy fs scanner
- uses: aquasecurity/trivy-action@master
- with:
- scan-type: "fs"
- scan-ref: "go.mod"
- format: "json"
- output: "trivy-fs.json"
- severity: "CRITICAL,HIGH"
-
- - name: Prepare Trivy Message
- run: |
- echo "Trivy scan found the following vulnerabilities:" > trivy.txt
-
- - name: Format Trivy Message
- uses: sergeysova/jq-action@v2
- continue-on-error: true
- with:
- cmd: |
- jq -r '.Results[] | select(.Vulnerabilities != null) | .Target as $target | .Type as $type | .Vulnerabilities[] | "* **\(.Severity)**, Target: \($target), Type: \($type) [\(.Title)](\(.PrimaryURL)) in `\(.PkgName)` v\(.InstalledVersion). Fixed in v\(.FixedVersion)"' trivy-image.json >> trivy.txt
- jq -r '.Results[] | select(.Vulnerabilities != null) | .Target as $target | .Type as $type | .Vulnerabilities[] | "* **\(.Severity)**, Target: \($target), Type: \($type) [\(.Title)](\(.PrimaryURL)) in `\(.PkgName)` v\(.InstalledVersion). Fixed in v\(.FixedVersion)"' trivy-fs.json >> trivy.text
-
- - name: Determine whether to comment
- continue-on-error: true
- id: should-comment
- run: |
- if [[ $(wc -l < trivy.txt) -gt 1 ]]; then
- echo "\nTo see more details on these vulnerabilities, and how/where to fix them, please run `make scan-vulnerabilities` on your branch. If these were not introduced by your PR, please considering fixing them in `main` via a subsequent PR. Thanks!" >> trivy.txt
- exit 0;
- fi
-
- exit 1
-
- - name: Comment on PR with Trivy scan results
- uses: mshick/add-pr-comment@v2
- if: ${{ steps.should-comment.outcome == 'success' }}
- with:
- message-id: trivy-${{ github.event.number }}
- message-path: trivy.txt
|
ci
|
CVE scanning and bump release pipeline (#12031)
|
f1425b6c24e9d90c99477f67289c3aa34f69573d
|
2024-10-03 00:43:57
|
Alexander Soelberg Heidarsson
|
feat: mixin, allow overriding of some labels by parameterizing mixin recording/alert rules (#11495)
| false
|
diff --git a/production/loki-mixin/alerts.libsonnet b/production/loki-mixin/alerts.libsonnet
index 02fb2a0ee5662..0d34086d9b3a5 100644
--- a/production/loki-mixin/alerts.libsonnet
+++ b/production/loki-mixin/alerts.libsonnet
@@ -6,12 +6,12 @@
rules: [
{
alert: 'LokiRequestErrors',
- expr: std.strReplace(|||
- 100 * sum(rate(loki_request_duration_seconds_count{status_code=~"5.."}[2m])) by (cluster, namespace, job, route)
+ expr: |||
+ 100 * sum(rate(loki_request_duration_seconds_count{status_code=~"5.."}[2m])) by (%(group_by_cluster)s, job, route)
/
- sum(rate(loki_request_duration_seconds_count[2m])) by (cluster, namespace, job, route)
+ sum(rate(loki_request_duration_seconds_count[2m])) by (%(group_by_cluster)s, job, route)
> 10
- |||, 'cluster', $._config.per_cluster_label),
+ ||| % $._config,
'for': '15m',
labels: {
severity: 'critical',
@@ -26,8 +26,8 @@
{
alert: 'LokiRequestPanics',
expr: |||
- sum(increase(loki_panic_total[10m])) by (%s, namespace, job) > 0
- ||| % $._config.per_cluster_label,
+ sum(increase(loki_panic_total[10m])) by (%(group_by_cluster)s, job) > 0
+ ||| % $._config,
labels: {
severity: 'critical',
},
@@ -41,8 +41,8 @@
{
alert: 'LokiRequestLatency',
expr: |||
- %s_namespace_job_route:loki_request_duration_seconds:99quantile{route!~"(?i).*tail.*|/schedulerpb.SchedulerForQuerier/QuerierLoop"} > 1
- ||| % $._config.per_cluster_label,
+ %(group_prefix_jobs)s_route:loki_request_duration_seconds:99quantile{route!~"(?i).*tail.*|/schedulerpb.SchedulerForQuerier/QuerierLoop"} > 1
+ ||| % $._config,
'for': '15m',
labels: {
severity: 'critical',
@@ -57,8 +57,8 @@
{
alert: 'LokiTooManyCompactorsRunning',
expr: |||
- sum(loki_boltdb_shipper_compactor_running) by (%s, namespace) > 1
- ||| % $._config.per_cluster_label,
+ sum(loki_boltdb_shipper_compactor_running) by (%(group_by_cluster)s) > 1
+ ||| % $._config,
'for': '5m',
labels: {
severity: 'warning',
diff --git a/production/loki-mixin/config.libsonnet b/production/loki-mixin/config.libsonnet
index eeea227c8f068..4b9053739e537 100644
--- a/production/loki-mixin/config.libsonnet
+++ b/production/loki-mixin/config.libsonnet
@@ -1,4 +1,7 @@
{
+ local makePrefix(groups) = std.join('_', groups),
+ local makeGroupBy(groups) = std.join(', ', groups),
+
_config+:: {
// Tags for dashboards.
tags: ['loki'],
@@ -11,6 +14,20 @@
// The label used to differentiate between different clusters.
per_cluster_label: 'cluster',
+ per_namespace_label: 'namespace',
+ per_job_label: 'job',
+
+ // Grouping labels, to uniquely identify and group by {jobs, clusters}
+ job_labels: [$._config.per_cluster_label, $._config.per_namespace_label, $._config.per_job_label],
+ cluster_labels: [$._config.per_cluster_label, $._config.per_namespace_label],
+
+ // Each group prefix is composed of `_`-separated labels
+ group_prefix_jobs: makePrefix($._config.job_labels),
+ group_prefix_clusters: makePrefix($._config.cluster_labels),
+
+ // Each group-by label list is `, `-separated and unique identifies
+ group_by_job: makeGroupBy($._config.job_labels),
+ group_by_cluster: makeGroupBy($._config.cluster_labels),
// Enable dashboard and panels for Grafana Labs internal components.
internal_components: false,
diff --git a/production/loki-mixin/dashboards.libsonnet b/production/loki-mixin/dashboards.libsonnet
index 8b1ced08f3d15..a28f276cd66da 100644
--- a/production/loki-mixin/dashboards.libsonnet
+++ b/production/loki-mixin/dashboards.libsonnet
@@ -1,4 +1,3 @@
-(import 'config.libsonnet') +
(import 'dashboards/loki-retention.libsonnet') +
(import 'dashboards/loki-chunks.libsonnet') +
(import 'dashboards/loki-logs.libsonnet') +
diff --git a/production/loki-mixin/mixin-ssd.libsonnet b/production/loki-mixin/mixin-ssd.libsonnet
index 01c59bb6ab7cc..273777ebeda9e 100644
--- a/production/loki-mixin/mixin-ssd.libsonnet
+++ b/production/loki-mixin/mixin-ssd.libsonnet
@@ -1,6 +1,4 @@
-(import 'dashboards.libsonnet') +
-(import 'alerts.libsonnet') +
-(import 'recording_rules.libsonnet') + {
+(import 'mixin.libsonnet') + {
grafanaDashboardFolder: 'Loki SSD',
_config+:: {
diff --git a/production/loki-mixin/mixin.libsonnet b/production/loki-mixin/mixin.libsonnet
index 53584824c6331..72673ff3e43d7 100644
--- a/production/loki-mixin/mixin.libsonnet
+++ b/production/loki-mixin/mixin.libsonnet
@@ -1,5 +1,6 @@
(import 'dashboards.libsonnet') +
(import 'alerts.libsonnet') +
+(import 'config.libsonnet') +
(import 'recording_rules.libsonnet') + {
grafanaDashboardFolder: 'Loki',
// Without this, configs is not taken into account
|
feat
|
mixin, allow overriding of some labels by parameterizing mixin recording/alert rules (#11495)
|
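The mixin change derives every grouping string from configurable label lists: `makePrefix` joins labels with `_` for recording-rule name prefixes, and `makeGroupBy` joins them with `, ` for `by()`/`without()` clauses, so overriding `per_cluster_label` propagates everywhere. A small Go sketch of the same derivation, using the default labels from `config.libsonnet`:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Defaults from config.libsonnet; overriding per_cluster_label
	// (e.g. to "k8s_cluster") flows into every derived string.
	perCluster, perNamespace, perJob := "cluster", "namespace", "job"

	jobLabels := []string{perCluster, perNamespace, perJob}
	clusterLabels := []string{perCluster, perNamespace}

	// makePrefix joins with "_", makeGroupBy joins with ", ".
	fmt.Println(strings.Join(jobLabels, "_"))      // cluster_namespace_job
	fmt.Println(strings.Join(clusterLabels, ", ")) // cluster, namespace
}
```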
8aa345bed4ed7cb043bd40bc3b88e913ac6c4fa1
|
2025-03-07 06:33:22
|
renovate[bot]
|
chore(deps): update terraform aws to ~> 5.90.0 (main) (#16618)
| false
|
diff --git a/production/terraform/modules/s3/versions.tf b/production/terraform/modules/s3/versions.tf
index f4ebc6fa47903..e4daeb2b122a3 100644
--- a/production/terraform/modules/s3/versions.tf
+++ b/production/terraform/modules/s3/versions.tf
@@ -2,7 +2,7 @@ terraform {
required_providers {
aws = {
source = "hashicorp/aws"
- version = "~> 5.89.0"
+ version = "~> 5.90.0"
}
random = {
|
chore
|
update terraform aws to ~> 5.90.0 (main) (#16618)
|
52d745fc65b7eb643059e0abc01ea2df099d3c4e
|
2024-12-27 22:43:57
|
Ashwanth
|
fix(blockbuilder): copy entries returned by record decoder (#15549)
| false
|
diff --git a/pkg/blockbuilder/builder/builder.go b/pkg/blockbuilder/builder/builder.go
index ad981e3183d0e..f0c2f2ed30656 100644
--- a/pkg/blockbuilder/builder/builder.go
+++ b/pkg/blockbuilder/builder/builder.go
@@ -20,6 +20,7 @@ import (
"github.com/grafana/loki/v3/pkg/compression"
"github.com/grafana/loki/v3/pkg/kafka"
"github.com/grafana/loki/v3/pkg/kafka/partition"
+ "github.com/grafana/loki/v3/pkg/logproto"
"github.com/grafana/loki/v3/pkg/storage/chunk"
"github.com/grafana/loki/v3/pkg/storage/config"
"github.com/grafana/loki/v3/pkg/storage/stores"
@@ -546,11 +547,15 @@ func (i *BlockBuilder) loadRecords(ctx context.Context, partitionID int32, offse
continue
}
+ // decorder reuses entries slice, so we need to copy it
+ entries := make([]logproto.Entry, len(stream.Entries))
+ copy(entries, stream.Entries)
+
converted = append(converted, AppendInput{
tenant: record.TenantID,
labels: labels,
labelsStr: stream.Labels,
- entries: stream.Entries,
+ entries: entries,
})
}
|
fix
|
copy entries returned by record decoder (#15549)
|
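The bug class fixed above is a decoder reusing its entries slice across calls, so any caller that retains the returned slice sees it clobbered by the next decode. The fix copies before retaining. A self-contained sketch of the failure and the fix; the `decoder` type here is an illustrative stand-in for the Kafka record decoder:

```go
package main

import "fmt"

type entry struct{ line string }

// decoder reuses its scratch slice across calls, like the record decoder in
// the diff: the second decode overwrites what the first returned.
type decoder struct{ scratch []entry }

func (d *decoder) decode(line string) []entry {
	d.scratch = d.scratch[:0]
	d.scratch = append(d.scratch, entry{line})
	return d.scratch
}

func main() {
	d := &decoder{}

	unsafeKeep := d.decode("first")
	entries := make([]entry, len(unsafeKeep)) // the fix: copy before retaining
	copy(entries, unsafeKeep)

	d.decode("second") // reuses the same backing array

	fmt.Println(unsafeKeep[0].line) // "second" — retained slice was clobbered
	fmt.Println(entries[0].line)    // "first"  — the copy is safe
}
```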
901320c0669c9e6e351310b0559b547ad7e12f13
|
2025-03-15 05:37:41
|
renovate[bot]
|
fix(deps): update dependency lucide-react to ^0.482.0 (main) (#16767)
| false
|
diff --git a/pkg/ui/frontend/package-lock.json b/pkg/ui/frontend/package-lock.json
index 3084dcb070a15..9fd68437c9ef0 100644
--- a/pkg/ui/frontend/package-lock.json
+++ b/pkg/ui/frontend/package-lock.json
@@ -36,7 +36,7 @@
"cmdk": "^1.0.0",
"date-fns": "^4.0.0",
"lodash": "^4.17.21",
- "lucide-react": "^0.481.0",
+ "lucide-react": "^0.482.0",
"next-themes": "^0.4.4",
"prism-react-renderer": "^2.4.1",
"react": "^19.0.0",
@@ -4985,9 +4985,9 @@
}
},
"node_modules/lucide-react": {
- "version": "0.481.0",
- "resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-0.481.0.tgz",
- "integrity": "sha512-NrvUDNFwgLIvHiwTEq9boa5Kiz1KdUT8RJ+wmNijwxdn9U737Fw42c43sRxJTMqhL+ySHpGRVCWpwiF+abrEjw==",
+ "version": "0.482.0",
+ "resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-0.482.0.tgz",
+ "integrity": "sha512-XM8PzHzSrg8ATmmO+fzf+JyYlVVdQnJjuyLDj2p4V2zEtcKeBNAqAoJIGFv1x2HSBa7kT8gpYUxwdQ0g7nypfw==",
"license": "ISC",
"peerDependencies": {
"react": "^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0"
diff --git a/pkg/ui/frontend/package.json b/pkg/ui/frontend/package.json
index 0601aadfe027c..26350c5e10ed3 100644
--- a/pkg/ui/frontend/package.json
+++ b/pkg/ui/frontend/package.json
@@ -38,7 +38,7 @@
"cmdk": "^1.0.0",
"date-fns": "^4.0.0",
"lodash": "^4.17.21",
- "lucide-react": "^0.481.0",
+ "lucide-react": "^0.482.0",
"next-themes": "^0.4.4",
"prism-react-renderer": "^2.4.1",
"react": "^19.0.0",
|
fix
|
update dependency lucide-react to ^0.482.0 (main) (#16767)
|
3039ca06f4d0fe964943bebcc29d9a4991143635
|
2022-11-11 19:58:14
|
Dominik Philp
|
docs: Fix typo in helm scalable installation documentation, object storage section (#7676)
| false
|
diff --git a/docs/sources/installation/helm/install-scalable/index.md b/docs/sources/installation/helm/install-scalable/index.md
index 3cc348520c14c..e9c95b164e31b 100644
--- a/docs/sources/installation/helm/install-scalable/index.md
+++ b/docs/sources/installation/helm/install-scalable/index.md
@@ -62,7 +62,7 @@ It is not possible to run the scalable mode with the `filesystem` storage.
insecure: false
```
- Consult the [Reference](../reference) for configuring otehr storage providers.
+ Consult the [Reference](../reference) for configuring other storage providers.
- Define the AWS S3 credentials in the file.
|
docs
|
Fix typo in helm scalable installation documentation, object storage section (#7676)
|
e71d33bbeed6ce4bf7fc10686ff7a6321f7ec089
|
2021-03-16 13:44:59
|
Kaviraj
|
doc(gcplog): Add note on scraping multiple GCP projects (#3430)
| false
|
diff --git a/docs/sources/clients/promtail/gcplog-cloud.md b/docs/sources/clients/promtail/gcplog-cloud.md
index 3343ee2dd1d04..5d995e4b4d39b 100644
--- a/docs/sources/clients/promtail/gcplog-cloud.md
+++ b/docs/sources/clients/promtail/gcplog-cloud.md
@@ -72,6 +72,10 @@ We need a service account with following permissions.
This enables promtail to read log entries from the pubsub subscription created before.
+you can find example for promtail scrape config for `gcplog` [here](../scraping/#gcplog-scraping)
+
+If you are scraping logs from multiple GCP projects, then this serviceaccount should have above permissions in all the projects you are tyring to scrape.
+
## Operations
Sometimes you may wish to clear the pending pubsub queue containing logs.
@@ -88,13 +92,13 @@ To delete all the old messages until now, set `--time` to current time.
gcloud pubsub subscriptions seek projects/my-project/subscriptions/cloud-logs --time=$(date +%Y-%m-%dT%H:%M:%S)
```
-# Advanced log filter
+## Advanced log filter
So far we've covered admitting GCS bucket logs into Loki, but often one may need to add multiple cloud resource logs and may also need to exclude unnecessary logs. The following is a more complex example.
We use the `log-filter` option to include logs and the `exclusion` option to exclude them.
-## Use Case
+### Use Case
Include following cloud resource logs
- GCS bucket
- Kubernetes
|
doc
|
Add note on scraping multiple GCP projects (#3430)
|
f4119de4df2b0908c99785ceec65a205e833712b
|
2023-02-10 21:58:37
|
Hervé Nicol
|
helm: values to override read/write/backend paths in gateway config (#8490)
| false
|
diff --git a/docs/sources/installation/helm/reference.md b/docs/sources/installation/helm/reference.md
index dbb3bc95a7485..a8836f1bf7077 100644
--- a/docs/sources/installation/helm/reference.md
+++ b/docs/sources/installation/helm/reference.md
@@ -1013,6 +1013,33 @@ false
<td><pre lang="json">
{}
</pre>
+</td>
+ </tr>
+ <tr>
+ <td>gateway.nginxConfig.customBackendUrl</td>
+ <td>string</td>
+ <td>Override Backend URL</td>
+ <td><pre lang="json">
+null
+</pre>
+</td>
+ </tr>
+ <tr>
+ <td>gateway.nginxConfig.customReadUrl</td>
+ <td>string</td>
+ <td>Override Read URL</td>
+ <td><pre lang="json">
+null
+</pre>
+</td>
+ </tr>
+ <tr>
+ <td>gateway.nginxConfig.customWriteUrl</td>
+ <td>string</td>
+ <td>Override Write URL</td>
+ <td><pre lang="json">
+null
+</pre>
</td>
</tr>
<tr>
diff --git a/production/helm/loki/CHANGELOG.md b/production/helm/loki/CHANGELOG.md
index 44b7064b32c08..54919cbe57da3 100644
--- a/production/helm/loki/CHANGELOG.md
+++ b/production/helm/loki/CHANGELOG.md
@@ -13,6 +13,8 @@ Entries should include a reference to the pull request that introduced the chang
[//]: # (<AUTOMATED_UPDATES_LOCATOR> : do not remove this line. Add your changelog bellow this line. This locator is used by CI pipeline to find the place where to put changelog entry.)
+- [FEATURE] add `gateway.nginxConfig.customReadUrl`, `gateway.nginxConfig.customWriteUrl` and `gateway.nginxConfig.customBackendUrl` to override read/write/backend paths.
+
## 4.5.1
- [BUGFIX] Fix rendering of namespace in provisioner job.
diff --git a/production/helm/loki/templates/_helpers.tpl b/production/helm/loki/templates/_helpers.tpl
index af5291fc79cf4..bf1a08dfb6878 100644
--- a/production/helm/loki/templates/_helpers.tpl
+++ b/production/helm/loki/templates/_helpers.tpl
@@ -588,73 +588,87 @@ http {
{{- $writeHost = include "loki.singleBinaryFullname" .}}
{{- end }}
+ {{- $writeUrl := printf "http://%s.%s.svc.%s:3100" $writeHost .Release.Namespace .Values.global.clusterDomain }}
+ {{- $readUrl := printf "http://%s.%s.svc.%s:3100" $readHost .Release.Namespace .Values.global.clusterDomain }}
+ {{- $backendUrl := printf "http://%s.%s.svc.%s:3100" $backendHost .Release.Namespace .Values.global.clusterDomain }}
+
+ {{- if .Values.gateway.nginxConfig.customWriteUrl }}
+ {{- $writeUrl = .Values.gateway.nginxConfig.customWriteUrl }}
+ {{- end }}
+ {{- if .Values.gateway.nginxConfig.customReadUrl }}
+ {{- $readUrl = .Values.gateway.nginxConfig.customReadUrl }}
+ {{- end }}
+ {{- if .Values.gateway.nginxConfig.customBackendUrl }}
+ {{- $backendUrl = .Values.gateway.nginxConfig.customBackendUrl }}
+ {{- end }}
+
location = /api/prom/push {
- proxy_pass http://{{ $writeHost }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;
+ proxy_pass {{ $writeUrl }}$request_uri;
}
location = /api/prom/tail {
- proxy_pass http://{{ $readHost }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;
+ proxy_pass {{ $readUrl }}$request_uri;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
}
location ~ /api/prom/.* {
- proxy_pass http://{{ $readHost }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;
+ proxy_pass {{ $readUrl }}$request_uri;
}
location ~ /prometheus/api/v1/alerts.* {
- proxy_pass http://{{ $backendHost }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;
+ proxy_pass {{ $backendUrl }}$request_uri;
}
location ~ /prometheus/api/v1/rules.* {
- proxy_pass http://{{ $backendHost }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;
+ proxy_pass {{ $backendUrl }}$request_uri;
}
location ~ /ruler/.* {
- proxy_pass http://{{ $backendHost }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;
+ proxy_pass {{ $backendUrl }}$request_uri;
}
location = /loki/api/v1/push {
- proxy_pass http://{{ $writeHost }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;
+ proxy_pass {{ $writeUrl }}$request_uri;
}
location = /loki/api/v1/tail {
- proxy_pass http://{{ $readHost }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;
+ proxy_pass {{ $readUrl }}$request_uri;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
}
location ~ /compactor/.* {
- proxy_pass http://{{ $backendHost }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;
+ proxy_pass {{ $backendUrl }}$request_uri;
}
location ~ /distributor/.* {
- proxy_pass http://{{ $writeHost }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;
+ proxy_pass {{ $writeUrl }}$request_uri;
}
location ~ /ring {
- proxy_pass http://{{ $writeHost }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;
+ proxy_pass {{ $writeUrl }}$request_uri;
}
location ~ /ingester/.* {
- proxy_pass http://{{ $writeHost }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;
+ proxy_pass {{ $writeUrl }}$request_uri;
}
location ~ /store-gateway/.* {
- proxy_pass http://{{ $backendHost }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;
+ proxy_pass {{ $backendUrl }}$request_uri;
}
location ~ /query-scheduler/.* {
- proxy_pass http://{{ $backendHost }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;
+ proxy_pass {{ $backendUrl }}$request_uri;
}
location ~ /scheduler/.* {
- proxy_pass http://{{ $backendHost }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;
+ proxy_pass {{ $backendUrl }}$request_uri;
}
location ~ /loki/api/.* {
- proxy_pass http://{{ $readHost }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;
+ proxy_pass {{ $readUrl }}$request_uri;
}
location ~ /admin/api/.* {
- proxy_pass http://{{ $writeHost }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;
+ proxy_pass {{ $writeUrl }}$request_uri;
}
{{- with .Values.gateway.nginxConfig.serverSnippet }}
diff --git a/production/helm/loki/values.yaml b/production/helm/loki/values.yaml
index 6058d189833d4..2eb05cd2ba109 100644
--- a/production/helm/loki/values.yaml
+++ b/production/helm/loki/values.yaml
@@ -1197,6 +1197,12 @@ gateway:
serverSnippet: ""
# -- Allows appending custom configuration to the http block
httpSnippet: ""
+ # -- Override Read URL
+ customReadUrl: null
+ # -- Override Write URL
+ customWriteUrl: null
+ # -- Override Backend URL
+ customBackendUrl: null
# -- Config file contents for Nginx. Passed through the `tpl` function to allow templating
# @default -- See values.yaml
file: |
|
helm
|
values to override read/write/backend paths in gateway config (#8490)
|
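Helm templates are Go templates, so the `_helpers.tpl` pattern above (compute a default URL, then let a `customWriteUrl`-style value override it) can be sketched directly with `text/template`. The field names below are stand-ins for `.Values.gateway.nginxConfig.*`, not the chart's actual value paths:

```go
package main

import (
	"fmt"
	"os"
	"text/template"
)

// Default-with-override: assign a computed URL to a template variable, then
// reassign it when a custom value is set, as the chart does for write/read/
// backend URLs.
const tpl = `
{{- $writeUrl := printf "http://%s.%s.svc.cluster.local:3100" .WriteHost .Namespace -}}
{{- if .CustomWriteUrl }}{{ $writeUrl = .CustomWriteUrl }}{{ end -}}
location = /loki/api/v1/push {
  proxy_pass {{ $writeUrl }}$request_uri;
}
`

func main() {
	t := template.Must(template.New("nginx").Parse(tpl))
	for _, custom := range []string{"", "http://my-gateway:8080"} {
		_ = t.Execute(os.Stdout, map[string]string{
			"WriteHost":      "loki-write",
			"Namespace":      "loki",
			"CustomWriteUrl": custom, // empty string keeps the default
		})
		fmt.Println()
	}
}
```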
66d71383c7b926185cd4e93bdda51dc3ee4fb317
|
2024-08-08 21:01:41
|
Grot (@grafanabot)
|
chore: [main] chore(release-3.1.x): release 3.1.1 (#13817)
| false
|
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 6c893e10c6631..8517b670b8b4f 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,4 +1,4 @@
{
- ".": "3.1.0",
+ ".": "3.1.1",
"operator": "0.6.1"
}
diff --git a/CHANGELOG.md b/CHANGELOG.md
index f0c6b2959cf7b..f37f14a95cc0d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,17 @@
# Changelog
+## [3.1.1](https://github.com/grafana/loki/compare/v3.1.0...v3.1.1) (2024-08-08)
+
+
+### Features
+
+* **loki:** add ability to disable AWS S3 dual stack endpoints usage ([#13795](https://github.com/grafana/loki/issues/13795)) ([464ac73](https://github.com/grafana/loki/commit/464ac736a6fb70b673ee3cec21049b18d353cadb))
+
+
+### Bug Fixes
+
+* **deps:** bumped dependencies versions to resolve CVEs ([#13789](https://github.com/grafana/loki/issues/13789)) ([34206cd](https://github.com/grafana/loki/commit/34206cd2d6290566034710ae6c2d08af8804bc91))
+
## [3.0.0](https://github.com/grafana/loki/compare/v2.9.6...v3.0.0) (2024-04-08)
Starting with the 3.0 release we began using [conventional commits](https://www.conventionalcommits.org/en/v1.0.0/) and [release-please](https://github.com/googleapis/release-please) to generate the changelog. As a result the format has changed slightly from previous releases.
|
chore
|
[main] chore(release-3.1.x): release 3.1.1 (#13817)
|
0c24a704a2c710962fd3e740c737e26f3636c05f
|
2024-11-22 03:21:36
|
renovate[bot]
|
fix(deps): update module github.com/aws/aws-sdk-go-v2/service/s3 to v1.68.0 (#15060)
| false
|
diff --git a/nix/packages/loki.nix b/nix/packages/loki.nix
index 2893ac54de3f8..ee161437786e6 100644
--- a/nix/packages/loki.nix
+++ b/nix/packages/loki.nix
@@ -5,7 +5,7 @@ let
pname = "lambda-promtail";
src = ./../../tools/lambda-promtail;
- vendorHash = "sha256-jMpd0Seu8zWj7uE7fxH2TDYuZwr+ESDf4H++5SNK1Xg=";
+ vendorHash = "sha256-6soQ9GaQSvxg3ivJHFZbqye7+TF8XLJuylOZz+Zjal0=";
doCheck = false;
diff --git a/tools/lambda-promtail/go.mod b/tools/lambda-promtail/go.mod
index ba9c463497926..e485cf814d2bd 100644
--- a/tools/lambda-promtail/go.mod
+++ b/tools/lambda-promtail/go.mod
@@ -6,7 +6,7 @@ require (
github.com/aws/aws-lambda-go v1.47.0
github.com/aws/aws-sdk-go-v2 v1.32.5
github.com/aws/aws-sdk-go-v2/config v1.28.5
- github.com/aws/aws-sdk-go-v2/service/s3 v1.67.1
+ github.com/aws/aws-sdk-go-v2/service/s3 v1.68.0
github.com/go-kit/log v0.2.1
github.com/gogo/protobuf v1.3.2
github.com/golang/snappy v0.0.4
diff --git a/tools/lambda-promtail/go.sum b/tools/lambda-promtail/go.sum
index 5a472a08a832c..44abdc2e222e7 100644
--- a/tools/lambda-promtail/go.sum
+++ b/tools/lambda-promtail/go.sum
@@ -74,8 +74,8 @@ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.5 h1:wtpJ4zcwr
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.5/go.mod h1:qu/W9HXQbbQ4+1+JcZp0ZNPV31ym537ZJN+fiS7Ti8E=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.5 h1:P1doBzv5VEg1ONxnJss1Kh5ZG/ewoIE4MQtKKc6Crgg=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.5/go.mod h1:NOP+euMW7W3Ukt28tAxPuoWao4rhhqJD3QEBk7oCg7w=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.67.1 h1:LXLnDfjT/P6SPIaCE86xCOjJROPn4FNB2EdN68vMK5c=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.67.1/go.mod h1:ralv4XawHjEMaHOWnTFushl0WRqim/gQWesAMF6hTow=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.68.0 h1:bFpcqdwtAEsgpZXvkTxIThFQx/EM0oV6kXmfFIGjxME=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.68.0/go.mod h1:ralv4XawHjEMaHOWnTFushl0WRqim/gQWesAMF6hTow=
github.com/aws/aws-sdk-go-v2/service/sso v1.24.6 h1:3zu537oLmsPfDMyjnUS2g+F2vITgy5pB74tHI+JBNoM=
github.com/aws/aws-sdk-go-v2/service/sso v1.24.6/go.mod h1:WJSZH2ZvepM6t6jwu4w/Z45Eoi75lPN7DcydSRtJg6Y=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.5 h1:K0OQAsDywb0ltlFrZm0JHPY3yZp/S9OaoLU33S7vPS8=
|
fix
|
update module github.com/aws/aws-sdk-go-v2/service/s3 to v1.68.0 (#15060)
|
eb8a36306674c497d8b0150b482f275e2c00f6c9
|
2024-07-01 13:48:01
|
Ashwanth
|
fix(sharding): use without() grouping when merging `avg_over_time` shard results (#12176)
| false
|
diff --git a/pkg/logql/downstream_test.go b/pkg/logql/downstream_test.go
index 948aef03876bc..ae313ea1fc48f 100644
--- a/pkg/logql/downstream_test.go
+++ b/pkg/logql/downstream_test.go
@@ -57,6 +57,9 @@ func TestMappingEquivalence(t *testing.T) {
{`sum(rate({a=~".+"} |= "foo" != "foo"[1s]) or vector(1))`, false, nil},
{`avg_over_time({a=~".+"} | logfmt | unwrap value [1s])`, false, nil},
{`avg_over_time({a=~".+"} | logfmt | unwrap value [1s]) by (a)`, true, nil},
+ {`avg_over_time({a=~".+"} | logfmt | unwrap value [1s]) without (stream)`, true, nil},
+ {`avg_over_time({a=~".+"} | logfmt | drop level | unwrap value [1s])`, true, nil},
+ {`avg_over_time({a=~".+"} | logfmt | drop level | unwrap value [1s]) without (stream)`, true, nil},
{`quantile_over_time(0.99, {a=~".+"} | logfmt | unwrap value [1s])`, true, []string{ShardQuantileOverTime}},
{
`
diff --git a/pkg/logql/shardmapper.go b/pkg/logql/shardmapper.go
index e55b01504537f..003362913171d 100644
--- a/pkg/logql/shardmapper.go
+++ b/pkg/logql/shardmapper.go
@@ -397,13 +397,18 @@ func (m ShardMapper) mapRangeAggregationExpr(expr *syntax.RangeAggregationExpr,
return m.mapSampleExpr(expr, r)
}
+ grouping := expr.Grouping
+ if grouping == nil {
+ grouping = &syntax.Grouping{Without: true}
+ }
+
// avg_over_time() by (foo) -> sum by (foo) (sum_over_time()) / sum by (foo) (count_over_time())
lhs, lhsBytesPerShard, err := m.mapVectorAggregationExpr(&syntax.VectorAggregationExpr{
Left: &syntax.RangeAggregationExpr{
Left: expr.Left,
Operation: syntax.OpRangeTypeSum,
},
- Grouping: expr.Grouping,
+ Grouping: grouping,
Operation: syntax.OpTypeSum,
}, r, false)
if err != nil {
@@ -416,12 +421,21 @@ func (m ShardMapper) mapRangeAggregationExpr(expr *syntax.RangeAggregationExpr,
return nil, 0, err
}
+ // labelSampleExtractor includes the unwrap identifier in without() list if no grouping is specified
+ // similar change is required for the RHS here to ensure the resulting label sets match
+ rhsGrouping := *grouping
+ if rhsGrouping.Without {
+ if expr.Left.Unwrap != nil {
+ rhsGrouping.Groups = append(rhsGrouping.Groups, expr.Left.Unwrap.Identifier)
+ }
+ }
+
rhs, rhsBytesPerShard, err := m.mapVectorAggregationExpr(&syntax.VectorAggregationExpr{
Left: &syntax.RangeAggregationExpr{
Left: countOverTimeSelector,
Operation: syntax.OpRangeTypeCount,
},
- Grouping: expr.Grouping,
+ Grouping: &rhsGrouping,
Operation: syntax.OpTypeSum,
}, r, false)
if err != nil {
diff --git a/pkg/logql/shardmapper_test.go b/pkg/logql/shardmapper_test.go
index 784301928583b..9bdd128b6e493 100644
--- a/pkg/logql/shardmapper_test.go
+++ b/pkg/logql/shardmapper_test.go
@@ -392,6 +392,38 @@ func TestMappingStrings(t *testing.T) {
)
)`,
},
+ {
+ in: `avg_over_time({job=~"myapps.*"} |= "stats" | json | keep busy | unwrap busy [5m])`,
+ out: `(
+ sum without() (
+ downstream<sum without() (sum_over_time({job=~"myapps.*"} |="stats" | json | keep busy | unwrap busy [5m])),shard=0_of_2>
+ ++
+ downstream<sum without() (sum_over_time({job=~"myapps.*"} |="stats" | json | keep busy | unwrap busy [5m])),shard=1_of_2>
+ )
+ /
+ sum without(busy) (
+ downstream<sum without(busy) (count_over_time({job=~"myapps.*"} |="stats" | json | keep busy [5m])),shard=0_of_2>
+ ++
+ downstream<sum without(busy) (count_over_time({job=~"myapps.*"} |="stats" | json | keep busy [5m])),shard=1_of_2>
+ )
+ )`,
+ },
+ {
+ in: `avg_over_time({job=~"myapps.*"} |= "stats" | json | keep busy | unwrap busy [5m]) without (foo)`,
+ out: `(
+ sum without(foo) (
+ downstream<sum without(foo) (sum_over_time({job=~"myapps.*"} |="stats" | json | keep busy | unwrap busy [5m])),shard=0_of_2>
+ ++
+ downstream<sum without(foo) (sum_over_time({job=~"myapps.*"} |="stats" | json | keep busy | unwrap busy [5m])),shard=1_of_2>
+ )
+ /
+ sum without(foo,busy) (
+ downstream<sum without(foo,busy) (count_over_time({job=~"myapps.*"} |="stats" | json | keep busy [5m])),shard=0_of_2>
+ ++
+ downstream<sum without(foo,busy) (count_over_time({job=~"myapps.*"} |="stats" | json | keep busy [5m])),shard=1_of_2>
+ )
+ )`,
+ },
// should be noop if VectorExpr
{
in: `vector(0)`,
|
fix
|
use without() grouping when merging `avg_over_time` shard results (#12176)
|
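The shardmapper change rests on a merge-semantics point: an average cannot be merged by averaging per-shard averages; it must be decomposed into `sum_over_time / count_over_time`, with matching `without()` groupings on both sides (hence appending the unwrap identifier to the RHS grouping). A small numeric sketch of why the decomposition is required:

```go
package main

import "fmt"

type shard struct{ sum, count float64 }

func main() {
	// Two shards with different sample counts. Averaging the per-shard
	// averages weights them equally and gives the wrong answer; merging
	// sums and counts separately recovers the true average.
	shards := []shard{{sum: 10, count: 1}, {sum: 2, count: 4}}

	avgOfAvgs, totalSum, totalCount := 0.0, 0.0, 0.0
	for _, s := range shards {
		avgOfAvgs += (s.sum / s.count) / float64(len(shards))
		totalSum += s.sum
		totalCount += s.count
	}

	fmt.Println(avgOfAvgs)             // 5.25 — incorrect merge
	fmt.Println(totalSum / totalCount) // 2.4  — sum/count, the correct average
}
```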
74d206bf286b00a7b0bf9f45d35bf25917316fef
|
2022-10-11 14:12:57
|
Bryan Boreham
|
chunks: improve readability of compression benchmarks (#7246)
| false
|
diff --git a/pkg/chunkenc/memchunk_test.go b/pkg/chunkenc/memchunk_test.go
index 613185cd6ec10..3ecea49eb6042 100644
--- a/pkg/chunkenc/memchunk_test.go
+++ b/pkg/chunkenc/memchunk_test.go
@@ -675,11 +675,7 @@ func TestIteratorClose(t *testing.T) {
}
}
-var result []Chunk
-
func BenchmarkWrite(b *testing.B) {
- chunks := []Chunk{}
-
entry := &logproto.Entry{
Timestamp: time.Unix(0, 0),
Line: testdata.LogString(0),
@@ -689,6 +685,7 @@ func BenchmarkWrite(b *testing.B) {
for _, f := range HeadBlockFmts {
for _, enc := range testEncoding {
b.Run(fmt.Sprintf("%v-%v", f, enc), func(b *testing.B) {
+ uncompressedBytes, compressedBytes := 0, 0
for n := 0; n < b.N; n++ {
c := NewMemChunk(enc, f, testBlockSize, testTargetSize)
// adds until full so we trigger cut which serialize using gzip
@@ -698,9 +695,11 @@ func BenchmarkWrite(b *testing.B) {
entry.Line = testdata.LogString(i)
i++
}
- chunks = append(chunks, c)
+ uncompressedBytes += c.UncompressedSize()
+ compressedBytes += c.CompressedSize()
}
- result = chunks
+ b.SetBytes(int64(uncompressedBytes) / int64(b.N))
+ b.ReportMetric(float64(compressedBytes)/float64(uncompressedBytes)*100, "%compressed")
})
}
}
@@ -717,23 +716,17 @@ func (nomatchPipeline) ProcessString(_ int64, line string) (string, log.LabelsRe
}
func BenchmarkRead(b *testing.B) {
- type res struct {
- name string
- speed float64
- }
- result := []res{}
for _, bs := range testBlockSizes {
for _, enc := range testEncoding {
name := fmt.Sprintf("%s_%s", enc.String(), humanize.Bytes(uint64(bs)))
b.Run(name, func(b *testing.B) {
chunks, size := generateData(enc, 5, bs, testTargetSize)
+ _, ctx := stats.NewContext(context.Background())
b.ResetTimer()
- bytesRead := uint64(0)
- now := time.Now()
for n := 0; n < b.N; n++ {
for _, c := range chunks {
// use forward iterator for benchmark -- backward iterator does extra allocations by keeping entries in memory
- iterator, err := c.Iterator(context.Background(), time.Unix(0, 0), time.Now(), logproto.FORWARD, nomatchPipeline{})
+ iterator, err := c.Iterator(ctx, time.Unix(0, 0), time.Now(), logproto.FORWARD, nomatchPipeline{})
if err != nil {
panic(err)
}
@@ -744,24 +737,23 @@ func BenchmarkRead(b *testing.B) {
b.Fatal(err)
}
}
- bytesRead += size
}
- result = append(result, res{
- name: name,
- speed: float64(bytesRead) / time.Since(now).Seconds(),
- })
+ b.SetBytes(int64(size))
})
+ }
+ }
- name = fmt.Sprintf("sample_%s_%s", enc.String(), humanize.Bytes(uint64(bs)))
-
+ for _, bs := range testBlockSizes {
+ for _, enc := range testEncoding {
+ name := fmt.Sprintf("sample_%s_%s", enc.String(), humanize.Bytes(uint64(bs)))
b.Run(name, func(b *testing.B) {
chunks, size := generateData(enc, 5, bs, testTargetSize)
+ _, ctx := stats.NewContext(context.Background())
b.ResetTimer()
bytesRead := uint64(0)
- now := time.Now()
for n := 0; n < b.N; n++ {
for _, c := range chunks {
- iterator := c.SampleIterator(context.Background(), time.Unix(0, 0), time.Now(), countExtractor)
+ iterator := c.SampleIterator(ctx, time.Unix(0, 0), time.Now(), countExtractor)
for iterator.Next() {
_ = iterator.Sample()
}
@@ -771,19 +763,10 @@ func BenchmarkRead(b *testing.B) {
}
bytesRead += size
}
- result = append(result, res{
- name: name,
- speed: float64(bytesRead) / time.Since(now).Seconds(),
- })
+ b.SetBytes(int64(bytesRead) / int64(b.N))
})
}
}
- sort.Slice(result, func(i, j int) bool {
- return result[i].speed > result[j].speed
- })
- for _, r := range result {
- fmt.Printf("%s: %.2f MB/s\n", r.name, r.speed/1024/1024)
- }
}
func BenchmarkBackwardIterator(b *testing.B) {
|
chunks
|
improve readability of compression benchmarks (#7246)
|
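The benchmark cleanup above replaces package-level result accumulation and manual printing with the standard `testing.B` reporting hooks: `b.SetBytes` for throughput and `b.ReportMetric` for a custom compression-ratio column. A minimal sketch of that pattern, assuming a stand-in gzip workload (save as a `_test.go` file and run with `go test -bench=.`):

```go
package bench

import (
	"bytes"
	"compress/gzip"
	"testing"
)

func BenchmarkCompress(b *testing.B) {
	line := bytes.Repeat([]byte("level=info msg=\"GET /api/v1/query\" status=200\n"), 1024)
	uncompressed, compressed := 0, 0
	for n := 0; n < b.N; n++ {
		var buf bytes.Buffer
		w := gzip.NewWriter(&buf)
		_, _ = w.Write(line)
		_ = w.Close()
		uncompressed += len(line)
		compressed += buf.Len()
	}
	// go test derives MB/s from SetBytes and prints %compressed as an
	// extra metric column, so no manual sorting or printing is needed.
	b.SetBytes(int64(uncompressed) / int64(b.N))
	b.ReportMetric(float64(compressed)/float64(uncompressed)*100, "%compressed")
}
```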
f20244d7ac579958f420badb9ce5f368c23e3b8c
|
2019-08-28 14:16:14
|
Robert Fratto
|
ksonnet: remove prefix from kvstore.consul settings in loki config (#946)
| false
|
diff --git a/production/ksonnet/loki/config.libsonnet b/production/ksonnet/loki/config.libsonnet
index 29bdf24caa146..260018a28baf4 100644
--- a/production/ksonnet/loki/config.libsonnet
+++ b/production/ksonnet/loki/config.libsonnet
@@ -117,7 +117,6 @@
store: 'consul',
consul: {
host: 'consul.%s.svc.cluster.local:8500' % $._config.namespace,
- prefix: '',
httpclienttimeout: '20s',
consistentreads: true,
},
@@ -144,7 +143,7 @@
service: 'memcached-client',
},
},
- } +
+ } +
(if std.count($._config.enabledBackends, 'gcs') > 0 then {
gcs: $._config.client_configs.gcs,
} else {}) +
|
ksonnet
|
remove prefix from kvstore.consul settings in loki config (#946)
|
e3b7bcb67d437e0e77b4dc16d8515af992d0f4d5
|
2023-08-10 10:42:38
|
Vardhaman Surana
|
ruler: add limit parameter in rulegroup (#10109)
| false
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 337aa88e29363..ab6301899a3d0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -10,6 +10,7 @@
##### Enhancements
+* [10109](https://github.com/grafana/loki/pull/10109) **vardhaman-surana**: Ruler: add limit parameter in rulegroup
* [9621](https://github.com/grafana/loki/pull/9621) **DylanGuedes**: Introduce TSDB postings cache.
* [10010](https://github.com/grafana/loki/pull/10010) **rasta-rocket**: feat(promtail): retrieve BotTags field from cloudflare
* [9995](https://github.com/grafana/loki/pull/9995) **chaudum**: Add jitter to the flush interval to prevent multiple ingesters to flush at the same time.
diff --git a/docs/sources/alert/_index.md b/docs/sources/alert/_index.md
index f7a91a7233444..45af9a8d43bcf 100644
--- a/docs/sources/alert/_index.md
+++ b/docs/sources/alert/_index.md
@@ -106,6 +106,43 @@ This query (`expr`) will be executed every 1 minute (`interval`), the result of
name we have defined (`record`). This metric named `nginx:requests:rate1m` can now be sent to Prometheus, where it will be stored
just like any other metric.
+
+### Limiting Alerts and Recording Rule Samples
+
+Like [Prometheus](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/#limiting-alerts-and-series), you can configure a limit for alerts produced by alerting rules and samples produced by recording rules. This limit can be configured per-group. Using limits can prevent a faulty rule from generating a large number of alerts or recording samples. When the limit is exceeded, all recording samples produced by the rule are discarded, and if it is an alerting rule, all alerts for the rule, active, pending, or inactive, are cleared. The event will be recorded as an error in the evaluation, and the rule health will be set to `err`. The default value for limit is `0` meaning no limit.
+
+#### Example
+
+Here is an example of a rule group along with its limit configured.
+
+
+
+```yaml
+groups:
+ - name: production_rules
+ limit: 10
+ interval: 1m
+ rules:
+ - alert: HighPercentageError
+ expr: |
+ sum(rate({app="foo", env="production"} |= "error" [5m])) by (job)
+ /
+ sum(rate({app="foo", env="production"}[5m])) by (job)
+ > 0.05
+ for: 10m
+ labels:
+ severity: page
+ annotations:
+ summary: High request latency
+ - record: nginx:requests:rate1m
+ expr: |
+ sum(
+ rate({container="nginx"}[1m])
+ )
+ labels:
+ cluster: "us-central1"
+```
+
### Remote-Write
With recording rules, you can run these metric queries continually on an interval, and have the resulting metrics written
diff --git a/pkg/ruler/base/api.go b/pkg/ruler/base/api.go
index 103470b34c1e6..0eb1c594d6361 100644
--- a/pkg/ruler/base/api.go
+++ b/pkg/ruler/base/api.go
@@ -67,6 +67,7 @@ type RuleGroup struct {
// same array.
Rules []rule `json:"rules"`
Interval float64 `json:"interval"`
+ Limit int64 `json:"limit"`
LastEvaluation time.Time `json:"lastEvaluation"`
EvaluationTime float64 `json:"evaluationTime"`
}
@@ -164,6 +165,7 @@ func (a *API) PrometheusRules(w http.ResponseWriter, req *http.Request) {
Interval: g.Group.Interval.Seconds(),
LastEvaluation: g.GetEvaluationTimestamp(),
EvaluationTime: g.GetEvaluationDuration().Seconds(),
+ Limit: g.Group.Limit,
}
for i, rl := range g.ActiveRules {
diff --git a/pkg/ruler/base/api_test.go b/pkg/ruler/base/api_test.go
index 4de5cc3876142..3af59b0fef64d 100644
--- a/pkg/ruler/base/api_test.go
+++ b/pkg/ruler/base/api_test.go
@@ -66,6 +66,7 @@ func TestRuler_rules(t *testing.T) {
},
},
Interval: 60,
+ Limit: 10,
},
},
},
@@ -121,6 +122,7 @@ func TestRuler_rules_special_characters(t *testing.T) {
},
},
Interval: 60,
+ Limit: 10,
},
},
},
@@ -223,6 +225,26 @@ rules:
`,
output: "name: test\ninterval: 15s\nrules:\n - record: up_rule\n expr: up{}\n - alert: up_alert\n expr: sum(up{}) > 1\n for: 30s\n labels:\n test: test\n annotations:\n test: test\n",
},
+ {
+ name: "with a a valid rules file with limit parameter",
+ status: 202,
+ input: `
+name: test
+interval: 15s
+limit: 10
+rules:
+- record: up_rule
+ expr: up{}
+- alert: up_alert
+ expr: sum(up{}) > 1
+ for: 30s
+ annotations:
+ test: test
+ labels:
+ test: test
+`,
+ output: "name: test\ninterval: 15s\nlimit: 10\nrules:\n - record: up_rule\n expr: up{}\n - alert: up_alert\n expr: sum(up{}) > 1\n for: 30s\n labels:\n test: test\n annotations:\n test: test\n",
+ },
}
for _, tt := range tc {
@@ -270,7 +292,7 @@ func TestRuler_DeleteNamespace(t *testing.T) {
router.ServeHTTP(w, req)
require.Equal(t, http.StatusOK, w.Code)
- require.Equal(t, "name: group1\ninterval: 1m\nrules:\n - record: UP_RULE\n expr: up\n - alert: UP_ALERT\n expr: up < 1\n", w.Body.String())
+ require.Equal(t, "name: group1\ninterval: 1m\nlimit: 10\nrules:\n - record: UP_RULE\n expr: up\n - alert: UP_ALERT\n expr: up < 1\n", w.Body.String())
// Delete namespace1
req = requestFor(t, http.MethodDelete, "https://localhost:8080/api/v1/rules/namespace1", nil, "user1")
diff --git a/pkg/ruler/base/manager_test.go b/pkg/ruler/base/manager_test.go
index 847623d7d0ea6..4ab0d7553b825 100644
--- a/pkg/ruler/base/manager_test.go
+++ b/pkg/ruler/base/manager_test.go
@@ -32,6 +32,7 @@ func TestSyncRuleGroups(t *testing.T) {
Namespace: "ns",
Interval: 1 * time.Minute,
User: user,
+ Limit: 10,
},
},
}
diff --git a/pkg/ruler/base/ruler.go b/pkg/ruler/base/ruler.go
index cfde1a23ddd8e..8758a7eaa679d 100644
--- a/pkg/ruler/base/ruler.go
+++ b/pkg/ruler/base/ruler.go
@@ -828,6 +828,7 @@ func (r *Ruler) getLocalRules(userID string) ([]*GroupStateDesc, error) {
Namespace: decodedNamespace,
Interval: interval,
User: userID,
+ Limit: int64(group.Limit()),
},
EvaluationTimestamp: group.GetLastEvaluation(),
diff --git a/pkg/ruler/base/ruler_test.go b/pkg/ruler/base/ruler_test.go
index f270c29178fd5..5a2723eeac32b 100644
--- a/pkg/ruler/base/ruler_test.go
+++ b/pkg/ruler/base/ruler_test.go
@@ -2,6 +2,7 @@ package base
import (
"context"
+ "errors"
"fmt"
"io"
"math/rand"
@@ -405,29 +406,29 @@ func TestGetRules(t *testing.T) {
expectedRules := expectedRulesMap{
"ruler1": map[string]rulespb.RuleGroupList{
"user1": {
- &rulespb.RuleGroupDesc{User: "user1", Namespace: "namespace", Name: "first", Interval: 10 * time.Second},
- &rulespb.RuleGroupDesc{User: "user1", Namespace: "namespace", Name: "second", Interval: 10 * time.Second},
+ &rulespb.RuleGroupDesc{User: "user1", Namespace: "namespace", Name: "first", Interval: 10 * time.Second, Limit: 10},
+ &rulespb.RuleGroupDesc{User: "user1", Namespace: "namespace", Name: "second", Interval: 10 * time.Second, Limit: 10},
},
"user2": {
- &rulespb.RuleGroupDesc{User: "user2", Namespace: "namespace", Name: "third", Interval: 10 * time.Second},
+ &rulespb.RuleGroupDesc{User: "user2", Namespace: "namespace", Name: "third", Interval: 10 * time.Second, Limit: 10},
},
},
"ruler2": map[string]rulespb.RuleGroupList{
"user1": {
- &rulespb.RuleGroupDesc{User: "user1", Namespace: "namespace", Name: "third", Interval: 10 * time.Second},
+ &rulespb.RuleGroupDesc{User: "user1", Namespace: "namespace", Name: "third", Interval: 10 * time.Second, Limit: 10},
},
"user2": {
- &rulespb.RuleGroupDesc{User: "user2", Namespace: "namespace", Name: "first", Interval: 10 * time.Second},
- &rulespb.RuleGroupDesc{User: "user2", Namespace: "namespace", Name: "second", Interval: 10 * time.Second},
+ &rulespb.RuleGroupDesc{User: "user2", Namespace: "namespace", Name: "first", Interval: 10 * time.Second, Limit: 10},
+ &rulespb.RuleGroupDesc{User: "user2", Namespace: "namespace", Name: "second", Interval: 10 * time.Second, Limit: 10},
},
},
"ruler3": map[string]rulespb.RuleGroupList{
"user3": {
- &rulespb.RuleGroupDesc{User: "user3", Namespace: "namespace", Name: "third", Interval: 10 * time.Second},
+ &rulespb.RuleGroupDesc{User: "user3", Namespace: "namespace", Name: "third", Interval: 10 * time.Second, Limit: 10},
},
"user2": {
- &rulespb.RuleGroupDesc{User: "user2", Namespace: "namespace", Name: "forth", Interval: 10 * time.Second},
- &rulespb.RuleGroupDesc{User: "user2", Namespace: "namespace", Name: "fifty", Interval: 10 * time.Second},
+ &rulespb.RuleGroupDesc{User: "user2", Namespace: "namespace", Name: "forth", Interval: 10 * time.Second, Limit: 10},
+ &rulespb.RuleGroupDesc{User: "user2", Namespace: "namespace", Name: "fifty", Interval: 10 * time.Second, Limit: 10},
},
},
}
@@ -1749,6 +1750,7 @@ func TestRecoverAlertsPostOutage(t *testing.T) {
},
},
Interval: interval,
+ Limit: limit,
},
},
}
@@ -1832,3 +1834,140 @@ func TestRecoverAlertsPostOutage(t *testing.T) {
require.Equal(t, promRules.StateFiring, promRules.AlertState(activeAlertRuleRaw.FieldByName("State").Int()))
}
+
+func TestRuleGroupAlertsAndSeriesLimit(t *testing.T) {
+
+ currentTime := time.Now().UTC()
+ seriesStartTime := currentTime.Add(time.Minute * -10)
+ sampleTimeDiff := 5 * time.Minute
+
+ testCases := []struct {
+ name string
+ rule *rulespb.RuleDesc
+ limit int64
+ expectedRuleHealth promRules.RuleHealth
+ expectedError error
+ }{
+ {
+ name: "AlertingRule alerts within limit",
+ rule: getMockRule("HIGH_HTTP_REQUESTS", "http_requests > 50", "", 2*time.Minute),
+ limit: 2,
+ expectedRuleHealth: promRules.HealthGood,
+ expectedError: nil,
+ },
+ {
+ name: "AlertingRule alerts with limit 0",
+ rule: getMockRule("HIGH_HTTP_REQUESTS", "http_requests > 50", "", 2*time.Minute),
+ limit: 0,
+ expectedRuleHealth: promRules.HealthGood,
+ expectedError: nil,
+ },
+ {
+ name: "AlertingRule alerts exceeding limit",
+ rule: getMockRule("HIGH_HTTP_REQUESTS", "http_requests > 50", "", 2*time.Minute),
+ limit: 1,
+ expectedRuleHealth: promRules.HealthBad,
+ expectedError: errors.New("exceeded limit of 1 with 2 alerts"),
+ },
+ {
+ name: "RecordingRule series within limit",
+ rule: getMockRule("", "sum by (instance) (http_requests)", "total_http_requests_per_instance", 0),
+ limit: 2,
+ expectedRuleHealth: promRules.HealthGood,
+ expectedError: nil,
+ },
+ {
+ name: "RecordingRule series with limit 0",
+ rule: getMockRule("", "sum by (instance) (http_requests)", "total_http_requests_per_instance", 0),
+ limit: 0,
+ expectedRuleHealth: promRules.HealthGood,
+ expectedError: nil,
+ },
+ {
+ name: "RecordingRule series exceeding limit",
+ rule: getMockRule("", "sum by (instance) (http_requests)", "total_http_requests_per_instance", 0),
+ limit: 1,
+ expectedRuleHealth: promRules.HealthBad,
+ expectedError: errors.New("exceeded limit of 1 with 2 series"),
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(tt *testing.T) {
+
+ mockRuleGroupList := map[string]rulespb.RuleGroupList{
+ "user1": {
+ &rulespb.RuleGroupDesc{
+ Name: "group1",
+ Namespace: "namespace1",
+ User: "user1",
+ Interval: interval,
+ Limit: tc.limit,
+ Rules: []*rulespb.RuleDesc{tc.rule},
+ },
+ },
+ }
+
+ rulerCfg := defaultRulerConfig(t, newMockRuleStore(mockRuleGroupList))
+ m := loki_storage.NewClientMetrics()
+ defer m.Unregister()
+
+ r := buildRuler(tt, rulerCfg, &fakeQuerier{
+ fn: func(sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet {
+ return series.NewConcreteSeriesSet([]storage.Series{
+ series.NewConcreteSeries(
+ labels.Labels{
+ {Name: labels.MetricName, Value: "http_requests"},
+ {Name: labels.InstanceName, Value: "server1"},
+ },
+ []model.SamplePair{
+ {Timestamp: model.Time(seriesStartTime.Add(sampleTimeDiff).UnixMilli()), Value: 100},
+ {Timestamp: model.Time(currentTime.UnixMilli()), Value: 100},
+ },
+ ),
+ series.NewConcreteSeries(
+ labels.Labels{
+ {Name: labels.MetricName, Value: "http_requests"},
+ {Name: labels.InstanceName, Value: "server2"},
+ },
+ []model.SamplePair{
+ {Timestamp: model.Time(seriesStartTime.Add(sampleTimeDiff).UnixMilli()), Value: 100},
+ {Timestamp: model.Time(currentTime.UnixMilli()), Value: 100},
+ },
+ ),
+ })
+ },
+ }, m, nil)
+
+ r.syncRules(context.Background(), rulerSyncReasonInitial)
+
+ // assert initial state of rule group
+ ruleGroup := r.manager.GetRules("user1")[0]
+ require.Equal(tt, time.Time{}, ruleGroup.GetLastEvaluation())
+ require.Equal(tt, "group1", ruleGroup.Name())
+ require.Equal(tt, 1, len(ruleGroup.Rules()))
+
+ // assert initial state of rule within rule group
+ rule := ruleGroup.Rules()[0]
+ require.Equal(tt, time.Time{}, rule.GetEvaluationTimestamp())
+ require.Equal(tt, promRules.HealthUnknown, rule.Health())
+
+ // evaluate the rule group the first time and assert
+ ctx := user.InjectOrgID(context.Background(), "user1")
+ ruleGroup.Eval(ctx, currentTime)
+
+ require.Equal(tt, tc.expectedRuleHealth, rule.Health())
+ require.Equal(tt, tc.expectedError, rule.LastError())
+ })
+
+ }
+}
+
+func getMockRule(alert, expr, record string, forDuration time.Duration) *rulespb.RuleDesc {
+ return &rulespb.RuleDesc{
+ Alert: alert,
+ Expr: expr,
+ For: forDuration,
+ Record: record,
+ }
+}
diff --git a/pkg/ruler/base/store_mock_test.go b/pkg/ruler/base/store_mock_test.go
index 9b41645f86f9e..4cbecfee30cc0 100644
--- a/pkg/ruler/base/store_mock_test.go
+++ b/pkg/ruler/base/store_mock_test.go
@@ -19,6 +19,7 @@ type mockRuleStore struct {
var (
delim = "/"
interval, _ = time.ParseDuration("1m")
+ limit = int64(10)
mockRulesNamespaces = map[string]rulespb.RuleGroupList{
"user1": {
&rulespb.RuleGroupDesc{
@@ -36,6 +37,7 @@ var (
},
},
Interval: interval,
+ Limit: limit,
},
&rulespb.RuleGroupDesc{
Name: "fail",
@@ -52,6 +54,7 @@ var (
},
},
Interval: interval,
+ Limit: limit,
},
},
}
@@ -72,6 +75,7 @@ var (
},
},
Interval: interval,
+ Limit: limit,
},
},
"user2": {
@@ -86,6 +90,7 @@ var (
},
},
Interval: interval,
+ Limit: limit,
},
},
}
@@ -107,6 +112,7 @@ var (
},
},
Interval: interval,
+ Limit: limit,
},
},
}
@@ -141,6 +147,7 @@ func (m *mockRuleStore) ListAllRuleGroups(_ context.Context) (map[string]rulespb
Name: r.Name,
User: k,
Interval: r.Interval,
+ Limit: r.Limit,
Rules: r.Rules,
})
}
@@ -164,6 +171,7 @@ func (m *mockRuleStore) ListRuleGroupsForUserAndNamespace(_ context.Context, use
Name: r.Name,
User: userID,
Interval: r.Interval,
+ Limit: r.Limit,
Rules: r.Rules,
})
}
diff --git a/pkg/ruler/rulespb/compat.go b/pkg/ruler/rulespb/compat.go
index db1aca113c7ef..a0da3dc014f46 100644
--- a/pkg/ruler/rulespb/compat.go
+++ b/pkg/ruler/rulespb/compat.go
@@ -19,6 +19,7 @@ func ToProto(user string, namespace string, rl rulefmt.RuleGroup) *RuleGroupDesc
Interval: time.Duration(rl.Interval),
Rules: formattedRuleToProto(rl.Rules),
User: user,
+ Limit: int64(rl.Limit),
}
return &rg
}
@@ -45,6 +46,7 @@ func FromProto(rg *RuleGroupDesc) rulefmt.RuleGroup {
Name: rg.GetName(),
Interval: model.Duration(rg.Interval),
Rules: make([]rulefmt.RuleNode, len(rg.GetRules())),
+ Limit: int(rg.GetLimit()),
}
for i, rl := range rg.GetRules() {
diff --git a/pkg/ruler/rulespb/rules.pb.go b/pkg/ruler/rulespb/rules.pb.go
index fd549dd7a9a72..ead0d482791be 100644
--- a/pkg/ruler/rulespb/rules.pb.go
+++ b/pkg/ruler/rulespb/rules.pb.go
@@ -44,6 +44,7 @@ type RuleGroupDesc struct {
// to create custom `ManagerOpts` based on rule configs which can then be passed
// to the Prometheus Manager.
Options []*types.Any `protobuf:"bytes,9,rep,name=options,proto3" json:"options,omitempty"`
+ Limit int64 `protobuf:"varint,10,opt,name=limit,proto3" json:"limit,omitempty"`
}
func (m *RuleGroupDesc) Reset() { *m = RuleGroupDesc{} }
@@ -120,6 +121,13 @@ func (m *RuleGroupDesc) GetOptions() []*types.Any {
return nil
}
+func (m *RuleGroupDesc) GetLimit() int64 {
+ if m != nil {
+ return m.Limit
+ }
+ return 0
+}
+
// RuleDesc is a proto representation of a Prometheus Rule
type RuleDesc struct {
Expr string `protobuf:"bytes,1,opt,name=expr,proto3" json:"expr,omitempty"`
@@ -198,38 +206,39 @@ func init() {
func init() { proto.RegisterFile("pkg/ruler/rulespb/rules.proto", fileDescriptor_dd3ef3757f506fba) }
var fileDescriptor_dd3ef3757f506fba = []byte{
- // 487 bytes of a gzipped FileDescriptorProto
+ // 501 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x53, 0x41, 0x6f, 0xd3, 0x30,
- 0x18, 0x8d, 0xd7, 0x34, 0x4b, 0x5d, 0x4d, 0x54, 0x56, 0x85, 0xd2, 0x01, 0x6e, 0x35, 0x09, 0xa9,
- 0x5c, 0x1c, 0x31, 0xc4, 0x81, 0x13, 0x5a, 0x35, 0x09, 0xa9, 0xda, 0x01, 0xe5, 0xc8, 0x05, 0x39,
- 0xa9, 0x6b, 0xa2, 0x79, 0x71, 0xe4, 0x24, 0x13, 0xbd, 0xf1, 0x13, 0x38, 0xf2, 0x13, 0xf8, 0x29,
- 0x3b, 0xf6, 0x38, 0x71, 0x18, 0x34, 0xbd, 0x70, 0x63, 0xff, 0x00, 0x64, 0x3b, 0x19, 0x03, 0x24,
- 0xc4, 0x85, 0x4b, 0xfc, 0x3e, 0x3f, 0x7f, 0x7e, 0xef, 0x7d, 0x56, 0xe0, 0x83, 0xfc, 0x94, 0x87,
- 0xaa, 0x12, 0x4c, 0x99, 0x6f, 0x91, 0xc7, 0x76, 0x25, 0xb9, 0x92, 0xa5, 0x44, 0x5d, 0x53, 0xec,
- 0x0f, 0xb9, 0xe4, 0xd2, 0xec, 0x84, 0x1a, 0x59, 0x72, 0x7f, 0xc4, 0xa5, 0xe4, 0x82, 0x85, 0xa6,
- 0x8a, 0xab, 0x65, 0x48, 0xb3, 0x55, 0x43, 0xe1, 0xdf, 0xa9, 0x45, 0xa5, 0x68, 0x99, 0xca, 0xac,
- 0xe1, 0xef, 0x69, 0x59, 0x21, 0xb9, 0xbd, 0xb3, 0x05, 0x96, 0x3c, 0xf8, 0x0e, 0xe0, 0x5e, 0x54,
- 0x09, 0xf6, 0x42, 0xc9, 0x2a, 0x3f, 0x66, 0x45, 0x82, 0x10, 0x74, 0x33, 0x7a, 0xc6, 0x02, 0x30,
- 0x01, 0xd3, 0x5e, 0x64, 0x30, 0xba, 0x0f, 0x7b, 0x7a, 0x2d, 0x72, 0x9a, 0xb0, 0x60, 0xc7, 0x10,
- 0x3f, 0x37, 0xd0, 0x73, 0xe8, 0xa7, 0x59, 0xc9, 0xd4, 0x39, 0x15, 0x41, 0x67, 0x02, 0xa6, 0xfd,
- 0xc3, 0x11, 0xb1, 0x9e, 0x48, 0xeb, 0x89, 0x1c, 0x37, 0x9e, 0x66, 0xfe, 0xc5, 0xd5, 0xd8, 0xf9,
- 0xf0, 0x79, 0x0c, 0xa2, 0x9b, 0x26, 0xf4, 0x10, 0xda, 0xec, 0x81, 0x3b, 0xe9, 0x4c, 0xfb, 0x87,
- 0x77, 0x88, 0x1d, 0x8b, 0xf6, 0xa5, 0x2d, 0x45, 0x96, 0xd5, 0xce, 0xaa, 0x82, 0xa9, 0xc0, 0xb3,
- 0xce, 0x34, 0x46, 0x04, 0xee, 0xca, 0x5c, 0x5f, 0x5c, 0x04, 0x3d, 0xd3, 0x3c, 0xfc, 0x43, 0xfa,
- 0x28, 0x5b, 0x45, 0xed, 0xa1, 0xb9, 0xeb, 0x77, 0x07, 0xde, 0xdc, 0xf5, 0x77, 0x07, 0xfe, 0xdc,
- 0xf5, 0xfd, 0x41, 0xef, 0xe0, 0xdb, 0x0e, 0xf4, 0x5b, 0x25, 0x2d, 0xc1, 0xde, 0xe6, 0xaa, 0x0d,
- 0xaf, 0x31, 0xba, 0x0b, 0x3d, 0xc5, 0x12, 0xa9, 0x16, 0x4d, 0xf2, 0xa6, 0x42, 0x43, 0xd8, 0xa5,
- 0x82, 0xa9, 0xd2, 0x64, 0xee, 0x45, 0xb6, 0x40, 0x4f, 0x61, 0x67, 0x29, 0x55, 0xe0, 0xfe, 0xfb,
- 0x1c, 0xf4, 0x79, 0x24, 0xa0, 0x27, 0x68, 0xcc, 0x44, 0x11, 0x74, 0x4d, 0x8c, 0x11, 0xb9, 0x79,
- 0xa8, 0x13, 0xc6, 0x69, 0xb2, 0x3a, 0xd1, 0xec, 0x4b, 0x9a, 0xaa, 0xd9, 0x33, 0xdd, 0xf9, 0xe9,
- 0x6a, 0xfc, 0x98, 0xa7, 0xe5, 0x9b, 0x2a, 0x26, 0x89, 0x3c, 0x0b, 0xb9, 0xa2, 0x4b, 0x9a, 0xd1,
- 0x50, 0xc8, 0xd3, 0x34, 0xbc, 0xfd, 0xde, 0xc4, 0xf4, 0x1d, 0x2d, 0x68, 0x5e, 0x32, 0x15, 0x35,
- 0x1a, 0xe8, 0x1c, 0xf6, 0x69, 0x96, 0xc9, 0x92, 0xda, 0xc9, 0x79, 0xff, 0x51, 0xf2, 0xb6, 0x90,
- 0x99, 0xfb, 0xde, 0xec, 0xf5, 0x7a, 0x83, 0x9d, 0xcb, 0x0d, 0x76, 0xae, 0x37, 0x18, 0xbc, 0xab,
- 0x31, 0xf8, 0x58, 0x63, 0x70, 0x51, 0x63, 0xb0, 0xae, 0x31, 0xf8, 0x52, 0x63, 0xf0, 0xb5, 0xc6,
- 0xce, 0x75, 0x8d, 0xc1, 0xfb, 0x2d, 0x76, 0xd6, 0x5b, 0xec, 0x5c, 0x6e, 0xb1, 0xf3, 0xea, 0xd1,
- 0xdf, 0xb4, 0x7f, 0xf9, 0xab, 0x62, 0xcf, 0xf8, 0x78, 0xf2, 0x23, 0x00, 0x00, 0xff, 0xff, 0x37,
- 0xde, 0xfd, 0x7d, 0x71, 0x03, 0x00, 0x00,
+ 0x18, 0x8d, 0xdb, 0x34, 0x4b, 0x5c, 0x4d, 0x54, 0x56, 0x85, 0xd2, 0x01, 0x6e, 0x35, 0x09, 0xa9,
+ 0x5c, 0x12, 0x31, 0xc4, 0x81, 0x13, 0x5a, 0x35, 0x09, 0xa9, 0xda, 0x01, 0xe5, 0xc8, 0x05, 0x39,
+ 0xa9, 0x6b, 0xa2, 0xb9, 0x71, 0xe4, 0x24, 0x13, 0xbd, 0xf1, 0x13, 0xb8, 0x20, 0xf1, 0x13, 0xf8,
+ 0x29, 0x3b, 0xf6, 0x38, 0x71, 0x18, 0x34, 0xbd, 0x70, 0x63, 0x3f, 0x01, 0xd9, 0x4e, 0xc6, 0x00,
+ 0x09, 0x71, 0xe1, 0x12, 0x7f, 0xcf, 0xcf, 0x9f, 0xdf, 0xfb, 0x5e, 0x12, 0xf8, 0x20, 0x3f, 0x63,
+ 0xa1, 0xac, 0x38, 0x95, 0xfa, 0x59, 0xe4, 0xb1, 0x59, 0x83, 0x5c, 0x8a, 0x52, 0xa0, 0x9e, 0x06,
+ 0x07, 0x43, 0x26, 0x98, 0xd0, 0x3b, 0xa1, 0xaa, 0x0c, 0x79, 0x30, 0x62, 0x42, 0x30, 0x4e, 0x43,
+ 0x8d, 0xe2, 0x6a, 0x19, 0x92, 0x6c, 0xdd, 0x50, 0xf8, 0x77, 0x6a, 0x51, 0x49, 0x52, 0xa6, 0x22,
+ 0x6b, 0xf8, 0x7b, 0x4a, 0x96, 0x0b, 0x66, 0xee, 0x6c, 0x0b, 0x43, 0x1e, 0x7e, 0xe8, 0xc0, 0xfd,
+ 0xa8, 0xe2, 0xf4, 0x85, 0x14, 0x55, 0x7e, 0x42, 0x8b, 0x04, 0x21, 0x68, 0x67, 0x64, 0x45, 0x7d,
+ 0x30, 0x01, 0x53, 0x2f, 0xd2, 0x35, 0xba, 0x0f, 0x3d, 0xb5, 0x16, 0x39, 0x49, 0xa8, 0xdf, 0xd1,
+ 0xc4, 0xcf, 0x0d, 0xf4, 0x1c, 0xba, 0x69, 0x56, 0x52, 0x79, 0x4e, 0xb8, 0xdf, 0x9d, 0x80, 0x69,
+ 0xff, 0x68, 0x14, 0x18, 0x4f, 0x41, 0xeb, 0x29, 0x38, 0x69, 0x3c, 0xcd, 0xdc, 0x8b, 0xab, 0xb1,
+ 0xf5, 0xf1, 0xcb, 0x18, 0x44, 0x37, 0x4d, 0xe8, 0x21, 0x34, 0xb3, 0xfb, 0xf6, 0xa4, 0x3b, 0xed,
+ 0x1f, 0xdd, 0x09, 0x4c, 0x2c, 0xca, 0x97, 0xb2, 0x14, 0x19, 0x56, 0x39, 0xab, 0x0a, 0x2a, 0x7d,
+ 0xc7, 0x38, 0x53, 0x35, 0x0a, 0xe0, 0x9e, 0xc8, 0xd5, 0xc5, 0x85, 0xef, 0xe9, 0xe6, 0xe1, 0x1f,
+ 0xd2, 0xc7, 0xd9, 0x3a, 0x6a, 0x0f, 0xa1, 0x21, 0xec, 0xf1, 0x74, 0x95, 0x96, 0x3e, 0x9c, 0x80,
+ 0x69, 0x37, 0x32, 0x60, 0x6e, 0xbb, 0xbd, 0x81, 0x33, 0xb7, 0xdd, 0xbd, 0x81, 0x3b, 0xb7, 0x5d,
+ 0x77, 0xe0, 0x1d, 0x7e, 0xef, 0x40, 0xb7, 0xd5, 0x57, 0xc2, 0xf4, 0x6d, 0x2e, 0xdb, 0x48, 0x54,
+ 0x8d, 0xee, 0x42, 0x47, 0xd2, 0x44, 0xc8, 0x45, 0x93, 0x47, 0x83, 0x94, 0x00, 0xe1, 0x54, 0x96,
+ 0x3a, 0x09, 0x2f, 0x32, 0x00, 0x3d, 0x85, 0xdd, 0xa5, 0x90, 0xbe, 0xfd, 0xef, 0xe9, 0xa8, 0xf3,
+ 0x88, 0x43, 0x87, 0x93, 0x98, 0xf2, 0xc2, 0xef, 0xe9, 0xe1, 0x46, 0xc1, 0xcd, 0xeb, 0x3b, 0xa5,
+ 0x8c, 0x24, 0xeb, 0x53, 0xc5, 0xbe, 0x24, 0xa9, 0x9c, 0x3d, 0x53, 0x9d, 0x9f, 0xaf, 0xc6, 0x8f,
+ 0x59, 0x5a, 0xbe, 0xa9, 0xe2, 0x20, 0x11, 0xab, 0x90, 0x49, 0xb2, 0x24, 0x19, 0x09, 0xb9, 0x38,
+ 0x4b, 0xc3, 0xdb, 0x5f, 0x41, 0xa0, 0xfb, 0x8e, 0x17, 0x24, 0x2f, 0xa9, 0x8c, 0x1a, 0x0d, 0x74,
+ 0x0e, 0xfb, 0x24, 0xcb, 0x44, 0x49, 0x4c, 0x9e, 0xce, 0x7f, 0x94, 0xbc, 0x2d, 0xa4, 0x73, 0xdf,
+ 0x9f, 0xbd, 0xde, 0x6c, 0xb1, 0x75, 0xb9, 0xc5, 0xd6, 0xf5, 0x16, 0x83, 0x77, 0x35, 0x06, 0x9f,
+ 0x6a, 0x0c, 0x2e, 0x6a, 0x0c, 0x36, 0x35, 0x06, 0x5f, 0x6b, 0x0c, 0xbe, 0xd5, 0xd8, 0xba, 0xae,
+ 0x31, 0x78, 0xbf, 0xc3, 0xd6, 0x66, 0x87, 0xad, 0xcb, 0x1d, 0xb6, 0x5e, 0x3d, 0xfa, 0x9b, 0xf6,
+ 0x2f, 0xff, 0x5a, 0xec, 0x68, 0x1f, 0x4f, 0x7e, 0x04, 0x00, 0x00, 0xff, 0xff, 0x6f, 0xf3, 0x96,
+ 0x82, 0x87, 0x03, 0x00, 0x00,
}
func (this *RuleGroupDesc) Equal(that interface{}) bool {
@@ -279,6 +288,9 @@ func (this *RuleGroupDesc) Equal(that interface{}) bool {
return false
}
}
+ if this.Limit != that1.Limit {
+ return false
+ }
return true
}
func (this *RuleDesc) Equal(that interface{}) bool {
@@ -334,7 +346,7 @@ func (this *RuleGroupDesc) GoString() string {
if this == nil {
return "nil"
}
- s := make([]string, 0, 10)
+ s := make([]string, 0, 11)
s = append(s, "&rulespb.RuleGroupDesc{")
s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n")
s = append(s, "Namespace: "+fmt.Sprintf("%#v", this.Namespace)+",\n")
@@ -346,6 +358,7 @@ func (this *RuleGroupDesc) GoString() string {
if this.Options != nil {
s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
}
+ s = append(s, "Limit: "+fmt.Sprintf("%#v", this.Limit)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
@@ -392,6 +405,11 @@ func (m *RuleGroupDesc) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ if m.Limit != 0 {
+ i = encodeVarintRules(dAtA, i, uint64(m.Limit))
+ i--
+ dAtA[i] = 0x50
+ }
if len(m.Options) > 0 {
for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- {
{
@@ -575,6 +593,9 @@ func (m *RuleGroupDesc) Size() (n int) {
n += 1 + l + sovRules(uint64(l))
}
}
+ if m.Limit != 0 {
+ n += 1 + sovRules(uint64(m.Limit))
+ }
return n
}
@@ -640,6 +661,7 @@ func (this *RuleGroupDesc) String() string {
`Rules:` + repeatedStringForRules + `,`,
`User:` + fmt.Sprintf("%v", this.User) + `,`,
`Options:` + repeatedStringForOptions + `,`,
+ `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`,
`}`,
}, "")
return s
@@ -893,6 +915,25 @@ func (m *RuleGroupDesc) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 10:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType)
+ }
+ m.Limit = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRules
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Limit |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
default:
iNdEx = preIndex
skippy, err := skipRules(dAtA[iNdEx:])
diff --git a/pkg/ruler/rulespb/rules.proto b/pkg/ruler/rulespb/rules.proto
index 475843706160b..be001d238d1d0 100644
--- a/pkg/ruler/rulespb/rules.proto
+++ b/pkg/ruler/rulespb/rules.proto
@@ -27,6 +27,7 @@ message RuleGroupDesc {
// to create custom `ManagerOpts` based on rule configs which can then be passed
// to the Prometheus Manager.
repeated google.protobuf.Any options = 9;
+ int64 limit = 10;
}
// RuleDesc is a proto representation of a Prometheus Rule
diff --git a/pkg/ruler/rulestore/bucketclient/bucket_client_test.go b/pkg/ruler/rulestore/bucketclient/bucket_client_test.go
index 3671313d4fb6b..360b7aedb221b 100644
--- a/pkg/ruler/rulestore/bucketclient/bucket_client_test.go
+++ b/pkg/ruler/rulestore/bucketclient/bucket_client_test.go
@@ -109,10 +109,10 @@ func TestLoadRules(t *testing.T) {
{user: "user1", namespace: "hello", ruleGroup: rulefmt.RuleGroup{Name: "first testGroup", Interval: model.Duration(time.Minute), Rules: []rulefmt.RuleNode{{
For: model.Duration(5 * time.Minute),
Labels: map[string]string{"label1": "value1"},
- }}}},
- {user: "user1", namespace: "hello", ruleGroup: rulefmt.RuleGroup{Name: "second testGroup", Interval: model.Duration(2 * time.Minute)}},
- {user: "user1", namespace: "world", ruleGroup: rulefmt.RuleGroup{Name: "another namespace testGroup", Interval: model.Duration(1 * time.Hour)}},
- {user: "user2", namespace: "+-!@#$%. ", ruleGroup: rulefmt.RuleGroup{Name: "different user", Interval: model.Duration(5 * time.Minute)}},
+ }}, Limit: 10}},
+ {user: "user1", namespace: "hello", ruleGroup: rulefmt.RuleGroup{Name: "second testGroup", Interval: model.Duration(2 * time.Minute), Limit: 0}},
+ {user: "user1", namespace: "world", ruleGroup: rulefmt.RuleGroup{Name: "another namespace testGroup", Interval: model.Duration(1 * time.Hour), Limit: 1}},
+ {user: "user2", namespace: "+-!@#$%. ", ruleGroup: rulefmt.RuleGroup{Name: "different user", Interval: model.Duration(5 * time.Minute), Limit: -1}},
}
for _, g := range groups {
@@ -150,13 +150,13 @@ func TestLoadRules(t *testing.T) {
For: 5 * time.Minute,
Labels: []logproto.LabelAdapter{{Name: "label1", Value: "value1"}},
},
- }},
- {User: "user1", Namespace: "hello", Name: "second testGroup", Interval: 2 * time.Minute},
- {User: "user1", Namespace: "world", Name: "another namespace testGroup", Interval: 1 * time.Hour},
+ }, Limit: 10},
+ {User: "user1", Namespace: "hello", Name: "second testGroup", Interval: 2 * time.Minute, Limit: 0},
+ {User: "user1", Namespace: "world", Name: "another namespace testGroup", Interval: 1 * time.Hour, Limit: 1},
}, allGroupsMap["user1"])
require.ElementsMatch(t, []*rulespb.RuleGroupDesc{
- {User: "user2", Namespace: "+-!@#$%. ", Name: "different user", Interval: 5 * time.Minute},
+ {User: "user2", Namespace: "+-!@#$%. ", Name: "different user", Interval: 5 * time.Minute, Limit: -1},
}, allGroupsMap["user2"])
}
|
ruler
|
add limit parameter in rulegroup (#10109)
|
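An editor's note on the generated code in the diff above: `rules.pb.go` hand-rolls the protobuf wire format, so the new `limit` field shows up as the literal tag byte `0x50`, i.e. `(10<<3)|0`: field number 10 with the varint wire type. Below is a minimal sketch of that encoding and of the shift loop added to `RuleGroupDesc.Unmarshal`, assuming only standard proto3 varint rules; none of Loki's generated types are used.

```go
package main

import "fmt"

// encodeVarint appends v as base-128 groups of 7 bits, low bits first,
// mirroring what encodeVarintRules does in the generated code.
func encodeVarint(buf []byte, v uint64) []byte {
	for v >= 0x80 {
		buf = append(buf, byte(v)|0x80)
		v >>= 7
	}
	return append(buf, byte(v))
}

// decodeVarint mirrors the shift loop added to RuleGroupDesc.Unmarshal
// for `case 10`.
func decodeVarint(buf []byte) (v uint64, n int) {
	for i, b := range buf {
		v |= uint64(b&0x7F) << (7 * uint(i))
		if b < 0x80 {
			return v, i + 1
		}
	}
	return 0, 0
}

func main() {
	limit := int64(10)
	// Key byte 0x50 = (field 10 << 3) | wire type 0, then the varint value.
	wire := encodeVarint([]byte{0x50}, uint64(limit))
	fmt.Printf("wire bytes: %x\n", wire) // 500a
	v, _ := decodeVarint(wire[1:])
	fmt.Println("decoded limit:", int64(v)) // 10
}
```

This is also why the new `Size()` branch adds `1 + sovRules(uint64(m.Limit))`: one key byte plus however many varint bytes the value needs.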
c158b2c5f5107dc710575c05f5db1889e151114d
|
2022-04-05 22:14:23
|
Ed Welch
|
loki: Revert #4845 which changed the format of errors from the API (#5772)
| false
|
diff --git a/docs/sources/upgrading/_index.md b/docs/sources/upgrading/_index.md
index 3d1b862f6889d..e8d1ddb7e4efe 100644
--- a/docs/sources/upgrading/_index.md
+++ b/docs/sources/upgrading/_index.md
@@ -84,22 +84,6 @@ Meanwhile, the legacy format is a string in the following format:
[ ANNOTATIONS <label set> ]
```
-#### Error responses from API
-
-The body of HTTP error responses from API endpoints changed from plain text to
-JSON. The `Content-Type` header was previously already set incorrectly to
-`application/json`. Therefore returning JSON fixes this inconsistency.
-
-The response body has the following schema:
-
-```json
-{
- "code": <http status code>,
- "message": "<error message>",
- "status": "error"
-}
-```
-
#### Changes to default configuration values
* `parallelise_shardable_queries` under the `query_range` config now defaults to `true`.
diff --git a/pkg/distributor/http.go b/pkg/distributor/http.go
index dbbdb17470121..f9224d8c43116 100644
--- a/pkg/distributor/http.go
+++ b/pkg/distributor/http.go
@@ -13,7 +13,6 @@ import (
"github.com/grafana/loki/pkg/loghttp/push"
util_log "github.com/grafana/loki/pkg/util/log"
- serverutil "github.com/grafana/loki/pkg/util/server"
"github.com/grafana/loki/pkg/validation"
)
@@ -30,7 +29,7 @@ func (d *Distributor) PushHandler(w http.ResponseWriter, r *http.Request) {
"err", err,
)
}
- serverutil.JSONError(w, http.StatusBadRequest, err.Error())
+ http.Error(w, err.Error(), http.StatusBadRequest)
return
}
@@ -66,7 +65,7 @@ func (d *Distributor) PushHandler(w http.ResponseWriter, r *http.Request) {
"err", body,
)
}
- serverutil.JSONError(w, int(resp.Code), body)
+ http.Error(w, body, int(resp.Code))
} else {
if d.tenantConfigs.LogPushRequest(userID) {
level.Debug(logger).Log(
@@ -75,7 +74,7 @@ func (d *Distributor) PushHandler(w http.ResponseWriter, r *http.Request) {
"err", err.Error(),
)
}
- serverutil.JSONError(w, http.StatusInternalServerError, err.Error())
+ http.Error(w, err.Error(), http.StatusInternalServerError)
}
}
diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go
index d5f168a413660..17cdad07abae9 100644
--- a/pkg/loki/loki.go
+++ b/pkg/loki/loki.go
@@ -343,7 +343,6 @@ func (t *Loki) Run(opts RunOpts) error {
t.serviceMap = serviceMap
t.Server.HTTP.Path("/services").Methods("GET").Handler(http.HandlerFunc(t.servicesHandler))
- t.Server.HTTP.NotFoundHandler = http.HandlerFunc(serverutil.NotFoundHandler)
// get all services, create service manager and tell it to start
var servs []services.Service
diff --git a/pkg/lokifrontend/frontend/transport/handler.go b/pkg/lokifrontend/frontend/transport/handler.go
index 03e83d9ff964a..7f42084b43dfc 100644
--- a/pkg/lokifrontend/frontend/transport/handler.go
+++ b/pkg/lokifrontend/frontend/transport/handler.go
@@ -17,13 +17,14 @@ import (
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
+ "github.com/weaveworks/common/httpgrpc"
+ "github.com/weaveworks/common/httpgrpc/server"
"github.com/grafana/dskit/tenant"
querier_stats "github.com/grafana/loki/pkg/querier/stats"
"github.com/grafana/loki/pkg/util"
util_log "github.com/grafana/loki/pkg/util/log"
- serverutil "github.com/grafana/loki/pkg/util/server"
)
const (
@@ -32,6 +33,12 @@ const (
ServiceTimingHeaderName = "Server-Timing"
)
+var (
+ errCanceled = httpgrpc.Errorf(StatusClientClosedRequest, context.Canceled.Error())
+ errDeadlineExceeded = httpgrpc.Errorf(http.StatusGatewayTimeout, context.DeadlineExceeded.Error())
+ errRequestEntityTooLarge = httpgrpc.Errorf(http.StatusRequestEntityTooLarge, "http: request body too large")
+)
+
// Config for a Handler.
type HandlerConfig struct {
LogQueriesLongerThan time.Duration `yaml:"log_queries_longer_than"`
@@ -220,11 +227,17 @@ func formatQueryString(queryString url.Values) (fields []interface{}) {
}
func writeError(w http.ResponseWriter, err error) {
- if util.IsRequestBodyTooLarge(err) {
- serverutil.JSONError(w, http.StatusRequestEntityTooLarge, "http: request body too large")
- return
+ switch err {
+ case context.Canceled:
+ err = errCanceled
+ case context.DeadlineExceeded:
+ err = errDeadlineExceeded
+ default:
+ if util.IsRequestBodyTooLarge(err) {
+ err = errRequestEntityTooLarge
+ }
}
- serverutil.WriteError(err, w)
+ server.WriteError(w, err)
}
func writeServiceTimingHeader(queryResponseTime time.Duration, headers http.Header, stats *querier_stats.Stats) {
diff --git a/pkg/storage/stores/shipper/compactor/deletion/request_handler.go b/pkg/storage/stores/shipper/compactor/deletion/request_handler.go
index e61071afbb756..95fee6886d30e 100644
--- a/pkg/storage/stores/shipper/compactor/deletion/request_handler.go
+++ b/pkg/storage/stores/shipper/compactor/deletion/request_handler.go
@@ -2,6 +2,7 @@ package deletion
import (
"encoding/json"
+ "fmt"
"net/http"
"time"
@@ -14,7 +15,6 @@ import (
"github.com/grafana/loki/pkg/util"
util_log "github.com/grafana/loki/pkg/util/log"
- serverutil "github.com/grafana/loki/pkg/util/server"
)
// DeleteRequestHandler provides handlers for delete requests
@@ -40,21 +40,21 @@ func (dm *DeleteRequestHandler) AddDeleteRequestHandler(w http.ResponseWriter, r
ctx := r.Context()
userID, err := tenant.TenantID(ctx)
if err != nil {
- serverutil.JSONError(w, http.StatusBadRequest, err.Error())
+ http.Error(w, err.Error(), http.StatusBadRequest)
return
}
params := r.URL.Query()
match := params["match[]"]
if len(match) == 0 {
- serverutil.JSONError(w, http.StatusBadRequest, "selectors not set")
+ http.Error(w, "selectors not set", http.StatusBadRequest)
return
}
for i := range match {
_, err := parser.ParseMetricSelector(match[i])
if err != nil {
- serverutil.JSONError(w, http.StatusBadRequest, err.Error())
+ http.Error(w, err.Error(), http.StatusBadRequest)
return
}
}
@@ -64,7 +64,7 @@ func (dm *DeleteRequestHandler) AddDeleteRequestHandler(w http.ResponseWriter, r
if startParam != "" {
startTime, err = util.ParseTime(startParam)
if err != nil {
- serverutil.JSONError(w, http.StatusBadRequest, err.Error())
+ http.Error(w, err.Error(), http.StatusBadRequest)
return
}
}
@@ -75,12 +75,12 @@ func (dm *DeleteRequestHandler) AddDeleteRequestHandler(w http.ResponseWriter, r
if endParam != "" {
endTime, err = util.ParseTime(endParam)
if err != nil {
- serverutil.JSONError(w, http.StatusBadRequest, err.Error())
+ http.Error(w, err.Error(), http.StatusBadRequest)
return
}
if endTime > int64(model.Now()) {
- serverutil.JSONError(w, http.StatusBadRequest, "deletes in future not allowed")
+ http.Error(w, "deletes in future not allowed", http.StatusBadRequest)
return
}
}
@@ -92,7 +92,7 @@ func (dm *DeleteRequestHandler) AddDeleteRequestHandler(w http.ResponseWriter, r
if err := dm.deleteRequestsStore.AddDeleteRequest(ctx, userID, model.Time(startTime), model.Time(endTime), match); err != nil {
level.Error(util_log.Logger).Log("msg", "error adding delete request to the store", "err", err)
- serverutil.JSONError(w, http.StatusInternalServerError, err.Error())
+ http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
@@ -105,20 +105,20 @@ func (dm *DeleteRequestHandler) GetAllDeleteRequestsHandler(w http.ResponseWrite
ctx := r.Context()
userID, err := tenant.TenantID(ctx)
if err != nil {
- serverutil.JSONError(w, http.StatusBadRequest, err.Error())
+ http.Error(w, err.Error(), http.StatusBadRequest)
return
}
deleteRequests, err := dm.deleteRequestsStore.GetAllDeleteRequestsForUser(ctx, userID)
if err != nil {
level.Error(util_log.Logger).Log("msg", "error getting delete requests from the store", "err", err)
- serverutil.JSONError(w, http.StatusInternalServerError, err.Error())
+ http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
if err := json.NewEncoder(w).Encode(deleteRequests); err != nil {
level.Error(util_log.Logger).Log("msg", "error marshalling response", "err", err)
- serverutil.JSONError(w, http.StatusInternalServerError, "error marshalling response: %v", err)
+ http.Error(w, fmt.Sprintf("Error marshalling response: %v", err), http.StatusInternalServerError)
}
}
@@ -127,7 +127,7 @@ func (dm *DeleteRequestHandler) CancelDeleteRequestHandler(w http.ResponseWriter
ctx := r.Context()
userID, err := tenant.TenantID(ctx)
if err != nil {
- serverutil.JSONError(w, http.StatusBadRequest, err.Error())
+ http.Error(w, err.Error(), http.StatusBadRequest)
return
}
@@ -137,28 +137,28 @@ func (dm *DeleteRequestHandler) CancelDeleteRequestHandler(w http.ResponseWriter
deleteRequest, err := dm.deleteRequestsStore.GetDeleteRequest(ctx, userID, requestID)
if err != nil {
level.Error(util_log.Logger).Log("msg", "error getting delete request from the store", "err", err)
- serverutil.JSONError(w, http.StatusInternalServerError, err.Error())
+ http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
if deleteRequest == nil {
- serverutil.JSONError(w, http.StatusBadRequest, "could not find delete request with given id")
+ http.Error(w, "could not find delete request with given id", http.StatusBadRequest)
return
}
if deleteRequest.Status != StatusReceived {
- serverutil.JSONError(w, http.StatusBadRequest, "deletion of request which is in process or already processed is not allowed")
+ http.Error(w, "deletion of request which is in process or already processed is not allowed", http.StatusBadRequest)
return
}
if deleteRequest.CreatedAt.Add(dm.deleteRequestCancelPeriod).Before(model.Now()) {
- serverutil.JSONError(w, http.StatusBadRequest, "deletion of request past the deadline of %s since its creation is not allowed", dm.deleteRequestCancelPeriod.String())
+ http.Error(w, fmt.Sprintf("deletion of request past the deadline of %s since its creation is not allowed", dm.deleteRequestCancelPeriod.String()), http.StatusBadRequest)
return
}
if err := dm.deleteRequestsStore.RemoveDeleteRequest(ctx, userID, requestID, deleteRequest.CreatedAt, deleteRequest.StartTime, deleteRequest.EndTime); err != nil {
level.Error(util_log.Logger).Log("msg", "error cancelling the delete request", "err", err)
- serverutil.JSONError(w, http.StatusInternalServerError, err.Error())
+ http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
diff --git a/pkg/util/server/error.go b/pkg/util/server/error.go
index 81b59a5e4eeb2..681e4a13345b7 100644
--- a/pkg/util/server/error.go
+++ b/pkg/util/server/error.go
@@ -2,9 +2,7 @@ package server
import (
"context"
- "encoding/json"
"errors"
- "fmt"
"net/http"
"google.golang.org/grpc/codes"
@@ -28,26 +26,6 @@ const (
ErrDeadlineExceeded = "Request timed out, decrease the duration of the request or add more label matchers (prefer exact match over regex match) to reduce the amount of data processed."
)
-type ErrorResponseBody struct {
- Code int `json:"code"`
- Status string `json:"status"`
- Message string `json:"message"`
-}
-
-func NotFoundHandler(w http.ResponseWriter, r *http.Request) {
- JSONError(w, 404, "not found")
-}
-
-func JSONError(w http.ResponseWriter, code int, message string, args ...interface{}) {
- w.Header().Set("Content-Type", "application/json; charset=utf-8")
- w.WriteHeader(code)
- _ = json.NewEncoder(w).Encode(ErrorResponseBody{
- Code: code,
- Status: "error",
- Message: fmt.Sprintf(message, args...),
- })
-}
-
// WriteError write a go error with the correct status code.
func WriteError(err error, w http.ResponseWriter) {
var (
@@ -57,11 +35,11 @@ func WriteError(err error, w http.ResponseWriter) {
me, ok := err.(util.MultiError)
if ok && me.Is(context.Canceled) {
- JSONError(w, StatusClientClosedRequest, ErrClientCanceled)
+ http.Error(w, ErrClientCanceled, StatusClientClosedRequest)
return
}
if ok && me.IsDeadlineExceeded() {
- JSONError(w, http.StatusGatewayTimeout, ErrDeadlineExceeded)
+ http.Error(w, ErrDeadlineExceeded, http.StatusGatewayTimeout)
return
}
@@ -69,19 +47,21 @@ func WriteError(err error, w http.ResponseWriter) {
switch {
case errors.Is(err, context.Canceled) ||
(errors.As(err, &promErr) && errors.Is(promErr.Err, context.Canceled)):
- JSONError(w, StatusClientClosedRequest, ErrClientCanceled)
+ http.Error(w, ErrClientCanceled, StatusClientClosedRequest)
case errors.Is(err, context.DeadlineExceeded) ||
(isRPC && s.Code() == codes.DeadlineExceeded):
- JSONError(w, http.StatusGatewayTimeout, ErrDeadlineExceeded)
- case errors.As(err, &queryErr),
- errors.Is(err, logqlmodel.ErrLimit) || errors.Is(err, logqlmodel.ErrParse) || errors.Is(err, logqlmodel.ErrPipeline),
- errors.Is(err, user.ErrNoOrgID):
- JSONError(w, http.StatusBadRequest, err.Error())
+ http.Error(w, ErrDeadlineExceeded, http.StatusGatewayTimeout)
+ case errors.As(err, &queryErr):
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ case errors.Is(err, logqlmodel.ErrLimit) || errors.Is(err, logqlmodel.ErrParse) || errors.Is(err, logqlmodel.ErrPipeline):
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ case errors.Is(err, user.ErrNoOrgID):
+ http.Error(w, err.Error(), http.StatusBadRequest)
default:
if grpcErr, ok := httpgrpc.HTTPResponseFromError(err); ok {
- JSONError(w, int(grpcErr.Code), string(grpcErr.Body))
+ http.Error(w, string(grpcErr.Body), int(grpcErr.Code))
return
}
- JSONError(w, http.StatusInternalServerError, err.Error())
+ http.Error(w, err.Error(), http.StatusInternalServerError)
}
}
diff --git a/pkg/util/server/error_test.go b/pkg/util/server/error_test.go
index 8e9e48cc022bd..0297e01e0ffa9 100644
--- a/pkg/util/server/error_test.go
+++ b/pkg/util/server/error_test.go
@@ -2,9 +2,9 @@ package server
import (
"context"
- "encoding/json"
"errors"
"fmt"
+ "io/ioutil"
"net/http"
"net/http/httptest"
"testing"
@@ -27,86 +27,39 @@ func Test_writeError(t *testing.T) {
name string
err error
- expectedMsg string
+ msg string
expectedStatus int
}{
{"cancelled", context.Canceled, ErrClientCanceled, StatusClientClosedRequest},
{"cancelled multi", util.MultiError{context.Canceled, context.Canceled}, ErrClientCanceled, StatusClientClosedRequest},
- {
- "rpc cancelled",
- status.New(codes.Canceled, context.Canceled.Error()).Err(),
- "rpc error: code = Canceled desc = context canceled",
- http.StatusInternalServerError,
- },
- {
- "rpc cancelled multi",
- util.MultiError{status.New(codes.Canceled, context.Canceled.Error()).Err(), status.New(codes.Canceled, context.Canceled.Error()).Err()},
- "2 errors: rpc error: code = Canceled desc = context canceled; rpc error: code = Canceled desc = context canceled",
- http.StatusInternalServerError,
- },
- {
- "mixed context and rpc cancelled",
- util.MultiError{context.Canceled, status.New(codes.Canceled, context.Canceled.Error()).Err()},
- "2 errors: context canceled; rpc error: code = Canceled desc = context canceled",
- http.StatusInternalServerError,
- },
- {
- "mixed context, rpc cancelled and another",
- util.MultiError{errors.New("standard error"), context.Canceled, status.New(codes.Canceled, context.Canceled.Error()).Err()},
- "3 errors: standard error; context canceled; rpc error: code = Canceled desc = context canceled",
- http.StatusInternalServerError,
- },
+ {"rpc cancelled", status.New(codes.Canceled, context.Canceled.Error()).Err(), "rpc error: code = Canceled desc = context canceled", http.StatusInternalServerError},
+ {"rpc cancelled multi", util.MultiError{status.New(codes.Canceled, context.Canceled.Error()).Err(), status.New(codes.Canceled, context.Canceled.Error()).Err()}, "2 errors: rpc error: code = Canceled desc = context canceled; rpc error: code = Canceled desc = context canceled", http.StatusInternalServerError},
+ {"mixed context and rpc cancelled", util.MultiError{context.Canceled, status.New(codes.Canceled, context.Canceled.Error()).Err()}, "2 errors: context canceled; rpc error: code = Canceled desc = context canceled", http.StatusInternalServerError},
+ {"mixed context, rpc cancelled and another", util.MultiError{errors.New("standard error"), context.Canceled, status.New(codes.Canceled, context.Canceled.Error()).Err()}, "3 errors: standard error; context canceled; rpc error: code = Canceled desc = context canceled", http.StatusInternalServerError},
{"cancelled storage", promql.ErrStorage{Err: context.Canceled}, ErrClientCanceled, StatusClientClosedRequest},
{"orgid", user.ErrNoOrgID, user.ErrNoOrgID.Error(), http.StatusBadRequest},
{"deadline", context.DeadlineExceeded, ErrDeadlineExceeded, http.StatusGatewayTimeout},
{"deadline multi", util.MultiError{context.DeadlineExceeded, context.DeadlineExceeded}, ErrDeadlineExceeded, http.StatusGatewayTimeout},
{"rpc deadline", status.New(codes.DeadlineExceeded, context.DeadlineExceeded.Error()).Err(), ErrDeadlineExceeded, http.StatusGatewayTimeout},
- {
- "rpc deadline multi",
- util.MultiError{
- status.New(codes.DeadlineExceeded, context.DeadlineExceeded.Error()).Err(),
- status.New(codes.DeadlineExceeded, context.DeadlineExceeded.Error()).Err(),
- },
- ErrDeadlineExceeded,
- http.StatusGatewayTimeout,
- },
- {
- "mixed context and rpc deadline",
- util.MultiError{context.DeadlineExceeded, status.New(codes.DeadlineExceeded, context.DeadlineExceeded.Error()).Err()},
- ErrDeadlineExceeded,
- http.StatusGatewayTimeout,
- },
- {
- "mixed context, rpc deadline and another",
- util.MultiError{errors.New("standard error"), context.DeadlineExceeded, status.New(codes.DeadlineExceeded, context.DeadlineExceeded.Error()).Err()},
- "3 errors: standard error; context deadline exceeded; rpc error: code = DeadlineExceeded desc = context deadline exceeded",
- http.StatusInternalServerError,
- },
+ {"rpc deadline multi", util.MultiError{status.New(codes.DeadlineExceeded, context.DeadlineExceeded.Error()).Err(), status.New(codes.DeadlineExceeded, context.DeadlineExceeded.Error()).Err()}, ErrDeadlineExceeded, http.StatusGatewayTimeout},
+ {"mixed context and rpc deadline", util.MultiError{context.DeadlineExceeded, status.New(codes.DeadlineExceeded, context.DeadlineExceeded.Error()).Err()}, ErrDeadlineExceeded, http.StatusGatewayTimeout},
+ {"mixed context, rpc deadline and another", util.MultiError{errors.New("standard error"), context.DeadlineExceeded, status.New(codes.DeadlineExceeded, context.DeadlineExceeded.Error()).Err()}, "3 errors: standard error; context deadline exceeded; rpc error: code = DeadlineExceeded desc = context deadline exceeded", http.StatusInternalServerError},
{"parse error", logqlmodel.ParseError{}, "parse error : ", http.StatusBadRequest},
{"httpgrpc", httpgrpc.Errorf(http.StatusBadRequest, errors.New("foo").Error()), "foo", http.StatusBadRequest},
{"internal", errors.New("foo"), "foo", http.StatusInternalServerError},
{"query error", chunk.ErrQueryMustContainMetricName, chunk.ErrQueryMustContainMetricName.Error(), http.StatusBadRequest},
- {
- "wrapped query error",
- fmt.Errorf("wrapped: %w", chunk.ErrQueryMustContainMetricName),
- "wrapped: " + chunk.ErrQueryMustContainMetricName.Error(),
- http.StatusBadRequest,
- },
- {
- "multi mixed",
- util.MultiError{context.Canceled, context.DeadlineExceeded},
- "2 errors: context canceled; context deadline exceeded",
- http.StatusInternalServerError,
- },
+ {"wrapped query error", fmt.Errorf("wrapped: %w", chunk.ErrQueryMustContainMetricName), "wrapped: " + chunk.ErrQueryMustContainMetricName.Error(), http.StatusBadRequest},
+ {"multi mixed", util.MultiError{context.Canceled, context.DeadlineExceeded}, "2 errors: context canceled; context deadline exceeded", http.StatusInternalServerError},
} {
t.Run(tt.name, func(t *testing.T) {
rec := httptest.NewRecorder()
WriteError(tt.err, rec)
- res := &ErrorResponseBody{}
- _ = json.NewDecoder(rec.Result().Body).Decode(res)
- require.Equal(t, tt.expectedStatus, res.Code)
require.Equal(t, tt.expectedStatus, rec.Result().StatusCode)
- require.Equal(t, tt.expectedMsg, res.Message)
+ b, err := ioutil.ReadAll(rec.Result().Body)
+ if err != nil {
+ t.Fatal(err)
+ }
+ require.Equal(t, tt.msg, string(b[:len(b)-1]))
})
}
}
|
loki
|
Revert #4845 which changed the format of errors from the API (#5772)
|
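An editor's note on the revert above: every `serverutil.JSONError` call goes back to the standard library's `http.Error`, so clients once again receive a plain-text body instead of the `{code, status, message}` envelope described in the removed upgrade note. The sketch below contrasts the two response shapes; `jsonError` is a hypothetical stand-in modelled on the removed `ErrorResponseBody`, not the Loki helper itself.

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/http/httptest"
)

// jsonError is a hypothetical helper modelled on the reverted JSON error
// envelope; it is illustrative only.
func jsonError(w http.ResponseWriter, code int, msg string) {
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	w.WriteHeader(code)
	_ = json.NewEncoder(w).Encode(map[string]interface{}{
		"code": code, "status": "error", "message": msg,
	})
}

func main() {
	plain := httptest.NewRecorder()
	http.Error(plain, "selectors not set", http.StatusBadRequest)
	fmt.Printf("plain-text body: %q\n", plain.Body.String())

	asJSON := httptest.NewRecorder()
	jsonError(asJSON, http.StatusBadRequest, "selectors not set")
	fmt.Printf("json body:       %q\n", asJSON.Body.String())
}
```

The header differs too: `http.Error` always sets `Content-Type: text/plain; charset=utf-8`, which matches the rewritten `error_test.go` expectations that now read the raw body rather than decoding JSON.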
c4ebfd3020c698a747776a9934deb35bed9c6aed
|
2022-04-29 01:45:46
|
Karen Miller
|
docs: revise getting started guide (#5939)
| false
|
diff --git a/docs/sources/getting-started/_index.md b/docs/sources/getting-started/_index.md
index 99679731e5d7a..1f7dad3c12e97 100644
--- a/docs/sources/getting-started/_index.md
+++ b/docs/sources/getting-started/_index.md
@@ -1,14 +1,142 @@
---
title: Getting started
weight: 300
+description: "This guide helps the reader create and use a simple Loki cluster for testing and evaluation purposes."
+aliases:
+ - /docs/loki/latest/getting-started/get-logs-into-loki/
---
+
# Getting started with Grafana Loki
> **Note:** You can use [Grafana Cloud](https://grafana.com/products/cloud/features/#cloud-logs) to avoid installing, maintaining, and scaling your own instance of Grafana Loki. The free forever plan includes 50GB of free logs. [Create an account to get started](https://grafana.com/auth/sign-up/create-user?pg=docs-loki&plcmt=in-text).
-1. [Getting Logs Into Loki](get-logs-into-loki/)
-1. [Grafana](grafana/)
-1. [LogCLI](logcli/)
-1. [Labels](labels/)
-1. [Troubleshooting](troubleshooting/)
+This guide helps the reader create and use a simple Loki cluster.
+The cluster is intended for testing, development, and evaluation;
+it will not meet most production requirements.
+
+The test environment runs the [flog](https://github.com/mingrammer/flog) app to generate log lines.
+Promtail is the test environment's agent (or client) that captures the log lines and pushes them to the Loki cluster through a gateway.
+In a typical environment, the log-generating app and the agent run together, but in locations distinct from the Loki cluster. This guide runs each piece of the test environment locally, in Docker containers.
+
+Grafana provides a way to pose queries against the logs stored in Loki and visualize query results.
+
+
+
+The test environment uses Docker compose to instantiate these parts, each in its own container:
+
+- One [single scalable deployment](../fundamentals/architecture/deployment-modes/) mode **Loki** instance has:
+ - One Loki read component
+ - One Loki write component
+ - **Minio** is Loki's storage back end in the test environment.
+- The **gateway** receives requests and redirects them to the appropriate container based on the request's URL.
+- **Flog** generates log lines.
+- **Promtail** scrapes the log lines from flog, and pushes them to Loki through the gateway.
+- **Grafana** provides visualization of the log lines captured within Loki.
+
+## Prerequisites
+
+- [Docker](https://docs.docker.com/install)
+- [Docker Compose](https://docs.docker.com/compose/install)
+
+## Obtain the test environment
+
+1. Create a directory called `evaluate-loki` for the test environment. Make `evaluate-loki` your current working directory:
+ ```bash
+ mkdir evaluate-loki
+ cd evaluate-loki
+ ```
+1. Download `loki-config.yaml`, `promtail-local-config.yaml`, and `docker-compose.yaml`:
+
+ ```bash
+ wget https://raw.githubusercontent.com/grafana/loki/main/examples/getting-started/loki-config.yaml -O loki-config.yaml
+ wget https://raw.githubusercontent.com/grafana/loki/main/examples/getting-started/promtail-local-config.yaml -O promtail-local-config.yaml
+ wget https://raw.githubusercontent.com/grafana/loki/main/examples/getting-started/docker-compose.yaml -O docker-compose.yaml
+ ```
+
+## Deploy the test environment
+
+All shell commands are issued from the `evaluate-loki` directory.
+
+1. With `evaluate-loki` as the current working directory, deploy the test environment using `docker-compose`:
+ ```bash
+ docker-compose up -d
+ ```
+1. (Optional) Verify that the Loki cluster is up and running. The read component returns `ready` when you point a web browser at http://localhost:3101/ready. The message `Query Frontend not ready: not ready: number of schedulers this worker is connected to is 0` is displayed until the read component is ready.
+The write component returns `ready` when you point a web browser at http://localhost:3102/ready. The message `Ingester not ready: waiting for 15s after being ready` is displayed until the write component is ready.
+
+## Use Grafana and the test environment
+
+Use [Grafana](https://grafana.com/docs/grafana/latest/) to query and observe the log lines captured in the Loki cluster by navigating a browser to http://localhost:3000.
+The Grafana instance has Loki configured as a [datasource](https://grafana.com/docs/grafana/latest/datasources/loki/).
+
+Click on the Grafana instance's [Explore](https://grafana.com/docs/grafana/latest/explore/) icon to bring up the explore pane.
+
+Use the Explore dropdown menu to choose the Loki datasource and bring up the Loki query browser.
+
+Try some queries.
+Enter your query into the **Log browser** box, and click on the blue **Run query** button.
+
+To see all the log lines that flog has generated:
+```
+{container="evaluate-loki_flog_1"}
+```
+
+The flog app generates log lines for invented HTTP requests.
+To see all `GET` log lines, enter the query:
+
+```
+{container="evaluate-loki_flog_1"} |= "GET"
+```
+For `POST` methods:
+```
+{container="evaluate-loki_flog_1"} |= "POST"
+```
+
+To see every log line with a 401 status (unauthorized error):
+```
+{container="evaluate-loki_flog_1"} | json | status="401"
+```
+To see every log line other than those that contain the value 401:
+```
+{container="evaluate-loki_flog_1"} != "401"
+```
+
+Refer to [query examples](../logql/query_examples/) for more examples.
+
+## Stop and clean up the test environment
+
+To break down the test environment:
+
+- Close the Grafana browser window
+
+- Stop and remove all the Docker containers. With `evaluate-loki` as the current working directory:
+ ```bash
+ docker-compose down
+ ```
+
+## Modifying the flog app output
+
+You can modify the flog app's log line generation by changing
+its configuration.
+Choose one of these two ways to apply a new configuration:
+
+- To remove already-generated logs, restart the test environment with a new configuration.
+
+ 1. Stop and clean up an existing test environment:
+ ```
+ docker-compose down
+ ```
+ 1. Edit the `docker-compose.yaml` file. Within the YAML file, change the `flog.command` field's value to specify your flog output.
+ 1. Instantiate the new test environment:
+ ```
+ docker-compose up
+ ```
+
+- To keep already-generated logs in the running test environment, restart flog with a new configuration.
+
+ 1. Edit the `docker-compose.yaml` file. Within the YAML file, change the `flog.command` field's value to specify your flog output.
+ 1. Restart only the flog app within the currently-running test environment:
+ ```
+ docker-compose up -d --force-recreate flog
+ ```
diff --git a/docs/sources/getting-started/get-logs-into-loki.md b/docs/sources/getting-started/get-logs-into-loki.md
deleted file mode 100644
index b90f70b9c2948..0000000000000
--- a/docs/sources/getting-started/get-logs-into-loki.md
+++ /dev/null
@@ -1,78 +0,0 @@
----
-title: Get logs into Loki
-weight: 10
----
-# Get logs into Grafana Loki
-
-After you [install and run Grafana Loki](../../installation/local/), you probably want to get logs from other applications into it.
-
-To get application logs into Loki, you need to edit the [Promtail]({{< relref "../clients/promtail" >}}) configuration file.
-
-Detailed information about configuring Promtail is available in [Promtail configuration](../../clients/promtail/configuration/).
-
-The following instructions should help you get started.
-
-1. If you haven't already, download a Promtail configuration file. Keep track of where it is, because you will need to cite it when you run the binary.
-
- ```
- wget https://raw.githubusercontent.com/grafana/loki/main/clients/cmd/promtail/promtail-local-config.yaml
- ```
-
-1. Open the configuration file in the text editor of your choice. It should look similar to this:
-
- ```
- server:
- http_listen_port: 9080
- grpc_listen_port: 0
-
- positions:
- filename: /tmp/positions.yaml
-
- clients:
- - url: http://loki:3100/loki/api/v1/push
-
- scrape_configs:
- - job_name: system
- static_configs:
- - targets:
- - localhost
- labels:
- job: varlogs
- __path__: /var/log/*log
- ```
-
- The seven lines under `scrape_configs` are what send the logs that Promtail generates to Loki, which then outputs them in the command line and http://localhost:3100/metrics.
-
- Copy the seven lines under `scrape_configs`, and then paste them under the original job. You can instead edit the original seven lines.
-
- Below is an example that sends logs from a default Grafana installation to Loki. We updated the following fields:
- - job_name - This differentiates the logs collected from other log groups.
- - targets - Optional for `static_configs`. However, is often defined because in older versions of Promtail it was not optional. This was an artifact from directly using the Prometheus service discovery code, which required this entry.
- - labels - Static label to apply to every log line scraped by this definition. Good examples include the environment name, job name, or app name.
- - __path__ - The path to where the logs that Loki is to consume are stored.
-
- ```
- - job_name: grafana
- static_configs:
- - targets:
- - grafana
- labels:
- job: grafana
- __path__: "C:/Program Files/GrafanaLabs/grafana/data/log/grafana.log"
- ```
-
-1. Enter the following command to run Promtail. Examples below assume you have placed the configuration file in the same directory as the binary.
-
- **Windows**
-
- ```
- .\promtail-windows-amd64.exe --config.file=promtail-local-config.yaml
- ```
-
- **Linux**
-
- ```
- ./promtail-linux-amd64 -config.file=promtail-local-config.yaml
- ```
-
-You should now see your application logs. If you are using Grafana, you might need to refresh your instance in order to see the logs.
diff --git a/docs/sources/getting-started/simple-scalable-test-environment.png b/docs/sources/getting-started/simple-scalable-test-environment.png
new file mode 100644
index 0000000000000..950cb70c30996
Binary files /dev/null and b/docs/sources/getting-started/simple-scalable-test-environment.png differ
diff --git a/docs/sources/installation/simple-scalable-docker.md b/docs/sources/installation/simple-scalable-docker.md
deleted file mode 100644
index 6a5d100fe3c83..0000000000000
--- a/docs/sources/installation/simple-scalable-docker.md
+++ /dev/null
@@ -1,57 +0,0 @@
----
-title: Simple scalable cluster
-weight: 35
----
-# Install and deploy a simple scalable cluster with Docker compose
-
-A local Docker compose installation of Grafana Loki and Promtail is appropriate for an evaluation, testing, or development environment.
-Use a Tanka or Helm process for a production environment.
-
-This installation runs Loki in a simple scalable deployment mode with one read path component and one write path component.
-
-## Prerequisites
-
-- [Docker](https://docs.docker.com/install)
-- [Docker Compose](https://docs.docker.com/compose/install)
-
-## Obtain Loki and Promtail configuration files
-
-Download `loki-config.yaml`, `promtail-config.yaml`, and `docker-compose.yaml` to your current directory:
-
-```bash
-wget https://raw.githubusercontent.com/grafana/loki/main/production/simple-scalable/promtail-config.yaml -O promtail-config.yaml
-wget https://raw.githubusercontent.com/grafana/loki/main/production/simple-scalable/loki-config.yaml -O loki-config.yaml
-wget https://raw.githubusercontent.com/grafana/loki/main/production/simple-scalable/docker-compose.yaml -O docker-compose.yaml
-```
-
-This will download `loki-config.yaml`, `promtail-config.yaml`, and `docker-compose.yaml` to your current directory.
-
-The `docker-compose.yaml` relies on the [Loki docker driver](https://grafana.com/docs/loki/latest/clients/docker-driver/),
-aliased to `loki-compose`, to send logs to the loki cluster. If this driver is not installed on your system, you can install it by running the following:
-
-```bash
-docker plugin install grafana/loki-docker-driver:latest --alias loki-compose --grant-all-permissions
-```
-
-If this driver is already installed, but under a different alias, you will have to change `docker-compose.yaml` to use the correct alias.
-
-## Deploy and verify readiness of the Loki cluster
-
-From the directory containing the configuration files, deploy the cluster with docker-compose:
-
-```bash
-docker-compose up
-```
-
-The running Docker containers use the directory's configuration files.
-
-Navigate to http://localhost:3101/ready to check for read container readiness.
-Navigate to http://localhost:3101/metrics to view read container metrics.
-
-Navigate to http://localhost:3102/ready to check for write container readiness.
-Navigate to http://localhost:3102/metrics to view write container metrics.
-
-Navigate to http://localhost:3000 for the Grafana instance that has Loki configured as a datasource.
-
-By default, the image runs processes as user loki with UID `10001` and GID `10001`.
-You can use a different user, specially if you are using bind mounts, by specifying the UID with a `docker run` command and using `--user=UID` with numeric UID suited to your needs.
diff --git a/docs/sources/getting-started/grafana.md b/docs/sources/operations/grafana.md
similarity index 95%
rename from docs/sources/getting-started/grafana.md
rename to docs/sources/operations/grafana.md
index f88d8857a168c..e00c46f4dfe8c 100644
--- a/docs/sources/getting-started/grafana.md
+++ b/docs/sources/operations/grafana.md
@@ -1,6 +1,8 @@
---
title: Loki in Grafana
-weight: 30
+weight: 15
+aliases:
+ - /docs/loki/latest/getting-started/grafana/
---
# Loki in Grafana
diff --git a/docs/sources/getting-started/troubleshooting.md b/docs/sources/operations/troubleshooting.md
similarity index 98%
rename from docs/sources/getting-started/troubleshooting.md
rename to docs/sources/operations/troubleshooting.md
index 95f2e0106b7d2..a13345772453f 100644
--- a/docs/sources/getting-started/troubleshooting.md
+++ b/docs/sources/operations/troubleshooting.md
@@ -1,6 +1,8 @@
---
title: Troubleshooting
-weight: 40
+weight: 80
+aliases:
+ - /docs/loki/latest/getting-started/troubleshooting/
---
# Troubleshooting Grafana Loki
diff --git a/docs/sources/tools/_index.md b/docs/sources/tools/_index.md
new file mode 100644
index 0000000000000..1af0e5699094e
--- /dev/null
+++ b/docs/sources/tools/_index.md
@@ -0,0 +1,7 @@
+---
+title: Tools
+weight: 1050
+---
+# Tools
+
+- [LogCLI](logcli/)
diff --git a/docs/sources/getting-started/logcli.md b/docs/sources/tools/logcli.md
similarity index 99%
rename from docs/sources/getting-started/logcli.md
rename to docs/sources/tools/logcli.md
index a657ee1e43f4d..2b8cef4691890 100644
--- a/docs/sources/getting-started/logcli.md
+++ b/docs/sources/tools/logcli.md
@@ -1,6 +1,8 @@
---
title: LogCLI
weight: 20
+aliases:
+ - /docs/loki/latest/getting-started/logcli/
---
# LogCLI, Grafana Loki's command-line interface
diff --git a/examples/README.md b/examples/README.md
new file mode 100644
index 0000000000000..b41ab1d3d6fc4
--- /dev/null
+++ b/examples/README.md
@@ -0,0 +1,11 @@
+# Loki examples
+
+This directory contains examples and example configuration for Loki.
+
+## Hosted logs in Grafana Cloud
+
+You can use [Grafana Cloud](https://grafana.com/products/cloud/features/#cloud-logs) to avoid installing, maintaining, and scaling your own instance of Grafana Loki. The free forever plan includes 50GB of free logs. [Create an account to get started](https://grafana.com/auth/sign-up/create-user?pg=docs-loki&plcmt=in-text).
+
+## Getting started with Loki
+
+Configuration files in the `getting-started` directory are used by the [Loki getting started guide](https://grafana.com/docs/loki/latest/getting-started/).
diff --git a/examples/getting-started/README.md b/examples/getting-started/README.md
new file mode 100644
index 0000000000000..072e54d72901c
--- /dev/null
+++ b/examples/getting-started/README.md
@@ -0,0 +1,4 @@
+# Loki getting started guide configuration
+
+This directory contains configuration to use with the [Loki getting started guide](https://grafana.com/docs/loki/latest/getting-started/).
+
diff --git a/production/simple-scalable/docker-compose.yaml b/examples/getting-started/docker-compose.yaml
similarity index 90%
rename from production/simple-scalable/docker-compose.yaml
rename to examples/getting-started/docker-compose.yaml
index c34aeebe58019..75ca39c0dce76 100644
--- a/production/simple-scalable/docker-compose.yaml
+++ b/examples/getting-started/docker-compose.yaml
@@ -37,15 +37,9 @@ services:
promtail:
image: grafana/promtail:2.5.0
- logging: &logging
- driver: loki-compose
- options:
- loki-url: "http://localhost:3100/loki/api/v1/push"
- loki-retries: "1"
- loki-tenant-id: "tenant1"
volumes:
- - /var/log:/var/log
- - ./promtail-config.yaml:/etc/promtail/config.yaml:ro
+ - ./promtail-local-config.yaml:/etc/promtail/config.yaml:ro
+ - /var/run/docker.sock:/var/run/docker.sock
command: -config.file=/etc/promtail/config.yaml
depends_on:
- gateway
@@ -66,8 +60,6 @@ services:
- MINIO_SECRET_KEY=supersecret
- MINIO_PROMETHEUS_AUTH_TYPE=public
- MINIO_UPDATE=off
- logging:
- <<: *logging
ports:
- 9000
volumes:
@@ -77,8 +69,6 @@ services:
grafana:
image: grafana/grafana:latest
- logging:
- <<: *logging
environment:
- GF_PATHS_PROVISIONING=/etc/grafana/provisioning
- GF_AUTH_ANONYMOUS_ENABLED=true
@@ -109,8 +99,6 @@ services:
- loki
gateway:
- logging:
- <<: *logging
image: nginx:latest
depends_on:
- read
@@ -173,3 +161,9 @@ services:
- "3100:3100"
networks:
- loki
+
+ flog:
+ image: mingrammer/flog
+ command: -f json -d 1s -l
+ networks:
+ - loki
diff --git a/production/simple-scalable/loki-config.yaml b/examples/getting-started/loki-config.yaml
similarity index 100%
rename from production/simple-scalable/loki-config.yaml
rename to examples/getting-started/loki-config.yaml
diff --git a/examples/getting-started/promtail-local-config.yaml b/examples/getting-started/promtail-local-config.yaml
new file mode 100644
index 0000000000000..dcb2d3eed81a2
--- /dev/null
+++ b/examples/getting-started/promtail-local-config.yaml
@@ -0,0 +1,22 @@
+---
+server:
+ http_listen_port: 9080
+ grpc_listen_port: 0
+
+positions:
+ filename: /tmp/positions.yaml
+
+clients:
+ - url: http://gateway:3100/loki/api/v1/push
+ tenant_id: tenant1
+
+scrape_configs:
+ - job_name: flog_scrape
+ docker_sd_configs:
+ - host: unix:///var/run/docker.sock
+ refresh_interval: 5s
+ relabel_configs:
+ - source_labels: ['__meta_docker_container_name']
+ regex: '/(.*)'
+ target_label: 'container'
+
diff --git a/production/simple-scalable/.dockerignore b/production/simple-scalable/.dockerignore
deleted file mode 100644
index ea26ec23a7301..0000000000000
--- a/production/simple-scalable/.dockerignore
+++ /dev/null
@@ -1 +0,0 @@
-.data
diff --git a/production/simple-scalable/.gitignore b/production/simple-scalable/.gitignore
deleted file mode 100644
index ea26ec23a7301..0000000000000
--- a/production/simple-scalable/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-.data
diff --git a/production/simple-scalable/promtail-config.yaml b/production/simple-scalable/promtail-config.yaml
deleted file mode 100644
index 0ff1a73fd2260..0000000000000
--- a/production/simple-scalable/promtail-config.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-server:
- http_listen_port: 9080
- grpc_listen_port: 0
-
-positions:
- filename: /tmp/positions.yaml
-
-clients:
- - url: http://gateway:3100/loki/api/v1/push
- tenant_id: tenant1
-
-scrape_configs:
- - job_name: system
- static_configs:
- - targets:
- - localhost
- labels:
- job: varlogs
- __path__: /var/log/*log
|
docs
|
revise getting started guide (#5939)
|
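An editor's note on the guide above: the readiness checks are performed by pointing a browser at the read and write components. A small stand-alone probe in the same spirit is sketched below, assuming the compose stack's default host ports (3101 for read, 3102 for write); it is a convenience sketch, not part of the published example files.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{Timeout: 2 * time.Second}
	for _, url := range []string{
		"http://localhost:3101/ready", // Loki read component
		"http://localhost:3102/ready", // Loki write component
	} {
		resp, err := client.Get(url)
		if err != nil {
			fmt.Println(url, "unreachable:", err)
			continue
		}
		body, _ := io.ReadAll(resp.Body)
		resp.Body.Close()
		// A 200 with body "ready" matches the manual browser check.
		fmt.Printf("%s -> %d %s", url, resp.StatusCode, body)
	}
}
```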
703d68bc76844c4b6335bc3450722742b89a37e7
|
2021-10-22 02:41:56
|
Danny Kopping
|
promtail: allow for customisable stream lag labels (#4507)
| false
|
diff --git a/clients/pkg/promtail/client/client.go b/clients/pkg/promtail/client/client.go
index 03b5665d8d3ac..6e25e532a57eb 100644
--- a/clients/pkg/promtail/client/client.go
+++ b/clients/pkg/promtail/client/client.go
@@ -320,14 +320,21 @@ func (c *client) sendBatch(tenantID string, batch *batch) {
}
var lblSet model.LabelSet
for i := range lbls {
- if lbls[i].Name == LatencyLabel {
- lblSet = model.LabelSet{
- model.LabelName(HostLabel): model.LabelValue(c.cfg.URL.Host),
- model.LabelName(LatencyLabel): model.LabelValue(lbls[i].Value),
+ for _, lbl := range c.cfg.StreamLagLabels {
+ if lbls[i].Name == lbl {
+ if lblSet == nil {
+ lblSet = model.LabelSet{}
+ }
+
+ lblSet = lblSet.Merge(model.LabelSet{
+ model.LabelName(lbl): model.LabelValue(lbls[i].Value),
+ })
}
}
}
if lblSet != nil {
+ // always set host
+ lblSet = lblSet.Merge(model.LabelSet{model.LabelName(HostLabel): model.LabelValue(c.cfg.URL.Host)})
c.metrics.streamLag.With(lblSet).Set(time.Since(s.Entries[len(s.Entries)-1].Timestamp).Seconds())
}
}
diff --git a/clients/pkg/promtail/client/config.go b/clients/pkg/promtail/client/config.go
index 1507e04eb10fd..bd38aa71ccc54 100644
--- a/clients/pkg/promtail/client/config.go
+++ b/clients/pkg/promtail/client/config.go
@@ -37,6 +37,8 @@ type Config struct {
// The tenant ID to use when pushing logs to Loki (empty string means
// single tenant mode)
TenantID string `yaml:"tenant_id"`
+
+ StreamLagLabels flagext.StringSliceCSV `yaml:"stream_lag_labels"`
}
// RegisterFlags with prefix registers flags where every name is prefixed by
@@ -53,6 +55,9 @@ func (c *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
f.Var(&c.ExternalLabels, prefix+"client.external-labels", "list of external labels to add to each log (e.g: --client.external-labels=lb1=v1,lb2=v2)")
f.StringVar(&c.TenantID, prefix+"client.tenant-id", "", "Tenant ID to use when pushing logs to Loki.")
+
+ c.StreamLagLabels = []string{"filename"}
+ f.Var(&c.StreamLagLabels, prefix+"client.stream-lag-labels", "Comma-separated list of labels to use when calculating stream lag")
}
// RegisterFlags registers flags.
@@ -75,9 +80,10 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
MaxRetries: MaxRetries,
MinBackoff: MinBackoff,
},
- BatchSize: BatchSize,
- BatchWait: BatchWait,
- Timeout: Timeout,
+ BatchSize: BatchSize,
+ BatchWait: BatchWait,
+ Timeout: Timeout,
+ StreamLagLabels: []string{"filename"},
}
}
diff --git a/clients/pkg/promtail/client/config_test.go b/clients/pkg/promtail/client/config_test.go
index f5ee8aad3c057..b237fdd9c39ce 100644
--- a/clients/pkg/promtail/client/config_test.go
+++ b/clients/pkg/promtail/client/config_test.go
@@ -48,9 +48,10 @@ func Test_Config(t *testing.T) {
MaxRetries: MaxRetries,
MinBackoff: MinBackoff,
},
- BatchSize: BatchSize,
- BatchWait: BatchWait,
- Timeout: Timeout,
+ BatchSize: BatchSize,
+ BatchWait: BatchWait,
+ Timeout: Timeout,
+ StreamLagLabels: []string{"filename"},
},
},
{
@@ -64,9 +65,10 @@ func Test_Config(t *testing.T) {
MaxRetries: 20,
MinBackoff: 5 * time.Second,
},
- BatchSize: 100 * 2048,
- BatchWait: 5 * time.Second,
- Timeout: 5 * time.Second,
+ BatchSize: 100 * 2048,
+ BatchWait: 5 * time.Second,
+ Timeout: 5 * time.Second,
+ StreamLagLabels: []string{"filename"},
},
},
}
diff --git a/docs/sources/clients/promtail/configuration.md b/docs/sources/clients/promtail/configuration.md
index 4cb4bee7a9649..b1e0050bf1d93 100644
--- a/docs/sources/clients/promtail/configuration.md
+++ b/docs/sources/clients/promtail/configuration.md
@@ -273,6 +273,12 @@ external_labels:
# Maximum time to wait for a server to respond to a request
[timeout: <duration> | default = 10s]
+
+# A comma-separated list of labels to include in the stream lag metric `promtail_stream_lag_seconds`.
+# The default value is "filename". A "host" label is always included.
+# The stream lag metric indicates which streams are falling behind on writes to Loki;
+# be mindful not to use too many labels here, as they can explode cardinality.
+[stream_lag_labels: <string> | default = "filename"]
```
## positions
|
promtail
|
allow for customisable stream lag labels (#4507)
|
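An editor's note on the client change above: the old single-label check (`LatencyLabel`) becomes a loop over `cfg.StreamLagLabels`, merging each matched label into the metric's label set and then unconditionally adding the host label. The distilled sketch below reproduces just that selection logic with plain maps instead of Prometheus' `model.LabelSet`; the names are illustrative, not Loki's.

```go
package main

import "fmt"

// streamLagLabels keeps only the configured stream-lag labels from a
// stream's label set and, when anything matched, always adds host,
// mirroring the loop added to client.sendBatch.
func streamLagLabels(cfgLabels []string, stream map[string]string, host string) map[string]string {
	var out map[string]string
	for name, value := range stream {
		for _, want := range cfgLabels {
			if name == want {
				if out == nil {
					out = map[string]string{}
				}
				out[name] = value
			}
		}
	}
	if out != nil {
		out["host"] = host // "always set host", as the patch comments
	}
	return out
}

func main() {
	cfg := []string{"filename", "pod"} // e.g. -client.stream-lag-labels=filename,pod
	stream := map[string]string{"filename": "/var/log/app.log", "job": "app"}
	fmt.Println(streamLagLabels(cfg, stream, "loki:3100"))
}
```

With the default `StreamLagLabels` of `[]string{"filename"}` (set in both `RegisterFlagsWithPrefix` and the YAML defaults), this reduces to the previous single-label behaviour.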
048587f1a6e9e74a889292305b7eac326ffee4ed
|
2023-10-16 19:25:50
|
Ashwanth
|
ksonnet: remove usage of already deleted config match_max_concurrent (#10910)
| false
|
diff --git a/production/ksonnet/loki/config.libsonnet b/production/ksonnet/loki/config.libsonnet
index bae016daf8c1e..108ffc3292731 100644
--- a/production/ksonnet/loki/config.libsonnet
+++ b/production/ksonnet/loki/config.libsonnet
@@ -180,7 +180,6 @@
log_queries_longer_than: '5s',
},
frontend_worker: {
- match_max_concurrent: true,
grpc_client_config: {
max_send_msg_size: $._config.grpc_server_max_msg_size,
},
|
ksonnet
|
remove usage of already deleted config match_max_concurrent (#10910)
|
e7a1b67c0a5ab407061ab662874a1eae373177c5
|
2022-04-18 20:58:41
|
Fanis Hatzidakis
|
docs: Fix typo in 429 exceeded rate limit error message (#5944)
| false
|
diff --git a/pkg/util/validation/limits.go b/pkg/util/validation/limits.go
index 1ca21209e8c53..84641e20c403f 100644
--- a/pkg/util/validation/limits.go
+++ b/pkg/util/validation/limits.go
@@ -652,7 +652,7 @@ func SmallestPositiveIntPerTenant(tenantIDs []string, f func(string) int) int {
// SmallestPositiveNonZeroIntPerTenant is returning the minimal positive and
// non-zero value of the supplied limit function for all given tenants. In many
-// limits a value of 0 means unlimted so the method will return 0 only if all
+// limits a value of 0 means unlimited so the method will return 0 only if all
// inputs have a limit of 0 or an empty tenant list is given.
func SmallestPositiveNonZeroIntPerTenant(tenantIDs []string, f func(string) int) int {
var result *int
@@ -670,7 +670,7 @@ func SmallestPositiveNonZeroIntPerTenant(tenantIDs []string, f func(string) int)
// SmallestPositiveNonZeroDurationPerTenant is returning the minimal positive
// and non-zero value of the supplied limit function for all given tenants. In
-// many limits a value of 0 means unlimted so the method will return 0 only if
+// many limits a value of 0 means unlimited so the method will return 0 only if
// all inputs have a limit of 0 or an empty tenant list is given.
func SmallestPositiveNonZeroDurationPerTenant(tenantIDs []string, f func(string) time.Duration) time.Duration {
var result *time.Duration
diff --git a/pkg/validation/validate.go b/pkg/validation/validate.go
index e38f1bd3376e7..f2a9114ee255a 100644
--- a/pkg/validation/validate.go
+++ b/pkg/validation/validate.go
@@ -59,7 +59,7 @@ type ErrStreamRateLimit struct {
}
func (e *ErrStreamRateLimit) Error() string {
- return fmt.Sprintf("Per stream rate limit exceeded (limit: %s/sec) while attempting to ingest for stream '%s' totaling %s, consider splitting a stream via additional labels or contact your Loki administrator to see if the limt can be increased",
+ return fmt.Sprintf("Per stream rate limit exceeded (limit: %s/sec) while attempting to ingest for stream '%s' totaling %s, consider splitting a stream via additional labels or contact your Loki administrator to see if the limit can be increased",
e.RateLimit.String(),
e.Labels,
e.Bytes.String())
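The corrected comments describe slightly subtle semantics, so here is a minimal restatement of the contract as a sketch; it is not Loki's actual implementation, just the behaviour the comments promise.

```go
package limits

// smallestPositiveNonZero restates the contract described in the fixed
// comments: 0 means "unlimited", so 0 is returned only when every tenant
// is unlimited or no tenants are given.
func smallestPositiveNonZero(tenantIDs []string, limit func(string) int) int {
	var result *int
	for _, id := range tenantIDs {
		v := limit(id)
		if v > 0 && (result == nil || v < *result) {
			result = &v
		}
	}
	if result == nil {
		return 0
	}
	return *result
}
```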
|
docs
|
Fix typo in 429 exceeded rate limit error message (#5944)
|
c65721e7ade0ef89fd282d9f764fb2d05f6b9c42
|
2024-09-10 12:57:59
|
Vladyslav Diachenko
|
feat: track discarded data by usageTracker (#14081)
| false
|
diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go
index 080db0d87221c..f6ae454e1482a 100644
--- a/pkg/distributor/distributor.go
+++ b/pkg/distributor/distributor.go
@@ -456,8 +456,7 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log
now := time.Now()
if block, until, retStatusCode := d.validator.ShouldBlockIngestion(validationContext, now); block {
- validation.DiscardedSamples.WithLabelValues(validation.BlockedIngestion, tenantID).Add(float64(validatedLineCount))
- validation.DiscardedBytes.WithLabelValues(validation.BlockedIngestion, tenantID).Add(float64(validatedLineSize))
+ d.trackDiscardedData(ctx, req, validationContext, tenantID, validatedLineCount, validatedLineSize, validation.BlockedIngestion)
err = fmt.Errorf(validation.BlockedIngestionErrorMsg, tenantID, until.Format(time.RFC3339), retStatusCode)
d.writeFailuresManager.Log(tenantID, err)
@@ -472,30 +471,11 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log
}
if !d.ingestionRateLimiter.AllowN(now, tenantID, validatedLineSize) {
- // Return a 429 to indicate to the client they are being rate limited
- validation.DiscardedSamples.WithLabelValues(validation.RateLimited, tenantID).Add(float64(validatedLineCount))
- validation.DiscardedBytes.WithLabelValues(validation.RateLimited, tenantID).Add(float64(validatedLineSize))
-
- if d.usageTracker != nil {
- for _, stream := range req.Streams {
- lbs, _, _, err := d.parseStreamLabels(validationContext, stream.Labels, stream)
- if err != nil {
- continue
- }
-
- discardedStreamBytes := 0
- for _, e := range stream.Entries {
- discardedStreamBytes += len(e.Line)
- }
-
- if d.usageTracker != nil {
- d.usageTracker.DiscardedBytesAdd(ctx, tenantID, validation.RateLimited, lbs, float64(discardedStreamBytes))
- }
- }
- }
+ d.trackDiscardedData(ctx, req, validationContext, tenantID, validatedLineCount, validatedLineSize, validation.RateLimited)
err = fmt.Errorf(validation.RateLimitedErrorMsg, tenantID, int(d.ingestionRateLimiter.Limit(now, tenantID)), validatedLineCount, validatedLineSize)
d.writeFailuresManager.Log(tenantID, err)
+ // Return a 429 to indicate to the client they are being rate limited
return nil, httpgrpc.Errorf(http.StatusTooManyRequests, "%s", err.Error())
}
@@ -569,6 +549,37 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log
}
}
+func (d *Distributor) trackDiscardedData(
+ ctx context.Context,
+ req *logproto.PushRequest,
+ validationContext validationContext,
+ tenantID string,
+ validatedLineCount int,
+ validatedLineSize int,
+ reason string,
+) {
+ validation.DiscardedSamples.WithLabelValues(reason, tenantID).Add(float64(validatedLineCount))
+ validation.DiscardedBytes.WithLabelValues(reason, tenantID).Add(float64(validatedLineSize))
+
+ if d.usageTracker != nil {
+ for _, stream := range req.Streams {
+ lbs, _, _, err := d.parseStreamLabels(validationContext, stream.Labels, stream)
+ if err != nil {
+ continue
+ }
+
+ discardedStreamBytes := 0
+ for _, e := range stream.Entries {
+ discardedStreamBytes += len(e.Line)
+ }
+
+ if d.usageTracker != nil {
+ d.usageTracker.DiscardedBytesAdd(ctx, tenantID, reason, lbs, float64(discardedStreamBytes))
+ }
+ }
+ }
+}
+
func hasAnyLevelLabels(l labels.Labels) (string, bool) {
for lbl := range allowedLabelsForLevel {
if l.Has(lbl) {
diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go
index e9528c361bda0..2e8f7b895e0f9 100644
--- a/pkg/distributor/distributor_test.go
+++ b/pkg/distributor/distributor_test.go
@@ -124,7 +124,7 @@ func TestDistributor(t *testing.T) {
if len(tc.expectedErrors) > 0 {
for _, expectedError := range tc.expectedErrors {
if len(tc.expectedErrors) == 1 {
- assert.Equal(t, err, expectedError)
+ assert.Equal(t, expectedError, err)
} else {
assert.Contains(t, err.Error(), expectedError.Error())
}
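The value of the refactor is that the blocked-ingestion and rate-limited paths previously duplicated the discard accounting and could drift apart. A toy sketch of the funnel pattern follows, with simplified stand-in types rather than the real distributor structs; the reason strings are placeholders for `validation.RateLimited` and `validation.BlockedIngestion`.

```go
package main

import "fmt"

// Stand-ins for validation.RateLimited and validation.BlockedIngestion;
// the real reason strings live in Loki's validation package.
const (
	reasonRateLimited      = "rate_limited"
	reasonBlockedIngestion = "blocked_ingestion"
)

// tracker is a simplified stand-in for the distributor's optional usage tracker.
type tracker interface {
	DiscardedBytesAdd(tenant, reason, stream string, bytes float64)
}

type logTracker struct{}

func (logTracker) DiscardedBytesAdd(tenant, reason, stream string, bytes float64) {
	fmt.Printf("tenant=%s reason=%s stream=%s bytes=%.0f\n", tenant, reason, stream, bytes)
}

// trackDiscarded funnels every rejection through one code path, the shape
// trackDiscardedData gives the real distributor: aggregate counters and
// per-stream attribution can no longer drift between rejection reasons.
func trackDiscarded(t tracker, tenant, reason string, perStreamBytes map[string]int) {
	for stream, n := range perStreamBytes {
		if t != nil {
			t.DiscardedBytesAdd(tenant, reason, stream, float64(n))
		}
	}
}

func main() {
	trackDiscarded(logTracker{}, "tenant-a", reasonRateLimited,
		map[string]int{`{app="api"}`: 4096})
	trackDiscarded(logTracker{}, "tenant-a", reasonBlockedIngestion,
		map[string]int{`{app="api"}`: 2048})
}
```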
|
feat
|
track discarded data by usageTracker (#14081)
|
04994ca8218e82711aa77679d1392c05935d2416
|
2024-12-19 17:43:15
|
Karsten Jeschkies
|
perf: Improve `approx_topk` performance by reducing allocations. (#15450)
| false
|
diff --git a/pkg/logql/count_min_sketch.go b/pkg/logql/count_min_sketch.go
index 2d2e607f09598..e24e089ad307b 100644
--- a/pkg/logql/count_min_sketch.go
+++ b/pkg/logql/count_min_sketch.go
@@ -5,6 +5,7 @@ import (
"fmt"
"github.com/axiomhq/hyperloglog"
+ "github.com/cespare/xxhash/v2"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/promql"
@@ -157,8 +158,13 @@ type HeapCountMinSketchVector struct {
CountMinSketchVector
// internal set of observed events
- observed map[string]struct{}
+ observed map[uint64]struct{}
maxLabels int
+
+ // The buffers are used by `labels.Bytes` similar to `series.Hash` in `codec.MergeResponse`. They are allocated
+ // outside of the method in order to reuse them for the next `Add` call. This saves a lot of allocations.
+ // 1KB is used for `b` after some experimentation. Reusing the buffer is not thread safe.
+ buffer []byte
}
func NewHeapCountMinSketchVector(ts int64, metricsLength, maxLabels int) HeapCountMinSketchVector {
@@ -172,31 +178,39 @@ func NewHeapCountMinSketchVector(ts int64, metricsLength, maxLabels int) HeapCou
CountMinSketchVector: CountMinSketchVector{
T: ts,
F: f,
- Metrics: make([]labels.Labels, 0, metricsLength),
+ Metrics: make([]labels.Labels, 0, metricsLength+1),
},
- observed: make(map[string]struct{}),
+ observed: make(map[uint64]struct{}),
maxLabels: maxLabels,
+ buffer: make([]byte, 0, 1024),
}
}
func (v *HeapCountMinSketchVector) Add(metric labels.Labels, value float64) {
- // TODO: we save a lot of allocations by reusing the buffer inside metric.String
- metricString := metric.String()
- v.F.Add(metricString, value)
+ v.buffer = metric.Bytes(v.buffer)
+
+ v.F.Add(v.buffer, value)
// Add our metric if we haven't seen it
- if _, ok := v.observed[metricString]; !ok {
+
+ // TODO(karsten): There is a chance that the ids match but not the labels due to hash collision. Ideally there's
+ // an else block that compares the series labels. However, that's not trivial. Besides, instance.Series has the
+ // same issue in its deduping logic.
+ id := xxhash.Sum64(v.buffer)
+ if _, ok := v.observed[id]; !ok {
heap.Push(v, metric)
- v.observed[metricString] = struct{}{}
- } else if v.Metrics[0].String() == metricString {
- // The smalles element has been updated to fix the heap.
+ v.observed[id] = struct{}{}
+ } else if labels.Equal(v.Metrics[0], metric) {
+ // The smallest element has been updated to fix the heap.
heap.Fix(v, 0)
}
// The maximum number of labels has been reached, so drop the smallest element.
if len(v.Metrics) > v.maxLabels {
metric := heap.Pop(v).(labels.Labels)
- delete(v.observed, metric.String())
+ v.buffer = metric.Bytes(v.buffer)
+ id := xxhash.Sum64(v.buffer)
+ delete(v.observed, id)
}
}
@@ -205,8 +219,11 @@ func (v HeapCountMinSketchVector) Len() int {
}
func (v HeapCountMinSketchVector) Less(i, j int) bool {
- left := v.F.Count(v.Metrics[i].String())
- right := v.F.Count(v.Metrics[j].String())
+ v.buffer = v.Metrics[i].Bytes(v.buffer)
+ left := v.F.Count(v.buffer)
+
+ v.buffer = v.Metrics[j].Bytes(v.buffer)
+ right := v.F.Count(v.buffer)
return left < right
}
@@ -295,6 +312,11 @@ func (e *countMinSketchVectorAggEvaluator) Error() error {
type CountMinSketchVectorStepEvaluator struct {
exhausted bool
vec *CountMinSketchVector
+
+ // The buffers are used by `labels.Bytes` similar to `series.Hash` in `codec.MergeResponse`. They are allocated
+ // outside of the method in order to reuse them for the next `Next` call. This saves a lot of allocations.
+ // 1KB is used for `b` after some experimentation. Reusing the buffer is not thread safe.
+ buffer []byte
}
var _ StepEvaluator = NewQuantileSketchVectorStepEvaluator(nil, 0)
@@ -303,6 +325,7 @@ func NewCountMinSketchVectorStepEvaluator(vec *CountMinSketchVector) *CountMinSk
return &CountMinSketchVectorStepEvaluator{
exhausted: false,
vec: vec,
+ buffer: make([]byte, 0, 1024),
}
}
@@ -315,7 +338,8 @@ func (e *CountMinSketchVectorStepEvaluator) Next() (bool, int64, StepResult) {
for i, labels := range e.vec.Metrics {
- f := e.vec.F.Count(labels.String())
+ e.buffer = labels.Bytes(e.buffer)
+ f := e.vec.F.Count(e.buffer)
vec[i] = promql.Sample{
T: e.vec.T,
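To see the allocation-saving trick outside the heap machinery: `labels.Bytes` fills a caller-owned scratch slice, and `xxhash.Sum64` keys the dedup map, accepting the unlikely collision the TODO mentions. A minimal, self-contained sketch (the `dedup` type is illustrative only):

```go
package sketch

import (
	"github.com/cespare/xxhash/v2"
	"github.com/prometheus/prometheus/model/labels"
)

type dedup struct {
	observed map[uint64]struct{}
	buffer   []byte // reused scratch space; not safe for concurrent use
}

func newDedup() *dedup {
	return &dedup{
		observed: make(map[uint64]struct{}),
		buffer:   make([]byte, 0, 1024),
	}
}

// seen reports whether this label set was observed before, keyed by the
// xxhash of its byte form rather than an allocated metric.String(); as the
// diff's TODO notes, a hash collision would wrongly report "seen".
func (d *dedup) seen(metric labels.Labels) bool {
	d.buffer = metric.Bytes(d.buffer)
	id := xxhash.Sum64(d.buffer)
	if _, ok := d.observed[id]; ok {
		return true
	}
	d.observed[id] = struct{}{}
	return false
}
```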
diff --git a/pkg/logql/count_min_sketch_test.go b/pkg/logql/count_min_sketch_test.go
index 2b2c2960e4515..9b75e546f7c8d 100644
--- a/pkg/logql/count_min_sketch_test.go
+++ b/pkg/logql/count_min_sketch_test.go
@@ -1,6 +1,8 @@
package logql
import (
+ "fmt"
+ "math/rand"
"testing"
"github.com/grafana/loki/v3/pkg/logproto"
@@ -57,8 +59,9 @@ func TestCountMinSketchSerialization(t *testing.T) {
T: 42,
F: cms,
},
- observed: make(map[string]struct{}, 0),
+ observed: make(map[uint64]struct{}, 0),
maxLabels: 10_000,
+ buffer: make([]byte, 0, 1024),
}
vec.Add(metric, 42.0)
@@ -68,7 +71,7 @@ func TestCountMinSketchSerialization(t *testing.T) {
Sketch: &logproto.CountMinSketch{
Depth: 2,
Width: 4,
- Counters: []float64{0, 0, 0, 42, 0, 42, 0, 0},
+ Counters: []float64{0, 42, 0, 0, 0, 42, 0, 0},
Hyperloglog: hllBytes,
},
Metrics: []*logproto.Labels{
@@ -86,3 +89,30 @@ func TestCountMinSketchSerialization(t *testing.T) {
// The HeapCountMinSketchVector is serialized to a CountMinSketchVector.
require.Equal(t, round, vec.CountMinSketchVector)
}
+
+func BenchmarkHeapCountMinSketchVectorAdd(b *testing.B) {
+ maxLabels := 10_000
+ v := NewHeapCountMinSketchVector(0, maxLabels, maxLabels)
+ if len(v.Metrics) > maxLabels || cap(v.Metrics) > maxLabels+1 {
+ b.Errorf("Length or capcity of metrics is too high: len=%d cap=%d", len(v.Metrics), cap(v.Metrics))
+ }
+
+ eventsCount := 100_000
+ uniqueEventsCount := 20_000
+ events := make([]labels.Labels, eventsCount)
+ for i := range events {
+ events[i] = labels.Labels{{Name: "event", Value: fmt.Sprintf("%d", i%uniqueEventsCount)}}
+ }
+
+ b.ResetTimer()
+ b.ReportAllocs()
+
+ for n := 0; n < b.N; n++ {
+ for _, event := range events {
+ v.Add(event, rand.Float64())
+ if len(v.Metrics) > maxLabels || cap(v.Metrics) > maxLabels+1 {
+ b.Errorf("Length or capcity of metrics is too high: len=%d cap=%d", len(v.Metrics), cap(v.Metrics))
+ }
+ }
+ }
+}
diff --git a/pkg/logql/sketch/cms.go b/pkg/logql/sketch/cms.go
index 9c8e72955ef33..b510ee8504ea0 100644
--- a/pkg/logql/sketch/cms.go
+++ b/pkg/logql/sketch/cms.go
@@ -10,7 +10,7 @@ import (
type CountMinSketch struct {
Depth, Width uint32
Counters [][]float64
- HyperLogLog *hyperloglog.Sketch //hyperloglog.New16(),
+ HyperLogLog *hyperloglog.Sketch // hyperloglog.New16(),
}
// NewCountMinSketch creates a new CMS for a given width and depth.
@@ -46,8 +46,8 @@ func (s *CountMinSketch) getPos(h1, h2, row uint32) uint32 {
}
// Add 'count' occurrences of the given input.
-func (s *CountMinSketch) Add(event string, count float64) {
- s.HyperLogLog.Insert(unsafeGetBytes(event))
+func (s *CountMinSketch) Add(event []byte, count float64) {
+ s.HyperLogLog.Insert(event)
// see the comments in the hashn function for how using only 2
// hash functions rather than a function per row still fulfils
// the pairwise independent hash functions requirement for CMS
@@ -58,7 +58,7 @@ func (s *CountMinSketch) Add(event string, count float64) {
}
}
-func (s *CountMinSketch) Increment(event string) {
+func (s *CountMinSketch) Increment(event []byte) {
s.Add(event, 1)
}
@@ -69,8 +69,8 @@ func (s *CountMinSketch) Increment(event string) {
// value that's less than Count(h) + count rather than all counters that h hashed to.
// Returns the new estimate for the event as well as the both hashes which can be used
// to identify the event for other things that need a hash.
-func (s *CountMinSketch) ConservativeAdd(event string, count float64) (float64, uint32, uint32) {
- s.HyperLogLog.Insert(unsafeGetBytes(event))
+func (s *CountMinSketch) ConservativeAdd(event []byte, count float64) (float64, uint32, uint32) {
+ s.HyperLogLog.Insert(event)
min := float64(math.MaxUint64)
@@ -94,12 +94,12 @@ func (s *CountMinSketch) ConservativeAdd(event string, count float64) (float64,
return min, h1, h2
}
-func (s *CountMinSketch) ConservativeIncrement(event string) (float64, uint32, uint32) {
+func (s *CountMinSketch) ConservativeIncrement(event []byte) (float64, uint32, uint32) {
return s.ConservativeAdd(event, float64(1))
}
// Count returns the approximate min count for the given input.
-func (s *CountMinSketch) Count(event string) float64 {
+func (s *CountMinSketch) Count(event []byte) float64 {
min := float64(math.MaxUint64)
h1, h2 := hashn(event)
diff --git a/pkg/logql/sketch/cms_test.go b/pkg/logql/sketch/cms_test.go
index 881141dfb9acb..fe439da10da01 100644
--- a/pkg/logql/sketch/cms_test.go
+++ b/pkg/logql/sketch/cms_test.go
@@ -43,7 +43,7 @@ func TestCMS(_ *testing.T) {
for _, e := range events {
for i := 0; i < e.count; i++ {
- cms.ConservativeIncrement(e.name)
+ cms.ConservativeIncrement(unsafeGetBytes(e.name))
}
}
}
diff --git a/pkg/logql/sketch/hash.go b/pkg/logql/sketch/hash.go
index 3bfb294d94532..87acfa54a3fef 100644
--- a/pkg/logql/sketch/hash.go
+++ b/pkg/logql/sketch/hash.go
@@ -24,7 +24,7 @@ import "hash/fnv"
// SOFTWARE.
//
// <http://www.opensource.org/licenses/mit-license.php>
-func hashn(s string) (h1, h2 uint32) {
+func hashn(s []byte) (h1, h2 uint32) {
// This construction comes from
// http://www.eecs.harvard.edu/~michaelm/postscripts/tr-02-05.pdf
// "Building a Better Bloom Filter", by Kirsch and Mitzenmacher. Their
@@ -34,7 +34,7 @@ func hashn(s string) (h1, h2 uint32) {
// Empirically, though, this seems to work "just fine".
fnv1a := fnv.New32a()
- fnv1a.Write([]byte(s))
+ fnv1a.Write(s)
h1 = fnv1a.Sum32()
// inlined jenkins one-at-a-time hash
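The comment in `hashn` cites Kirsch and Mitzenmacher; the idea is that two base hashes can stand in for one independent hash per sketch row via g_i(x) = h1(x) + i*h2(x) mod width. A sketch of that derivation (the helper name is hypothetical, though `getPos` in cms.go presumably computes the same thing per row):

```go
package sketch

// rowPositions illustrates the Kirsch-Mitzenmacher construction referenced
// in hashn's comment: counter positions for every sketch row are derived
// from just two base hashes, g_i(x) = h1(x) + i*h2(x) mod width.
func rowPositions(h1, h2, depth, width uint32) []uint32 {
	pos := make([]uint32, depth)
	for i := uint32(0); i < depth; i++ {
		pos[i] = (h1 + i*h2) % width
	}
	return pos
}
```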
diff --git a/pkg/logql/sketch/topk.go b/pkg/logql/sketch/topk.go
index 86b01e4c56638..c1c3ae9bdf895 100644
--- a/pkg/logql/sketch/topk.go
+++ b/pkg/logql/sketch/topk.go
@@ -196,8 +196,8 @@ func (t *Topk) heapMinReplace(event string, estimate float64, removed string) {
// updates the BF to ensure that the removed event won't be mistakenly thought
// to be in the heap, and updates the BF to ensure that we would get a truthy result for the added event
func (t *Topk) updateBF(removed, added string) {
- r1, r2 := hashn(removed)
- a1, a2 := hashn(added)
+ r1, r2 := hashn(unsafeGetBytes(removed))
+ a1, a2 := hashn(unsafeGetBytes(added))
var pos uint32
for i := range t.bf {
// removed event
@@ -230,7 +230,7 @@ func unsafeGetBytes(s string) []byte {
// for each node in the heap and rebalance the heap, and then if the event we're observing has an estimate that is still
// greater than the minimum heap element count, we should put this event into the heap and remove the other one.
func (t *Topk) Observe(event string) {
- estimate, h1, h2 := t.sketch.ConservativeIncrement(event)
+ estimate, h1, h2 := t.sketch.ConservativeIncrement(unsafeGetBytes(event))
t.hll.Insert(unsafeGetBytes(event))
if t.InTopk(h1, h2) {
@@ -246,12 +246,12 @@ func (t *Topk) Observe(event string) {
var h1, h2 uint32
var pos uint32
for i := range *t.heap {
- (*t.heap)[i].count = t.sketch.Count((*t.heap)[i].event)
+ (*t.heap)[i].count = t.sketch.Count(unsafeGetBytes((*t.heap)[i].event))
if i <= len(*t.heap)/2 {
heap.Fix(t.heap, i)
}
// ensure all the bf buckets are truthy for the event
- h1, h2 = hashn((*t.heap)[i].event)
+ h1, h2 = hashn(unsafeGetBytes((*t.heap)[i].event))
for j := range t.bf {
pos = t.sketch.getPos(h1, h2, uint32(j))
t.bf[j][pos] = true
@@ -304,11 +304,11 @@ func (t *Topk) Merge(from *Topk) error {
var all TopKResult
for _, e := range *t.heap {
- all = append(all, element{Event: e.event, Count: t.sketch.Count(e.event)})
+ all = append(all, element{Event: e.event, Count: t.sketch.Count(unsafeGetBytes(e.event))})
}
for _, e := range *from.heap {
- all = append(all, element{Event: e.event, Count: t.sketch.Count(e.event)})
+ all = append(all, element{Event: e.event, Count: t.sketch.Count(unsafeGetBytes(e.event))})
}
all = removeDuplicates(all)
@@ -317,7 +317,7 @@ func (t *Topk) Merge(from *Topk) error {
var h1, h2 uint32
// TODO: merging should also potentially replace its bloomfilter? or 0 everything in the bloomfilter
for _, e := range all[:t.max] {
- h1, h2 = hashn(e.Event)
+ h1, h2 = hashn(unsafeGetBytes(e.Event))
t.heapPush(temp, e.Event, float64(e.Count), h1, h2)
}
t.heap = temp
@@ -347,7 +347,7 @@ func (t *Topk) Topk() TopKResult {
for _, e := range *t.heap {
res = append(res, element{
Event: e.event,
- Count: t.sketch.Count(e.event),
+ Count: t.sketch.Count(unsafeGetBytes(e.event)),
})
}
sort.Sort(res)
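With the sketch APIs now taking `[]byte`, the string-keyed call sites lean on `unsafeGetBytes` to avoid a copy at the boundary. A plausible Go 1.20+ body is below; the repo's actual implementation may differ, but the contract is the same.

```go
package sketch

import "unsafe"

// unsafeGetBytes, one plausible Go 1.20+ implementation: a zero-copy view
// of the string's bytes. The result aliases the string's storage, so it
// must never be mutated and must not outlive uses of the source string.
func unsafeGetBytes(s string) []byte {
	if len(s) == 0 {
		return nil
	}
	return unsafe.Slice(unsafe.StringData(s), len(s))
}
```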
|
perf
|
Improve `approx_topk` performance by reducing allocations. (#15450)
|
963779064ba66e3e864de7263ad8aaef50c39a40
|
2024-09-19 19:30:18
|
Christian Haudum
|
chore: Move compression utilities into separate package (#14167)
| false
|
diff --git a/pkg/bloombuild/builder/builder.go b/pkg/bloombuild/builder/builder.go
index 6cc2ecfa32f61..24710944f8e10 100644
--- a/pkg/bloombuild/builder/builder.go
+++ b/pkg/bloombuild/builder/builder.go
@@ -21,7 +21,7 @@ import (
"github.com/grafana/loki/v3/pkg/bloombuild/common"
"github.com/grafana/loki/v3/pkg/bloombuild/protos"
- "github.com/grafana/loki/v3/pkg/chunkenc"
+ "github.com/grafana/loki/v3/pkg/compression"
iter "github.com/grafana/loki/v3/pkg/iter/v2"
"github.com/grafana/loki/v3/pkg/storage"
v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1"
@@ -333,7 +333,7 @@ func (b *Builder) processTask(
return nil, fmt.Errorf("failed to get client: %w", err)
}
- blockEnc, err := chunkenc.ParseEncoding(b.limits.BloomBlockEncoding(task.Tenant))
+ blockEnc, err := compression.ParseEncoding(b.limits.BloomBlockEncoding(task.Tenant))
if err != nil {
return nil, fmt.Errorf("failed to parse block encoding: %w", err)
}
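For context, `compression.ParseEncoding` is a drop-in for the `chunkenc` version removed later in this diff: it matches names such as "gzip" or "snappy" case-insensitively and errors otherwise. A tiny usage sketch, with an illustrative wrapper:

```go
package example

import (
	"fmt"

	"github.com/grafana/loki/v3/pkg/compression"
)

// mustEncoding is an illustrative wrapper, not part of Loki; ParseEncoding
// itself resolves a user-facing name against the supported-encoding list.
func mustEncoding(name string) compression.Encoding {
	enc, err := compression.ParseEncoding(name)
	if err != nil {
		panic(fmt.Errorf("bad encoding %q: %w", name, err))
	}
	return enc
}
```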
diff --git a/pkg/bloombuild/builder/spec_test.go b/pkg/bloombuild/builder/spec_test.go
index 8ab6c2bba4f46..62e3f70bf22f9 100644
--- a/pkg/bloombuild/builder/spec_test.go
+++ b/pkg/bloombuild/builder/spec_test.go
@@ -10,7 +10,7 @@ import (
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
- "github.com/grafana/loki/v3/pkg/chunkenc"
+ "github.com/grafana/loki/v3/pkg/compression"
v2 "github.com/grafana/loki/v3/pkg/iter/v2"
v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1"
"github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper"
@@ -115,7 +115,7 @@ func dummyBloomGen(t *testing.T, opts v1.BlockOptions, store v2.Iterator[*v1.Ser
func TestSimpleBloomGenerator(t *testing.T) {
const maxBlockSize = 100 << 20 // 100MB
- for _, enc := range []chunkenc.Encoding{chunkenc.EncNone, chunkenc.EncGZIP, chunkenc.EncSnappy} {
+ for _, enc := range []compression.Encoding{compression.EncNone, compression.EncGZIP, compression.EncSnappy} {
for _, tc := range []struct {
desc string
fromSchema, toSchema v1.BlockOptions
diff --git a/pkg/bloombuild/common/tsdb.go b/pkg/bloombuild/common/tsdb.go
index a2e22529523b2..ea31767cca0b2 100644
--- a/pkg/bloombuild/common/tsdb.go
+++ b/pkg/bloombuild/common/tsdb.go
@@ -13,7 +13,7 @@ import (
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
- "github.com/grafana/loki/v3/pkg/chunkenc"
+ "github.com/grafana/loki/v3/pkg/compression"
iter "github.com/grafana/loki/v3/pkg/iter/v2"
baseStore "github.com/grafana/loki/v3/pkg/storage"
v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1"
@@ -102,7 +102,7 @@ func (b *BloomTSDBStore) LoadTSDB(
}
defer data.Close()
- decompressorPool := chunkenc.GetReaderPool(chunkenc.EncGZIP)
+ decompressorPool := compression.GetReaderPool(compression.EncGZIP)
decompressor, err := decompressorPool.GetReader(data)
if err != nil {
return nil, errors.Wrap(err, "failed to get decompressor")
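The get/put pairing in `LoadTSDB` generalises to any reader pool. A self-contained sketch of the round trip; only the `compression` calls come from the diff, the wrapper is example scaffolding:

```go
package example

import (
	"io"

	"github.com/grafana/loki/v3/pkg/compression"
)

// gunzipAll shows the get/put pairing from LoadTSDB in isolation.
func gunzipAll(src io.Reader) ([]byte, error) {
	pool := compression.GetReaderPool(compression.EncGZIP)
	r, err := pool.GetReader(src)
	if err != nil {
		return nil, err
	}
	defer pool.PutReader(r) // hand the decompressor back for reuse
	return io.ReadAll(r)
}
```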
diff --git a/pkg/bloombuild/planner/planner_test.go b/pkg/bloombuild/planner/planner_test.go
index 32f8d5798a7f2..fbd3a7bac5305 100644
--- a/pkg/bloombuild/planner/planner_test.go
+++ b/pkg/bloombuild/planner/planner_test.go
@@ -23,7 +23,7 @@ import (
"github.com/grafana/loki/v3/pkg/bloombuild/common"
"github.com/grafana/loki/v3/pkg/bloombuild/protos"
- "github.com/grafana/loki/v3/pkg/chunkenc"
+ "github.com/grafana/loki/v3/pkg/compression"
iter "github.com/grafana/loki/v3/pkg/iter/v2"
"github.com/grafana/loki/v3/pkg/storage"
v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1"
@@ -188,7 +188,7 @@ func genBlock(ref bloomshipper.BlockRef) (bloomshipper.Block, error) {
writer := v1.NewMemoryBlockWriter(indexBuf, bloomsBuf)
reader := v1.NewByteReader(indexBuf, bloomsBuf)
- blockOpts := v1.NewBlockOptions(chunkenc.EncNone, 4, 1, 0, 0)
+ blockOpts := v1.NewBlockOptions(compression.EncNone, 4, 1, 0, 0)
builder, err := v1.NewBlockBuilder(blockOpts, writer)
if err != nil {
diff --git a/pkg/chunkenc/dumb_chunk.go b/pkg/chunkenc/dumb_chunk.go
index 33df4501927bc..e28298605118c 100644
--- a/pkg/chunkenc/dumb_chunk.go
+++ b/pkg/chunkenc/dumb_chunk.go
@@ -6,6 +6,7 @@ import (
"sort"
"time"
+ "github.com/grafana/loki/v3/pkg/compression"
"github.com/grafana/loki/v3/pkg/iter"
"github.com/grafana/loki/v3/pkg/logproto"
"github.com/grafana/loki/v3/pkg/logql/log"
@@ -69,7 +70,7 @@ func (c *dumbChunk) Utilization() float64 {
return float64(len(c.entries)) / float64(tmpNumEntries)
}
-func (c *dumbChunk) Encoding() Encoding { return EncNone }
+func (c *dumbChunk) Encoding() compression.Encoding { return compression.EncNone }
// Returns an iterator that goes from _most_ recent to _least_ recent (ie,
// backwards).
diff --git a/pkg/chunkenc/interface.go b/pkg/chunkenc/interface.go
index f0b17c7750f3d..057fc8b985ad3 100644
--- a/pkg/chunkenc/interface.go
+++ b/pkg/chunkenc/interface.go
@@ -5,9 +5,9 @@ import (
"errors"
"fmt"
"io"
- "strings"
"time"
+ "github.com/grafana/loki/v3/pkg/compression"
"github.com/grafana/loki/v3/pkg/iter"
"github.com/grafana/loki/v3/pkg/logproto"
"github.com/grafana/loki/v3/pkg/logql/log"
@@ -48,86 +48,6 @@ func IsOutOfOrderErr(err error) bool {
return err == ErrOutOfOrder || IsErrTooFarBehind(err)
}
-// Encoding is the identifier for a chunk encoding.
-type Encoding byte
-
-// The different available encodings.
-// Make sure to preserve the order, as these numeric values are written to the chunks!
-const (
- EncNone Encoding = iota
- EncGZIP
- EncDumb
- EncLZ4_64k
- EncSnappy
- EncLZ4_256k
- EncLZ4_1M
- EncLZ4_4M
- EncFlate
- EncZstd
-)
-
-var supportedEncoding = []Encoding{
- EncNone,
- EncGZIP,
- EncLZ4_64k,
- EncSnappy,
- EncLZ4_256k,
- EncLZ4_1M,
- EncLZ4_4M,
- EncFlate,
- EncZstd,
-}
-
-func (e Encoding) String() string {
- switch e {
- case EncGZIP:
- return "gzip"
- case EncNone:
- return "none"
- case EncDumb:
- return "dumb"
- case EncLZ4_64k:
- return "lz4-64k"
- case EncLZ4_256k:
- return "lz4-256k"
- case EncLZ4_1M:
- return "lz4-1M"
- case EncLZ4_4M:
- return "lz4"
- case EncSnappy:
- return "snappy"
- case EncFlate:
- return "flate"
- case EncZstd:
- return "zstd"
- default:
- return "unknown"
- }
-}
-
-// ParseEncoding parses an chunk encoding (compression algorithm) by its name.
-func ParseEncoding(enc string) (Encoding, error) {
- for _, e := range supportedEncoding {
- if strings.EqualFold(e.String(), enc) {
- return e, nil
- }
- }
- return 0, fmt.Errorf("invalid encoding: %s, supported: %s", enc, SupportedEncoding())
-
-}
-
-// SupportedEncoding returns the list of supported Encoding.
-func SupportedEncoding() string {
- var sb strings.Builder
- for i := range supportedEncoding {
- sb.WriteString(supportedEncoding[i].String())
- if i != len(supportedEncoding)-1 {
- sb.WriteString(", ")
- }
- }
- return sb.String()
-}
-
// Chunk is the interface for the compressed logs chunk format.
type Chunk interface {
Bounds() (time.Time, time.Time)
@@ -148,7 +68,7 @@ type Chunk interface {
UncompressedSize() int
CompressedSize() int
Close() error
- Encoding() Encoding
+ Encoding() compression.Encoding
Rebound(start, end time.Time, filter filter.Func) (Chunk, error)
}
diff --git a/pkg/chunkenc/interface_test.go b/pkg/chunkenc/interface_test.go
index ed81c4d3604e4..8faed8e2c43fe 100644
--- a/pkg/chunkenc/interface_test.go
+++ b/pkg/chunkenc/interface_test.go
@@ -7,29 +7,6 @@ import (
"github.com/stretchr/testify/require"
)
-func TestParseEncoding(t *testing.T) {
- tests := []struct {
- enc string
- want Encoding
- wantErr bool
- }{
- {"gzip", EncGZIP, false},
- {"bad", 0, true},
- }
- for _, tt := range tests {
- t.Run(tt.enc, func(t *testing.T) {
- got, err := ParseEncoding(tt.enc)
- if (err != nil) != tt.wantErr {
- t.Errorf("ParseEncoding() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
- if got != tt.want {
- t.Errorf("ParseEncoding() = %v, want %v", got, tt.want)
- }
- })
- }
-}
-
func TestIsOutOfOrderErr(t *testing.T) {
now := time.Now()
diff --git a/pkg/chunkenc/memchunk.go b/pkg/chunkenc/memchunk.go
index 328e91c94deb3..03f33b8176729 100644
--- a/pkg/chunkenc/memchunk.go
+++ b/pkg/chunkenc/memchunk.go
@@ -16,6 +16,7 @@ import (
"github.com/pkg/errors"
"github.com/prometheus/prometheus/model/labels"
+ "github.com/grafana/loki/v3/pkg/compression"
"github.com/grafana/loki/v3/pkg/iter"
"github.com/grafana/loki/v3/pkg/logproto"
"github.com/grafana/loki/v3/pkg/logql/log"
@@ -131,7 +132,7 @@ type MemChunk struct {
head HeadBlock
format byte
- encoding Encoding
+ encoding compression.Encoding
headFmt HeadBlockFmt
// compressed size of chunk. Set when chunk is cut or while decoding chunk from storage.
@@ -196,7 +197,7 @@ func (hb *headBlock) Append(ts int64, line string, _ labels.Labels) (bool, error
return false, nil
}
-func (hb *headBlock) Serialise(pool WriterPool) ([]byte, error) {
+func (hb *headBlock) Serialise(pool compression.WriterPool) ([]byte, error) {
inBuf := serializeBytesBufferPool.Get().(*bytes.Buffer)
defer func() {
inBuf.Reset()
@@ -354,7 +355,7 @@ type entry struct {
}
// NewMemChunk returns a new in-mem chunk.
-func NewMemChunk(chunkFormat byte, enc Encoding, head HeadBlockFmt, blockSize, targetSize int) *MemChunk {
+func NewMemChunk(chunkFormat byte, enc compression.Encoding, head HeadBlockFmt, blockSize, targetSize int) *MemChunk {
return newMemChunkWithFormat(chunkFormat, enc, head, blockSize, targetSize)
}
@@ -369,7 +370,7 @@ func panicIfInvalidFormat(chunkFmt byte, head HeadBlockFmt) {
}
// NewMemChunk returns a new in-mem chunk.
-func newMemChunkWithFormat(format byte, enc Encoding, head HeadBlockFmt, blockSize, targetSize int) *MemChunk {
+func newMemChunkWithFormat(format byte, enc compression.Encoding, head HeadBlockFmt, blockSize, targetSize int) *MemChunk {
panicIfInvalidFormat(format, head)
symbolizer := newSymbolizer()
@@ -413,10 +414,10 @@ func newByteChunk(b []byte, blockSize, targetSize int, fromCheckpoint bool) (*Me
bc.format = version
switch version {
case ChunkFormatV1:
- bc.encoding = EncGZIP
+ bc.encoding = compression.EncGZIP
case ChunkFormatV2, ChunkFormatV3, ChunkFormatV4:
// format v2+ has a byte for block encoding.
- enc := Encoding(db.byte())
+ enc := compression.Encoding(db.byte())
if db.err() != nil {
return nil, errors.Wrap(db.err(), "verifying encoding")
}
@@ -535,7 +536,7 @@ func newByteChunk(b []byte, blockSize, targetSize int, fromCheckpoint bool) (*Me
if fromCheckpoint {
bc.symbolizer = symbolizerFromCheckpoint(lb)
} else {
- symbolizer, err := symbolizerFromEnc(lb, GetReaderPool(bc.encoding))
+ symbolizer, err := symbolizerFromEnc(lb, compression.GetReaderPool(bc.encoding))
if err != nil {
return nil, err
}
@@ -653,7 +654,7 @@ func (c *MemChunk) writeTo(w io.Writer, forCheckpoint bool) (int64, error) {
}
} else {
var err error
- n, crcHash, err = c.symbolizer.SerializeTo(w, GetWriterPool(c.encoding))
+ n, crcHash, err = c.symbolizer.SerializeTo(w, compression.GetWriterPool(c.encoding))
if err != nil {
return offset, errors.Wrap(err, "write structured metadata")
}
@@ -776,7 +777,7 @@ func MemchunkFromCheckpoint(chk, head []byte, desiredIfNotUnordered HeadBlockFmt
}
// Encoding implements Chunk.
-func (c *MemChunk) Encoding() Encoding {
+func (c *MemChunk) Encoding() compression.Encoding {
return c.encoding
}
@@ -941,7 +942,7 @@ func (c *MemChunk) cut() error {
return nil
}
- b, err := c.head.Serialise(GetWriterPool(c.encoding))
+ b, err := c.head.Serialise(compression.GetWriterPool(c.encoding))
if err != nil {
return err
}
@@ -1172,7 +1173,7 @@ func (c *MemChunk) Rebound(start, end time.Time, filter filter.Func) (Chunk, err
// then allows us to bind a decoding context to a block when requested, but otherwise helps reduce the
// chances of chunk<>block encoding drift in the codebase as the latter is parameterized by the former.
type encBlock struct {
- enc Encoding
+ enc compression.Encoding
format byte
symbolizer *symbolizer
block
@@ -1182,14 +1183,14 @@ func (b encBlock) Iterator(ctx context.Context, pipeline log.StreamPipeline) ite
if len(b.b) == 0 {
return iter.NoopEntryIterator
}
- return newEntryIterator(ctx, GetReaderPool(b.enc), b.b, pipeline, b.format, b.symbolizer)
+ return newEntryIterator(ctx, compression.GetReaderPool(b.enc), b.b, pipeline, b.format, b.symbolizer)
}
func (b encBlock) SampleIterator(ctx context.Context, extractor log.StreamSampleExtractor) iter.SampleIterator {
if len(b.b) == 0 {
return iter.NoopSampleIterator
}
- return newSampleIterator(ctx, GetReaderPool(b.enc), b.b, b.format, extractor, b.symbolizer)
+ return newSampleIterator(ctx, compression.GetReaderPool(b.enc), b.b, b.format, extractor, b.symbolizer)
}
func (b block) Offset() int {
@@ -1339,7 +1340,7 @@ type bufferedIterator struct {
stats *stats.Context
reader io.Reader
- pool ReaderPool
+ pool compression.ReaderPool
symbolizer *symbolizer
err error
@@ -1358,7 +1359,7 @@ type bufferedIterator struct {
closed bool
}
-func newBufferedIterator(ctx context.Context, pool ReaderPool, b []byte, format byte, symbolizer *symbolizer) *bufferedIterator {
+func newBufferedIterator(ctx context.Context, pool compression.ReaderPool, b []byte, format byte, symbolizer *symbolizer) *bufferedIterator {
stats := stats.FromContext(ctx)
stats.AddCompressedBytes(int64(len(b)))
return &bufferedIterator{
@@ -1619,7 +1620,7 @@ func (si *bufferedIterator) close() {
si.origBytes = nil
}
-func newEntryIterator(ctx context.Context, pool ReaderPool, b []byte, pipeline log.StreamPipeline, format byte, symbolizer *symbolizer) iter.EntryIterator {
+func newEntryIterator(ctx context.Context, pool compression.ReaderPool, b []byte, pipeline log.StreamPipeline, format byte, symbolizer *symbolizer) iter.EntryIterator {
return &entryBufferedIterator{
bufferedIterator: newBufferedIterator(ctx, pool, b, format, symbolizer),
pipeline: pipeline,
@@ -1671,7 +1672,7 @@ func (e *entryBufferedIterator) Close() error {
return e.bufferedIterator.Close()
}
-func newSampleIterator(ctx context.Context, pool ReaderPool, b []byte, format byte, extractor log.StreamSampleExtractor, symbolizer *symbolizer) iter.SampleIterator {
+func newSampleIterator(ctx context.Context, pool compression.ReaderPool, b []byte, format byte, extractor log.StreamSampleExtractor, symbolizer *symbolizer) iter.SampleIterator {
return &sampleBufferedIterator{
bufferedIterator: newBufferedIterator(ctx, pool, b, format, symbolizer),
extractor: extractor,
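A compact restatement of the version negotiation in `newByteChunk` shows why the encoding byte only exists from format v2 onwards. Note the numeric values of the format constants are assumed to be 1 through 4 here; the diff shows only the named constants:

```go
package example

import (
	"fmt"

	"github.com/grafana/loki/v3/pkg/compression"
)

// chunkEncoding mirrors the switch in newByteChunk: v1 chunks predate the
// encoding byte and are always gzip, while v2+ store the encoding as a
// single byte after the version.
func chunkEncoding(version, encByte byte) (compression.Encoding, error) {
	switch version {
	case 1: // assumed value of ChunkFormatV1
		return compression.EncGZIP, nil
	case 2, 3, 4: // assumed values of ChunkFormatV2..V4
		return compression.Encoding(encByte), nil
	default:
		return 0, fmt.Errorf("invalid chunk format version %d", version)
	}
}
```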
diff --git a/pkg/chunkenc/memchunk_test.go b/pkg/chunkenc/memchunk_test.go
index 85cccd743cfbb..987a5d88b286e 100644
--- a/pkg/chunkenc/memchunk_test.go
+++ b/pkg/chunkenc/memchunk_test.go
@@ -22,6 +22,7 @@ import (
"github.com/grafana/loki/pkg/push"
"github.com/grafana/loki/v3/pkg/chunkenc/testdata"
+ "github.com/grafana/loki/v3/pkg/compression"
"github.com/grafana/loki/v3/pkg/iter"
"github.com/grafana/loki/v3/pkg/logproto"
"github.com/grafana/loki/v3/pkg/logql/log"
@@ -31,16 +32,16 @@ import (
"github.com/grafana/loki/v3/pkg/util/filter"
)
-var testEncoding = []Encoding{
- EncNone,
- EncGZIP,
- EncLZ4_64k,
- EncLZ4_256k,
- EncLZ4_1M,
- EncLZ4_4M,
- EncSnappy,
- EncFlate,
- EncZstd,
+var testEncodings = []compression.Encoding{
+ compression.EncNone,
+ compression.EncGZIP,
+ compression.EncLZ4_64k,
+ compression.EncLZ4_256k,
+ compression.EncLZ4_1M,
+ compression.EncLZ4_4M,
+ compression.EncSnappy,
+ compression.EncFlate,
+ compression.EncZstd,
}
var (
@@ -84,7 +85,7 @@ const (
)
func TestBlocksInclusive(t *testing.T) {
- for _, enc := range testEncoding {
+ for _, enc := range testEncodings {
enc := enc
for _, format := range allPossibleFormats {
chunkfmt, headfmt := format.chunkFormat, format.headBlockFmt
@@ -103,7 +104,7 @@ func TestBlocksInclusive(t *testing.T) {
}
func TestBlock(t *testing.T) {
- for _, enc := range testEncoding {
+ for _, enc := range testEncodings {
enc := enc
for _, format := range allPossibleFormats {
chunkFormat, headBlockFmt := format.chunkFormat, format.headBlockFmt
@@ -258,7 +259,7 @@ func TestBlock(t *testing.T) {
}
func TestCorruptChunk(t *testing.T) {
- for _, enc := range testEncoding {
+ for _, enc := range testEncodings {
enc := enc
for _, format := range allPossibleFormats {
chunkfmt, headfmt := format.chunkFormat, format.headBlockFmt
@@ -298,7 +299,7 @@ func TestCorruptChunk(t *testing.T) {
func TestReadFormatV1(t *testing.T) {
t.Parallel()
- c := NewMemChunk(ChunkFormatV3, EncGZIP, DefaultTestHeadBlockFmt, testBlockSize, testTargetSize)
+ c := NewMemChunk(ChunkFormatV3, compression.EncGZIP, DefaultTestHeadBlockFmt, testBlockSize, testTargetSize)
fillChunk(c)
// overrides to v1 for testing that specific version.
c.format = ChunkFormatV1
@@ -335,7 +336,7 @@ func TestReadFormatV1(t *testing.T) {
// 2) []byte loaded chunks <-> []byte loaded chunks
func TestRoundtripV2(t *testing.T) {
for _, testData := range allPossibleFormats {
- for _, enc := range testEncoding {
+ for _, enc := range testEncodings {
enc := enc
t.Run(testNameWithFormats(enc, testData.chunkFormat, testData.headBlockFmt), func(t *testing.T) {
t.Parallel()
@@ -390,12 +391,12 @@ func TestRoundtripV2(t *testing.T) {
}
}
-func testNameWithFormats(enc Encoding, chunkFormat byte, headBlockFmt HeadBlockFmt) string {
+func testNameWithFormats(enc compression.Encoding, chunkFormat byte, headBlockFmt HeadBlockFmt) string {
return fmt.Sprintf("encoding:%v chunkFormat:%v headBlockFmt:%v", enc, chunkFormat, headBlockFmt)
}
func TestRoundtripV3(t *testing.T) {
- for _, enc := range testEncoding {
+ for _, enc := range testEncodings {
enc := enc
for _, format := range allPossibleFormats {
chunkfmt, headfmt := format.chunkFormat, format.headBlockFmt
@@ -420,7 +421,7 @@ func TestRoundtripV3(t *testing.T) {
func TestSerialization(t *testing.T) {
for _, testData := range allPossibleFormats {
- for _, enc := range testEncoding {
+ for _, enc := range testEncodings {
enc := enc
// run tests with and without structured metadata since it is optional
for _, appendWithStructuredMetadata := range []bool{false, true} {
@@ -509,7 +510,7 @@ func TestSerialization(t *testing.T) {
func TestChunkFilling(t *testing.T) {
for _, testData := range allPossibleFormats {
- for _, enc := range testEncoding {
+ for _, enc := range testEncodings {
enc := enc
t.Run(testNameWithFormats(enc, testData.chunkFormat, testData.headBlockFmt), func(t *testing.T) {
t.Parallel()
@@ -557,7 +558,7 @@ func TestChunkFilling(t *testing.T) {
func TestGZIPChunkTargetSize(t *testing.T) {
t.Parallel()
- chk := NewMemChunk(ChunkFormatV3, EncGZIP, DefaultTestHeadBlockFmt, testBlockSize, testTargetSize)
+ chk := NewMemChunk(ChunkFormatV3, compression.EncGZIP, DefaultTestHeadBlockFmt, testBlockSize, testTargetSize)
lineSize := 512
entry := &logproto.Entry{
@@ -680,7 +681,7 @@ func TestMemChunk_AppendOutOfOrder(t *testing.T) {
t.Run(testName, func(t *testing.T) {
t.Parallel()
- tester(t, NewMemChunk(ChunkFormatV3, EncGZIP, f, testBlockSize, testTargetSize))
+ tester(t, NewMemChunk(ChunkFormatV3, compression.EncGZIP, f, testBlockSize, testTargetSize))
})
}
}
@@ -696,7 +697,7 @@ func TestChunkSize(t *testing.T) {
var result []res
for _, bs := range testBlockSizes {
for _, f := range allPossibleFormats {
- for _, enc := range testEncoding {
+ for _, enc := range testEncodings {
name := fmt.Sprintf("%s_%s", enc.String(), humanize.Bytes(uint64(bs)))
t.Run(name, func(t *testing.T) {
c := newMemChunkWithFormat(f.chunkFormat, enc, f.headBlockFmt, bs, testTargetSize)
@@ -725,7 +726,7 @@ func TestChunkSize(t *testing.T) {
}
func TestChunkStats(t *testing.T) {
- c := NewMemChunk(ChunkFormatV4, EncSnappy, DefaultTestHeadBlockFmt, testBlockSize, 0)
+ c := NewMemChunk(ChunkFormatV4, compression.EncSnappy, DefaultTestHeadBlockFmt, testBlockSize, 0)
first := time.Now()
entry := &logproto.Entry{
Timestamp: first,
@@ -797,7 +798,7 @@ func TestChunkStats(t *testing.T) {
func TestIteratorClose(t *testing.T) {
for _, f := range allPossibleFormats {
- for _, enc := range testEncoding {
+ for _, enc := range testEncodings {
t.Run(enc.String(), func(t *testing.T) {
for _, test := range []func(iter iter.EntryIterator, t *testing.T){
func(iter iter.EntryIterator, t *testing.T) {
@@ -846,7 +847,7 @@ func BenchmarkWrite(b *testing.B) {
i := int64(0)
for _, f := range HeadBlockFmts {
- for _, enc := range testEncoding {
+ for _, enc := range testEncodings {
for _, withStructuredMetadata := range []bool{false, true} {
name := fmt.Sprintf("%v-%v", f, enc)
if withStructuredMetadata {
@@ -896,7 +897,7 @@ func (nomatchPipeline) ReferencedStructuredMetadata() bool {
func BenchmarkRead(b *testing.B) {
for _, bs := range testBlockSizes {
- for _, enc := range testEncoding {
+ for _, enc := range testEncodings {
name := fmt.Sprintf("%s_%s", enc.String(), humanize.Bytes(uint64(bs)))
b.Run(name, func(b *testing.B) {
chunks, size := generateData(enc, 5, bs, testTargetSize)
@@ -923,7 +924,7 @@ func BenchmarkRead(b *testing.B) {
}
for _, bs := range testBlockSizes {
- for _, enc := range testEncoding {
+ for _, enc := range testEncodings {
name := fmt.Sprintf("sample_%s_%s", enc.String(), humanize.Bytes(uint64(bs)))
b.Run(name, func(b *testing.B) {
chunks, size := generateData(enc, 5, bs, testTargetSize)
@@ -967,7 +968,7 @@ func BenchmarkBackwardIterator(b *testing.B) {
for _, bs := range testBlockSizes {
b.Run(humanize.Bytes(uint64(bs)), func(b *testing.B) {
b.ReportAllocs()
- c := NewMemChunk(ChunkFormatV4, EncSnappy, DefaultTestHeadBlockFmt, bs, testTargetSize)
+ c := NewMemChunk(ChunkFormatV4, compression.EncSnappy, DefaultTestHeadBlockFmt, bs, testTargetSize)
_ = fillChunk(c)
b.ResetTimer()
for n := 0; n < b.N; n++ {
@@ -988,7 +989,7 @@ func BenchmarkBackwardIterator(b *testing.B) {
}
func TestGenerateDataSize(t *testing.T) {
- for _, enc := range testEncoding {
+ for _, enc := range testEncodings {
t.Run(enc.String(), func(t *testing.T) {
chunks, size := generateData(enc, 50, testBlockSize, testTargetSize)
@@ -1081,7 +1082,7 @@ func BenchmarkHeadBlockSampleIterator(b *testing.B) {
func TestMemChunk_IteratorBounds(t *testing.T) {
createChunk := func() *MemChunk {
t.Helper()
- c := NewMemChunk(ChunkFormatV3, EncNone, DefaultTestHeadBlockFmt, 1e6, 1e6)
+ c := NewMemChunk(ChunkFormatV3, compression.EncNone, DefaultTestHeadBlockFmt, 1e6, 1e6)
if _, err := c.Append(&logproto.Entry{
Timestamp: time.Unix(0, 1),
@@ -1141,7 +1142,7 @@ func TestMemChunk_IteratorBounds(t *testing.T) {
}
func TestMemchunkLongLine(t *testing.T) {
- for _, enc := range testEncoding {
+ for _, enc := range testEncodings {
enc := enc
t.Run(enc.String(), func(t *testing.T) {
t.Parallel()
@@ -1167,9 +1168,9 @@ func TestMemchunkLongLine(t *testing.T) {
func TestBytesWith(t *testing.T) {
t.Parallel()
- exp, err := NewMemChunk(ChunkFormatV3, EncNone, DefaultTestHeadBlockFmt, testBlockSize, testTargetSize).BytesWith(nil)
+ exp, err := NewMemChunk(ChunkFormatV3, compression.EncNone, DefaultTestHeadBlockFmt, testBlockSize, testTargetSize).BytesWith(nil)
require.Nil(t, err)
- out, err := NewMemChunk(ChunkFormatV3, EncNone, DefaultTestHeadBlockFmt, testBlockSize, testTargetSize).BytesWith([]byte{1, 2, 3})
+ out, err := NewMemChunk(ChunkFormatV3, compression.EncNone, DefaultTestHeadBlockFmt, testBlockSize, testTargetSize).BytesWith([]byte{1, 2, 3})
require.Nil(t, err)
require.Equal(t, exp, out)
@@ -1180,8 +1181,8 @@ func TestCheckpointEncoding(t *testing.T) {
blockSize, targetSize := 256*1024, 1500*1024
for _, f := range allPossibleFormats {
- t.Run(testNameWithFormats(EncSnappy, f.chunkFormat, f.headBlockFmt), func(t *testing.T) {
- c := newMemChunkWithFormat(f.chunkFormat, EncSnappy, f.headBlockFmt, blockSize, targetSize)
+ t.Run(testNameWithFormats(compression.EncSnappy, f.chunkFormat, f.headBlockFmt), func(t *testing.T) {
+ c := newMemChunkWithFormat(f.chunkFormat, compression.EncSnappy, f.headBlockFmt, blockSize, targetSize)
// add a few entries
for i := 0; i < 5; i++ {
@@ -1266,7 +1267,7 @@ var (
func BenchmarkBufferedIteratorLabels(b *testing.B) {
for _, f := range HeadBlockFmts {
b.Run(f.String(), func(b *testing.B) {
- c := NewMemChunk(ChunkFormatV3, EncSnappy, f, testBlockSize, testTargetSize)
+ c := NewMemChunk(ChunkFormatV3, compression.EncSnappy, f, testBlockSize, testTargetSize)
_ = fillChunk(c)
labelsSet := []labels.Labels{
@@ -1366,8 +1367,8 @@ func BenchmarkBufferedIteratorLabels(b *testing.B) {
func Test_HeadIteratorReverse(t *testing.T) {
for _, testData := range allPossibleFormats {
- t.Run(testNameWithFormats(EncSnappy, testData.chunkFormat, testData.headBlockFmt), func(t *testing.T) {
- c := newMemChunkWithFormat(testData.chunkFormat, EncSnappy, testData.headBlockFmt, testBlockSize, testTargetSize)
+ t.Run(testNameWithFormats(compression.EncSnappy, testData.chunkFormat, testData.headBlockFmt), func(t *testing.T) {
+ c := newMemChunkWithFormat(testData.chunkFormat, compression.EncSnappy, testData.headBlockFmt, testBlockSize, testTargetSize)
genEntry := func(i int64) *logproto.Entry {
return &logproto.Entry{
Timestamp: time.Unix(0, i),
@@ -1482,7 +1483,7 @@ func TestMemChunk_Rebound(t *testing.T) {
}
func buildTestMemChunk(t *testing.T, from, through time.Time) *MemChunk {
- chk := NewMemChunk(ChunkFormatV3, EncGZIP, DefaultTestHeadBlockFmt, defaultBlockSize, 0)
+ chk := NewMemChunk(ChunkFormatV3, compression.EncGZIP, DefaultTestHeadBlockFmt, defaultBlockSize, 0)
for ; from.Before(through); from = from.Add(time.Second) {
_, err := chk.Append(&logproto.Entry{
Line: from.String(),
@@ -1603,7 +1604,7 @@ func TestMemChunk_ReboundAndFilter_with_filter(t *testing.T) {
}
func buildFilterableTestMemChunk(t *testing.T, from, through time.Time, matchingFrom, matchingTo *time.Time, withStructuredMetadata bool) *MemChunk {
- chk := NewMemChunk(ChunkFormatV4, EncGZIP, DefaultTestHeadBlockFmt, defaultBlockSize, 0)
+ chk := NewMemChunk(ChunkFormatV4, compression.EncGZIP, DefaultTestHeadBlockFmt, defaultBlockSize, 0)
t.Logf("from : %v", from.String())
t.Logf("through: %v", through.String())
var structuredMetadata push.LabelsAdapter
@@ -1752,7 +1753,7 @@ func TestMemChunk_SpaceFor(t *testing.T) {
t.Run(tc.desc, func(t *testing.T) {
for _, format := range allPossibleFormats {
t.Run(fmt.Sprintf("chunk_v%d_head_%s", format.chunkFormat, format.headBlockFmt), func(t *testing.T) {
- chk := newMemChunkWithFormat(format.chunkFormat, EncNone, format.headBlockFmt, 1024, tc.targetSize)
+ chk := newMemChunkWithFormat(format.chunkFormat, compression.EncNone, format.headBlockFmt, 1024, tc.targetSize)
chk.blocks = make([]block, tc.nBlocks)
chk.cutBlockSize = tc.cutBlockSize
@@ -1775,7 +1776,7 @@ func TestMemChunk_SpaceFor(t *testing.T) {
}
func TestMemChunk_IteratorWithStructuredMetadata(t *testing.T) {
- for _, enc := range testEncoding {
+ for _, enc := range testEncodings {
enc := enc
t.Run(enc.String(), func(t *testing.T) {
streamLabels := labels.Labels{
@@ -2054,7 +2055,7 @@ func TestDecodeChunkIncorrectBlockOffset(t *testing.T) {
t.Run(fmt.Sprintf("chunkFormat:%v headBlockFmt:%v", format.chunkFormat, format.headBlockFmt), func(t *testing.T) {
for incorrectOffsetBlockNum := 0; incorrectOffsetBlockNum < 3; incorrectOffsetBlockNum++ {
t.Run(fmt.Sprintf("inorrect offset block: %d", incorrectOffsetBlockNum), func(t *testing.T) {
- chk := NewMemChunk(format.chunkFormat, EncNone, format.headBlockFmt, blockSize, testTargetSize)
+ chk := NewMemChunk(format.chunkFormat, compression.EncNone, format.headBlockFmt, blockSize, testTargetSize)
ts := time.Now().Unix()
for i := 0; i < 3; i++ {
dup, err := chk.Append(&logproto.Entry{
diff --git a/pkg/chunkenc/pool.go b/pkg/chunkenc/pool.go
index 486bef44b3da8..8c640149a78fa 100644
--- a/pkg/chunkenc/pool.go
+++ b/pkg/chunkenc/pool.go
@@ -1,49 +1,15 @@
package chunkenc
import (
- "bufio"
"bytes"
- "io"
- "runtime"
"sync"
- "github.com/golang/snappy"
- "github.com/klauspost/compress/flate"
- "github.com/klauspost/compress/gzip"
- "github.com/klauspost/compress/zstd"
- "github.com/pierrec/lz4/v4"
"github.com/prometheus/prometheus/util/pool"
"github.com/grafana/loki/v3/pkg/logproto"
)
-// WriterPool is a pool of io.Writer
-// This is used by every chunk to avoid unnecessary allocations.
-type WriterPool interface {
- GetWriter(io.Writer) io.WriteCloser
- PutWriter(io.WriteCloser)
-}
-
-// ReaderPool similar to WriterPool but for reading chunks.
-type ReaderPool interface {
- GetReader(io.Reader) (io.Reader, error)
- PutReader(io.Reader)
-}
-
var (
- // Gzip is the gnu zip compression pool
- Gzip = GzipPool{level: gzip.DefaultCompression}
- Lz4_64k = LZ4Pool{bufferSize: 1 << 16} // Lz4_64k is the l4z compression pool, with 64k buffer size
- Lz4_256k = LZ4Pool{bufferSize: 1 << 18} // Lz4_256k uses 256k buffer
- Lz4_1M = LZ4Pool{bufferSize: 1 << 20} // Lz4_1M uses 1M buffer
- Lz4_4M = LZ4Pool{bufferSize: 1 << 22} // Lz4_4M uses 4M buffer
- Flate = FlatePool{}
- Zstd = ZstdPool{}
- // Snappy is the snappy compression pool
- Snappy SnappyPool
- // Noop is the no compression pool
- Noop NoopPool
-
// BytesBufferPool is a bytes buffer used for lines decompressed.
// Buckets [0.5KB,1KB,2KB,4KB,8KB]
BytesBufferPool = pool.New(1<<9, 1<<13, 2, func(size int) interface{} { return make([]byte, 0, size) })
@@ -81,315 +47,3 @@ var (
},
}
)
-
-func GetWriterPool(enc Encoding) WriterPool {
- return GetReaderPool(enc).(WriterPool)
-}
-
-func GetReaderPool(enc Encoding) ReaderPool {
- switch enc {
- case EncGZIP:
- return &Gzip
- case EncLZ4_64k:
- return &Lz4_64k
- case EncLZ4_256k:
- return &Lz4_256k
- case EncLZ4_1M:
- return &Lz4_1M
- case EncLZ4_4M:
- return &Lz4_4M
- case EncSnappy:
- return &Snappy
- case EncNone:
- return &Noop
- case EncFlate:
- return &Flate
- case EncZstd:
- return &Zstd
- default:
- panic("unknown encoding")
- }
-}
-
-// GzipPool is a gun zip compression pool
-type GzipPool struct {
- readers sync.Pool
- writers sync.Pool
- level int
-}
-
-// Gzip needs buffering to read efficiently.
-// We need to be able to see the underlying gzip.Reader to Reset it.
-type gzipBufferedReader struct {
- *bufio.Reader
- gzipReader *gzip.Reader
-}
-
-// GetReader gets or creates a new CompressionReader and reset it to read from src
-func (pool *GzipPool) GetReader(src io.Reader) (io.Reader, error) {
- if r := pool.readers.Get(); r != nil {
- reader := r.(*gzipBufferedReader)
- err := reader.gzipReader.Reset(src)
- if err != nil {
- return nil, err
- }
- reader.Reader.Reset(reader.gzipReader)
- return reader, nil
- }
- gzipReader, err := gzip.NewReader(src)
- if err != nil {
- return nil, err
- }
- return &gzipBufferedReader{
- gzipReader: gzipReader,
- Reader: bufio.NewReaderSize(gzipReader, 4*1024),
- }, nil
-}
-
-// PutReader places back in the pool a CompressionReader
-func (pool *GzipPool) PutReader(reader io.Reader) {
- pool.readers.Put(reader)
-}
-
-// GetWriter gets or creates a new CompressionWriter and reset it to write to dst
-func (pool *GzipPool) GetWriter(dst io.Writer) io.WriteCloser {
- if w := pool.writers.Get(); w != nil {
- writer := w.(*gzip.Writer)
- writer.Reset(dst)
- return writer
- }
-
- level := pool.level
- if level == 0 {
- level = gzip.DefaultCompression
- }
- w, err := gzip.NewWriterLevel(dst, level)
- if err != nil {
- panic(err) // never happens, error is only returned on wrong compression level.
- }
- return w
-}
-
-// PutWriter places back in the pool a CompressionWriter
-func (pool *GzipPool) PutWriter(writer io.WriteCloser) {
- pool.writers.Put(writer)
-}
-
-// FlatePool is a flate compression pool
-type FlatePool struct {
- readers sync.Pool
- writers sync.Pool
- level int
-}
-
-// GetReader gets or creates a new CompressionReader and reset it to read from src
-func (pool *FlatePool) GetReader(src io.Reader) (io.Reader, error) {
- if r := pool.readers.Get(); r != nil {
- reader := r.(flate.Resetter)
- err := reader.Reset(src, nil)
- if err != nil {
- panic(err)
- }
- return reader.(io.Reader), nil
- }
- return flate.NewReader(src), nil
-}
-
-// PutReader places back in the pool a CompressionReader
-func (pool *FlatePool) PutReader(reader io.Reader) {
- pool.readers.Put(reader)
-}
-
-// GetWriter gets or creates a new CompressionWriter and reset it to write to dst
-func (pool *FlatePool) GetWriter(dst io.Writer) io.WriteCloser {
- if w := pool.writers.Get(); w != nil {
- writer := w.(*flate.Writer)
- writer.Reset(dst)
- return writer
- }
-
- level := pool.level
- if level == 0 {
- level = flate.DefaultCompression
- }
- w, err := flate.NewWriter(dst, level)
- if err != nil {
- panic(err) // never happens, error is only returned on wrong compression level.
- }
- return w
-}
-
-// PutWriter places back in the pool a CompressionWriter
-func (pool *FlatePool) PutWriter(writer io.WriteCloser) {
- pool.writers.Put(writer)
-}
-
-// GzipPool is a gun zip compression pool
-type ZstdPool struct {
- readers sync.Pool
- writers sync.Pool
-}
-
-// GetReader gets or creates a new CompressionReader and reset it to read from src
-func (pool *ZstdPool) GetReader(src io.Reader) (io.Reader, error) {
- if r := pool.readers.Get(); r != nil {
- reader := r.(*zstd.Decoder)
- err := reader.Reset(src)
- if err != nil {
- return nil, err
- }
- return reader, nil
- }
- reader, err := zstd.NewReader(src)
- if err != nil {
- return nil, err
- }
- runtime.SetFinalizer(reader, (*zstd.Decoder).Close)
- return reader, nil
-}
-
-// PutReader places back in the pool a CompressionReader
-func (pool *ZstdPool) PutReader(reader io.Reader) {
- pool.readers.Put(reader)
-}
-
-// GetWriter gets or creates a new CompressionWriter and reset it to write to dst
-func (pool *ZstdPool) GetWriter(dst io.Writer) io.WriteCloser {
- if w := pool.writers.Get(); w != nil {
- writer := w.(*zstd.Encoder)
- writer.Reset(dst)
- return writer
- }
-
- w, err := zstd.NewWriter(dst)
- if err != nil {
- panic(err) // never happens, error is only returned on wrong compression level.
- }
- return w
-}
-
-// PutWriter places back in the pool a CompressionWriter
-func (pool *ZstdPool) PutWriter(writer io.WriteCloser) {
- pool.writers.Put(writer)
-}
-
-type LZ4Pool struct {
- readers sync.Pool
- writers sync.Pool
- bufferSize uint32 // available values: 1<<16 (64k), 1<<18 (256k), 1<<20 (1M), 1<<22 (4M). Defaults to 4MB, if not set.
-}
-
-// We need to be able to see the underlying lz4.Reader to Reset it.
-type lz4BufferedReader struct {
- *bufio.Reader
- lz4Reader *lz4.Reader
-}
-
-// GetReader gets or creates a new CompressionReader and reset it to read from src
-func (pool *LZ4Pool) GetReader(src io.Reader) (io.Reader, error) {
- var r *lz4BufferedReader
- if pooled := pool.readers.Get(); pooled != nil {
- r = pooled.(*lz4BufferedReader)
- r.lz4Reader.Reset(src)
- r.Reader.Reset(r.lz4Reader)
- } else {
- lz4Reader := lz4.NewReader(src)
- r = &lz4BufferedReader{
- lz4Reader: lz4Reader,
- Reader: bufio.NewReaderSize(lz4Reader, 4*1024),
- }
- }
- return r, nil
-}
-
-// PutReader places back in the pool a CompressionReader
-func (pool *LZ4Pool) PutReader(reader io.Reader) {
- pool.readers.Put(reader)
-}
-
-// GetWriter gets or creates a new CompressionWriter and reset it to write to dst
-func (pool *LZ4Pool) GetWriter(dst io.Writer) io.WriteCloser {
- var w *lz4.Writer
- if fromPool := pool.writers.Get(); fromPool != nil {
- w = fromPool.(*lz4.Writer)
- w.Reset(dst)
- } else {
- w = lz4.NewWriter(dst)
- }
- err := w.Apply(
- lz4.ChecksumOption(false),
- lz4.BlockSizeOption(lz4.BlockSize(pool.bufferSize)),
- lz4.CompressionLevelOption(lz4.Fast),
- )
- if err != nil {
- panic(err)
- }
- return w
-}
-
-// PutWriter places back in the pool a CompressionWriter
-func (pool *LZ4Pool) PutWriter(writer io.WriteCloser) {
- pool.writers.Put(writer)
-}
-
-type SnappyPool struct {
- readers sync.Pool
- writers sync.Pool
-}
-
-// GetReader gets or creates a new CompressionReader and reset it to read from src
-func (pool *SnappyPool) GetReader(src io.Reader) (io.Reader, error) {
- if r := pool.readers.Get(); r != nil {
- reader := r.(*snappy.Reader)
- reader.Reset(src)
- return reader, nil
- }
- return snappy.NewReader(src), nil
-}
-
-// PutReader places back in the pool a CompressionReader
-func (pool *SnappyPool) PutReader(reader io.Reader) {
- r := reader.(*snappy.Reader)
- // Reset to free reference to the underlying reader
- r.Reset(nil)
- pool.readers.Put(reader)
-}
-
-// GetWriter gets or creates a new CompressionWriter and reset it to write to dst
-func (pool *SnappyPool) GetWriter(dst io.Writer) io.WriteCloser {
- if w := pool.writers.Get(); w != nil {
- writer := w.(*snappy.Writer)
- writer.Reset(dst)
- return writer
- }
- return snappy.NewBufferedWriter(dst)
-}
-
-// PutWriter places back in the pool a CompressionWriter
-func (pool *SnappyPool) PutWriter(writer io.WriteCloser) {
- pool.writers.Put(writer)
-}
-
-type NoopPool struct{}
-
-// GetReader gets or creates a new CompressionReader and reset it to read from src
-func (pool *NoopPool) GetReader(src io.Reader) (io.Reader, error) {
- return src, nil
-}
-
-// PutReader places back in the pool a CompressionReader
-func (pool *NoopPool) PutReader(_ io.Reader) {}
-
-type noopCloser struct {
- io.Writer
-}
-
-func (noopCloser) Close() error { return nil }
-
-// GetWriter gets or creates a new CompressionWriter and reset it to write to dst
-func (pool *NoopPool) GetWriter(dst io.Writer) io.WriteCloser {
- return noopCloser{dst}
-}
-
-// PutWriter places back in the pool a CompressionWriter
-func (pool *NoopPool) PutWriter(_ io.WriteCloser) {}
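None of the deleted pool code changes behaviour; it moves to `pkg/compression` wholesale. For readers skimming the removal, the essential pattern is small. A reduced sketch of the gzip writer side (the real pools also cover readers, compression levels, and the other codecs):

```go
package example

import (
	"io"
	"sync"

	"github.com/klauspost/compress/gzip"
)

// gzipWriterPool reduces the moved pool code to its core: recycle writers
// through a sync.Pool and Reset them against the next destination instead
// of allocating a fresh compressor per chunk.
type gzipWriterPool struct {
	writers sync.Pool
}

func (p *gzipWriterPool) GetWriter(dst io.Writer) io.WriteCloser {
	if w, ok := p.writers.Get().(*gzip.Writer); ok {
		w.Reset(dst)
		return w
	}
	return gzip.NewWriter(dst)
}

func (p *gzipWriterPool) PutWriter(w io.WriteCloser) {
	p.writers.Put(w)
}
```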
diff --git a/pkg/chunkenc/pool_test.go b/pkg/chunkenc/pool_test.go
deleted file mode 100644
index 04ecaadf9295e..0000000000000
--- a/pkg/chunkenc/pool_test.go
+++ /dev/null
@@ -1,60 +0,0 @@
-package chunkenc
-
-import (
- "bytes"
- "io"
- "os"
- "runtime"
- "runtime/pprof"
- "sync"
- "testing"
- "time"
-
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-)
-
-func TestPool(t *testing.T) {
- var wg sync.WaitGroup
- for _, enc := range supportedEncoding {
- enc := enc
- for i := 0; i < 200; i++ {
- wg.Add(1)
- go func() {
- defer wg.Done()
- var (
- buf = bytes.NewBuffer(nil)
- res = make([]byte, 1024)
- wpool = GetWriterPool(enc)
- rpool = GetReaderPool(enc)
- )
-
- w := wpool.GetWriter(buf)
- defer wpool.PutWriter(w)
- _, err := w.Write([]byte("test"))
- require.NoError(t, err)
- require.NoError(t, w.Close())
-
- require.True(t, buf.Len() != 0, enc)
- r, err := rpool.GetReader(bytes.NewBuffer(buf.Bytes()))
- require.NoError(t, err)
- defer rpool.PutReader(r)
- n, err := r.Read(res)
- if err != nil {
- require.Error(t, err, io.EOF)
- }
- require.Equal(t, 4, n, enc.String())
- require.Equal(t, []byte("test"), res[:n], enc)
- }()
- }
- }
-
- wg.Wait()
-
- if !assert.Eventually(t, func() bool {
- runtime.GC()
- return runtime.NumGoroutine() <= 50
- }, 5*time.Second, 10*time.Millisecond) {
- _ = pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
- }
-}
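The deleted `TestPool` presumably travels with the moved code. A slimmed-down round trip in the same spirit, written against the new package; the test name and assertions here are assumptions:

```go
package compression_test

import (
	"bytes"
	"io"
	"testing"

	"github.com/grafana/loki/v3/pkg/compression"
)

// TestPoolRoundTrip compresses and decompresses a short payload through
// the pooled writers and readers, per encoding.
func TestPoolRoundTrip(t *testing.T) {
	for _, enc := range []compression.Encoding{compression.EncGZIP, compression.EncSnappy} {
		var buf bytes.Buffer

		wpool := compression.GetWriterPool(enc)
		w := wpool.GetWriter(&buf)
		if _, err := w.Write([]byte("test")); err != nil {
			t.Fatal(err)
		}
		if err := w.Close(); err != nil {
			t.Fatal(err)
		}
		wpool.PutWriter(w)

		rpool := compression.GetReaderPool(enc)
		r, err := rpool.GetReader(bytes.NewReader(buf.Bytes()))
		if err != nil {
			t.Fatal(err)
		}
		out, err := io.ReadAll(r)
		if err != nil {
			t.Fatal(err)
		}
		rpool.PutReader(r)

		if string(out) != "test" {
			t.Fatalf("%s: got %q, want %q", enc, out, "test")
		}
	}
}
```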
diff --git a/pkg/chunkenc/symbols.go b/pkg/chunkenc/symbols.go
index e9f0b49529689..f5d3310921abe 100644
--- a/pkg/chunkenc/symbols.go
+++ b/pkg/chunkenc/symbols.go
@@ -12,6 +12,7 @@ import (
"github.com/pkg/errors"
"github.com/prometheus/prometheus/model/labels"
+ "github.com/grafana/loki/v3/pkg/compression"
"github.com/grafana/loki/v3/pkg/util"
)
@@ -163,7 +164,7 @@ func (s *symbolizer) CheckpointSize() int {
// SerializeTo serializes all the labels and writes to the writer in compressed format.
// It returns back the number of bytes written and a checksum of the data written.
-func (s *symbolizer) SerializeTo(w io.Writer, pool WriterPool) (int, []byte, error) {
+func (s *symbolizer) SerializeTo(w io.Writer, pool compression.WriterPool) (int, []byte, error) {
crc32Hash := crc32HashPool.Get().(hash.Hash32)
defer crc32HashPool.Put(crc32Hash)
@@ -324,7 +325,7 @@ func symbolizerFromCheckpoint(b []byte) *symbolizer {
}
// symbolizerFromEnc builds symbolizer from the bytes generated during serialization.
-func symbolizerFromEnc(b []byte, pool ReaderPool) (*symbolizer, error) {
+func symbolizerFromEnc(b []byte, pool compression.ReaderPool) (*symbolizer, error) {
db := decbuf{b: b}
numLabels := db.uvarint()
diff --git a/pkg/chunkenc/symbols_test.go b/pkg/chunkenc/symbols_test.go
index 7882001c75dd5..1f286d7b56d50 100644
--- a/pkg/chunkenc/symbols_test.go
+++ b/pkg/chunkenc/symbols_test.go
@@ -8,6 +8,8 @@ import (
"github.com/prometheus/prometheus/model/labels"
"github.com/stretchr/testify/require"
+
+ "github.com/grafana/loki/v3/pkg/compression"
)
func TestSymbolizer(t *testing.T) {
@@ -125,7 +127,7 @@ func TestSymbolizer(t *testing.T) {
expectedUncompressedSize: 22,
},
} {
- for _, encoding := range testEncoding {
+ for _, encoding := range testEncodings {
t.Run(fmt.Sprintf("%s - %s", tc.name, encoding), func(t *testing.T) {
s := newSymbolizer()
for i, labels := range tc.labelsToAdd {
@@ -161,10 +163,10 @@ func TestSymbolizer(t *testing.T) {
}
buf.Reset()
- _, _, err = s.SerializeTo(buf, GetWriterPool(encoding))
+ _, _, err = s.SerializeTo(buf, compression.GetWriterPool(encoding))
require.NoError(t, err)
- loaded, err = symbolizerFromEnc(buf.Bytes(), GetReaderPool(encoding))
+ loaded, err = symbolizerFromEnc(buf.Bytes(), compression.GetReaderPool(encoding))
require.NoError(t, err)
for i, symbols := range tc.expectedSymbols {
require.Equal(t, tc.labelsToAdd[i], loaded.Lookup(symbols, nil))
diff --git a/pkg/chunkenc/unordered.go b/pkg/chunkenc/unordered.go
index aed6606c7c6d8..3132c77206abb 100644
--- a/pkg/chunkenc/unordered.go
+++ b/pkg/chunkenc/unordered.go
@@ -14,6 +14,7 @@ import (
"github.com/pkg/errors"
"github.com/prometheus/prometheus/model/labels"
+ "github.com/grafana/loki/v3/pkg/compression"
"github.com/grafana/loki/v3/pkg/iter"
"github.com/grafana/loki/v3/pkg/logproto"
"github.com/grafana/loki/v3/pkg/logql/log"
@@ -28,7 +29,7 @@ type HeadBlock interface {
CheckpointBytes(b []byte) ([]byte, error)
CheckpointSize() int
LoadBytes(b []byte) error
- Serialise(pool WriterPool) ([]byte, error)
+ Serialise(pool compression.WriterPool) ([]byte, error)
Reset()
Bounds() (mint, maxt int64)
Entries() int
@@ -373,7 +374,7 @@ func (hb *unorderedHeadBlock) SampleIterator(
// nolint:unused
// serialise is used in creating an ordered, compressed block from an unorderedHeadBlock
-func (hb *unorderedHeadBlock) Serialise(pool WriterPool) ([]byte, error) {
+func (hb *unorderedHeadBlock) Serialise(pool compression.WriterPool) ([]byte, error) {
inBuf := serializeBytesBufferPool.Get().(*bytes.Buffer)
defer func() {
inBuf.Reset()
diff --git a/pkg/chunkenc/unordered_test.go b/pkg/chunkenc/unordered_test.go
index 8a3420965bdb7..fb341aaa8db93 100644
--- a/pkg/chunkenc/unordered_test.go
+++ b/pkg/chunkenc/unordered_test.go
@@ -12,6 +12,7 @@ import (
"github.com/prometheus/prometheus/model/labels"
"github.com/stretchr/testify/require"
+ "github.com/grafana/loki/v3/pkg/compression"
"github.com/grafana/loki/v3/pkg/iter"
"github.com/grafana/loki/v3/pkg/logproto"
"github.com/grafana/loki/v3/pkg/logql/log"
@@ -450,7 +451,7 @@ func BenchmarkHeadBlockWrites(b *testing.B) {
}
func TestUnorderedChunkIterators(t *testing.T) {
- c := NewMemChunk(ChunkFormatV4, EncSnappy, UnorderedWithStructuredMetadataHeadBlockFmt, testBlockSize, testTargetSize)
+ c := NewMemChunk(ChunkFormatV4, compression.EncSnappy, UnorderedWithStructuredMetadataHeadBlockFmt, testBlockSize, testTargetSize)
for i := 0; i < 100; i++ {
// push in reverse order
dup, err := c.Append(&logproto.Entry{
@@ -496,11 +497,11 @@ func TestUnorderedChunkIterators(t *testing.T) {
}
func BenchmarkUnorderedRead(b *testing.B) {
- legacy := NewMemChunk(ChunkFormatV3, EncSnappy, OrderedHeadBlockFmt, testBlockSize, testTargetSize)
+ legacy := NewMemChunk(ChunkFormatV3, compression.EncSnappy, OrderedHeadBlockFmt, testBlockSize, testTargetSize)
fillChunkClose(legacy, false)
- ordered := NewMemChunk(ChunkFormatV3, EncSnappy, UnorderedHeadBlockFmt, testBlockSize, testTargetSize)
+ ordered := NewMemChunk(ChunkFormatV3, compression.EncSnappy, UnorderedHeadBlockFmt, testBlockSize, testTargetSize)
fillChunkClose(ordered, false)
- unordered := NewMemChunk(ChunkFormatV3, EncSnappy, UnorderedHeadBlockFmt, testBlockSize, testTargetSize)
+ unordered := NewMemChunk(ChunkFormatV3, compression.EncSnappy, UnorderedHeadBlockFmt, testBlockSize, testTargetSize)
fillChunkRandomOrder(unordered, false)
tcs := []struct {
@@ -558,7 +559,7 @@ func BenchmarkUnorderedRead(b *testing.B) {
}
func TestUnorderedIteratorCountsAllEntries(t *testing.T) {
- c := NewMemChunk(ChunkFormatV4, EncSnappy, UnorderedWithStructuredMetadataHeadBlockFmt, testBlockSize, testTargetSize)
+ c := NewMemChunk(ChunkFormatV4, compression.EncSnappy, UnorderedWithStructuredMetadataHeadBlockFmt, testBlockSize, testTargetSize)
fillChunkRandomOrder(c, false)
ct := 0
@@ -595,7 +596,7 @@ func TestUnorderedIteratorCountsAllEntries(t *testing.T) {
}
func chunkFrom(xs []logproto.Entry) ([]byte, error) {
- c := NewMemChunk(ChunkFormatV4, EncSnappy, UnorderedWithStructuredMetadataHeadBlockFmt, testBlockSize, testTargetSize)
+ c := NewMemChunk(ChunkFormatV4, compression.EncSnappy, UnorderedWithStructuredMetadataHeadBlockFmt, testBlockSize, testTargetSize)
for _, x := range xs {
if _, err := c.Append(&x); err != nil {
return nil, err
@@ -655,7 +656,7 @@ func TestReorder(t *testing.T) {
},
} {
t.Run(tc.desc, func(t *testing.T) {
- c := NewMemChunk(ChunkFormatV4, EncSnappy, UnorderedWithStructuredMetadataHeadBlockFmt, testBlockSize, testTargetSize)
+ c := NewMemChunk(ChunkFormatV4, compression.EncSnappy, UnorderedWithStructuredMetadataHeadBlockFmt, testBlockSize, testTargetSize)
for _, x := range tc.input {
dup, err := c.Append(&x)
require.False(t, dup)
@@ -674,7 +675,7 @@ func TestReorder(t *testing.T) {
}
func TestReorderAcrossBlocks(t *testing.T) {
- c := NewMemChunk(ChunkFormatV4, EncSnappy, UnorderedWithStructuredMetadataHeadBlockFmt, testBlockSize, testTargetSize)
+ c := NewMemChunk(ChunkFormatV4, compression.EncSnappy, UnorderedWithStructuredMetadataHeadBlockFmt, testBlockSize, testTargetSize)
for _, batch := range [][]int{
// ensure our blocks have overlapping bounds and must be reordered
// before closing.
diff --git a/pkg/chunkenc/util_test.go b/pkg/chunkenc/util_test.go
index 3da8f9e6d5cb8..0d75273d6c81e 100644
--- a/pkg/chunkenc/util_test.go
+++ b/pkg/chunkenc/util_test.go
@@ -5,6 +5,7 @@ import (
"time"
"github.com/grafana/loki/v3/pkg/chunkenc/testdata"
+ "github.com/grafana/loki/v3/pkg/compression"
"github.com/grafana/loki/v3/pkg/logproto"
)
@@ -23,7 +24,7 @@ func logprotoEntryWithStructuredMetadata(ts int64, line string, structuredMetada
}
}
-func generateData(enc Encoding, chunksCount, blockSize, targetSize int) ([]Chunk, uint64) {
+func generateData(enc compression.Encoding, chunksCount, blockSize, targetSize int) ([]Chunk, uint64) {
chunks := []Chunk{}
i := int64(0)
size := uint64(0)
diff --git a/pkg/compactor/deletion/delete_requests_table.go b/pkg/compactor/deletion/delete_requests_table.go
index 80a47a5e6435b..7d4c5cf4d254a 100644
--- a/pkg/compactor/deletion/delete_requests_table.go
+++ b/pkg/compactor/deletion/delete_requests_table.go
@@ -13,7 +13,7 @@ import (
"github.com/go-kit/log/level"
"go.etcd.io/bbolt"
- "github.com/grafana/loki/v3/pkg/chunkenc"
+ "github.com/grafana/loki/v3/pkg/compression"
"github.com/grafana/loki/v3/pkg/storage/chunk/client/local"
"github.com/grafana/loki/v3/pkg/storage/stores/series/index"
"github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/storage"
@@ -117,8 +117,9 @@ func (t *deleteRequestsTable) uploadFile() error {
}()
err = t.db.View(func(tx *bbolt.Tx) (err error) {
- compressedWriter := chunkenc.Gzip.GetWriter(f)
- defer chunkenc.Gzip.PutWriter(compressedWriter)
+ gzipPool := compression.GetWriterPool(compression.EncGZIP)
+ compressedWriter := gzipPool.GetWriter(f)
+ defer gzipPool.PutWriter(compressedWriter)
defer func() {
cerr := compressedWriter.Close()
diff --git a/pkg/compactor/index_set.go b/pkg/compactor/index_set.go
index 7102aef564259..76b5546a96289 100644
--- a/pkg/compactor/index_set.go
+++ b/pkg/compactor/index_set.go
@@ -12,8 +12,8 @@ import (
"github.com/go-kit/log/level"
"github.com/pkg/errors"
- "github.com/grafana/loki/v3/pkg/chunkenc"
"github.com/grafana/loki/v3/pkg/compactor/retention"
+ "github.com/grafana/loki/v3/pkg/compression"
"github.com/grafana/loki/v3/pkg/storage/chunk/client/util"
"github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/index"
"github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/storage"
@@ -229,8 +229,9 @@ func (is *indexSet) upload() error {
}
}()
- compressedWriter := chunkenc.Gzip.GetWriter(f)
- defer chunkenc.Gzip.PutWriter(compressedWriter)
+ gzipPool := compression.GetWriterPool(compression.EncGZIP)
+ compressedWriter := gzipPool.GetWriter(f)
+ defer gzipPool.PutWriter(compressedWriter)
idxReader, err := idx.Reader()
if err != nil {
diff --git a/pkg/compactor/retention/retention_test.go b/pkg/compactor/retention/retention_test.go
index 4885c835003c2..3b124691087ad 100644
--- a/pkg/compactor/retention/retention_test.go
+++ b/pkg/compactor/retention/retention_test.go
@@ -21,6 +21,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/grafana/loki/v3/pkg/chunkenc"
+ "github.com/grafana/loki/v3/pkg/compression"
ingesterclient "github.com/grafana/loki/v3/pkg/ingester/client"
"github.com/grafana/loki/v3/pkg/logproto"
"github.com/grafana/loki/v3/pkg/logql/log"
@@ -220,7 +221,7 @@ func createChunk(t testing.TB, userID string, lbs labels.Labels, from model.Time
labelsBuilder.Set(labels.MetricName, "logs")
metric := labelsBuilder.Labels()
fp := ingesterclient.Fingerprint(lbs)
- chunkEnc := chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, chunkenc.EncSnappy, chunkenc.UnorderedWithStructuredMetadataHeadBlockFmt, blockSize, targetSize)
+ chunkEnc := chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, compression.EncSnappy, chunkenc.UnorderedWithStructuredMetadataHeadBlockFmt, blockSize, targetSize)
for ts := from; !ts.After(through); ts = ts.Add(1 * time.Minute) {
dup, err := chunkEnc.Append(&logproto.Entry{
diff --git a/pkg/compression/encoding.go b/pkg/compression/encoding.go
new file mode 100644
index 0000000000000..6b421ed976441
--- /dev/null
+++ b/pkg/compression/encoding.go
@@ -0,0 +1,86 @@
+package compression
+
+import (
+ "fmt"
+ "strings"
+)
+
+// Encoding identifies an available compression type.
+type Encoding byte
+
+// The different available encodings.
+// Make sure to preserve the order, as the numeric values are serialized!
+const (
+ EncNone Encoding = iota
+ EncGZIP
+ EncDumb
+ EncLZ4_64k
+ EncSnappy
+ EncLZ4_256k
+ EncLZ4_1M
+ EncLZ4_4M
+ EncFlate
+ EncZstd
+)
+
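+// supportedEncoding lists the encodings that can be parsed by name.
+// EncDumb is not included; it remains in the enum above only to keep the
+// serialized numeric values of the encodings that follow it stable.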
+var supportedEncoding = []Encoding{
+ EncNone,
+ EncGZIP,
+ EncLZ4_64k,
+ EncSnappy,
+ EncLZ4_256k,
+ EncLZ4_1M,
+ EncLZ4_4M,
+ EncFlate,
+ EncZstd,
+}
+
+func (e Encoding) String() string {
+ switch e {
+ case EncGZIP:
+ return "gzip"
+ case EncNone:
+ return "none"
+ case EncDumb:
+ return "dumb"
+ case EncLZ4_64k:
+ return "lz4-64k"
+ case EncLZ4_256k:
+ return "lz4-256k"
+ case EncLZ4_1M:
+ return "lz4-1M"
+ case EncLZ4_4M:
+ return "lz4"
+ case EncSnappy:
+ return "snappy"
+ case EncFlate:
+ return "flate"
+ case EncZstd:
+ return "zstd"
+ default:
+ return "unknown"
+ }
+}
+
+// ParseEncoding parses a chunk encoding (compression algorithm) by its name.
+func ParseEncoding(enc string) (Encoding, error) {
+ for _, e := range supportedEncoding {
+ if strings.EqualFold(e.String(), enc) {
+ return e, nil
+ }
+ }
+ return 0, fmt.Errorf("invalid encoding: %s, supported: %s", enc, SupportedEncoding())
+}
+
+// SupportedEncoding returns a comma-separated list of the supported encodings
+func SupportedEncoding() string {
+ var sb strings.Builder
+ for i := range supportedEncoding {
+ sb.WriteString(supportedEncoding[i].String())
+ if i != len(supportedEncoding)-1 {
+ sb.WriteString(", ")
+ }
+ }
+ return sb.String()
+}
diff --git a/pkg/compression/encoding_test.go b/pkg/compression/encoding_test.go
new file mode 100644
index 0000000000000..d67323ebb2d4f
--- /dev/null
+++ b/pkg/compression/encoding_test.go
@@ -0,0 +1,26 @@
+package compression
+
+import "testing"
+
+func TestParseEncoding(t *testing.T) {
+ tests := []struct {
+ enc string
+ want Encoding
+ wantErr bool
+ }{
+ {"gzip", EncGZIP, false},
+ {"bad", 0, true},
+ }
+ for _, tt := range tests {
+ t.Run(tt.enc, func(t *testing.T) {
+ got, err := ParseEncoding(tt.enc)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("ParseEncoding() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if got != tt.want {
+ t.Errorf("ParseEncoding() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
diff --git a/pkg/compression/pool.go b/pkg/compression/pool.go
new file mode 100644
index 0000000000000..b68ff7de47b1c
--- /dev/null
+++ b/pkg/compression/pool.go
@@ -0,0 +1,368 @@
+package compression
+
+import (
+ "bufio"
+ "io"
+ "runtime"
+ "sync"
+
+ snappylib "github.com/golang/snappy"
+ flatelib "github.com/klauspost/compress/flate"
+ gziplib "github.com/klauspost/compress/gzip"
+ zstdlib "github.com/klauspost/compress/zstd"
+ lz4lib "github.com/pierrec/lz4/v4"
+)
+
+// WriterPool is a pool of io.Writer
+// This is used by every chunk to avoid unnecessary allocations.
+type WriterPool interface {
+ GetWriter(io.Writer) io.WriteCloser
+ PutWriter(io.WriteCloser)
+}
+
+// ReaderPool is a pool of io.Reader.
+// It is similar to WriterPool but used for reading chunks.
+type ReaderPool interface {
+ GetReader(io.Reader) (io.Reader, error)
+ PutReader(io.Reader)
+}
+
+// ReaderWriterPool is a pool of io.Reader and io.Writer
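+//
+// A minimal round-trip sketch (assumes buf is a bytes.Buffer and payload
+// holds the raw bytes to compress):
+//
+//	w := GetWriterPool(EncSnappy).GetWriter(&buf)
+//	_, _ = w.Write(payload)
+//	_ = w.Close()
+//	r, _ := GetReaderPool(EncSnappy).GetReader(&buf)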
+type ReaderWriterPool interface {
+ ReaderPool
+ WriterPool
+}
+
+var (
+ // gzip is the gnu zip compression pool
+ gzip = GzipPool{level: gziplib.DefaultCompression}
+ // lz4_* are the lz4 compression pools
+ lz4_64k = LZ4Pool{bufferSize: 1 << 16} // lz4_64k is the l4z compression pool, with 64k buffer size
+ lz4_256k = LZ4Pool{bufferSize: 1 << 18} // lz4_256k uses 256k buffer
+ lz4_1M = LZ4Pool{bufferSize: 1 << 20} // lz4_1M uses 1M buffer
+ lz4_4M = LZ4Pool{bufferSize: 1 << 22} // lz4_4M uses 4M buffer
+ // flate is the flate compression pool
+ flate = FlatePool{}
+ // zstd is the zstd compression pool
+ zstd = ZstdPool{}
+ // snappy is the snappy compression pool
+ snappy = SnappyPool{}
+ // noop is the no compression pool
+ noop = NoopPool{}
+)
+
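+// GetWriterPool returns the WriterPool for the given encoding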
+func GetWriterPool(enc Encoding) WriterPool {
+ return GetPool(enc).(WriterPool)
+}
+
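+// GetReaderPool returns the ReaderPool for the given encoding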
+func GetReaderPool(enc Encoding) ReaderPool {
+ return GetPool(enc).(ReaderPool)
+}
+
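+// GetPool returns the ReaderWriterPool for the given encoding, panicking on an unknown encoding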
+func GetPool(enc Encoding) ReaderWriterPool {
+ switch enc {
+ case EncGZIP:
+ return &gzip
+ case EncLZ4_64k:
+ return &lz4_64k
+ case EncLZ4_256k:
+ return &lz4_256k
+ case EncLZ4_1M:
+ return &lz4_1M
+ case EncLZ4_4M:
+ return &lz4_4M
+ case EncSnappy:
+ return &snappy
+ case EncNone:
+ return &noop
+ case EncFlate:
+ return &flate
+ case EncZstd:
+ return &zstd
+ default:
+ panic("unknown encoding")
+ }
+}
+
+// GzipPool is a gnu zip compression pool
+type GzipPool struct {
+ readers sync.Pool
+ writers sync.Pool
+ level int
+}
+
+// Gzip needs buffering to read efficiently.
+// We need to be able to see the underlying gzip.Reader to Reset it.
+type gzipBufferedReader struct {
+ *bufio.Reader
+ gzipReader *gziplib.Reader
+}
+
+// GetReader gets or creates a new CompressionReader and reset it to read from src
+func (pool *GzipPool) GetReader(src io.Reader) (io.Reader, error) {
+ if r := pool.readers.Get(); r != nil {
+ reader := r.(*gzipBufferedReader)
+ err := reader.gzipReader.Reset(src)
+ if err != nil {
+ return nil, err
+ }
+ reader.Reader.Reset(reader.gzipReader)
+ return reader, nil
+ }
+ gzipReader, err := gziplib.NewReader(src)
+ if err != nil {
+ return nil, err
+ }
+ return &gzipBufferedReader{
+ gzipReader: gzipReader,
+ Reader: bufio.NewReaderSize(gzipReader, 4*1024),
+ }, nil
+}
+
+// PutReader places back in the pool a CompressionReader
+func (pool *GzipPool) PutReader(reader io.Reader) {
+ pool.readers.Put(reader)
+}
+
+// GetWriter gets or creates a new CompressionWriter and reset it to write to dst
+func (pool *GzipPool) GetWriter(dst io.Writer) io.WriteCloser {
+ if w := pool.writers.Get(); w != nil {
+ writer := w.(*gziplib.Writer)
+ writer.Reset(dst)
+ return writer
+ }
+
+ level := pool.level
+ if level == 0 {
+ level = gziplib.DefaultCompression
+ }
+ w, err := gziplib.NewWriterLevel(dst, level)
+ if err != nil {
+ panic(err) // never happens, error is only returned on wrong compression level.
+ }
+ return w
+}
+
+// PutWriter places back in the pool a CompressionWriter
+func (pool *GzipPool) PutWriter(writer io.WriteCloser) {
+ pool.writers.Put(writer)
+}
+
+// FlatePool is a flate compression pool
+type FlatePool struct {
+ readers sync.Pool
+ writers sync.Pool
+ level int
+}
+
+// GetReader gets or creates a new CompressionReader and reset it to read from src
+func (pool *FlatePool) GetReader(src io.Reader) (io.Reader, error) {
+ if r := pool.readers.Get(); r != nil {
+ reader := r.(flatelib.Resetter)
+ err := reader.Reset(src, nil)
+ if err != nil {
+ panic(err)
+ }
+ return reader.(io.Reader), nil
+ }
+ return flatelib.NewReader(src), nil
+}
+
+// PutReader places back in the pool a CompressionReader
+func (pool *FlatePool) PutReader(reader io.Reader) {
+ pool.readers.Put(reader)
+}
+
+// GetWriter gets or creates a new CompressionWriter and reset it to write to dst
+func (pool *FlatePool) GetWriter(dst io.Writer) io.WriteCloser {
+ if w := pool.writers.Get(); w != nil {
+ writer := w.(*flatelib.Writer)
+ writer.Reset(dst)
+ return writer
+ }
+
+ level := pool.level
+ if level == 0 {
+ level = flatelib.DefaultCompression
+ }
+ w, err := flatelib.NewWriter(dst, level)
+ if err != nil {
+ panic(err) // never happens, error is only returned on wrong compression level.
+ }
+ return w
+}
+
+// PutWriter places back in the pool a CompressionWriter
+func (pool *FlatePool) PutWriter(writer io.WriteCloser) {
+ pool.writers.Put(writer)
+}
+
+// ZstdPool is a zstd compression pool
+type ZstdPool struct {
+ readers sync.Pool
+ writers sync.Pool
+}
+
+// GetReader gets or creates a new CompressionReader and reset it to read from src
+func (pool *ZstdPool) GetReader(src io.Reader) (io.Reader, error) {
+ if r := pool.readers.Get(); r != nil {
+ reader := r.(*zstdlib.Decoder)
+ err := reader.Reset(src)
+ if err != nil {
+ return nil, err
+ }
+ return reader, nil
+ }
+ reader, err := zstdlib.NewReader(src)
+ if err != nil {
+ return nil, err
+ }
+ runtime.SetFinalizer(reader, (*zstdlib.Decoder).Close)
+ return reader, nil
+}
+
+// PutReader places back in the pool a CompressionReader
+func (pool *ZstdPool) PutReader(reader io.Reader) {
+ pool.readers.Put(reader)
+}
+
+// GetWriter gets or creates a new CompressionWriter and reset it to write to dst
+func (pool *ZstdPool) GetWriter(dst io.Writer) io.WriteCloser {
+ if w := pool.writers.Get(); w != nil {
+ writer := w.(*zstdlib.Encoder)
+ writer.Reset(dst)
+ return writer
+ }
+
+ w, err := zstdlib.NewWriter(dst)
+ if err != nil {
+ panic(err) // never happens, error is only returned on wrong compression level.
+ }
+ return w
+}
+
+// PutWriter places back in the pool a CompressionWriter
+func (pool *ZstdPool) PutWriter(writer io.WriteCloser) {
+ pool.writers.Put(writer)
+}
+
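+// LZ4Pool is an lz4 compression pool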
+type LZ4Pool struct {
+ readers sync.Pool
+ writers sync.Pool
+ bufferSize uint32 // available values: 1<<16 (64k), 1<<18 (256k), 1<<20 (1M), 1<<22 (4M). Defaults to 4MB, if not set.
+}
+
+// We need to be able to see the underlying lz4.Reader to Reset it.
+type lz4BufferedReader struct {
+ *bufio.Reader
+ lz4Reader *lz4lib.Reader
+}
+
+// GetReader gets or creates a new CompressionReader and reset it to read from src
+func (pool *LZ4Pool) GetReader(src io.Reader) (io.Reader, error) {
+ var r *lz4BufferedReader
+ if pooled := pool.readers.Get(); pooled != nil {
+ r = pooled.(*lz4BufferedReader)
+ r.lz4Reader.Reset(src)
+ r.Reader.Reset(r.lz4Reader)
+ } else {
+ lz4Reader := lz4lib.NewReader(src)
+ r = &lz4BufferedReader{
+ lz4Reader: lz4Reader,
+ Reader: bufio.NewReaderSize(lz4Reader, 4*1024),
+ }
+ }
+ return r, nil
+}
+
+// PutReader places back in the pool a CompressionReader
+func (pool *LZ4Pool) PutReader(reader io.Reader) {
+ pool.readers.Put(reader)
+}
+
+// GetWriter gets or creates a new CompressionWriter and reset it to write to dst
+func (pool *LZ4Pool) GetWriter(dst io.Writer) io.WriteCloser {
+ var w *lz4lib.Writer
+ if fromPool := pool.writers.Get(); fromPool != nil {
+ w = fromPool.(*lz4lib.Writer)
+ w.Reset(dst)
+ } else {
+ w = lz4lib.NewWriter(dst)
+ }
+ err := w.Apply(
+ lz4lib.ChecksumOption(false),
+ lz4lib.BlockSizeOption(lz4lib.BlockSize(pool.bufferSize)),
+ lz4lib.CompressionLevelOption(lz4lib.Fast),
+ )
+ if err != nil {
+ panic(err)
+ }
+ return w
+}
+
+// PutWriter places back in the pool a CompressionWriter
+func (pool *LZ4Pool) PutWriter(writer io.WriteCloser) {
+ pool.writers.Put(writer)
+}
+
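+// SnappyPool is a snappy compression pool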
+type SnappyPool struct {
+ readers sync.Pool
+ writers sync.Pool
+}
+
+// GetReader gets or creates a new CompressionReader and reset it to read from src
+func (pool *SnappyPool) GetReader(src io.Reader) (io.Reader, error) {
+ if r := pool.readers.Get(); r != nil {
+ reader := r.(*snappylib.Reader)
+ reader.Reset(src)
+ return reader, nil
+ }
+ return snappylib.NewReader(src), nil
+}
+
+// PutReader places back in the pool a CompressionReader
+func (pool *SnappyPool) PutReader(reader io.Reader) {
+ r := reader.(*snappylib.Reader)
+ // Reset to free reference to the underlying reader
+ r.Reset(nil)
+ pool.readers.Put(reader)
+}
+
+// GetWriter gets or creates a new CompressionWriter and reset it to write to dst
+func (pool *SnappyPool) GetWriter(dst io.Writer) io.WriteCloser {
+ if w := pool.writers.Get(); w != nil {
+ writer := w.(*snappylib.Writer)
+ writer.Reset(dst)
+ return writer
+ }
+ return snappylib.NewBufferedWriter(dst)
+}
+
+// PutWriter places back in the pool a CompressionWriter
+func (pool *SnappyPool) PutWriter(writer io.WriteCloser) {
+ pool.writers.Put(writer)
+}
+
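+// NoopPool is a pass-through pool that performs no compression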
+type NoopPool struct{}
+
+// GetReader gets or creates a new CompressionReader and reset it to read from src
+func (pool *NoopPool) GetReader(src io.Reader) (io.Reader, error) {
+ return src, nil
+}
+
+// PutReader places back in the pool a CompressionReader
+func (pool *NoopPool) PutReader(_ io.Reader) {}
+
+type noopCloser struct {
+ io.Writer
+}
+
+func (noopCloser) Close() error { return nil }
+
+// GetWriter gets or creates a new CompressionWriter and reset it to write to dst
+func (pool *NoopPool) GetWriter(dst io.Writer) io.WriteCloser {
+ return noopCloser{dst}
+}
+
+// PutWriter places back in the pool a CompressionWriter
+func (pool *NoopPool) PutWriter(_ io.WriteCloser) {}
diff --git a/pkg/compression/pool_test.go b/pkg/compression/pool_test.go
new file mode 100644
index 0000000000000..b39bbe0ad6f4d
--- /dev/null
+++ b/pkg/compression/pool_test.go
@@ -0,0 +1,64 @@
+package compression
+
+import (
+ "bytes"
+ "io"
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestPool(t *testing.T) {
+ for _, enc := range supportedEncoding {
+ enc := enc
+ t.Run(enc.String(), func(t *testing.T) {
+ var wg sync.WaitGroup
+
+ for i := 0; i < 200; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ var (
+ buf = bytes.NewBuffer(nil)
+ res = make([]byte, 1024)
+ wpool = GetWriterPool(enc)
+ rpool = GetReaderPool(enc)
+ )
+
+ w := wpool.GetWriter(buf)
+ defer wpool.PutWriter(w)
+ _, err := w.Write([]byte("test"))
+ require.NoError(t, err)
+ require.NoError(t, w.Close())
+
+ require.True(t, buf.Len() != 0, enc)
+ r, err := rpool.GetReader(bytes.NewBuffer(buf.Bytes()))
+ require.NoError(t, err)
+ defer rpool.PutReader(r)
+ n, err := r.Read(res)
+ if err != nil {
+ require.Error(t, err, io.EOF)
+ }
+ require.Equal(t, 4, n, enc.String())
+ require.Equal(t, []byte("test"), res[:n], enc)
+ }()
+ }
+
+ wg.Wait()
+
+ if !assert.Eventually(t, func() bool {
+ runtime.GC()
+ return runtime.NumGoroutine() <= 50
+ }, 5*time.Second, 10*time.Millisecond) {
+ _ = pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
+ }
+ })
+ }
+}
diff --git a/pkg/ingester/checkpoint_test.go b/pkg/ingester/checkpoint_test.go
index 5a816a3b779df..88e770d0c2dac 100644
--- a/pkg/ingester/checkpoint_test.go
+++ b/pkg/ingester/checkpoint_test.go
@@ -16,6 +16,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/grafana/loki/v3/pkg/chunkenc"
+ "github.com/grafana/loki/v3/pkg/compression"
"github.com/grafana/loki/v3/pkg/distributor/writefailures"
"github.com/grafana/loki/v3/pkg/ingester/client"
"github.com/grafana/loki/v3/pkg/logproto"
@@ -565,7 +566,7 @@ func buildChunks(t testing.TB, size int) []Chunk {
for i := 0; i < size; i++ {
// build chunks of 256k blocks, 1.5MB target size. Same as default config.
- c := chunkenc.NewMemChunk(chunkenc.ChunkFormatV3, chunkenc.EncGZIP, chunkenc.UnorderedHeadBlockFmt, 256*1024, 1500*1024)
+ c := chunkenc.NewMemChunk(chunkenc.ChunkFormatV3, compression.EncGZIP, chunkenc.UnorderedHeadBlockFmt, 256*1024, 1500*1024)
fillChunk(t, c)
descs = append(descs, chunkDesc{
chunk: c,
diff --git a/pkg/ingester/chunk_test.go b/pkg/ingester/chunk_test.go
index f6a16731e6d44..961b256ea58c6 100644
--- a/pkg/ingester/chunk_test.go
+++ b/pkg/ingester/chunk_test.go
@@ -11,6 +11,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/grafana/loki/v3/pkg/chunkenc"
+ "github.com/grafana/loki/v3/pkg/compression"
"github.com/grafana/loki/v3/pkg/iter"
"github.com/grafana/loki/v3/pkg/logproto"
"github.com/grafana/loki/v3/pkg/logql/log"
@@ -49,7 +50,7 @@ func TestIterator(t *testing.T) {
}{
{"dumbChunk", chunkenc.NewDumbChunk},
{"gzipChunk", func() chunkenc.Chunk {
- return chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, chunkenc.EncGZIP, chunkenc.UnorderedWithStructuredMetadataHeadBlockFmt, 256*1024, 0)
+ return chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, compression.EncGZIP, chunkenc.UnorderedWithStructuredMetadataHeadBlockFmt, 256*1024, 0)
}},
} {
t.Run(chk.name, func(t *testing.T) {
diff --git a/pkg/ingester/encoding_test.go b/pkg/ingester/encoding_test.go
index 458da1132c963..ee2ad1d8f681a 100644
--- a/pkg/ingester/encoding_test.go
+++ b/pkg/ingester/encoding_test.go
@@ -9,6 +9,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/grafana/loki/v3/pkg/chunkenc"
+ "github.com/grafana/loki/v3/pkg/compression"
"github.com/grafana/loki/v3/pkg/ingester/wal"
"github.com/grafana/loki/v3/pkg/logproto"
)
@@ -58,7 +59,7 @@ func Test_EncodingChunks(t *testing.T) {
t.Run(fmt.Sprintf("%v-%s", close, tc.desc), func(t *testing.T) {
conf := tc.conf
- c := chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, chunkenc.EncGZIP, chunkenc.UnorderedWithStructuredMetadataHeadBlockFmt, conf.BlockSize, conf.TargetChunkSize)
+ c := chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, compression.EncGZIP, chunkenc.UnorderedWithStructuredMetadataHeadBlockFmt, conf.BlockSize, conf.TargetChunkSize)
fillChunk(t, c)
if close {
require.Nil(t, c.Close())
@@ -121,7 +122,7 @@ func Test_EncodingChunks(t *testing.T) {
func Test_EncodingCheckpoint(t *testing.T) {
conf := dummyConf()
- c := chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, chunkenc.EncGZIP, chunkenc.UnorderedWithStructuredMetadataHeadBlockFmt, conf.BlockSize, conf.TargetChunkSize)
+ c := chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, compression.EncGZIP, chunkenc.UnorderedWithStructuredMetadataHeadBlockFmt, conf.BlockSize, conf.TargetChunkSize)
dup, err := c.Append(&logproto.Entry{
Timestamp: time.Unix(1, 0),
Line: "hi there",
diff --git a/pkg/ingester/flush_test.go b/pkg/ingester/flush_test.go
index 69462a3d352a5..5ef40d9d17d09 100644
--- a/pkg/ingester/flush_test.go
+++ b/pkg/ingester/flush_test.go
@@ -24,6 +24,7 @@ import (
"github.com/grafana/dskit/tenant"
"github.com/grafana/loki/v3/pkg/chunkenc"
+ "github.com/grafana/loki/v3/pkg/compression"
"github.com/grafana/loki/v3/pkg/distributor/writefailures"
"github.com/grafana/loki/v3/pkg/ingester/client"
"github.com/grafana/loki/v3/pkg/ingester/wal"
@@ -188,7 +189,7 @@ func buildChunkDecs(t testing.TB) []*chunkDesc {
for i := range res {
res[i] = &chunkDesc{
closed: true,
- chunk: chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, chunkenc.EncSnappy, chunkenc.UnorderedWithStructuredMetadataHeadBlockFmt, dummyConf().BlockSize, dummyConf().TargetChunkSize),
+ chunk: chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, compression.EncSnappy, chunkenc.UnorderedWithStructuredMetadataHeadBlockFmt, dummyConf().BlockSize, dummyConf().TargetChunkSize),
}
fillChunk(t, res[i].chunk)
require.NoError(t, res[i].chunk.Close())
diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go
index 9c913f9049f44..81c3c1a68350a 100644
--- a/pkg/ingester/ingester.go
+++ b/pkg/ingester/ingester.go
@@ -14,14 +14,6 @@ import (
"sync"
"time"
- "github.com/grafana/loki/v3/pkg/kafka"
- "github.com/grafana/loki/v3/pkg/kafka/partitionring"
- "github.com/grafana/loki/v3/pkg/loghttp/push"
- "github.com/grafana/loki/v3/pkg/logqlmodel/metadata"
- "github.com/grafana/loki/v3/pkg/storage/types"
-
- lokilog "github.com/grafana/loki/v3/pkg/logql/log"
-
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/grafana/dskit/backoff"
@@ -38,17 +30,20 @@ import (
"golang.org/x/time/rate"
"google.golang.org/grpc/health/grpc_health_v1"
- server_util "github.com/grafana/loki/v3/pkg/util/server"
-
"github.com/grafana/loki/v3/pkg/analytics"
- "github.com/grafana/loki/v3/pkg/chunkenc"
+ "github.com/grafana/loki/v3/pkg/compression"
"github.com/grafana/loki/v3/pkg/distributor/writefailures"
"github.com/grafana/loki/v3/pkg/ingester/client"
"github.com/grafana/loki/v3/pkg/ingester/index"
"github.com/grafana/loki/v3/pkg/iter"
+ "github.com/grafana/loki/v3/pkg/kafka"
+ "github.com/grafana/loki/v3/pkg/kafka/partitionring"
+ "github.com/grafana/loki/v3/pkg/loghttp/push"
"github.com/grafana/loki/v3/pkg/logproto"
"github.com/grafana/loki/v3/pkg/logql"
+ lokilog "github.com/grafana/loki/v3/pkg/logql/log"
"github.com/grafana/loki/v3/pkg/logql/syntax"
+ "github.com/grafana/loki/v3/pkg/logqlmodel/metadata"
"github.com/grafana/loki/v3/pkg/logqlmodel/stats"
"github.com/grafana/loki/v3/pkg/querier/plan"
"github.com/grafana/loki/v3/pkg/runtime"
@@ -59,8 +54,10 @@ import (
indexstore "github.com/grafana/loki/v3/pkg/storage/stores/index"
"github.com/grafana/loki/v3/pkg/storage/stores/index/seriesvolume"
index_stats "github.com/grafana/loki/v3/pkg/storage/stores/index/stats"
+ "github.com/grafana/loki/v3/pkg/storage/types"
"github.com/grafana/loki/v3/pkg/util"
util_log "github.com/grafana/loki/v3/pkg/util/log"
+ server_util "github.com/grafana/loki/v3/pkg/util/server"
"github.com/grafana/loki/v3/pkg/util/wal"
)
@@ -90,18 +87,18 @@ var (
type Config struct {
LifecyclerConfig ring.LifecyclerConfig `yaml:"lifecycler,omitempty" doc:"description=Configures how the lifecycle of the ingester will operate and where it will register for discovery."`
- ConcurrentFlushes int `yaml:"concurrent_flushes"`
- FlushCheckPeriod time.Duration `yaml:"flush_check_period"`
- FlushOpBackoff backoff.Config `yaml:"flush_op_backoff"`
- FlushOpTimeout time.Duration `yaml:"flush_op_timeout"`
- RetainPeriod time.Duration `yaml:"chunk_retain_period"`
- MaxChunkIdle time.Duration `yaml:"chunk_idle_period"`
- BlockSize int `yaml:"chunk_block_size"`
- TargetChunkSize int `yaml:"chunk_target_size"`
- ChunkEncoding string `yaml:"chunk_encoding"`
- parsedEncoding chunkenc.Encoding `yaml:"-"` // placeholder for validated encoding
- MaxChunkAge time.Duration `yaml:"max_chunk_age"`
- AutoForgetUnhealthy bool `yaml:"autoforget_unhealthy"`
+ ConcurrentFlushes int `yaml:"concurrent_flushes"`
+ FlushCheckPeriod time.Duration `yaml:"flush_check_period"`
+ FlushOpBackoff backoff.Config `yaml:"flush_op_backoff"`
+ FlushOpTimeout time.Duration `yaml:"flush_op_timeout"`
+ RetainPeriod time.Duration `yaml:"chunk_retain_period"`
+ MaxChunkIdle time.Duration `yaml:"chunk_idle_period"`
+ BlockSize int `yaml:"chunk_block_size"`
+ TargetChunkSize int `yaml:"chunk_target_size"`
+ ChunkEncoding string `yaml:"chunk_encoding"`
+ parsedEncoding compression.Encoding `yaml:"-"` // placeholder for validated encoding
+ MaxChunkAge time.Duration `yaml:"max_chunk_age"`
+ AutoForgetUnhealthy bool `yaml:"autoforget_unhealthy"`
// Synchronization settings. Used to make sure that ingesters cut their chunks at the same moments.
SyncPeriod time.Duration `yaml:"sync_period"`
@@ -151,7 +148,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
f.DurationVar(&cfg.MaxChunkIdle, "ingester.chunks-idle-period", 30*time.Minute, "How long chunks should sit in-memory with no updates before being flushed if they don't hit the max block size. This means that half-empty chunks will still be flushed after a certain period as long as they receive no further activity.")
	f.IntVar(&cfg.BlockSize, "ingester.chunks-block-size", 256*1024, "The targeted _uncompressed_ size in bytes of a chunk block. When this threshold is exceeded the head block will be cut and compressed inside the chunk.")
f.IntVar(&cfg.TargetChunkSize, "ingester.chunk-target-size", 1572864, "A target _compressed_ size in bytes for chunks. This is a desired size not an exact size, chunks may be slightly bigger or significantly smaller if they get flushed for other reasons (e.g. chunk_idle_period). A value of 0 creates chunks with a fixed 10 blocks, a non zero value will create chunks with a variable number of blocks to meet the target size.") // 1.5 MB
- f.StringVar(&cfg.ChunkEncoding, "ingester.chunk-encoding", chunkenc.EncGZIP.String(), fmt.Sprintf("The algorithm to use for compressing chunk. (%s)", chunkenc.SupportedEncoding()))
+ f.StringVar(&cfg.ChunkEncoding, "ingester.chunk-encoding", compression.EncGZIP.String(), fmt.Sprintf("The algorithm to use for compressing chunk. (%s)", compression.SupportedEncoding()))
f.DurationVar(&cfg.SyncPeriod, "ingester.sync-period", 1*time.Hour, "Parameters used to synchronize ingesters to cut chunks at the same moment. Sync period is used to roll over incoming entry to a new chunk. If chunk's utilization isn't high enough (eg. less than 50% when sync_min_utilization is set to 0.5), then this chunk rollover doesn't happen.")
f.Float64Var(&cfg.SyncMinUtilization, "ingester.sync-min-utilization", 0.1, "Minimum utilization of chunk when doing synchronization.")
f.IntVar(&cfg.MaxReturnedErrors, "ingester.max-ignored-stream-errors", 10, "The maximum number of errors a stream will report to the user when a push fails. 0 to make unlimited.")
@@ -165,7 +162,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
}
func (cfg *Config) Validate() error {
- enc, err := chunkenc.ParseEncoding(cfg.ChunkEncoding)
+ enc, err := compression.ParseEncoding(cfg.ChunkEncoding)
if err != nil {
return err
}
diff --git a/pkg/ingester/ingester_test.go b/pkg/ingester/ingester_test.go
index 17d34b57dc549..bd43daec7a31f 100644
--- a/pkg/ingester/ingester_test.go
+++ b/pkg/ingester/ingester_test.go
@@ -32,7 +32,7 @@ import (
"github.com/grafana/dskit/tenant"
- "github.com/grafana/loki/v3/pkg/chunkenc"
+ "github.com/grafana/loki/v3/pkg/compression"
"github.com/grafana/loki/v3/pkg/distributor/writefailures"
"github.com/grafana/loki/v3/pkg/ingester/client"
"github.com/grafana/loki/v3/pkg/ingester/index"
@@ -697,7 +697,7 @@ func TestValidate(t *testing.T) {
}{
{
in: Config{
- ChunkEncoding: chunkenc.EncGZIP.String(),
+ ChunkEncoding: compression.EncGZIP.String(),
FlushOpBackoff: backoff.Config{
MinBackoff: 100 * time.Millisecond,
MaxBackoff: 10 * time.Second,
@@ -708,7 +708,7 @@ func TestValidate(t *testing.T) {
MaxChunkAge: time.Minute,
},
expected: Config{
- ChunkEncoding: chunkenc.EncGZIP.String(),
+ ChunkEncoding: compression.EncGZIP.String(),
FlushOpBackoff: backoff.Config{
MinBackoff: 100 * time.Millisecond,
MaxBackoff: 10 * time.Second,
@@ -717,12 +717,12 @@ func TestValidate(t *testing.T) {
FlushOpTimeout: 15 * time.Second,
IndexShards: index.DefaultIndexShards,
MaxChunkAge: time.Minute,
- parsedEncoding: chunkenc.EncGZIP,
+ parsedEncoding: compression.EncGZIP,
},
},
{
in: Config{
- ChunkEncoding: chunkenc.EncSnappy.String(),
+ ChunkEncoding: compression.EncSnappy.String(),
FlushOpBackoff: backoff.Config{
MinBackoff: 100 * time.Millisecond,
MaxBackoff: 10 * time.Second,
@@ -732,7 +732,7 @@ func TestValidate(t *testing.T) {
IndexShards: index.DefaultIndexShards,
},
expected: Config{
- ChunkEncoding: chunkenc.EncSnappy.String(),
+ ChunkEncoding: compression.EncSnappy.String(),
FlushOpBackoff: backoff.Config{
MinBackoff: 100 * time.Millisecond,
MaxBackoff: 10 * time.Second,
@@ -740,7 +740,7 @@ func TestValidate(t *testing.T) {
},
FlushOpTimeout: 15 * time.Second,
IndexShards: index.DefaultIndexShards,
- parsedEncoding: chunkenc.EncSnappy,
+ parsedEncoding: compression.EncSnappy,
},
},
{
@@ -758,7 +758,7 @@ func TestValidate(t *testing.T) {
},
{
in: Config{
- ChunkEncoding: chunkenc.EncGZIP.String(),
+ ChunkEncoding: compression.EncGZIP.String(),
FlushOpBackoff: backoff.Config{
MinBackoff: 100 * time.Millisecond,
MaxBackoff: 10 * time.Second,
@@ -771,7 +771,7 @@ func TestValidate(t *testing.T) {
},
{
in: Config{
- ChunkEncoding: chunkenc.EncGZIP.String(),
+ ChunkEncoding: compression.EncGZIP.String(),
FlushOpBackoff: backoff.Config{
MinBackoff: 100 * time.Millisecond,
MaxBackoff: 10 * time.Second,
@@ -784,7 +784,7 @@ func TestValidate(t *testing.T) {
},
{
in: Config{
- ChunkEncoding: chunkenc.EncGZIP.String(),
+ ChunkEncoding: compression.EncGZIP.String(),
FlushOpBackoff: backoff.Config{
MinBackoff: 100 * time.Millisecond,
MaxBackoff: 10 * time.Second,
diff --git a/pkg/ingester/stream_test.go b/pkg/ingester/stream_test.go
index 6dbd521f1abc7..9ac86fbd30155 100644
--- a/pkg/ingester/stream_test.go
+++ b/pkg/ingester/stream_test.go
@@ -14,6 +14,7 @@ import (
gokitlog "github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
+ "github.com/grafana/loki/v3/pkg/compression"
"github.com/grafana/loki/v3/pkg/runtime"
"github.com/grafana/dskit/httpgrpc"
@@ -276,7 +277,7 @@ func TestStreamIterator(t *testing.T) {
{"gzipChunk", func() *chunkenc.MemChunk {
chunkfmt, headfmt := defaultChunkFormat(t)
- return chunkenc.NewMemChunk(chunkfmt, chunkenc.EncGZIP, headfmt, 256*1024, 0)
+ return chunkenc.NewMemChunk(chunkfmt, compression.EncGZIP, headfmt, 256*1024, 0)
}},
} {
t.Run(chk.name, func(t *testing.T) {
diff --git a/pkg/storage/bloom/v1/archive.go b/pkg/storage/bloom/v1/archive.go
index fcc3294eba977..201b071b25000 100644
--- a/pkg/storage/bloom/v1/archive.go
+++ b/pkg/storage/bloom/v1/archive.go
@@ -8,7 +8,7 @@ import (
"github.com/pkg/errors"
- "github.com/grafana/loki/v3/pkg/chunkenc"
+ "github.com/grafana/loki/v3/pkg/compression"
)
type TarEntry struct {
@@ -23,7 +23,7 @@ func TarGz(dst io.Writer, reader BlockReader) error {
return errors.Wrap(err, "error getting tar entries")
}
- gzipper := chunkenc.GetWriterPool(chunkenc.EncGZIP).GetWriter(dst)
+ gzipper := compression.GetWriterPool(compression.EncGZIP).GetWriter(dst)
defer gzipper.Close()
tarballer := tar.NewWriter(gzipper)
@@ -50,7 +50,7 @@ func TarGz(dst io.Writer, reader BlockReader) error {
}
func UnTarGz(dst string, r io.Reader) error {
- gzipper, err := chunkenc.GetReaderPool(chunkenc.EncGZIP).GetReader(r)
+ gzipper, err := compression.GetReaderPool(compression.EncGZIP).GetReader(r)
if err != nil {
return errors.Wrap(err, "error getting gzip reader")
}
diff --git a/pkg/storage/bloom/v1/archive_test.go b/pkg/storage/bloom/v1/archive_test.go
index 401cc56a218cd..e0d2f69a1c841 100644
--- a/pkg/storage/bloom/v1/archive_test.go
+++ b/pkg/storage/bloom/v1/archive_test.go
@@ -7,7 +7,7 @@ import (
"github.com/stretchr/testify/require"
- "github.com/grafana/loki/v3/pkg/chunkenc"
+ "github.com/grafana/loki/v3/pkg/compression"
v2 "github.com/grafana/loki/v3/pkg/iter/v2"
)
@@ -24,7 +24,7 @@ func TestArchive(t *testing.T) {
BlockOptions{
Schema: Schema{
version: CurrentSchemaVersion,
- encoding: chunkenc.EncSnappy,
+ encoding: compression.EncSnappy,
},
SeriesPageSize: 100,
BloomPageSize: 10 << 10,
diff --git a/pkg/storage/bloom/v1/bloom.go b/pkg/storage/bloom/v1/bloom.go
index 878f254abc178..b77af18d1aceb 100644
--- a/pkg/storage/bloom/v1/bloom.go
+++ b/pkg/storage/bloom/v1/bloom.go
@@ -7,7 +7,7 @@ import (
"github.com/pkg/errors"
- "github.com/grafana/loki/v3/pkg/chunkenc"
+ "github.com/grafana/loki/v3/pkg/compression"
"github.com/grafana/loki/v3/pkg/storage/bloom/v1/filter"
"github.com/grafana/loki/v3/pkg/util/encoding"
"github.com/grafana/loki/v3/pkg/util/mempool"
@@ -71,7 +71,7 @@ func (b *Bloom) Decode(dec *encoding.Decbuf) error {
return nil
}
-func LazyDecodeBloomPage(r io.Reader, alloc mempool.Allocator, pool chunkenc.ReaderPool, page BloomPageHeader) (*BloomPageDecoder, error) {
+func LazyDecodeBloomPage(r io.Reader, alloc mempool.Allocator, pool compression.ReaderPool, page BloomPageHeader) (*BloomPageDecoder, error) {
data, err := alloc.Get(page.Len)
if err != nil {
return nil, errors.Wrap(err, "allocating buffer")
@@ -316,7 +316,7 @@ func (b *BloomBlock) BloomPageDecoder(r io.ReadSeeker, alloc mempool.Allocator,
return nil, false, errors.Wrap(err, "seeking to bloom page")
}
- if b.schema.encoding == chunkenc.EncNone {
+ if b.schema.encoding == compression.EncNone {
res, err = LazyDecodeBloomPageNoCompression(r, alloc, page)
} else {
res, err = LazyDecodeBloomPage(r, alloc, b.schema.DecompressorPool(), page)
diff --git a/pkg/storage/bloom/v1/bloom_tokenizer_test.go b/pkg/storage/bloom/v1/bloom_tokenizer_test.go
index 7e8f5c4c99939..b5145f5f93097 100644
--- a/pkg/storage/bloom/v1/bloom_tokenizer_test.go
+++ b/pkg/storage/bloom/v1/bloom_tokenizer_test.go
@@ -12,6 +12,7 @@ import (
"github.com/grafana/dskit/multierror"
"github.com/grafana/loki/v3/pkg/chunkenc"
+ "github.com/grafana/loki/v3/pkg/compression"
"github.com/grafana/loki/v3/pkg/iter"
v2 "github.com/grafana/loki/v3/pkg/iter/v2"
"github.com/grafana/loki/v3/pkg/logproto"
@@ -104,7 +105,7 @@ func TestTokenizerPopulate(t *testing.T) {
{Name: "pod", Value: "loki-1"},
{Name: "trace_id", Value: "3bef3c91643bde73"},
}
- memChunk := chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, chunkenc.EncSnappy, chunkenc.ChunkHeadFormatFor(chunkenc.ChunkFormatV4), 256000, 1500000)
+ memChunk := chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, compression.EncSnappy, chunkenc.ChunkHeadFormatFor(chunkenc.ChunkFormatV4), 256000, 1500000)
_, _ = memChunk.Append(&push.Entry{
Timestamp: time.Unix(0, 1),
Line: testLine,
@@ -149,7 +150,7 @@ func TestBloomTokenizerPopulateWithoutPreexistingBloom(t *testing.T) {
{Name: "pod", Value: "loki-1"},
{Name: "trace_id", Value: "3bef3c91643bde73"},
}
- memChunk := chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, chunkenc.EncSnappy, chunkenc.ChunkHeadFormatFor(chunkenc.ChunkFormatV4), 256000, 1500000)
+ memChunk := chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, compression.EncSnappy, chunkenc.ChunkHeadFormatFor(chunkenc.ChunkFormatV4), 256000, 1500000)
_, _ = memChunk.Append(&push.Entry{
Timestamp: time.Unix(0, 1),
Line: testLine,
@@ -186,7 +187,7 @@ func TestBloomTokenizerPopulateWithoutPreexistingBloom(t *testing.T) {
}
func chunkRefItrFromMetadata(metadata ...push.LabelsAdapter) (iter.EntryIterator, error) {
- memChunk := chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, chunkenc.EncSnappy, chunkenc.ChunkHeadFormatFor(chunkenc.ChunkFormatV4), 256000, 1500000)
+ memChunk := chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, compression.EncSnappy, chunkenc.ChunkHeadFormatFor(chunkenc.ChunkFormatV4), 256000, 1500000)
for i, md := range metadata {
if _, err := memChunk.Append(&push.Entry{
Timestamp: time.Unix(0, int64(i)),
@@ -272,7 +273,7 @@ func BenchmarkPopulateSeriesWithBloom(b *testing.B) {
sbf := filter.NewScalableBloomFilter(1024, 0.01, 0.8)
- memChunk := chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, chunkenc.EncSnappy, chunkenc.ChunkHeadFormatFor(chunkenc.ChunkFormatV4), 256000, 1500000)
+ memChunk := chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, compression.EncSnappy, chunkenc.ChunkHeadFormatFor(chunkenc.ChunkFormatV4), 256000, 1500000)
_, _ = memChunk.Append(&push.Entry{
Timestamp: time.Unix(0, 1),
Line: testLine,
diff --git a/pkg/storage/bloom/v1/builder.go b/pkg/storage/bloom/v1/builder.go
index 08f92631b1643..1e5278c24d31a 100644
--- a/pkg/storage/bloom/v1/builder.go
+++ b/pkg/storage/bloom/v1/builder.go
@@ -7,7 +7,7 @@ import (
"github.com/pkg/errors"
- "github.com/grafana/loki/v3/pkg/chunkenc"
+ "github.com/grafana/loki/v3/pkg/compression"
iter "github.com/grafana/loki/v3/pkg/iter/v2"
"github.com/grafana/loki/v3/pkg/util/encoding"
)
@@ -66,7 +66,7 @@ func (b BlockOptions) Encode(enc *encoding.Encbuf) {
enc.PutBE64(b.BlockSize)
}
-func NewBlockOptions(enc chunkenc.Encoding, nGramLength, nGramSkip, maxBlockSizeBytes, maxBloomSizeBytes uint64) BlockOptions {
+func NewBlockOptions(enc compression.Encoding, nGramLength, nGramSkip, maxBlockSizeBytes, maxBloomSizeBytes uint64) BlockOptions {
opts := NewBlockOptionsFromSchema(Schema{
version: CurrentSchemaVersion,
encoding: enc,
@@ -122,7 +122,7 @@ func (w *PageWriter) Add(item []byte) (offset int) {
return offset
}
-func (w *PageWriter) writePage(writer io.Writer, pool chunkenc.WriterPool, crc32Hash hash.Hash32) (int, int, error) {
+func (w *PageWriter) writePage(writer io.Writer, pool compression.WriterPool, crc32Hash hash.Hash32) (int, int, error) {
// write the number of blooms in this page, must not be varint
// so we can calculate it's position+len during decoding
w.enc.PutBE64(uint64(w.n))
diff --git a/pkg/storage/bloom/v1/builder_test.go b/pkg/storage/bloom/v1/builder_test.go
index 8db825e799657..3664b60d515f7 100644
--- a/pkg/storage/bloom/v1/builder_test.go
+++ b/pkg/storage/bloom/v1/builder_test.go
@@ -9,18 +9,18 @@ import (
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
- "github.com/grafana/loki/v3/pkg/chunkenc"
+ "github.com/grafana/loki/v3/pkg/compression"
iter "github.com/grafana/loki/v3/pkg/iter/v2"
"github.com/grafana/loki/v3/pkg/util/encoding"
"github.com/grafana/loki/v3/pkg/util/mempool"
)
-var blockEncodings = []chunkenc.Encoding{
- chunkenc.EncNone,
- chunkenc.EncGZIP,
- chunkenc.EncSnappy,
- chunkenc.EncLZ4_256k,
- chunkenc.EncZstd,
+var blockEncodings = []compression.Encoding{
+ compression.EncNone,
+ compression.EncGZIP,
+ compression.EncSnappy,
+ compression.EncLZ4_256k,
+ compression.EncZstd,
}
func TestBlockOptions_RoundTrip(t *testing.T) {
@@ -28,7 +28,7 @@ func TestBlockOptions_RoundTrip(t *testing.T) {
opts := BlockOptions{
Schema: Schema{
version: CurrentSchemaVersion,
- encoding: chunkenc.EncSnappy,
+ encoding: compression.EncSnappy,
nGramLength: 10,
nGramSkip: 2,
},
@@ -205,7 +205,7 @@ func TestMergeBuilder(t *testing.T) {
blockOpts := BlockOptions{
Schema: Schema{
version: CurrentSchemaVersion,
- encoding: chunkenc.EncSnappy,
+ encoding: compression.EncSnappy,
},
SeriesPageSize: 100,
BloomPageSize: 10 << 10,
@@ -302,7 +302,7 @@ func TestMergeBuilderFingerprintCollision(t *testing.T) {
blockOpts := BlockOptions{
Schema: Schema{
version: CurrentSchemaVersion,
- encoding: chunkenc.EncSnappy,
+ encoding: compression.EncSnappy,
},
SeriesPageSize: 100,
BloomPageSize: 10 << 10,
@@ -399,7 +399,7 @@ func TestBlockReset(t *testing.T) {
schema := Schema{
version: CurrentSchemaVersion,
- encoding: chunkenc.EncSnappy,
+ encoding: compression.EncSnappy,
nGramLength: 10,
nGramSkip: 2,
}
@@ -457,9 +457,9 @@ func TestMergeBuilder_Roundtrip(t *testing.T) {
blockOpts := BlockOptions{
Schema: Schema{
version: CurrentSchemaVersion,
- encoding: chunkenc.EncSnappy, // test with different encodings?
- nGramLength: 4, // needs to match values from MkBasicSeriesWithBlooms
- nGramSkip: 0, // needs to match values from MkBasicSeriesWithBlooms
+ encoding: compression.EncSnappy, // test with different encodings?
+ nGramLength: 4, // needs to match values from MkBasicSeriesWithBlooms
+ nGramSkip: 0, // needs to match values from MkBasicSeriesWithBlooms
},
SeriesPageSize: 100,
BloomPageSize: 10 << 10,
diff --git a/pkg/storage/bloom/v1/fuse_test.go b/pkg/storage/bloom/v1/fuse_test.go
index 4f33a91309380..befa5a7a9fa54 100644
--- a/pkg/storage/bloom/v1/fuse_test.go
+++ b/pkg/storage/bloom/v1/fuse_test.go
@@ -13,7 +13,7 @@ import (
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
- "github.com/grafana/loki/v3/pkg/chunkenc"
+ "github.com/grafana/loki/v3/pkg/compression"
v2 "github.com/grafana/loki/v3/pkg/iter/v2"
"github.com/grafana/loki/v3/pkg/storage/bloom/v1/filter"
"github.com/grafana/loki/v3/pkg/util/mempool"
@@ -61,7 +61,7 @@ func TestFusedQuerier(t *testing.T) {
BlockOptions{
Schema: Schema{
version: CurrentSchemaVersion,
- encoding: chunkenc.EncSnappy,
+ encoding: compression.EncSnappy,
},
SeriesPageSize: 100,
BloomPageSize: 10 << 10,
@@ -155,7 +155,7 @@ func TestFuseMultiPage(t *testing.T) {
BlockOptions{
Schema: Schema{
version: CurrentSchemaVersion,
- encoding: chunkenc.EncSnappy,
+ encoding: compression.EncSnappy,
nGramLength: 3, // we test trigrams
nGramSkip: 0,
},
@@ -312,7 +312,7 @@ func TestLazyBloomIter_Seek_ResetError(t *testing.T) {
BlockOptions{
Schema: Schema{
version: CurrentSchemaVersion,
- encoding: chunkenc.EncSnappy,
+ encoding: compression.EncSnappy,
},
SeriesPageSize: 100,
BloomPageSize: 10, // So we force one series per page
@@ -370,7 +370,7 @@ func TestFusedQuerierSkipsEmptyBlooms(t *testing.T) {
BlockOptions{
Schema: Schema{
version: CurrentSchemaVersion,
- encoding: chunkenc.EncNone,
+ encoding: compression.EncNone,
},
SeriesPageSize: 100,
BloomPageSize: 10 << 10,
@@ -431,7 +431,7 @@ func setupBlockForBenchmark(b *testing.B) (*BlockQuerier, [][]Request, []chan Ou
BlockOptions{
Schema: Schema{
version: CurrentSchemaVersion,
- encoding: chunkenc.EncSnappy,
+ encoding: compression.EncSnappy,
},
SeriesPageSize: 256 << 10, // 256k
BloomPageSize: 1 << 20, // 1MB
diff --git a/pkg/storage/bloom/v1/schema.go b/pkg/storage/bloom/v1/schema.go
index 6fd8621654239..dd532b61559f5 100644
--- a/pkg/storage/bloom/v1/schema.go
+++ b/pkg/storage/bloom/v1/schema.go
@@ -6,7 +6,7 @@ import (
"github.com/pkg/errors"
- "github.com/grafana/loki/v3/pkg/chunkenc"
+ "github.com/grafana/loki/v3/pkg/compression"
"github.com/grafana/loki/v3/pkg/util/encoding"
)
@@ -39,14 +39,14 @@ var (
type Schema struct {
version Version
- encoding chunkenc.Encoding
+ encoding compression.Encoding
nGramLength, nGramSkip uint64
}
func NewSchema() Schema {
return Schema{
version: CurrentSchemaVersion,
- encoding: chunkenc.EncNone,
+ encoding: compression.EncNone,
nGramLength: 0,
nGramSkip: 0,
}
@@ -78,12 +78,12 @@ func (s Schema) Len() int {
return 4 + 1 + 1 + 8 + 8
}
-func (s *Schema) DecompressorPool() chunkenc.ReaderPool {
- return chunkenc.GetReaderPool(s.encoding)
+func (s *Schema) DecompressorPool() compression.ReaderPool {
+ return compression.GetReaderPool(s.encoding)
}
-func (s *Schema) CompressorPool() chunkenc.WriterPool {
- return chunkenc.GetWriterPool(s.encoding)
+func (s *Schema) CompressorPool() compression.WriterPool {
+ return compression.GetWriterPool(s.encoding)
}
func (s *Schema) Encode(enc *encoding.Encbuf) {
@@ -118,8 +118,8 @@ func (s *Schema) Decode(dec *encoding.Decbuf) error {
return errors.Errorf("invalid version. expected %d, got %d", 3, s.version)
}
- s.encoding = chunkenc.Encoding(dec.Byte())
- if _, err := chunkenc.ParseEncoding(s.encoding.String()); err != nil {
+ s.encoding = compression.Encoding(dec.Byte())
+ if _, err := compression.ParseEncoding(s.encoding.String()); err != nil {
return errors.Wrap(err, "parsing encoding")
}
diff --git a/pkg/storage/bloom/v1/test_util.go b/pkg/storage/bloom/v1/test_util.go
index 3bca46865c75b..6de5c85be4b8a 100644
--- a/pkg/storage/bloom/v1/test_util.go
+++ b/pkg/storage/bloom/v1/test_util.go
@@ -9,7 +9,7 @@ import (
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
- "github.com/grafana/loki/v3/pkg/chunkenc"
+ "github.com/grafana/loki/v3/pkg/compression"
iter "github.com/grafana/loki/v3/pkg/iter/v2"
"github.com/grafana/loki/pkg/push"
@@ -30,7 +30,7 @@ func MakeBlock(t testing.TB, nth int, fromFp, throughFp model.Fingerprint, fromT
BlockOptions{
Schema: Schema{
version: CurrentSchemaVersion,
- encoding: chunkenc.EncSnappy,
+ encoding: compression.EncSnappy,
nGramLength: 4, // see DefaultNGramLength in bloom_tokenizer_test.go
nGramSkip: 0, // see DefaultNGramSkip in bloom_tokenizer_test.go
},
diff --git a/pkg/storage/bloom/v1/versioned_builder_test.go b/pkg/storage/bloom/v1/versioned_builder_test.go
index 2fb45e13d63e2..2ef08daad8939 100644
--- a/pkg/storage/bloom/v1/versioned_builder_test.go
+++ b/pkg/storage/bloom/v1/versioned_builder_test.go
@@ -6,7 +6,7 @@ import (
"github.com/stretchr/testify/require"
- "github.com/grafana/loki/v3/pkg/chunkenc"
+ "github.com/grafana/loki/v3/pkg/compression"
v2 "github.com/grafana/loki/v3/pkg/iter/v2"
"github.com/grafana/loki/v3/pkg/util/encoding"
"github.com/grafana/loki/v3/pkg/util/mempool"
@@ -14,7 +14,7 @@ import (
// smallBlockOpts returns a set of block options that are suitable for testing
// characterized by small page sizes
-func smallBlockOpts(v Version, enc chunkenc.Encoding) BlockOptions {
+func smallBlockOpts(v Version, enc compression.Encoding) BlockOptions {
return BlockOptions{
Schema: Schema{
version: v,
@@ -35,7 +35,7 @@ func setup(v Version) (BlockOptions, []SeriesWithBlooms, BlockWriter, BlockReade
bloomsBuf := bytes.NewBuffer(nil)
writer := NewMemoryBlockWriter(indexBuf, bloomsBuf)
reader := NewByteReader(indexBuf, bloomsBuf)
- return smallBlockOpts(v, chunkenc.EncNone), data, writer, reader
+ return smallBlockOpts(v, compression.EncNone), data, writer, reader
}
func TestV3Roundtrip(t *testing.T) {
diff --git a/pkg/storage/chunk/cache/cache_test.go b/pkg/storage/chunk/cache/cache_test.go
index c6ab61666b88d..2f236c1f40e48 100644
--- a/pkg/storage/chunk/cache/cache_test.go
+++ b/pkg/storage/chunk/cache/cache_test.go
@@ -15,6 +15,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/grafana/loki/v3/pkg/chunkenc"
+ "github.com/grafana/loki/v3/pkg/compression"
"github.com/grafana/loki/v3/pkg/logproto"
"github.com/grafana/loki/v3/pkg/storage/chunk"
"github.com/grafana/loki/v3/pkg/storage/chunk/cache"
@@ -34,7 +35,7 @@ func fillCache(t *testing.T, scfg config.SchemaConfig, cache cache.Cache) ([]str
for i := 0; i < 111; i++ {
ts := model.TimeFromUnix(int64(i * chunkLen))
- cs := chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, chunkenc.EncGZIP, chunkenc.UnorderedWithStructuredMetadataHeadBlockFmt, 256*1024, 0)
+ cs := chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, compression.EncGZIP, chunkenc.UnorderedWithStructuredMetadataHeadBlockFmt, 256*1024, 0)
_, err := cs.Append(&logproto.Entry{
Timestamp: ts.Time(),
diff --git a/pkg/storage/chunk/client/grpc/grpc_client_test.go b/pkg/storage/chunk/client/grpc/grpc_client_test.go
index b0bcffce91ebf..d40d825a94428 100644
--- a/pkg/storage/chunk/client/grpc/grpc_client_test.go
+++ b/pkg/storage/chunk/client/grpc/grpc_client_test.go
@@ -8,6 +8,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/grafana/loki/v3/pkg/chunkenc"
+ "github.com/grafana/loki/v3/pkg/compression"
"github.com/grafana/loki/v3/pkg/logproto"
"github.com/grafana/loki/v3/pkg/storage/chunk"
"github.com/grafana/loki/v3/pkg/storage/config"
@@ -81,7 +82,7 @@ func TestGrpcStore(t *testing.T) {
newChunkData := func() chunk.Data {
return chunkenc.NewFacade(
chunkenc.NewMemChunk(
- chunkenc.ChunkFormatV3, chunkenc.EncNone, chunkenc.UnorderedWithStructuredMetadataHeadBlockFmt, 256*1024, 0,
+ chunkenc.ChunkFormatV3, compression.EncNone, chunkenc.UnorderedWithStructuredMetadataHeadBlockFmt, 256*1024, 0,
), 0, 0)
}
diff --git a/pkg/storage/chunk/client/testutils/testutils.go b/pkg/storage/chunk/client/testutils/testutils.go
index b34e75a6a166f..e436c1335f212 100644
--- a/pkg/storage/chunk/client/testutils/testutils.go
+++ b/pkg/storage/chunk/client/testutils/testutils.go
@@ -13,6 +13,7 @@ import (
"github.com/prometheus/prometheus/model/labels"
"github.com/grafana/loki/v3/pkg/chunkenc"
+ "github.com/grafana/loki/v3/pkg/compression"
"github.com/grafana/loki/v3/pkg/ingester/client"
"github.com/grafana/loki/v3/pkg/logproto"
"github.com/grafana/loki/v3/pkg/storage/chunk"
@@ -86,7 +87,7 @@ func CreateChunks(scfg config.SchemaConfig, startIndex, batchSize int, from mode
}
func DummyChunkFor(from, through model.Time, metric labels.Labels) chunk.Chunk {
- cs := chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, chunkenc.EncGZIP, chunkenc.UnorderedWithStructuredMetadataHeadBlockFmt, 256*1024, 0)
+ cs := chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, compression.EncGZIP, chunkenc.UnorderedWithStructuredMetadataHeadBlockFmt, 256*1024, 0)
for ts := from; ts <= through; ts = ts.Add(15 * time.Second) {
_, err := cs.Append(&logproto.Entry{Timestamp: ts.Time(), Line: fmt.Sprintf("line ts=%d", ts)})
diff --git a/pkg/storage/chunk/fetcher/fetcher_test.go b/pkg/storage/chunk/fetcher/fetcher_test.go
index 03efc9afdc809..58123957919bd 100644
--- a/pkg/storage/chunk/fetcher/fetcher_test.go
+++ b/pkg/storage/chunk/fetcher/fetcher_test.go
@@ -13,6 +13,7 @@ import (
"golang.org/x/exp/slices"
"github.com/grafana/loki/v3/pkg/chunkenc"
+ "github.com/grafana/loki/v3/pkg/compression"
"github.com/grafana/loki/v3/pkg/logproto"
"github.com/grafana/loki/v3/pkg/storage/chunk"
"github.com/grafana/loki/v3/pkg/storage/chunk/cache"
@@ -311,7 +312,7 @@ func makeChunks(now time.Time, tpls ...c) []chunk.Chunk {
from := int(chk.from) / int(time.Hour)
// This is only here because it's helpful for debugging.
		// This isn't even the write format for Loki but we don't care for the sake of these tests.
- memChk := chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, chunkenc.EncNone, chunkenc.UnorderedWithStructuredMetadataHeadBlockFmt, 256*1024, 0)
+ memChk := chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, compression.EncNone, chunkenc.UnorderedWithStructuredMetadataHeadBlockFmt, 256*1024, 0)
// To make sure the fetcher doesn't swap keys and buffers each chunk is built with different, but deterministic data
for i := 0; i < from; i++ {
_, _ = memChk.Append(&logproto.Entry{
diff --git a/pkg/storage/hack/main.go b/pkg/storage/hack/main.go
index 74257a8ba6ad0..b2d01d2e41e07 100644
--- a/pkg/storage/hack/main.go
+++ b/pkg/storage/hack/main.go
@@ -15,6 +15,7 @@ import (
"github.com/prometheus/prometheus/model/labels"
"github.com/grafana/loki/v3/pkg/chunkenc"
+ "github.com/grafana/loki/v3/pkg/compression"
"github.com/grafana/loki/v3/pkg/ingester/client"
"github.com/grafana/loki/v3/pkg/logproto"
"github.com/grafana/loki/v3/pkg/logql/syntax"
@@ -103,7 +104,7 @@ func fillStore(cm storage.ClientMetrics) error {
labelsBuilder.Set(labels.MetricName, "logs")
metric := labelsBuilder.Labels()
fp := client.Fingerprint(lbs)
- chunkEnc := chunkenc.NewMemChunk(chunkfmt, chunkenc.EncLZ4_4M, headfmt, 262144, 1572864)
+ chunkEnc := chunkenc.NewMemChunk(chunkfmt, compression.EncLZ4_4M, headfmt, 262144, 1572864)
for ts := start.UnixNano(); ts < start.UnixNano()+time.Hour.Nanoseconds(); ts = ts + time.Millisecond.Nanoseconds() {
entry := &logproto.Entry{
Timestamp: time.Unix(0, ts),
@@ -126,7 +127,7 @@ func fillStore(cm storage.ClientMetrics) error {
if flushCount >= maxChunks {
return
}
- chunkEnc = chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, chunkenc.EncLZ4_64k, chunkenc.UnorderedWithStructuredMetadataHeadBlockFmt, 262144, 1572864)
+ chunkEnc = chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, compression.EncLZ4_64k, chunkenc.UnorderedWithStructuredMetadataHeadBlockFmt, 262144, 1572864)
}
}
}(i)
diff --git a/pkg/storage/store_test.go b/pkg/storage/store_test.go
index 101c906b8b4fe..c509783d8661f 100644
--- a/pkg/storage/store_test.go
+++ b/pkg/storage/store_test.go
@@ -13,6 +13,7 @@ import (
"testing"
"time"
+ "github.com/grafana/loki/v3/pkg/compression"
"github.com/grafana/loki/v3/pkg/storage/types"
"github.com/grafana/loki/v3/pkg/util/httpreq"
@@ -2035,7 +2036,7 @@ func TestQueryReferencingStructuredMetadata(t *testing.T) {
metric := labelsBuilder.Labels()
fp := client.Fingerprint(lbs)
- chunkEnc := chunkenc.NewMemChunk(chunkfmt, chunkenc.EncLZ4_4M, headfmt, 262144, 1572864)
+ chunkEnc := chunkenc.NewMemChunk(chunkfmt, compression.EncLZ4_4M, headfmt, 262144, 1572864)
for ts := chkFrom; !ts.After(chkThrough); ts = ts.Add(time.Second) {
entry := logproto.Entry{
Timestamp: ts,
diff --git a/pkg/storage/stores/series/series_store_test.go b/pkg/storage/stores/series/series_store_test.go
index 553ea945f94f7..3bd136cb3b619 100644
--- a/pkg/storage/stores/series/series_store_test.go
+++ b/pkg/storage/stores/series/series_store_test.go
@@ -18,6 +18,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/grafana/loki/v3/pkg/chunkenc"
+ "github.com/grafana/loki/v3/pkg/compression"
"github.com/grafana/loki/v3/pkg/ingester/client"
"github.com/grafana/loki/v3/pkg/logproto"
"github.com/grafana/loki/v3/pkg/logqlmodel/stats"
@@ -752,7 +753,7 @@ func dummyChunkWithFormat(t testing.TB, now model.Time, metric labels.Labels, fo
samples := 1
chunkStart := now.Add(-time.Hour)
- chk := chunkenc.NewMemChunk(format, chunkenc.EncGZIP, headfmt, 256*1024, 0)
+ chk := chunkenc.NewMemChunk(format, compression.EncGZIP, headfmt, 256*1024, 0)
for i := 0; i < samples; i++ {
ts := time.Duration(i) * 15 * time.Second
dup, err := chk.Append(&logproto.Entry{Timestamp: chunkStart.Time().Add(ts), Line: fmt.Sprintf("line %d", i)})
diff --git a/pkg/storage/stores/series_store_write_test.go b/pkg/storage/stores/series_store_write_test.go
index f58ec1a730c52..a24608675a3d0 100644
--- a/pkg/storage/stores/series_store_write_test.go
+++ b/pkg/storage/stores/series_store_write_test.go
@@ -9,6 +9,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/grafana/loki/v3/pkg/chunkenc"
+ "github.com/grafana/loki/v3/pkg/compression"
"github.com/grafana/loki/v3/pkg/logqlmodel/stats"
"github.com/grafana/loki/v3/pkg/storage/chunk"
"github.com/grafana/loki/v3/pkg/storage/chunk/fetcher"
@@ -92,7 +93,7 @@ func TestChunkWriter_PutOne(t *testing.T) {
chunkfmt, headfmt, err := periodConfig.ChunkFormat()
require.NoError(t, err)
- memchk := chunkenc.NewMemChunk(chunkfmt, chunkenc.EncGZIP, headfmt, 256*1024, 0)
+ memchk := chunkenc.NewMemChunk(chunkfmt, compression.EncGZIP, headfmt, 256*1024, 0)
chk := chunk.NewChunk("fake", model.Fingerprint(0), []labels.Label{{Name: "foo", Value: "bar"}}, chunkenc.NewFacade(memchk, 0, 0), 100, 400)
for name, tc := range map[string]struct {
diff --git a/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/util.go b/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/util.go
index 6f1b0326a5cc6..a7ea7af3b05ef 100644
--- a/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/util.go
+++ b/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/util.go
@@ -11,6 +11,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/grafana/loki/v3/pkg/chunkenc"
+ "github.com/grafana/loki/v3/pkg/compression"
ingesterclient "github.com/grafana/loki/v3/pkg/ingester/client"
"github.com/grafana/loki/v3/pkg/logproto"
"github.com/grafana/loki/v3/pkg/storage/chunk"
@@ -31,7 +32,7 @@ func createChunk(t testing.TB, chunkFormat byte, headBlockFmt chunkenc.HeadBlock
labelsBuilder.Set(labels.MetricName, "logs")
metric := labelsBuilder.Labels()
fp := ingesterclient.Fingerprint(lbs)
- chunkEnc := chunkenc.NewMemChunk(chunkFormat, chunkenc.EncSnappy, headBlockFmt, blockSize, targetSize)
+ chunkEnc := chunkenc.NewMemChunk(chunkFormat, compression.EncSnappy, headBlockFmt, blockSize, targetSize)
for ts := from; !ts.After(through); ts = ts.Add(1 * time.Minute) {
dup, err := chunkEnc.Append(&logproto.Entry{
diff --git a/pkg/storage/stores/shipper/indexshipper/uploads/index_set.go b/pkg/storage/stores/shipper/indexshipper/uploads/index_set.go
index 19bf88842b020..36dc138509564 100644
--- a/pkg/storage/stores/shipper/indexshipper/uploads/index_set.go
+++ b/pkg/storage/stores/shipper/indexshipper/uploads/index_set.go
@@ -11,7 +11,7 @@ import (
"github.com/go-kit/log"
"github.com/go-kit/log/level"
- "github.com/grafana/loki/v3/pkg/chunkenc"
+ "github.com/grafana/loki/v3/pkg/compression"
"github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/index"
"github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/storage"
util_log "github.com/grafana/loki/v3/pkg/util/log"
@@ -145,8 +145,9 @@ func (t *indexSet) uploadIndex(ctx context.Context, idx index.Index) error {
}
}()
- compressedWriter := chunkenc.Gzip.GetWriter(f)
- defer chunkenc.Gzip.PutWriter(compressedWriter)
+ gzipPool := compression.GetWriterPool(compression.EncGZIP)
+ compressedWriter := gzipPool.GetWriter(f)
+ defer gzipPool.PutWriter(compressedWriter)
idxReader, err := idx.Reader()
if err != nil {
diff --git a/pkg/storage/util_test.go b/pkg/storage/util_test.go
index 5ef02e74b1caf..dd535197afb3f 100644
--- a/pkg/storage/util_test.go
+++ b/pkg/storage/util_test.go
@@ -12,6 +12,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/grafana/loki/v3/pkg/chunkenc"
+ "github.com/grafana/loki/v3/pkg/compression"
"github.com/grafana/loki/v3/pkg/ingester/client"
"github.com/grafana/loki/v3/pkg/logproto"
"github.com/grafana/loki/v3/pkg/logql/syntax"
@@ -108,7 +109,7 @@ func newChunk(chunkFormat byte, headBlockFmt chunkenc.HeadBlockFmt, stream logpr
lbs = builder.Labels()
}
from, through := loki_util.RoundToMilliseconds(stream.Entries[0].Timestamp, stream.Entries[len(stream.Entries)-1].Timestamp)
- chk := chunkenc.NewMemChunk(chunkFormat, chunkenc.EncGZIP, headBlockFmt, 256*1024, 0)
+ chk := chunkenc.NewMemChunk(chunkFormat, compression.EncGZIP, headBlockFmt, 256*1024, 0)
for _, e := range stream.Entries {
_, _ = chk.Append(&e)
}
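
A note on the pattern these hunks share: every call site swaps chunkenc's encoding symbols for the new compression package while keeping the call shapes identical. As a minimal sketch of the relocated API, assuming only the functions the hunks themselves call (ParseEncoding, GetWriterPool, GetWriter, PutWriter) and that "gzip" is an accepted encoding name, a pooled round trip looks roughly like:

package main

import (
	"bytes"
	"fmt"

	"github.com/grafana/loki/v3/pkg/compression"
)

func main() {
	// Same parser the limits validation below now calls.
	enc, err := compression.ParseEncoding("gzip")
	if err != nil {
		panic(err)
	}

	// Same pool pattern uploadIndex switches to further down.
	pool := compression.GetWriterPool(enc)
	var buf bytes.Buffer
	w := pool.GetWriter(&buf)
	_, _ = w.Write([]byte("hello"))
	_ = w.Close()     // flush the gzip footer before reading buf
	pool.PutWriter(w) // hand the writer back for reuse
	fmt.Println(buf.Len())
}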
diff --git a/pkg/validation/limits.go b/pkg/validation/limits.go
index 75128607b882e..58e93b3937d84 100644
--- a/pkg/validation/limits.go
+++ b/pkg/validation/limits.go
@@ -19,8 +19,8 @@ import (
"golang.org/x/time/rate"
"gopkg.in/yaml.v2"
- "github.com/grafana/loki/v3/pkg/chunkenc"
"github.com/grafana/loki/v3/pkg/compactor/deletionmode"
+ "github.com/grafana/loki/v3/pkg/compression"
"github.com/grafana/loki/v3/pkg/distributor/shardstreams"
"github.com/grafana/loki/v3/pkg/loghttp/push"
"github.com/grafana/loki/v3/pkg/logql"
@@ -496,7 +496,7 @@ func (l *Limits) Validate() error {
return errors.Wrap(err, "invalid tsdb sharding strategy")
}
- if _, err := chunkenc.ParseEncoding(l.BloomBlockEncoding); err != nil {
+ if _, err := compression.ParseEncoding(l.BloomBlockEncoding); err != nil {
return err
}
diff --git a/pkg/validation/limits_test.go b/pkg/validation/limits_test.go
index 2d4457c2a1191..87fab6837029c 100644
--- a/pkg/validation/limits_test.go
+++ b/pkg/validation/limits_test.go
@@ -12,8 +12,8 @@ import (
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v2"
- "github.com/grafana/loki/v3/pkg/chunkenc"
"github.com/grafana/loki/v3/pkg/compactor/deletionmode"
+ "github.com/grafana/loki/v3/pkg/compression"
"github.com/grafana/loki/v3/pkg/loghttp/push"
"github.com/grafana/loki/v3/pkg/logql"
)
@@ -339,7 +339,7 @@ func TestLimitsValidation(t *testing.T) {
},
{
limits: Limits{DeletionMode: "disabled", BloomBlockEncoding: "unknown"},
- expected: fmt.Errorf("invalid encoding: unknown, supported: %s", chunkenc.SupportedEncoding()),
+ expected: fmt.Errorf("invalid encoding: unknown, supported: %s", compression.SupportedEncoding()),
},
} {
desc := fmt.Sprintf("%s/%s", tc.limits.DeletionMode, tc.limits.BloomBlockEncoding)
diff --git a/tools/tsdb/migrate-versions/main.go b/tools/tsdb/migrate-versions/main.go
index 8469cd560711b..e4fb39e69a4fa 100644
--- a/tools/tsdb/migrate-versions/main.go
+++ b/tools/tsdb/migrate-versions/main.go
@@ -17,7 +17,7 @@ import (
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
- "github.com/grafana/loki/v3/pkg/chunkenc"
+ "github.com/grafana/loki/v3/pkg/compression"
"github.com/grafana/loki/v3/pkg/loki"
"github.com/grafana/loki/v3/pkg/storage"
"github.com/grafana/loki/v3/pkg/storage/chunk/client/util"
@@ -257,8 +257,9 @@ func uploadFile(idx shipperindex.Index, indexStorageClient shipperstorage.Client
}
}()
- compressedWriter := chunkenc.Gzip.GetWriter(f)
- defer chunkenc.Gzip.PutWriter(compressedWriter)
+ gzipPool := compression.GetWriterPool(compression.EncGZIP)
+ compressedWriter := gzipPool.GetWriter(f)
+ defer gzipPool.PutWriter(compressedWriter)
idxReader, err := idx.Reader()
if err != nil {
|
chore
|
Move compression utilities into separate package (#14167)
|
9052cc272593e3de9db95d40c8c6bbb164bc517e
|
2025-03-05 23:43:42
|
renovate[bot]
|
fix(deps): update module golang.org/x/sync to v0.12.0 (main) (#16569)
| false
|
diff --git a/go.mod b/go.mod
index f4d3afef1ba66..206150168d014 100644
--- a/go.mod
+++ b/go.mod
@@ -101,7 +101,7 @@ require (
go.uber.org/goleak v1.3.0
golang.org/x/crypto v0.35.0
golang.org/x/net v0.36.0
- golang.org/x/sync v0.11.0
+ golang.org/x/sync v0.12.0
golang.org/x/sys v0.31.0
golang.org/x/time v0.10.0
google.golang.org/api v0.223.0
diff --git a/go.sum b/go.sum
index 3eb9d4e360e07..50bfe611eb872 100644
--- a/go.sum
+++ b/go.sum
@@ -1444,8 +1444,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
-golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
+golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go
index b8322598ae3ea..a4ea5d14f1582 100644
--- a/vendor/golang.org/x/sync/errgroup/errgroup.go
+++ b/vendor/golang.org/x/sync/errgroup/errgroup.go
@@ -46,7 +46,7 @@ func (g *Group) done() {
// returns a non-nil error or the first time Wait returns, whichever occurs
// first.
func WithContext(ctx context.Context) (*Group, context.Context) {
- ctx, cancel := withCancelCause(ctx)
+ ctx, cancel := context.WithCancelCause(ctx)
return &Group{cancel: cancel}, ctx
}
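
The interesting part of this bump: with the module now requiring Go 1.23, the pre-1.20 withCancelCause shims (deleted just below) are gone and WithContext calls context.WithCancelCause directly. One observable consequence, sketched here, is that the group's first error becomes the cancellation cause of the derived context:

package main

import (
	"context"
	"errors"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	g, ctx := errgroup.WithContext(context.Background())
	boom := errors.New("boom")
	g.Go(func() error { return boom })
	_ = g.Wait()
	// The first error returned by a goroutine is recorded as the cancel cause.
	fmt.Println(context.Cause(ctx)) // prints: boom
}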
diff --git a/vendor/golang.org/x/sync/errgroup/go120.go b/vendor/golang.org/x/sync/errgroup/go120.go
deleted file mode 100644
index f93c740b638ca..0000000000000
--- a/vendor/golang.org/x/sync/errgroup/go120.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.20
-
-package errgroup
-
-import "context"
-
-func withCancelCause(parent context.Context) (context.Context, func(error)) {
- return context.WithCancelCause(parent)
-}
diff --git a/vendor/golang.org/x/sync/errgroup/pre_go120.go b/vendor/golang.org/x/sync/errgroup/pre_go120.go
deleted file mode 100644
index 88ce33434e238..0000000000000
--- a/vendor/golang.org/x/sync/errgroup/pre_go120.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.20
-
-package errgroup
-
-import "context"
-
-func withCancelCause(parent context.Context) (context.Context, func(error)) {
- ctx, cancel := context.WithCancel(parent)
- return ctx, func(error) { cancel() }
-}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index d7b2f4a83d64f..47e9d462b68bc 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -2025,8 +2025,8 @@ golang.org/x/oauth2/google/internal/stsexchange
golang.org/x/oauth2/internal
golang.org/x/oauth2/jws
golang.org/x/oauth2/jwt
-# golang.org/x/sync v0.11.0
-## explicit; go 1.18
+# golang.org/x/sync v0.12.0
+## explicit; go 1.23.0
golang.org/x/sync/errgroup
golang.org/x/sync/semaphore
golang.org/x/sync/singleflight
|
fix
|
update module golang.org/x/sync to v0.12.0 (main) (#16569)
|
e28c15f56c2aab62eecbaa382055eac99fc3a581
|
2024-06-27 23:17:20
|
JordanRushing
|
fix: support multi-zone ingesters when converting global to local limits for streams in limiter.go (#13321)
| false
|
diff --git a/go.mod b/go.mod
index 7d561370ad7d3..4a49b95b102bc 100644
--- a/go.mod
+++ b/go.mod
@@ -50,7 +50,7 @@ require (
github.com/gorilla/mux v1.8.0
github.com/gorilla/websocket v1.5.0
github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2
- github.com/grafana/dskit v0.0.0-20240528015923-27d7d41066d3
+ github.com/grafana/dskit v0.0.0-20240626184720-35810fdf1c6d
github.com/grafana/go-gelf/v2 v2.0.1
github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56
github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd
diff --git a/go.sum b/go.sum
index 39cc45a12030b..170ca1df4672f 100644
--- a/go.sum
+++ b/go.sum
@@ -1017,8 +1017,8 @@ github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWm
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2 h1:qhugDMdQ4Vp68H0tp/0iN17DM2ehRo1rLEdOFe/gB8I=
github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2/go.mod h1:w/aiO1POVIeXUQyl0VQSZjl5OAGDTL5aX+4v0RA1tcw=
-github.com/grafana/dskit v0.0.0-20240528015923-27d7d41066d3 h1:k8vINlI4w+RYc37NRwQlRe/IHYoEbu6KAe2XdGDeV1U=
-github.com/grafana/dskit v0.0.0-20240528015923-27d7d41066d3/go.mod h1:HvSf3uf8Ps2vPpzHeAFyZTdUcbVr+Rxpq1xcx7J/muc=
+github.com/grafana/dskit v0.0.0-20240626184720-35810fdf1c6d h1:CD8PWWX+9lYdgeMquSofmLErvCtk7jb+3/W/SH6oo/k=
+github.com/grafana/dskit v0.0.0-20240626184720-35810fdf1c6d/go.mod h1:HvSf3uf8Ps2vPpzHeAFyZTdUcbVr+Rxpq1xcx7J/muc=
github.com/grafana/go-gelf/v2 v2.0.1 h1:BOChP0h/jLeD+7F9mL7tq10xVkDG15he3T1zHuQaWak=
github.com/grafana/go-gelf/v2 v2.0.1/go.mod h1:lexHie0xzYGwCgiRGcvZ723bSNyNI8ZRD4s0CLobh90=
github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85 h1:xLuzPoOzdfNb/RF/IENCw+oLVdZB4G21VPhkHBgwSHY=
diff --git a/pkg/ingester/ingester_test.go b/pkg/ingester/ingester_test.go
index 444e1317e6972..570452af44eb0 100644
--- a/pkg/ingester/ingester_test.go
+++ b/pkg/ingester/ingester_test.go
@@ -1568,6 +1568,10 @@ func (r *readRingMock) ZonesCount() int {
return 1
}
+func (r *readRingMock) HealthyInstancesInZoneCount() int {
+ return len(r.replicationSet.Instances)
+}
+
func (r *readRingMock) Subring(_ uint32, _ int) ring.ReadRing {
return r
}
diff --git a/pkg/ingester/limiter.go b/pkg/ingester/limiter.go
index daa1fe7aec8da..1ed3a3ea27163 100644
--- a/pkg/ingester/limiter.go
+++ b/pkg/ingester/limiter.go
@@ -20,6 +20,8 @@ const (
// to count members
type RingCount interface {
HealthyInstancesCount() int
+ HealthyInstancesInZoneCount() int
+ ZonesCount() int
}
type Limits interface {
@@ -106,22 +108,31 @@ func (l *Limiter) minNonZero(first, second int) int {
}
func (l *Limiter) convertGlobalToLocalLimit(globalLimit int) int {
- if globalLimit == 0 {
+ if globalLimit == 0 || l.replicationFactor == 0 {
return 0
}
- // todo: change to healthyInstancesInZoneCount() once
- // Given we don't need a super accurate count (ie. when the ingesters
- // topology changes) and we prefer to always be in favor of the tenant,
- // we can use a per-ingester limit equal to:
- // (global limit / number of ingesters) * replication factor
- numIngesters := l.ring.HealthyInstancesCount()
- // May happen because the number of ingesters is asynchronously updated.
- // If happens, we just temporarily ignore the global limit.
+ zonesCount := l.ring.ZonesCount()
+ if zonesCount <= 1 {
+ return calculateLimitForSingleZone(globalLimit, l)
+ }
+
+ return calculateLimitForMultipleZones(globalLimit, zonesCount, l)
+}
+
+func calculateLimitForSingleZone(globalLimit int, l *Limiter) int {
+ numIngesters := l.ring.HealthyInstancesCount()
if numIngesters > 0 {
return int((float64(globalLimit) / float64(numIngesters)) * float64(l.replicationFactor))
}
+ return 0
+}
+func calculateLimitForMultipleZones(globalLimit, zonesCount int, l *Limiter) int {
+ ingestersInZone := l.ring.HealthyInstancesInZoneCount()
+ if ingestersInZone > 0 {
+ return int((float64(globalLimit) * float64(l.replicationFactor)) / float64(zonesCount) / float64(ingestersInZone))
+ }
return 0
}
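
To make the arithmetic concrete, here is a small standalone mirror of the multi-zone branch above, fed with the "MultipleZones" row from the test table that follows (global limit 200, replication factor 3, 3 zones, 10 healthy ingesters per zone). This is a sketch of the formula only, not the production code path:

package main

import "fmt"

// localLimit mirrors calculateLimitForMultipleZones above.
func localLimit(global, rf, zones, healthyInZone int) int {
	if healthyInZone <= 0 {
		return 0
	}
	return int((float64(global) * float64(rf)) / float64(zones) / float64(healthyInZone))
}

func main() {
	// (200 * 3) / 3 / 10 = 20 streams allowed per ingester.
	fmt.Println(localLimit(200, 3, 3, 10))
}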
diff --git a/pkg/ingester/limiter_test.go b/pkg/ingester/limiter_test.go
index b00bede10417d..0d0055d0a0afb 100644
--- a/pkg/ingester/limiter_test.go
+++ b/pkg/ingester/limiter_test.go
@@ -214,6 +214,14 @@ func (m *ringCountMock) HealthyInstancesCount() int {
return m.count
}
+func (m *ringCountMock) ZonesCount() int {
+ return 1
+}
+
+func (m *ringCountMock) HealthyInstancesInZoneCount() int {
+ return m.count
+}
+
// Assert some of the weirder (bug?) behavior of golang.org/x/time/rate
func TestGoLimiter(t *testing.T) {
for _, tc := range []struct {
@@ -254,3 +262,59 @@ func TestGoLimiter(t *testing.T) {
})
}
}
+
+type MockRing struct {
+ zonesCount int
+ healthyInstancesCount int
+ healthyInstancesInZoneCount int
+}
+
+func (m *MockRing) ZonesCount() int {
+ return m.zonesCount
+}
+
+func (m *MockRing) HealthyInstancesCount() int {
+ return m.healthyInstancesCount
+}
+
+func (m *MockRing) HealthyInstancesInZoneCount() int {
+ return m.healthyInstancesInZoneCount
+}
+
+func TestConvertGlobalToLocalLimit(t *testing.T) {
+ tests := []struct {
+ name string
+ globalLimit int
+ zonesCount int
+ healthyInstancesCount int
+ healthyInstancesInZoneCount int
+ replicationFactor int
+ expectedLocalLimit int
+ }{
+ {"GlobalLimitZero", 0, 1, 1, 1, 3, 0},
+ {"SingleZoneMultipleIngesters", 100, 1, 10, 10, 3, 30},
+ {"MultipleZones", 200, 3, 30, 10, 3, 20},
+ {"MultipleZonesNoHealthyIngesters", 200, 2, 0, 0, 3, 0},
+ {"MultipleZonesNoHealthyIngestersInZone", 200, 3, 10, 0, 3, 0},
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ mockRing := &MockRing{
+ zonesCount: tc.zonesCount,
+ healthyInstancesCount: tc.healthyInstancesCount,
+ healthyInstancesInZoneCount: tc.healthyInstancesInZoneCount,
+ }
+
+ limiter := &Limiter{
+ ring: mockRing,
+ replicationFactor: tc.replicationFactor,
+ }
+
+ localLimit := limiter.convertGlobalToLocalLimit(tc.globalLimit)
+ if localLimit != tc.expectedLocalLimit {
+ t.Errorf("expected %d, got %d", tc.expectedLocalLimit, localLimit)
+ }
+ })
+ }
+}
diff --git a/vendor/github.com/grafana/dskit/grpcutil/dns_resolver.go b/vendor/github.com/grafana/dskit/grpcutil/dns_resolver.go
index ef9c6398944e7..507028aa602c8 100644
--- a/vendor/github.com/grafana/dskit/grpcutil/dns_resolver.go
+++ b/vendor/github.com/grafana/dskit/grpcutil/dns_resolver.go
@@ -208,7 +208,7 @@ func (w *dnsWatcher) lookupSRV() map[string]*Update {
for _, a := range addrs {
a, ok := formatIP(a)
if !ok {
- level.Error(w.logger).Log("failed IP parsing", "err", err)
+ level.Error(w.logger).Log("msg", "failed IP parsing", "err", err)
continue
}
addr := a + ":" + strconv.Itoa(int(s.Port))
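
The one-line vendored fix above matters because go-kit's Log takes alternating key/value pairs; with the bare "failed IP parsing" string, the pairing shifted by one and the message was emitted as a key with "err" as its value. A minimal illustration, assuming nothing beyond the go-kit log API dskit already imports:

package main

import (
	"os"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
)

func main() {
	logger := log.NewLogfmtLogger(os.Stderr)
	// Correct: an even list of key/value pairs.
	level.Error(logger).Log("msg", "failed IP parsing", "err", "invalid address")
	// The old call shape: three elements, so the pairing is off by one
	// and the trailing key is given a "(MISSING)" value.
	level.Error(logger).Log("failed IP parsing", "err", "invalid address")
}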
diff --git a/vendor/github.com/grafana/dskit/grpcutil/health_check.go b/vendor/github.com/grafana/dskit/grpcutil/health_check.go
index 44b5e15e7657c..e66ccd6fae496 100644
--- a/vendor/github.com/grafana/dskit/grpcutil/health_check.go
+++ b/vendor/github.com/grafana/dskit/grpcutil/health_check.go
@@ -16,7 +16,7 @@ type Check func(ctx context.Context) bool
// WithManager returns a new Check that tests if the managed services are healthy.
func WithManager(manager *services.Manager) Check {
- return func(ctx context.Context) bool {
+ return func(context.Context) bool {
states := manager.ServicesByState()
// Given this is a health check endpoint for the whole instance, we should consider
@@ -33,7 +33,7 @@ func WithManager(manager *services.Manager) Check {
// WithShutdownRequested returns a new Check that returns false when shutting down.
func WithShutdownRequested(requested *atomic.Bool) Check {
- return func(ctx context.Context) bool {
+ return func(context.Context) bool {
return !requested.Load()
}
}
diff --git a/vendor/github.com/grafana/dskit/httpgrpc/httpgrpc.go b/vendor/github.com/grafana/dskit/httpgrpc/httpgrpc.go
index b755e2adceaeb..02e6e493736b4 100644
--- a/vendor/github.com/grafana/dskit/httpgrpc/httpgrpc.go
+++ b/vendor/github.com/grafana/dskit/httpgrpc/httpgrpc.go
@@ -116,8 +116,14 @@ func Errorf(code int, tmpl string, args ...interface{}) error {
})
}
-// ErrorFromHTTPResponse converts an HTTP response into a grpc error
+// ErrorFromHTTPResponse converts an HTTP response into a grpc error, using the HTTP response body as the error message.
+// Note that if the HTTP response body contains a non-utf8 string, the returned error cannot be marshalled by protobuf.
func ErrorFromHTTPResponse(resp *HTTPResponse) error {
+ return ErrorFromHTTPResponseWithMessage(resp, string(resp.Body))
+}
+
+// ErrorFromHTTPResponseWithMessage converts an HTTP response into a grpc error, using the supplied message as the error message.
+func ErrorFromHTTPResponseWithMessage(resp *HTTPResponse, msg string) error {
a, err := types.MarshalAny(resp)
if err != nil {
return err
@@ -125,7 +131,7 @@ func ErrorFromHTTPResponse(resp *HTTPResponse) error {
return status.ErrorProto(&spb.Status{
Code: resp.Code,
- Message: string(resp.Body),
+ Message: msg,
Details: []*types.Any{a},
})
}
diff --git a/vendor/github.com/grafana/dskit/httpgrpc/server/server.go b/vendor/github.com/grafana/dskit/httpgrpc/server/server.go
index b73c5a0f77502..6a831dac0f8fd 100644
--- a/vendor/github.com/grafana/dskit/httpgrpc/server/server.go
+++ b/vendor/github.com/grafana/dskit/httpgrpc/server/server.go
@@ -26,12 +26,22 @@ import (
)
var (
- // DoNotLogErrorHeaderKey is a header key used for marking non-loggable errors. More precisely, if an HTTP response
+ // DoNotLogErrorHeaderKey is a header name used for marking non-loggable errors. More precisely, if an HTTP response
// has a status code 5xx, and contains a header with key DoNotLogErrorHeaderKey and any values, the generated error
// will be marked as non-loggable.
DoNotLogErrorHeaderKey = http.CanonicalHeaderKey("X-DoNotLogError")
+
+	// ErrorMessageHeaderKey is a header name for a header that contains the error message that should be used when Server.Handle
+	// (httpgrpc.HTTP/Handle implementation) decides to return the response as an error, using status.ErrorProto.
+	// Normally Server.Handle would use the entire response body as the error message, but the Message field of the rpc.Status object
+	// is a string, and if the body contains non-utf8 bytes, marshalling of this object will fail.
+ ErrorMessageHeaderKey = http.CanonicalHeaderKey("X-ErrorMessage")
)
+type contextType int
+
+const handledByHttpgrpcServer contextType = 0
+
type Option func(*Server)
func WithReturn4XXErrors(s *Server) {
@@ -59,6 +69,8 @@ func NewServer(handler http.Handler, opts ...Option) *Server {
// Handle implements HTTPServer.
func (s Server) Handle(ctx context.Context, r *httpgrpc.HTTPRequest) (*httpgrpc.HTTPResponse, error) {
+ ctx = context.WithValue(ctx, handledByHttpgrpcServer, true)
+
req, err := httpgrpc.ToHTTPRequest(ctx, r)
if err != nil {
return nil, err
@@ -74,13 +86,24 @@ func (s Server) Handle(ctx context.Context, r *httpgrpc.HTTPRequest) (*httpgrpc.
header.Del(DoNotLogErrorHeaderKey) // remove before converting to httpgrpc resp
}
+ errorMessageFromHeader := ""
+ if msg, ok := header[ErrorMessageHeaderKey]; ok {
+ errorMessageFromHeader = msg[0]
+ header.Del(ErrorMessageHeaderKey) // remove before converting to httpgrpc resp
+ }
+
resp := &httpgrpc.HTTPResponse{
Code: int32(recorder.Code),
Headers: httpgrpc.FromHeader(header),
Body: recorder.Body.Bytes(),
}
if s.shouldReturnError(resp) {
- err := httpgrpc.ErrorFromHTTPResponse(resp)
+ var err error
+ if errorMessageFromHeader != "" {
+ err = httpgrpc.ErrorFromHTTPResponseWithMessage(resp, errorMessageFromHeader)
+ } else {
+ err = httpgrpc.ErrorFromHTTPResponse(resp)
+ }
if doNotLogError {
err = middleware.DoNotLogError{Err: err}
}
@@ -206,3 +229,13 @@ func (c *Client) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return
}
}
+
+// IsHandledByHttpgrpcServer returns true if context is associated with HTTP request that was initiated by
+// Server.Handle, which is an implementation of httpgrpc.HTTP/Handle gRPC method.
+func IsHandledByHttpgrpcServer(ctx context.Context) bool {
+ val := ctx.Value(handledByHttpgrpcServer)
+ if v, ok := val.(bool); ok {
+ return v
+ }
+ return false
+}
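
Putting the new header to use: a downstream HTTP handler that must return a non-utf8 body can now hand Server.Handle a clean, marshal-safe message via X-ErrorMessage. A hedged sketch of such a handler follows (the handler and its message are hypothetical; only the header name and the 5xx-to-error conversion come from the change above):

package main

import "net/http"

// errHandler returns a 5xx with an arbitrary binary body. When served
// behind the httpgrpc Server above, the X-ErrorMessage header (stripped
// before conversion) supplies the gRPC status message instead of the body.
func errHandler(w http.ResponseWriter, _ *http.Request) {
	w.Header().Set("X-ErrorMessage", "object fetch failed") // hypothetical message
	w.WriteHeader(http.StatusBadGateway)
	_, _ = w.Write([]byte{0xff, 0xfe, 0x00}) // not valid utf-8
}

func main() {
	http.Handle("/fetch", http.HandlerFunc(errHandler))
	_ = http.ListenAndServe(":8080", nil)
}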
diff --git a/vendor/github.com/grafana/dskit/kv/consul/client.go b/vendor/github.com/grafana/dskit/kv/consul/client.go
index 5501a67d894b2..a750ec8263377 100644
--- a/vendor/github.com/grafana/dskit/kv/consul/client.go
+++ b/vendor/github.com/grafana/dskit/kv/consul/client.go
@@ -116,7 +116,7 @@ func (c *Client) Put(ctx context.Context, key string, value interface{}) error {
return err
}
- return instrument.CollectedRequest(ctx, "Put", c.consulMetrics.consulRequestDuration, instrument.ErrorCode, func(ctx context.Context) error {
+ return instrument.CollectedRequest(ctx, "Put", c.consulMetrics.consulRequestDuration, instrument.ErrorCode, func(context.Context) error {
_, err := c.kv.Put(&consul.KVPair{
Key: key,
Value: bytes,
@@ -376,16 +376,18 @@ func checkLastIndex(index, metaLastIndex uint64) (newIndex uint64, skip bool) {
// Don't just keep using index=0.
// After blocking request, returned index must be at least 1.
return 1, false
- } else if metaLastIndex < index {
+ }
+ if metaLastIndex < index {
// Index reset.
return 0, false
- } else if index == metaLastIndex {
+ }
+ if index == metaLastIndex {
// Skip if the index is the same as last time, because the key value is
// guaranteed to be the same as last time
return metaLastIndex, true
- } else {
- return metaLastIndex, false
}
+
+ return metaLastIndex, false
}
func (c *Client) createRateLimiter() *rate.Limiter {
diff --git a/vendor/github.com/grafana/dskit/kv/etcd/mock.go b/vendor/github.com/grafana/dskit/kv/etcd/mock.go
index 6349cee1c281f..c8eeb3183aa0d 100644
--- a/vendor/github.com/grafana/dskit/kv/etcd/mock.go
+++ b/vendor/github.com/grafana/dskit/kv/etcd/mock.go
@@ -234,15 +234,17 @@ func (m *mockKV) Do(_ context.Context, op clientv3.Op) (clientv3.OpResponse, err
func (m *mockKV) doInternal(op clientv3.Op) (clientv3.OpResponse, error) {
if op.IsGet() {
return m.doGet(op)
- } else if op.IsPut() {
+ }
+ if op.IsPut() {
return m.doPut(op)
- } else if op.IsDelete() {
+ }
+ if op.IsDelete() {
return m.doDelete(op)
- } else if op.IsTxn() {
+ }
+ if op.IsTxn() {
return m.doTxn(op)
- } else {
- panic(fmt.Sprintf("unsupported operation: %+v", op))
}
+ panic(fmt.Sprintf("unsupported operation: %+v", op))
}
func (m *mockKV) doGet(op clientv3.Op) (clientv3.OpResponse, error) {
diff --git a/vendor/github.com/grafana/dskit/kv/memberlist/memberlist_client.go b/vendor/github.com/grafana/dskit/kv/memberlist/memberlist_client.go
index e8a94debe181c..a1b659d4097e1 100644
--- a/vendor/github.com/grafana/dskit/kv/memberlist/memberlist_client.go
+++ b/vendor/github.com/grafana/dskit/kv/memberlist/memberlist_client.go
@@ -1171,7 +1171,7 @@ func (m *KV) queueBroadcast(key string, content []string, version uint, message
content: content,
version: version,
msg: message,
- finished: func(b ringBroadcast) {
+ finished: func(ringBroadcast) {
m.totalSizeOfBroadcastMessagesInQueue.Sub(float64(l))
},
logger: m.logger,
diff --git a/vendor/github.com/grafana/dskit/kv/multi.go b/vendor/github.com/grafana/dskit/kv/multi.go
index 3ac69c9fe475e..e1e461ea1f281 100644
--- a/vendor/github.com/grafana/dskit/kv/multi.go
+++ b/vendor/github.com/grafana/dskit/kv/multi.go
@@ -350,7 +350,7 @@ func (m *MultiClient) writeToSecondary(ctx context.Context, primary kvclient, ke
}
m.mirrorWritesCounter.Inc()
- err := kvc.client.CAS(ctx, key, func(in interface{}) (out interface{}, retry bool, err error) {
+ err := kvc.client.CAS(ctx, key, func(interface{}) (out interface{}, retry bool, err error) {
// try once
return newValue, false, nil
})
diff --git a/vendor/github.com/grafana/dskit/middleware/logging.go b/vendor/github.com/grafana/dskit/middleware/logging.go
index fe00d3a82846c..c2306292b3f4c 100644
--- a/vendor/github.com/grafana/dskit/middleware/logging.go
+++ b/vendor/github.com/grafana/dskit/middleware/logging.go
@@ -56,9 +56,11 @@ func NewLogMiddleware(log log.Logger, logRequestHeaders bool, logRequestAtInfoLe
// logWithRequest information from the request and context as fields.
func (l Log) logWithRequest(r *http.Request) log.Logger {
localLog := l.Log
- traceID, ok := tracing.ExtractTraceID(r.Context())
+ traceID, ok := tracing.ExtractSampledTraceID(r.Context())
if ok {
localLog = log.With(localLog, "trace_id", traceID)
+ } else if traceID != "" {
+ localLog = log.With(localLog, "trace_id_unsampled", traceID)
}
if l.SourceIPs != nil {
diff --git a/vendor/github.com/grafana/dskit/ring/lifecycler.go b/vendor/github.com/grafana/dskit/ring/lifecycler.go
index 4f51b46a503c3..7c54eabdd873a 100644
--- a/vendor/github.com/grafana/dskit/ring/lifecycler.go
+++ b/vendor/github.com/grafana/dskit/ring/lifecycler.go
@@ -158,11 +158,12 @@ type Lifecycler struct {
readySince time.Time
// Keeps stats updated at every heartbeat period
- countersLock sync.RWMutex
- healthyInstancesCount int
- instancesCount int
- instancesInZoneCount int
- zonesCount int
+ countersLock sync.RWMutex
+ healthyInstancesCount int
+ instancesCount int
+ healthyInstancesInZoneCount int
+ instancesInZoneCount int
+ zonesCount int
tokenGenerator TokenGenerator
// The maximum time allowed to wait on the CanJoin() condition.
@@ -441,6 +442,15 @@ func (i *Lifecycler) InstancesCount() int {
return i.instancesCount
}
+// HealthyInstancesInZoneCount returns the number of healthy instances in the ring that are registered in
+// this lifecycler's zone, updated during the last heartbeat period.
+func (i *Lifecycler) HealthyInstancesInZoneCount() int {
+ i.countersLock.RLock()
+ defer i.countersLock.RUnlock()
+
+ return i.healthyInstancesInZoneCount
+}
+
// InstancesInZoneCount returns the number of instances in the ring that are registered in
// this lifecycler's zone, updated during the last heartbeat period.
func (i *Lifecycler) InstancesInZoneCount() int {
@@ -913,6 +923,7 @@ func (i *Lifecycler) updateCounters(ringDesc *Desc) {
healthyInstancesCount := 0
instancesCount := 0
zones := map[string]int{}
+ healthyInstancesInZone := map[string]int{}
if ringDesc != nil {
now := time.Now()
@@ -924,6 +935,7 @@ func (i *Lifecycler) updateCounters(ringDesc *Desc) {
// Count the number of healthy instances for Write operation.
if ingester.IsHealthy(Write, i.cfg.RingConfig.HeartbeatTimeout, now) {
healthyInstancesCount++
+ healthyInstancesInZone[ingester.Zone]++
}
}
}
@@ -932,6 +944,7 @@ func (i *Lifecycler) updateCounters(ringDesc *Desc) {
i.countersLock.Lock()
i.healthyInstancesCount = healthyInstancesCount
i.instancesCount = instancesCount
+ i.healthyInstancesInZoneCount = healthyInstancesInZone[i.cfg.Zone]
i.instancesInZoneCount = zones[i.cfg.Zone]
i.zonesCount = len(zones)
i.countersLock.Unlock()
diff --git a/vendor/github.com/grafana/dskit/services/failure_watcher.go b/vendor/github.com/grafana/dskit/services/failure_watcher.go
index 9cb7e3a8fa7fa..657656f50d472 100644
--- a/vendor/github.com/grafana/dskit/services/failure_watcher.go
+++ b/vendor/github.com/grafana/dskit/services/failure_watcher.go
@@ -35,7 +35,7 @@ func (w *FailureWatcher) WatchService(service Service) {
panic(errFailureWatcherNotInitialized)
}
- service.AddListener(NewListener(nil, nil, nil, nil, func(from State, failure error) {
+ service.AddListener(NewListener(nil, nil, nil, nil, func(_ State, failure error) {
w.ch <- errors.Wrapf(failure, "service %s failed", DescribeService(service))
}))
}
diff --git a/vendor/github.com/grafana/dskit/spanprofiler/tracer.go b/vendor/github.com/grafana/dskit/spanprofiler/tracer.go
index c28b52b11d444..e4ed2974a4a25 100644
--- a/vendor/github.com/grafana/dskit/spanprofiler/tracer.go
+++ b/vendor/github.com/grafana/dskit/spanprofiler/tracer.go
@@ -41,6 +41,9 @@ func (t *tracer) StartSpan(operationName string, opts ...opentracing.StartSpanOp
if !ok {
return span
}
+ if !spanCtx.IsSampled() {
+ return span
+ }
// pprof labels are attached only once, at the span root level.
if !isRootSpan(opts...) {
return span
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 083b3735cf759..4f1fcd57b8d26 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -927,7 +927,7 @@ github.com/gorilla/websocket
# github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2
## explicit; go 1.17
github.com/grafana/cloudflare-go
-# github.com/grafana/dskit v0.0.0-20240528015923-27d7d41066d3
+# github.com/grafana/dskit v0.0.0-20240626184720-35810fdf1c6d
## explicit; go 1.20
github.com/grafana/dskit/aws
github.com/grafana/dskit/backoff
|
fix
|
support multi-zone ingesters when converting global to local limits for streams in limiter.go (#13321)
|
2e7dbe6da3bc2b85d5582c6c98194987c542d499
|
2024-03-19 23:28:50
|
Pieter
|
fix: Remove lost character in ciliumnetworkpolicy.yaml (#12263)
| false
|
diff --git a/production/helm/loki/templates/ciliumnetworkpolicy.yaml b/production/helm/loki/templates/ciliumnetworkpolicy.yaml
index fb2ce12fc6c88..fbd2619d807b2 100644
--- a/production/helm/loki/templates/ciliumnetworkpolicy.yaml
+++ b/production/helm/loki/templates/ciliumnetworkpolicy.yaml
@@ -149,7 +149,7 @@ spec:
{{- range $port := .Values.networkPolicy.externalStorage.ports }}
- port: "{{ $port }}"
protocol: TCP
- {{- end }}à
+ {{- end }}
{{- if .Values.networkPolicy.externalStorage.cidrs }}
{{- range $cidr := .Values.networkPolicy.externalStorage.cidrs }}
toCIDR:
|
fix
|
Remove lost character in ciliumnetworkpolicy.yaml (#12263)
|
c0856bf4a66fd589137576b87a80f5531992660f
|
2024-11-01 03:32:24
|
Trevor Whitney
|
chore: add jaeger mixin to pattern ingester container (#14702)
| false
|
diff --git a/production/ksonnet/loki/patterns.libsonnet b/production/ksonnet/loki/patterns.libsonnet
index 06f181cb5d078..b155f1d5a64ed 100644
--- a/production/ksonnet/loki/patterns.libsonnet
+++ b/production/ksonnet/loki/patterns.libsonnet
@@ -30,7 +30,8 @@ local k = import 'ksonnet-util/kausal.libsonnet';
container.mixin.readinessProbe.withTimeoutSeconds(1) +
k.util.resourcesRequests('1', '7Gi') +
k.util.resourcesLimits('2', '14Gi') +
- container.withEnvMixin($._config.commonEnvs),
+ container.withEnvMixin($._config.commonEnvs) +
+ $.jaeger_mixin,
pattern_ingester_statefulset:
|
chore
|
add jaeger mixin to pattern ingester container (#14702)
|
2f54f8d5be3d1bc8c2b675e7689ea6e9b3340b7a
|
2024-02-24 01:26:21
|
Christian Haudum
|
chore(blooms): Implement BloomStore as a service (#12044)
| false
|
diff --git a/pkg/bloomcompactor/bloomcompactor.go b/pkg/bloomcompactor/bloomcompactor.go
index 85bca48f54f3d..40eec568247fb 100644
--- a/pkg/bloomcompactor/bloomcompactor.go
+++ b/pkg/bloomcompactor/bloomcompactor.go
@@ -67,15 +67,17 @@ func New(
fetcherProvider stores.ChunkFetcherProvider,
sharding util_ring.TenantSharding,
limits Limits,
+ store bloomshipper.Store,
logger log.Logger,
r prometheus.Registerer,
) (*Compactor, error) {
c := &Compactor{
- cfg: cfg,
- schemaCfg: schemaCfg,
- logger: logger,
- sharding: sharding,
- limits: limits,
+ cfg: cfg,
+ schemaCfg: schemaCfg,
+ logger: logger,
+ sharding: sharding,
+ limits: limits,
+ bloomStore: store,
}
tsdbStore, err := NewTSDBStores(schemaCfg, storeCfg, clientMetrics)
@@ -84,13 +86,6 @@ func New(
}
c.tsdbStore = tsdbStore
- // TODO(owen-d): export bloomstore as a dependency that can be reused by the compactor & gateway rather that
- bloomStore, err := bloomshipper.NewBloomStore(schemaCfg.Configs, storeCfg, clientMetrics, nil, nil, logger)
- if err != nil {
- return nil, errors.Wrap(err, "failed to create bloom store")
- }
- c.bloomStore = bloomStore
-
// initialize metrics
c.btMetrics = v1.NewMetrics(prometheus.WrapRegistererWithPrefix("loki_bloom_tokenizer_", r))
c.metrics = NewMetrics(r, c.btMetrics)
diff --git a/pkg/bloomgateway/bloomgateway.go b/pkg/bloomgateway/bloomgateway.go
index d0ac92db59a34..ee358ebbbc664 100644
--- a/pkg/bloomgateway/bloomgateway.go
+++ b/pkg/bloomgateway/bloomgateway.go
@@ -59,12 +59,8 @@ import (
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/logql/syntax"
- "github.com/grafana/loki/pkg/logqlmodel/stats"
"github.com/grafana/loki/pkg/queue"
- "github.com/grafana/loki/pkg/storage"
v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
- "github.com/grafana/loki/pkg/storage/chunk/cache"
- "github.com/grafana/loki/pkg/storage/config"
"github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper"
"github.com/grafana/loki/pkg/util"
"github.com/grafana/loki/pkg/util/constants"
@@ -181,7 +177,7 @@ func (l *fixedQueueLimits) MaxConsumers(_ string, _ int) int {
}
// New returns a new instance of the Bloom Gateway.
-func New(cfg Config, schemaCfg config.SchemaConfig, storageCfg storage.Config, overrides Limits, cm storage.ClientMetrics, logger log.Logger, reg prometheus.Registerer) (*Gateway, error) {
+func New(cfg Config, store bloomshipper.Store, logger log.Logger, reg prometheus.Registerer) (*Gateway, error) {
g := &Gateway{
cfg: cfg,
logger: logger,
@@ -192,35 +188,11 @@ func New(cfg Config, schemaCfg config.SchemaConfig, storageCfg storage.Config, o
},
workerMetrics: newWorkerMetrics(reg, constants.Loki, metricsSubsystem),
queueMetrics: queue.NewMetrics(reg, constants.Loki, metricsSubsystem),
+ bloomStore: store,
}
- var err error
-
g.queue = queue.NewRequestQueue(cfg.MaxOutstandingPerTenant, time.Minute, &fixedQueueLimits{0}, g.queueMetrics)
g.activeUsers = util.NewActiveUsersCleanupWithDefaultValues(g.queueMetrics.Cleanup)
- var metasCache cache.Cache
- mcCfg := storageCfg.BloomShipperConfig.MetasCache
- if cache.IsCacheConfigured(mcCfg) {
- metasCache, err = cache.New(mcCfg, reg, logger, stats.BloomMetasCache, constants.Loki)
- if err != nil {
- return nil, err
- }
- }
-
- var blocksCache cache.TypedCache[string, bloomshipper.BlockDirectory]
- bcCfg := storageCfg.BloomShipperConfig.BlocksCache
- if bcCfg.IsEnabled() {
- blocksCache = bloomshipper.NewBlocksCache(bcCfg, reg, logger)
- }
-
- store, err := bloomshipper.NewBloomStore(schemaCfg.Configs, storageCfg, cm, metasCache, blocksCache, logger)
- if err != nil {
- return nil, err
- }
-
- // We need to keep a reference to be able to call Stop() on shutdown of the gateway.
- g.bloomStore = store
-
if err := g.initServices(); err != nil {
return nil, err
}
@@ -286,7 +258,6 @@ func (g *Gateway) running(ctx context.Context) error {
}
func (g *Gateway) stopping(_ error) error {
- g.bloomStore.Stop()
return services.StopManagerAndAwaitStopped(context.Background(), g.serviceMngr)
}
diff --git a/pkg/bloomgateway/bloomgateway_test.go b/pkg/bloomgateway/bloomgateway_test.go
index f853398894e00..449c8b17a538f 100644
--- a/pkg/bloomgateway/bloomgateway_test.go
+++ b/pkg/bloomgateway/bloomgateway_test.go
@@ -4,7 +4,6 @@ import (
"context"
"fmt"
"math/rand"
- "os"
"testing"
"time"
@@ -26,6 +25,7 @@ import (
v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
"github.com/grafana/loki/pkg/storage/chunk/client/local"
"github.com/grafana/loki/pkg/storage/config"
+ "github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper"
bloomshipperconfig "github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper/config"
lokiring "github.com/grafana/loki/pkg/util/ring"
"github.com/grafana/loki/pkg/validation"
@@ -46,10 +46,8 @@ func newLimits() *validation.Overrides {
return overrides
}
-func TestBloomGateway_StartStopService(t *testing.T) {
+func setupBloomStore(t *testing.T) *bloomshipper.BloomStore {
logger := log.NewNopLogger()
- reg := prometheus.NewRegistry()
- limits := newLimits()
cm := storage.NewClientMetrics()
t.Cleanup(cm.Unregister)
@@ -79,6 +77,17 @@ func TestBloomGateway_StartStopService(t *testing.T) {
},
}
+ store, err := bloomshipper.NewBloomStore(schemaCfg.Configs, storageCfg, cm, nil, nil, logger)
+ require.NoError(t, err)
+ t.Cleanup(store.Stop)
+
+ return store
+}
+
+func TestBloomGateway_StartStopService(t *testing.T) {
+ logger := log.NewNopLogger()
+ reg := prometheus.NewRegistry()
+
t.Run("start and stop bloom gateway", func(t *testing.T) {
kvStore, closer := consul.NewInMemoryClient(ring.GetCodec(), logger, reg)
t.Cleanup(func() {
@@ -99,7 +108,8 @@ func TestBloomGateway_StartStopService(t *testing.T) {
MaxOutstandingPerTenant: 1024,
}
- gw, err := New(cfg, schemaCfg, storageCfg, limits, cm, logger, reg)
+ store := setupBloomStore(t)
+ gw, err := New(cfg, store, logger, reg)
require.NoError(t, err)
err = services.StartAndAwaitRunning(context.Background(), gw)
@@ -116,37 +126,10 @@ func TestBloomGateway_StartStopService(t *testing.T) {
func TestBloomGateway_FilterChunkRefs(t *testing.T) {
tenantID := "test"
- logger := log.NewLogfmtLogger(os.Stderr)
- reg := prometheus.NewRegistry()
- limits := newLimits()
-
- cm := storage.NewClientMetrics()
- t.Cleanup(cm.Unregister)
- p := config.PeriodConfig{
- From: parseDayTime("2023-09-01"),
- IndexTables: config.IndexPeriodicTableConfig{
- PeriodicTableConfig: config.PeriodicTableConfig{
- Prefix: "index_",
- Period: 24 * time.Hour,
- },
- },
- IndexType: config.TSDBType,
- ObjectType: config.StorageTypeFileSystem,
- Schema: "v13",
- RowShards: 16,
- }
- schemaCfg := config.SchemaConfig{
- Configs: []config.PeriodConfig{p},
- }
- storageCfg := storage.Config{
- BloomShipperConfig: bloomshipperconfig.Config{
- WorkingDirectory: t.TempDir(),
- },
- FSConfig: local.FSConfig{
- Directory: t.TempDir(),
- },
- }
+ store := setupBloomStore(t)
+ logger := log.NewNopLogger()
+ reg := prometheus.NewRegistry()
kvStore, closer := consul.NewInMemoryClient(ring.GetCodec(), logger, reg)
t.Cleanup(func() {
@@ -168,20 +151,14 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) {
}
t.Run("shipper error is propagated", func(t *testing.T) {
- reg := prometheus.NewRegistry()
- gw, err := New(cfg, schemaCfg, storageCfg, limits, cm, logger, reg)
- require.NoError(t, err)
-
now := mktime("2023-10-03 10:00")
- // replace store implementation and re-initialize workers and sub-services
_, metas, queriers, data := createBlocks(t, tenantID, 10, now.Add(-1*time.Hour), now, 0x0000, 0x0fff)
-
mockStore := newMockBloomStore(queriers, metas)
mockStore.err = errors.New("request failed")
- gw.bloomStore = mockStore
- err = gw.initServices()
+ reg := prometheus.NewRegistry()
+ gw, err := New(cfg, mockStore, logger, reg)
require.NoError(t, err)
err = services.StartAndAwaitRunning(context.Background(), gw)
@@ -216,20 +193,15 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) {
})
t.Run("request cancellation does not result in channel locking", func(t *testing.T) {
- reg := prometheus.NewRegistry()
- gw, err := New(cfg, schemaCfg, storageCfg, limits, cm, logger, reg)
- require.NoError(t, err)
-
now := mktime("2024-01-25 10:00")
// replace store implementation and re-initialize workers and sub-services
_, metas, queriers, data := createBlocks(t, tenantID, 10, now.Add(-1*time.Hour), now, 0x0000, 0x0fff)
-
mockStore := newMockBloomStore(queriers, metas)
mockStore.delay = 2000 * time.Millisecond
- gw.bloomStore = mockStore
- err = gw.initServices()
+ reg := prometheus.NewRegistry()
+ gw, err := New(cfg, mockStore, logger, reg)
require.NoError(t, err)
err = services.StartAndAwaitRunning(context.Background(), gw)
@@ -264,8 +236,10 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) {
})
t.Run("returns unfiltered chunk refs if no filters provided", func(t *testing.T) {
+ now := mktime("2023-10-03 10:00")
+
reg := prometheus.NewRegistry()
- gw, err := New(cfg, schemaCfg, storageCfg, limits, cm, logger, reg)
+ gw, err := New(cfg, store, logger, reg)
require.NoError(t, err)
err = services.StartAndAwaitRunning(context.Background(), gw)
@@ -275,8 +249,6 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) {
require.NoError(t, err)
})
- now := mktime("2023-10-03 10:00")
-
chunkRefs := []*logproto.ChunkRef{
{Fingerprint: 3000, UserID: tenantID, From: now.Add(-24 * time.Hour), Through: now.Add(-23 * time.Hour), Checksum: 1},
{Fingerprint: 1000, UserID: tenantID, From: now.Add(-22 * time.Hour), Through: now.Add(-21 * time.Hour), Checksum: 2},
@@ -309,8 +281,10 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) {
})
t.Run("gateway tracks active users", func(t *testing.T) {
+ now := mktime("2023-10-03 10:00")
+
reg := prometheus.NewRegistry()
- gw, err := New(cfg, schemaCfg, storageCfg, limits, cm, logger, reg)
+ gw, err := New(cfg, store, logger, reg)
require.NoError(t, err)
err = services.StartAndAwaitRunning(context.Background(), gw)
@@ -320,8 +294,6 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) {
require.NoError(t, err)
})
- now := mktime("2023-10-03 10:00")
-
tenants := []string{"tenant-a", "tenant-b", "tenant-c"}
for idx, tenantID := range tenants {
chunkRefs := []*logproto.ChunkRef{
@@ -349,12 +321,12 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) {
})
t.Run("use fuse queriers to filter chunks", func(t *testing.T) {
+ now := mktime("2023-10-03 10:00")
+
reg := prometheus.NewRegistry()
- gw, err := New(cfg, schemaCfg, storageCfg, limits, cm, logger, reg)
+ gw, err := New(cfg, store, logger, reg)
require.NoError(t, err)
- now := mktime("2023-10-03 10:00")
-
// replace store implementation and re-initialize workers and sub-services
_, metas, queriers, data := createBlocks(t, tenantID, 10, now.Add(-1*time.Hour), now, 0x0000, 0x0fff)
diff --git a/pkg/logproto/compat_test.go b/pkg/logproto/compat_test.go
index a066fe65fed1b..d4de93638f827 100644
--- a/pkg/logproto/compat_test.go
+++ b/pkg/logproto/compat_test.go
@@ -7,12 +7,13 @@ import (
"testing"
"unsafe"
- "github.com/grafana/loki/pkg/logql/syntax"
- "github.com/grafana/loki/pkg/querier/plan"
jsoniter "github.com/json-iterator/go"
"github.com/prometheus/prometheus/model/labels"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+
+ "github.com/grafana/loki/pkg/logql/syntax"
+ "github.com/grafana/loki/pkg/querier/plan"
)
// This test verifies that jsoninter uses our custom method for marshalling.
diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go
index 75401decb8fc0..d8ee613f6108b 100644
--- a/pkg/loki/loki.go
+++ b/pkg/loki/loki.go
@@ -54,6 +54,7 @@ import (
"github.com/grafana/loki/pkg/storage"
"github.com/grafana/loki/pkg/storage/config"
"github.com/grafana/loki/pkg/storage/stores/series/index"
+ "github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper"
"github.com/grafana/loki/pkg/storage/stores/shipper/indexshipper/indexgateway"
"github.com/grafana/loki/pkg/tracing"
"github.com/grafana/loki/pkg/util"
@@ -304,6 +305,7 @@ type Loki struct {
querierAPI *querier.QuerierAPI
ingesterQuerier *querier.IngesterQuerier
Store storage.Store
+ BloomStore bloomshipper.Store
tableManager *index.TableManager
frontend Frontend
ruler *base_ruler.Ruler
@@ -602,6 +604,7 @@ func (t *Loki) setupModuleManager() error {
mm.RegisterModule(RuleEvaluator, t.initRuleEvaluator, modules.UserInvisibleModule)
mm.RegisterModule(TableManager, t.initTableManager)
mm.RegisterModule(Compactor, t.initCompactor)
+ mm.RegisterModule(BloomStore, t.initBloomStore)
mm.RegisterModule(BloomCompactor, t.initBloomCompactor)
mm.RegisterModule(BloomCompactorRing, t.initBloomCompactorRing, modules.UserInvisibleModule)
mm.RegisterModule(IndexGateway, t.initIndexGateway)
@@ -638,8 +641,8 @@ func (t *Loki) setupModuleManager() error {
TableManager: {Server, Analytics},
Compactor: {Server, Overrides, MemberlistKV, Analytics},
IndexGateway: {Server, Store, IndexGatewayRing, IndexGatewayInterceptors, Analytics},
- BloomGateway: {Server, BloomGatewayRing, Analytics},
- BloomCompactor: {Server, BloomCompactorRing, Analytics, Store},
+ BloomGateway: {Server, BloomStore, BloomGatewayRing, Analytics},
+ BloomCompactor: {Server, BloomStore, BloomCompactorRing, Analytics, Store},
IngesterQuerier: {Ring},
QuerySchedulerRing: {Overrides, MemberlistKV},
IndexGatewayRing: {Overrides, MemberlistKV},
diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go
index 9d5a614dc5796..5b73be62ca51e 100644
--- a/pkg/loki/modules.go
+++ b/pkg/loki/modules.go
@@ -34,6 +34,7 @@ import (
"github.com/prometheus/common/version"
"github.com/grafana/loki/pkg/bloomcompactor"
+ "github.com/grafana/loki/pkg/logqlmodel/stats"
"github.com/grafana/loki/pkg/analytics"
"github.com/grafana/loki/pkg/bloomgateway"
@@ -64,6 +65,7 @@ import (
chunk_util "github.com/grafana/loki/pkg/storage/chunk/client/util"
"github.com/grafana/loki/pkg/storage/config"
"github.com/grafana/loki/pkg/storage/stores/series/index"
+ "github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper"
"github.com/grafana/loki/pkg/storage/stores/shipper/indexshipper"
"github.com/grafana/loki/pkg/storage/stores/shipper/indexshipper/boltdb"
boltdbcompactor "github.com/grafana/loki/pkg/storage/stores/shipper/indexshipper/boltdb/compactor"
@@ -117,6 +119,7 @@ const (
QuerySchedulerRing string = "query-scheduler-ring"
BloomCompactor string = "bloom-compactor"
BloomCompactorRing string = "bloom-compactor-ring"
+ BloomStore string = "bloom-store"
All string = "all"
Read string = "read"
Write string = "write"
@@ -645,6 +648,43 @@ func (t *Loki) initStore() (services.Service, error) {
}), nil
}
+func (t *Loki) initBloomStore() (services.Service, error) {
+ if !config.UsingObjectStorageIndex(t.Cfg.SchemaConfig.Configs) {
+ return nil, errors.New("not using shipper index type")
+ }
+
+ t.updateConfigForShipperStore()
+
+ var err error
+ logger := log.With(util_log.Logger, "component", "bloomstore")
+
+ reg := prometheus.DefaultRegisterer
+ bsCfg := t.Cfg.StorageConfig.BloomShipperConfig
+
+ var metasCache cache.Cache
+ if cache.IsCacheConfigured(bsCfg.MetasCache) {
+ metasCache, err = cache.New(bsCfg.MetasCache, reg, logger, stats.BloomMetasCache, constants.Loki)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create metas cache: %w", err)
+ }
+ }
+
+ var blocksCache cache.TypedCache[string, bloomshipper.BlockDirectory]
+ if bsCfg.BlocksCache.IsEnabled() {
+ blocksCache = bloomshipper.NewBlocksCache(bsCfg.BlocksCache, reg, logger)
+ }
+
+ t.BloomStore, err = bloomshipper.NewBloomStore(t.Cfg.SchemaConfig.Configs, t.Cfg.StorageConfig, t.clientMetrics, metasCache, blocksCache, logger)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create bloom store: %w", err)
+ }
+
+ return services.NewIdleService(nil, func(_ error) error {
+ t.BloomStore.Stop()
+ return nil
+ }), nil
+}
+
func (t *Loki) updateConfigForShipperStore() {
// Always set these configs
t.Cfg.StorageConfig.BoltDBShipperConfig.IndexGatewayClientConfig.Mode = t.Cfg.IndexGateway.Mode
@@ -1272,7 +1312,7 @@ func (t *Loki) addCompactorMiddleware(h http.HandlerFunc) http.Handler {
func (t *Loki) initBloomGateway() (services.Service, error) {
logger := log.With(util_log.Logger, "component", "bloom-gateway")
- gateway, err := bloomgateway.New(t.Cfg.BloomGateway, t.Cfg.SchemaConfig, t.Cfg.StorageConfig, t.Overrides, t.clientMetrics, logger, prometheus.DefaultRegisterer)
+ gateway, err := bloomgateway.New(t.Cfg.BloomGateway, t.BloomStore, logger, prometheus.DefaultRegisterer)
if err != nil {
return nil, err
}
@@ -1415,13 +1455,11 @@ func (t *Loki) initIndexGatewayInterceptors() (services.Service, error) {
}
func (t *Loki) initBloomCompactor() (services.Service, error) {
- t.updateConfigForShipperStore()
-
logger := log.With(util_log.Logger, "component", "bloom-compactor")
shuffleSharding := util_ring.NewTenantShuffleSharding(t.bloomCompactorRingManager.Ring, t.bloomCompactorRingManager.RingLifecycler, t.Overrides.BloomCompactorShardSize)
- compactor, err := bloomcompactor.New(
+ return bloomcompactor.New(
t.Cfg.BloomCompactor,
t.Cfg.SchemaConfig,
t.Cfg.StorageConfig,
@@ -1429,15 +1467,10 @@ func (t *Loki) initBloomCompactor() (services.Service, error) {
t.Store,
shuffleSharding,
t.Overrides,
+ t.BloomStore,
logger,
prometheus.DefaultRegisterer,
)
-
- if err != nil {
- return nil, err
- }
-
- return compactor, nil
}
func (t *Loki) initBloomCompactorRing() (services.Service, error) {
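
The initBloomStore hunk above leans on dskit's idle-service pattern: no start or run work, only a stop hook, so dependent modules (the gateway and compactor in the dependency table) can sequence against the store's lifecycle. A minimal standalone sketch of that pattern, with a stand-in store type:

package main

import (
	"context"

	"github.com/grafana/dskit/services"
)

type fakeStore struct{}

func (s *fakeStore) Stop() {}

func main() {
	store := &fakeStore{}
	// Nil starting function: the service is "running" immediately; the
	// stopping function runs exactly once at shutdown, like BloomStore.Stop.
	svc := services.NewIdleService(nil, func(_ error) error {
		store.Stop()
		return nil
	})
	_ = services.StartAndAwaitRunning(context.Background(), svc)
	svc.StopAsync()
	_ = svc.AwaitTerminated(context.Background())
}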
|
chore
|
Implement BloomStore as a service (#12044)
|
62f826e322229e770b74f743aaf8b17ea3f5737a
|
2025-03-13 18:04:47
|
dependabot[bot]
|
chore(deps): bump golang.org/x/net from 0.34.0 to 0.36.0 in /pkg/push (#16722)
| false
|
diff --git a/pkg/push/go.mod b/pkg/push/go.mod
index 6c5cc76809be0..6bc2d7b65bf8d 100644
--- a/pkg/push/go.mod
+++ b/pkg/push/go.mod
@@ -1,7 +1,6 @@
module github.com/grafana/loki/pkg/push
go 1.22.7
-
toolchain go1.23.3
require (
@@ -14,9 +13,9 @@ require (
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
- golang.org/x/net v0.34.0 // indirect
- golang.org/x/sys v0.29.0 // indirect
- golang.org/x/text v0.21.0 // indirect
+ golang.org/x/net v0.36.0 // indirect
+ golang.org/x/sys v0.30.0 // indirect
+ golang.org/x/text v0.22.0 // indirect
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
google.golang.org/protobuf v1.36.4 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
diff --git a/pkg/push/go.sum b/pkg/push/go.sum
index aa6736b0f3eaf..ec1445b87b12f 100644
--- a/pkg/push/go.sum
+++ b/pkg/push/go.sum
@@ -48,20 +48,20 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0=
-golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k=
+golang.org/x/net v0.36.0 h1:vWF2fRbw4qslQsQzgFqZff+BItCvGFQqKzKIzx1rmoA=
+golang.org/x/net v0.36.0/go.mod h1:bFmbeoIPfrw4sMHNhb4J9f6+tPziuGjq7Jk/38fxi1I=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
-golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
+golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
-golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
+golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM=
+golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
chore
|
bump golang.org/x/net from 0.34.0 to 0.36.0 in /pkg/push (#16722)
|
339ba1a5b7a4e1f37e991c401fc233b5cb19744b
|
2024-12-03 09:07:20
|
Owen Diehl
|
feat(blockbuilder): grpc transport (#15218)
| false
|
diff --git a/pkg/blockbuilder/scheduler/scheduler_test.go b/pkg/blockbuilder/scheduler/scheduler_test.go
index bd9e00450dfa7..35e53ee255993 100644
--- a/pkg/blockbuilder/scheduler/scheduler_test.go
+++ b/pkg/blockbuilder/scheduler/scheduler_test.go
@@ -15,15 +15,15 @@ import (
type testEnv struct {
queue *JobQueue
scheduler *BlockScheduler
- transport *builder.MemoryTransport
+ transport *types.MemoryTransport
builder *builder.Worker
}
func newTestEnv(builderID string) *testEnv {
queue := NewJobQueue()
scheduler := NewScheduler(Config{}, queue, nil, log.NewNopLogger(), prometheus.NewRegistry())
- transport := builder.NewMemoryTransport(scheduler)
- builder := builder.NewWorker(builderID, builder.NewMemoryTransport(scheduler))
+ transport := types.NewMemoryTransport(scheduler)
+ builder := builder.NewWorker(builderID, transport)
return &testEnv{
queue: queue,
@@ -89,7 +89,7 @@ func TestMultipleBuilders(t *testing.T) {
// Create first environment
env1 := newTestEnv("test-builder-1")
// Create second builder using same scheduler
- builder2 := builder.NewWorker("test-builder-2", builder.NewMemoryTransport(env1.scheduler))
+ builder2 := builder.NewWorker("test-builder-2", env1.transport)
ctx := context.Background()
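
A subtle fix hides in this test refactor: the old code built a fresh MemoryTransport for each worker, so the transport stored on testEnv was never the one the builders actually used. Constructing it once and sharing it is the point. Here is a self-contained sketch of the corrected wiring, with stand-in types rather than the real blockbuilder API:

package main

import "fmt"

// Stand-ins for the test's types; not the real blockbuilder API.
type Scheduler struct{}

type MemoryTransport struct{ sched *Scheduler }

func NewMemoryTransport(s *Scheduler) *MemoryTransport { return &MemoryTransport{sched: s} }

type Worker struct {
	id string
	t  *MemoryTransport
}

func NewWorker(id string, t *MemoryTransport) *Worker { return &Worker{id: id, t: t} }

func main() {
	sched := &Scheduler{}
	transport := NewMemoryTransport(sched) // constructed once
	w1 := NewWorker("builder-1", transport)
	w2 := NewWorker("builder-2", transport) // reused, not re-created
	fmt.Println(w1.t == w2.t)               // true
}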
diff --git a/pkg/blockbuilder/types/grpc_transport.go b/pkg/blockbuilder/types/grpc_transport.go
new file mode 100644
index 0000000000000..675eb92413ac7
--- /dev/null
+++ b/pkg/blockbuilder/types/grpc_transport.go
@@ -0,0 +1,147 @@
+package types
+
+import (
+ "context"
+ "flag"
+ "io"
+
+ "github.com/grafana/dskit/grpcclient"
+ "github.com/grafana/dskit/instrument"
+ "github.com/pkg/errors"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promauto"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/health/grpc_health_v1"
+
+ "github.com/grafana/loki/v3/pkg/blockbuilder/types/proto"
+ "github.com/grafana/loki/v3/pkg/util/constants"
+)
+
+var _ Transport = &GRPCTransport{}
+
+type GRPCTransportConfig struct {
+ Address string `yaml:"address,omitempty"`
+
+	// GRPCClientConfig configures the gRPC connection between the block builder and the block scheduler.
+ GRPCClientConfig grpcclient.Config `yaml:"grpc_client_config"`
+}
+
+func (cfg *GRPCTransportConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
+ f.StringVar(&cfg.Address, prefix+"address", "", "address in DNS Service Discovery format: https://grafana.com/docs/mimir/latest/configure/about-dns-service-discovery/#supported-discovery-modes")
+}
+
+type grpcTransportMetrics struct {
+ requestLatency *prometheus.HistogramVec
+}
+
+func newGRPCTransportMetrics(registerer prometheus.Registerer) *grpcTransportMetrics {
+ return &grpcTransportMetrics{
+ requestLatency: promauto.With(registerer).NewHistogramVec(prometheus.HistogramOpts{
+ Namespace: constants.Loki,
+ Subsystem: "block_builder_grpc",
+ Name: "request_duration_seconds",
+ Help: "Time (in seconds) spent serving requests when using the block builder grpc transport",
+ Buckets: instrument.DefBuckets,
+ }, []string{"operation", "status_code"}),
+ }
+}
+
+// GRPCTransport implements the Transport interface using gRPC
+type GRPCTransport struct {
+ grpc_health_v1.HealthClient
+ io.Closer
+ proto.BlockBuilderServiceClient
+}
+
+// NewGRPCTransportFromAddress creates a new gRPC transport for the configured address, deriving dial options from the client config and instrumenting them with the given metrics
+func NewGRPCTransportFromAddress(
+ metrics *grpcTransportMetrics,
+ cfg GRPCTransportConfig,
+) (*GRPCTransport, error) {
+
+ dialOpts, err := cfg.GRPCClientConfig.DialOption(grpcclient.Instrument(metrics.requestLatency))
+ if err != nil {
+ return nil, err
+ }
+
+ // nolint:staticcheck // grpc.Dial() has been deprecated; we'll address it before upgrading to gRPC 2.
+ conn, err := grpc.Dial(cfg.Address, dialOpts...)
+ if err != nil {
+ return nil, errors.Wrap(err, "new grpc pool dial")
+ }
+
+ return &GRPCTransport{
+ Closer: conn,
+ HealthClient: grpc_health_v1.NewHealthClient(conn),
+ BlockBuilderServiceClient: proto.NewBlockBuilderServiceClient(conn),
+ }, nil
+}
+
+// SendGetJobRequest implements Transport
+func (t *GRPCTransport) SendGetJobRequest(ctx context.Context, req *GetJobRequest) (*GetJobResponse, error) {
+ protoReq := &proto.GetJobRequest{
+ BuilderId: req.BuilderID,
+ }
+
+ resp, err := t.GetJob(ctx, protoReq)
+ if err != nil {
+ return nil, err
+ }
+
+ return &GetJobResponse{
+ Job: protoToJob(resp.GetJob()),
+ OK: resp.GetOk(),
+ }, nil
+}
+
+// SendCompleteJob implements Transport
+func (t *GRPCTransport) SendCompleteJob(ctx context.Context, req *CompleteJobRequest) error {
+ protoReq := &proto.CompleteJobRequest{
+ BuilderId: req.BuilderID,
+ Job: jobToProto(req.Job),
+ }
+
+ _, err := t.CompleteJob(ctx, protoReq)
+ return err
+}
+
+// SendSyncJob implements Transport
+func (t *GRPCTransport) SendSyncJob(ctx context.Context, req *SyncJobRequest) error {
+ protoReq := &proto.SyncJobRequest{
+ BuilderId: req.BuilderID,
+ Job: jobToProto(req.Job),
+ }
+
+ _, err := t.SyncJob(ctx, protoReq)
+ return err
+}
+
+// protoToJob converts a proto Job to a types.Job
+func protoToJob(p *proto.Job) *Job {
+ if p == nil {
+ return nil
+ }
+ return &Job{
+ ID: p.GetId(),
+ Partition: int(p.GetPartition()),
+ Offsets: Offsets{
+ Min: p.GetOffsets().GetMin(),
+ Max: p.GetOffsets().GetMax(),
+ },
+ }
+}
+
+// jobToProto converts a types.Job to a proto Job
+func jobToProto(j *Job) *proto.Job {
+ if j == nil {
+ return nil
+ }
+ return &proto.Job{
+ Id: j.ID,
+ Partition: int32(j.Partition),
+ Offsets: &proto.Offsets{
+ Min: j.Offsets.Min,
+ Max: j.Offsets.Max,
+ },
+ }
+}
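
The file above is a thin adapter: GRPCTransport embeds the generated BlockBuilderServiceClient, a health client, and the connection's Closer, while the Send* methods translate between the package's plain types and their proto counterparts. The following usage sketch is hedged: it would have to live inside this package because newGRPCTransportMetrics is unexported, and the placeholder address plus zero-valued client config are for illustration, not a recommended setup (real deployments populate GRPCClientConfig through its registered flags).

package types

import (
	"context"

	"github.com/prometheus/client_golang/prometheus"
)

// exampleDial is a usage sketch, not shipped code.
func exampleDial(ctx context.Context) error {
	cfg := GRPCTransportConfig{Address: "dns:///block-scheduler:9095"} // placeholder address
	metrics := newGRPCTransportMetrics(prometheus.NewRegistry())

	t, err := NewGRPCTransportFromAddress(metrics, cfg)
	if err != nil {
		return err
	}
	defer t.Close() // the embedded io.Closer is the underlying *grpc.ClientConn

	resp, err := t.SendGetJobRequest(ctx, &GetJobRequest{BuilderID: "builder-1"})
	if err != nil {
		return err
	}
	if resp.OK && resp.Job != nil {
		_ = resp.Job.Offsets // the offset range this builder should consume
	}
	return nil
}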
diff --git a/pkg/blockbuilder/types/interfaces.go b/pkg/blockbuilder/types/interfaces.go
index 74267f912fd7e..dd719757ba6a1 100644
--- a/pkg/blockbuilder/types/interfaces.go
+++ b/pkg/blockbuilder/types/interfaces.go
@@ -24,6 +24,15 @@ type Scheduler interface {
// Transport defines the interface for communication between block builders and scheduler
type Transport interface {
+ BuilderTransport
+ SchedulerTransport
+}
+
+// SchedulerTransport is for calls originating from the scheduler
+type SchedulerTransport interface{}
+
+// BuilderTransport is for calls originating from the builder
+type BuilderTransport interface {
// SendGetJobRequest sends a request to get a new job
SendGetJobRequest(ctx context.Context, req *GetJobRequest) (*GetJobResponse, error)
// SendCompleteJob sends a job completion notification
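
One payoff of splitting Transport into scheduler- and builder-originated halves is smaller test doubles: a builder-side fake needs only the three Send* methods, not the whole Transport. A sketch of such a stub, assumed to sit in the types package so the request and response types resolve (it is not part of this commit):

// fakeBuilderTransport is a hedged test-double sketch satisfying BuilderTransport.
type fakeBuilderTransport struct {
	job *Job // the single job handed out to every caller
}

func (f *fakeBuilderTransport) SendGetJobRequest(_ context.Context, _ *GetJobRequest) (*GetJobResponse, error) {
	return &GetJobResponse{Job: f.job, OK: f.job != nil}, nil
}

func (f *fakeBuilderTransport) SendCompleteJob(_ context.Context, _ *CompleteJobRequest) error {
	return nil // pretend the scheduler recorded the completion
}

func (f *fakeBuilderTransport) SendSyncJob(_ context.Context, _ *SyncJobRequest) error {
	return nil // pretend the sync was accepted
}

// Compile-time check that the fake satisfies the builder-side interface.
var _ BuilderTransport = (*fakeBuilderTransport)(nil)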
diff --git a/pkg/blockbuilder/types/job.go b/pkg/blockbuilder/types/job.go
index d6ed42b598906..2c06fec4d48cd 100644
--- a/pkg/blockbuilder/types/job.go
+++ b/pkg/blockbuilder/types/job.go
@@ -4,8 +4,7 @@ import "fmt"
// Job represents a block building task.
type Job struct {
- ID string
- Status JobStatus
+ ID string
// Partition and offset information
Partition int
Offsets Offsets
@@ -30,7 +29,6 @@ type Offsets struct {
func NewJob(partition int, offsets Offsets) *Job {
return &Job{
ID: GenerateJobID(partition, offsets),
- Status: JobStatusPending,
Partition: partition,
Offsets: offsets,
}
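
With Status (and the JobStatusPending constant) removed, a Job is plain immutable data: a derived ID, a partition, and an offset range; lifecycle state now belongs to whatever component tracks the job. A quick construction sketch, assuming only the public API visible in this diff:

package main

import (
	"fmt"

	"github.com/grafana/loki/v3/pkg/blockbuilder/types"
)

func main() {
	// The ID is derived deterministically from partition and offsets via GenerateJobID.
	job := types.NewJob(3, types.Offsets{Min: 1000, Max: 2000})
	fmt.Println(job.ID, job.Partition, job.Offsets.Min, job.Offsets.Max)
}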
diff --git a/pkg/blockbuilder/types/proto/blockbuilder.pb.go b/pkg/blockbuilder/types/proto/blockbuilder.pb.go
new file mode 100644
index 0000000000000..c5c4b05d38604
--- /dev/null
+++ b/pkg/blockbuilder/types/proto/blockbuilder.pb.go
@@ -0,0 +1,2317 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: pkg/blockbuilder/types/proto/blockbuilder.proto
+
+package proto
+
+import (
+ context "context"
+ fmt "fmt"
+ proto "github.com/gogo/protobuf/proto"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+ reflect "reflect"
+ strings "strings"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+// GetJobRequest represents a request for a new job
+type GetJobRequest struct {
+ BuilderId string `protobuf:"bytes,1,opt,name=builder_id,json=builderId,proto3" json:"builder_id,omitempty"`
+}
+
+func (m *GetJobRequest) Reset() { *m = GetJobRequest{} }
+func (*GetJobRequest) ProtoMessage() {}
+func (*GetJobRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_04968622516f7b79, []int{0}
+}
+func (m *GetJobRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *GetJobRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_GetJobRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *GetJobRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetJobRequest.Merge(m, src)
+}
+func (m *GetJobRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *GetJobRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetJobRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetJobRequest proto.InternalMessageInfo
+
+func (m *GetJobRequest) GetBuilderId() string {
+ if m != nil {
+ return m.BuilderId
+ }
+ return ""
+}
+
+// GetJobResponse contains the response for a job request
+type GetJobResponse struct {
+ Job *Job `protobuf:"bytes,1,opt,name=job,proto3" json:"job,omitempty"`
+ Ok bool `protobuf:"varint,2,opt,name=ok,proto3" json:"ok,omitempty"`
+}
+
+func (m *GetJobResponse) Reset() { *m = GetJobResponse{} }
+func (*GetJobResponse) ProtoMessage() {}
+func (*GetJobResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_04968622516f7b79, []int{1}
+}
+func (m *GetJobResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *GetJobResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_GetJobResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *GetJobResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetJobResponse.Merge(m, src)
+}
+func (m *GetJobResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *GetJobResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetJobResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetJobResponse proto.InternalMessageInfo
+
+func (m *GetJobResponse) GetJob() *Job {
+ if m != nil {
+ return m.Job
+ }
+ return nil
+}
+
+func (m *GetJobResponse) GetOk() bool {
+ if m != nil {
+ return m.Ok
+ }
+ return false
+}
+
+// CompleteJobRequest represents a job completion notification
+type CompleteJobRequest struct {
+ BuilderId string `protobuf:"bytes,1,opt,name=builder_id,json=builderId,proto3" json:"builder_id,omitempty"`
+ Job *Job `protobuf:"bytes,2,opt,name=job,proto3" json:"job,omitempty"`
+}
+
+func (m *CompleteJobRequest) Reset() { *m = CompleteJobRequest{} }
+func (*CompleteJobRequest) ProtoMessage() {}
+func (*CompleteJobRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_04968622516f7b79, []int{2}
+}
+func (m *CompleteJobRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *CompleteJobRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_CompleteJobRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *CompleteJobRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CompleteJobRequest.Merge(m, src)
+}
+func (m *CompleteJobRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *CompleteJobRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_CompleteJobRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CompleteJobRequest proto.InternalMessageInfo
+
+func (m *CompleteJobRequest) GetBuilderId() string {
+ if m != nil {
+ return m.BuilderId
+ }
+ return ""
+}
+
+func (m *CompleteJobRequest) GetJob() *Job {
+ if m != nil {
+ return m.Job
+ }
+ return nil
+}
+
+// CompleteJobResponse is an empty response for job completion
+type CompleteJobResponse struct {
+}
+
+func (m *CompleteJobResponse) Reset() { *m = CompleteJobResponse{} }
+func (*CompleteJobResponse) ProtoMessage() {}
+func (*CompleteJobResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_04968622516f7b79, []int{3}
+}
+func (m *CompleteJobResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *CompleteJobResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_CompleteJobResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *CompleteJobResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CompleteJobResponse.Merge(m, src)
+}
+func (m *CompleteJobResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *CompleteJobResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_CompleteJobResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CompleteJobResponse proto.InternalMessageInfo
+
+// SyncJobRequest represents a job sync request
+type SyncJobRequest struct {
+ BuilderId string `protobuf:"bytes,1,opt,name=builder_id,json=builderId,proto3" json:"builder_id,omitempty"`
+ Job *Job `protobuf:"bytes,2,opt,name=job,proto3" json:"job,omitempty"`
+}
+
+func (m *SyncJobRequest) Reset() { *m = SyncJobRequest{} }
+func (*SyncJobRequest) ProtoMessage() {}
+func (*SyncJobRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_04968622516f7b79, []int{4}
+}
+func (m *SyncJobRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SyncJobRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_SyncJobRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *SyncJobRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SyncJobRequest.Merge(m, src)
+}
+func (m *SyncJobRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *SyncJobRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_SyncJobRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SyncJobRequest proto.InternalMessageInfo
+
+func (m *SyncJobRequest) GetBuilderId() string {
+ if m != nil {
+ return m.BuilderId
+ }
+ return ""
+}
+
+func (m *SyncJobRequest) GetJob() *Job {
+ if m != nil {
+ return m.Job
+ }
+ return nil
+}
+
+// SyncJobResponse is an empty response for job sync
+type SyncJobResponse struct {
+}
+
+func (m *SyncJobResponse) Reset() { *m = SyncJobResponse{} }
+func (*SyncJobResponse) ProtoMessage() {}
+func (*SyncJobResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_04968622516f7b79, []int{5}
+}
+func (m *SyncJobResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SyncJobResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_SyncJobResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *SyncJobResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SyncJobResponse.Merge(m, src)
+}
+func (m *SyncJobResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *SyncJobResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_SyncJobResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SyncJobResponse proto.InternalMessageInfo
+
+// Offsets represents the start and end offsets for a job
+type Offsets struct {
+ Min int64 `protobuf:"varint,1,opt,name=min,proto3" json:"min,omitempty"`
+ Max int64 `protobuf:"varint,2,opt,name=max,proto3" json:"max,omitempty"`
+}
+
+func (m *Offsets) Reset() { *m = Offsets{} }
+func (*Offsets) ProtoMessage() {}
+func (*Offsets) Descriptor() ([]byte, []int) {
+ return fileDescriptor_04968622516f7b79, []int{6}
+}
+func (m *Offsets) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Offsets) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Offsets.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Offsets) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Offsets.Merge(m, src)
+}
+func (m *Offsets) XXX_Size() int {
+ return m.Size()
+}
+func (m *Offsets) XXX_DiscardUnknown() {
+ xxx_messageInfo_Offsets.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Offsets proto.InternalMessageInfo
+
+func (m *Offsets) GetMin() int64 {
+ if m != nil {
+ return m.Min
+ }
+ return 0
+}
+
+func (m *Offsets) GetMax() int64 {
+ if m != nil {
+ return m.Max
+ }
+ return 0
+}
+
+// Job represents a block building job
+type Job struct {
+ Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+ Partition int32 `protobuf:"varint,2,opt,name=partition,proto3" json:"partition,omitempty"`
+ Offsets *Offsets `protobuf:"bytes,3,opt,name=offsets,proto3" json:"offsets,omitempty"`
+}
+
+func (m *Job) Reset() { *m = Job{} }
+func (*Job) ProtoMessage() {}
+func (*Job) Descriptor() ([]byte, []int) {
+ return fileDescriptor_04968622516f7b79, []int{7}
+}
+func (m *Job) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Job) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Job.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Job) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Job.Merge(m, src)
+}
+func (m *Job) XXX_Size() int {
+ return m.Size()
+}
+func (m *Job) XXX_DiscardUnknown() {
+ xxx_messageInfo_Job.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Job proto.InternalMessageInfo
+
+func (m *Job) GetId() string {
+ if m != nil {
+ return m.Id
+ }
+ return ""
+}
+
+func (m *Job) GetPartition() int32 {
+ if m != nil {
+ return m.Partition
+ }
+ return 0
+}
+
+func (m *Job) GetOffsets() *Offsets {
+ if m != nil {
+ return m.Offsets
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*GetJobRequest)(nil), "blockbuilder.types.GetJobRequest")
+ proto.RegisterType((*GetJobResponse)(nil), "blockbuilder.types.GetJobResponse")
+ proto.RegisterType((*CompleteJobRequest)(nil), "blockbuilder.types.CompleteJobRequest")
+ proto.RegisterType((*CompleteJobResponse)(nil), "blockbuilder.types.CompleteJobResponse")
+ proto.RegisterType((*SyncJobRequest)(nil), "blockbuilder.types.SyncJobRequest")
+ proto.RegisterType((*SyncJobResponse)(nil), "blockbuilder.types.SyncJobResponse")
+ proto.RegisterType((*Offsets)(nil), "blockbuilder.types.Offsets")
+ proto.RegisterType((*Job)(nil), "blockbuilder.types.Job")
+}
+
+func init() {
+ proto.RegisterFile("pkg/blockbuilder/types/proto/blockbuilder.proto", fileDescriptor_04968622516f7b79)
+}
+
+var fileDescriptor_04968622516f7b79 = []byte{
+ // 438 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x93, 0xcf, 0xae, 0xd2, 0x40,
+ 0x18, 0xc5, 0x3b, 0x6d, 0xbc, 0xc8, 0x77, 0x23, 0xea, 0xdc, 0x18, 0x09, 0xea, 0xe4, 0x5a, 0x13,
+ 0xbd, 0x2e, 0x6c, 0x13, 0xd4, 0x17, 0xc0, 0x85, 0x11, 0x17, 0xc6, 0xe2, 0x8a, 0x85, 0xda, 0x3f,
+ 0x03, 0x0e, 0x2d, 0x9d, 0xda, 0x0e, 0x06, 0x76, 0x3e, 0x82, 0x8f, 0xe0, 0xd2, 0x47, 0x71, 0xc9,
+ 0x92, 0xa5, 0x94, 0x8d, 0x4b, 0x1e, 0xc1, 0x74, 0xda, 0xa2, 0x0d, 0x0d, 0xb8, 0xb9, 0xab, 0x36,
+ 0xa7, 0xbf, 0x9e, 0x73, 0xf2, 0x7d, 0x33, 0x60, 0x46, 0xfe, 0xd8, 0x74, 0x02, 0xee, 0xfa, 0xce,
+ 0x8c, 0x05, 0x1e, 0x8d, 0x4d, 0xb1, 0x88, 0x68, 0x62, 0x46, 0x31, 0x17, 0xbc, 0xf2, 0xc1, 0x90,
+ 0x12, 0xc6, 0x15, 0x4d, 0xc2, 0xba, 0x01, 0xd7, 0x5e, 0x52, 0xd1, 0xe7, 0x8e, 0x45, 0x3f, 0xcf,
+ 0x68, 0x22, 0xf0, 0x3d, 0x80, 0x82, 0xf8, 0xc0, 0xbc, 0x36, 0x3a, 0x47, 0x17, 0x4d, 0xab, 0x59,
+ 0x28, 0xaf, 0x3c, 0xfd, 0x35, 0xb4, 0x4a, 0x3e, 0x89, 0x78, 0x98, 0x50, 0xfc, 0x18, 0xb4, 0x09,
+ 0x77, 0x24, 0x79, 0xda, 0xbd, 0x6d, 0xec, 0x67, 0x18, 0x19, 0x9d, 0x31, 0xb8, 0x05, 0x2a, 0xf7,
+ 0xdb, 0xea, 0x39, 0xba, 0xb8, 0x6a, 0xa9, 0xdc, 0xd7, 0xdf, 0x03, 0x7e, 0xc1, 0xa7, 0x51, 0x40,
+ 0x05, 0xfd, 0xef, 0x06, 0x65, 0x9e, 0x7a, 0x3c, 0x4f, 0xbf, 0x05, 0x67, 0x15, 0xff, 0xbc, 0xb1,
+ 0x3e, 0x84, 0xd6, 0x60, 0x11, 0xba, 0x97, 0x12, 0x79, 0x13, 0xae, 0xef, 0xbc, 0x8b, 0xb8, 0x27,
+ 0xd0, 0x78, 0x33, 0x1a, 0x25, 0x54, 0x24, 0xf8, 0x06, 0x68, 0x53, 0x16, 0xca, 0x00, 0xcd, 0xca,
+ 0x5e, 0xa5, 0x62, 0xcf, 0xa5, 0x75, 0xa6, 0xd8, 0x73, 0x7d, 0x02, 0x5a, 0x3f, 0x9f, 0xd5, 0xae,
+ 0x8a, 0xca, 0x3c, 0x7c, 0x17, 0x9a, 0x91, 0x1d, 0x0b, 0x26, 0x18, 0x0f, 0x25, 0x7e, 0xc5, 0xfa,
+ 0x2b, 0xe0, 0xe7, 0xd0, 0xe0, 0x79, 0x46, 0x5b, 0x93, 0x2d, 0xef, 0xd4, 0xb5, 0x2c, 0x6a, 0x58,
+ 0x25, 0xdb, 0xfd, 0xae, 0xc2, 0x59, 0x2f, 0xe3, 0x7a, 0x39, 0x37, 0xa0, 0xf1, 0x17, 0xe6, 0x52,
+ 0xfc, 0x16, 0x4e, 0xf2, 0x2d, 0xe3, 0xfb, 0x75, 0x3e, 0x95, 0x13, 0xd3, 0xd1, 0x0f, 0x21, 0xc5,
+ 0x0c, 0x14, 0xfc, 0x11, 0x4e, 0xff, 0xd9, 0x05, 0x7e, 0x58, 0xf7, 0xd3, 0xfe, 0x61, 0xe8, 0x3c,
+ 0x3a, 0xca, 0xed, 0x12, 0xde, 0x41, 0xa3, 0x18, 0x3d, 0xae, 0xad, 0x54, 0xdd, 0x79, 0xe7, 0xc1,
+ 0x41, 0xa6, 0x74, 0xed, 0x4d, 0x96, 0x6b, 0xa2, 0xac, 0xd6, 0x44, 0xd9, 0xae, 0x09, 0xfa, 0x9a,
+ 0x12, 0xf4, 0x23, 0x25, 0xe8, 0x67, 0x4a, 0xd0, 0x32, 0x25, 0xe8, 0x57, 0x4a, 0xd0, 0xef, 0x94,
+ 0x28, 0xdb, 0x94, 0xa0, 0x6f, 0x1b, 0xa2, 0x2c, 0x37, 0x44, 0x59, 0x6d, 0x88, 0x32, 0x7c, 0x36,
+ 0x66, 0xe2, 0xd3, 0xcc, 0x31, 0x5c, 0x3e, 0x35, 0xc7, 0xb1, 0x3d, 0xb2, 0x43, 0xdb, 0x0c, 0xb8,
+ 0xcf, 0x0e, 0xde, 0x59, 0xe7, 0x44, 0x3e, 0x9e, 0xfe, 0x09, 0x00, 0x00, 0xff, 0xff, 0x6b, 0x42,
+ 0xf6, 0xf1, 0xda, 0x03, 0x00, 0x00,
+}
+
+func (this *GetJobRequest) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
+
+ that1, ok := that.(*GetJobRequest)
+ if !ok {
+ that2, ok := that.(GetJobRequest)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
+ }
+ if this.BuilderId != that1.BuilderId {
+ return false
+ }
+ return true
+}
+func (this *GetJobResponse) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
+
+ that1, ok := that.(*GetJobResponse)
+ if !ok {
+ that2, ok := that.(GetJobResponse)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
+ }
+ if !this.Job.Equal(that1.Job) {
+ return false
+ }
+ if this.Ok != that1.Ok {
+ return false
+ }
+ return true
+}
+func (this *CompleteJobRequest) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
+
+ that1, ok := that.(*CompleteJobRequest)
+ if !ok {
+ that2, ok := that.(CompleteJobRequest)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
+ }
+ if this.BuilderId != that1.BuilderId {
+ return false
+ }
+ if !this.Job.Equal(that1.Job) {
+ return false
+ }
+ return true
+}
+func (this *CompleteJobResponse) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
+
+ that1, ok := that.(*CompleteJobResponse)
+ if !ok {
+ that2, ok := that.(CompleteJobResponse)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
+ }
+ return true
+}
+func (this *SyncJobRequest) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
+
+ that1, ok := that.(*SyncJobRequest)
+ if !ok {
+ that2, ok := that.(SyncJobRequest)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
+ }
+ if this.BuilderId != that1.BuilderId {
+ return false
+ }
+ if !this.Job.Equal(that1.Job) {
+ return false
+ }
+ return true
+}
+func (this *SyncJobResponse) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
+
+ that1, ok := that.(*SyncJobResponse)
+ if !ok {
+ that2, ok := that.(SyncJobResponse)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
+ }
+ return true
+}
+func (this *Offsets) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
+
+ that1, ok := that.(*Offsets)
+ if !ok {
+ that2, ok := that.(Offsets)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
+ }
+ if this.Min != that1.Min {
+ return false
+ }
+ if this.Max != that1.Max {
+ return false
+ }
+ return true
+}
+func (this *Job) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
+
+ that1, ok := that.(*Job)
+ if !ok {
+ that2, ok := that.(Job)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
+ }
+ if this.Id != that1.Id {
+ return false
+ }
+ if this.Partition != that1.Partition {
+ return false
+ }
+ if !this.Offsets.Equal(that1.Offsets) {
+ return false
+ }
+ return true
+}
+func (this *GetJobRequest) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&proto.GetJobRequest{")
+ s = append(s, "BuilderId: "+fmt.Sprintf("%#v", this.BuilderId)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *GetJobResponse) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&proto.GetJobResponse{")
+ if this.Job != nil {
+ s = append(s, "Job: "+fmt.Sprintf("%#v", this.Job)+",\n")
+ }
+ s = append(s, "Ok: "+fmt.Sprintf("%#v", this.Ok)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *CompleteJobRequest) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&proto.CompleteJobRequest{")
+ s = append(s, "BuilderId: "+fmt.Sprintf("%#v", this.BuilderId)+",\n")
+ if this.Job != nil {
+ s = append(s, "Job: "+fmt.Sprintf("%#v", this.Job)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *CompleteJobResponse) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 4)
+ s = append(s, "&proto.CompleteJobResponse{")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *SyncJobRequest) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&proto.SyncJobRequest{")
+ s = append(s, "BuilderId: "+fmt.Sprintf("%#v", this.BuilderId)+",\n")
+ if this.Job != nil {
+ s = append(s, "Job: "+fmt.Sprintf("%#v", this.Job)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *SyncJobResponse) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 4)
+ s = append(s, "&proto.SyncJobResponse{")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *Offsets) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&proto.Offsets{")
+ s = append(s, "Min: "+fmt.Sprintf("%#v", this.Min)+",\n")
+ s = append(s, "Max: "+fmt.Sprintf("%#v", this.Max)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *Job) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 7)
+ s = append(s, "&proto.Job{")
+ s = append(s, "Id: "+fmt.Sprintf("%#v", this.Id)+",\n")
+ s = append(s, "Partition: "+fmt.Sprintf("%#v", this.Partition)+",\n")
+ if this.Offsets != nil {
+ s = append(s, "Offsets: "+fmt.Sprintf("%#v", this.Offsets)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func valueToGoStringBlockbuilder(v interface{}, typ string) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// BlockBuilderServiceClient is the client API for BlockBuilderService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type BlockBuilderServiceClient interface {
+ // GetJob requests a new job from the scheduler
+ GetJob(ctx context.Context, in *GetJobRequest, opts ...grpc.CallOption) (*GetJobResponse, error)
+ // CompleteJob notifies the scheduler that a job has been completed
+ CompleteJob(ctx context.Context, in *CompleteJobRequest, opts ...grpc.CallOption) (*CompleteJobResponse, error)
+ // SyncJob syncs job state with the scheduler
+ SyncJob(ctx context.Context, in *SyncJobRequest, opts ...grpc.CallOption) (*SyncJobResponse, error)
+}
+
+type blockBuilderServiceClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewBlockBuilderServiceClient(cc *grpc.ClientConn) BlockBuilderServiceClient {
+ return &blockBuilderServiceClient{cc}
+}
+
+func (c *blockBuilderServiceClient) GetJob(ctx context.Context, in *GetJobRequest, opts ...grpc.CallOption) (*GetJobResponse, error) {
+ out := new(GetJobResponse)
+ err := c.cc.Invoke(ctx, "/blockbuilder.types.BlockBuilderService/GetJob", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *blockBuilderServiceClient) CompleteJob(ctx context.Context, in *CompleteJobRequest, opts ...grpc.CallOption) (*CompleteJobResponse, error) {
+ out := new(CompleteJobResponse)
+ err := c.cc.Invoke(ctx, "/blockbuilder.types.BlockBuilderService/CompleteJob", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *blockBuilderServiceClient) SyncJob(ctx context.Context, in *SyncJobRequest, opts ...grpc.CallOption) (*SyncJobResponse, error) {
+ out := new(SyncJobResponse)
+ err := c.cc.Invoke(ctx, "/blockbuilder.types.BlockBuilderService/SyncJob", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// BlockBuilderServiceServer is the server API for BlockBuilderService service.
+type BlockBuilderServiceServer interface {
+ // GetJob requests a new job from the scheduler
+ GetJob(context.Context, *GetJobRequest) (*GetJobResponse, error)
+ // CompleteJob notifies the scheduler that a job has been completed
+ CompleteJob(context.Context, *CompleteJobRequest) (*CompleteJobResponse, error)
+ // SyncJob syncs job state with the scheduler
+ SyncJob(context.Context, *SyncJobRequest) (*SyncJobResponse, error)
+}
+
+// UnimplementedBlockBuilderServiceServer can be embedded to have forward compatible implementations.
+type UnimplementedBlockBuilderServiceServer struct {
+}
+
+func (*UnimplementedBlockBuilderServiceServer) GetJob(ctx context.Context, req *GetJobRequest) (*GetJobResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetJob not implemented")
+}
+func (*UnimplementedBlockBuilderServiceServer) CompleteJob(ctx context.Context, req *CompleteJobRequest) (*CompleteJobResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method CompleteJob not implemented")
+}
+func (*UnimplementedBlockBuilderServiceServer) SyncJob(ctx context.Context, req *SyncJobRequest) (*SyncJobResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method SyncJob not implemented")
+}
+
+func RegisterBlockBuilderServiceServer(s *grpc.Server, srv BlockBuilderServiceServer) {
+ s.RegisterService(&_BlockBuilderService_serviceDesc, srv)
+}
+
+func _BlockBuilderService_GetJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetJobRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(BlockBuilderServiceServer).GetJob(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/blockbuilder.types.BlockBuilderService/GetJob",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(BlockBuilderServiceServer).GetJob(ctx, req.(*GetJobRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _BlockBuilderService_CompleteJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CompleteJobRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(BlockBuilderServiceServer).CompleteJob(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/blockbuilder.types.BlockBuilderService/CompleteJob",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(BlockBuilderServiceServer).CompleteJob(ctx, req.(*CompleteJobRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _BlockBuilderService_SyncJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(SyncJobRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(BlockBuilderServiceServer).SyncJob(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/blockbuilder.types.BlockBuilderService/SyncJob",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(BlockBuilderServiceServer).SyncJob(ctx, req.(*SyncJobRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _BlockBuilderService_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "blockbuilder.types.BlockBuilderService",
+ HandlerType: (*BlockBuilderServiceServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "GetJob",
+ Handler: _BlockBuilderService_GetJob_Handler,
+ },
+ {
+ MethodName: "CompleteJob",
+ Handler: _BlockBuilderService_CompleteJob_Handler,
+ },
+ {
+ MethodName: "SyncJob",
+ Handler: _BlockBuilderService_SyncJob_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "pkg/blockbuilder/types/proto/blockbuilder.proto",
+}
+
+func (m *GetJobRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *GetJobRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GetJobRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.BuilderId) > 0 {
+ i -= len(m.BuilderId)
+ copy(dAtA[i:], m.BuilderId)
+ i = encodeVarintBlockbuilder(dAtA, i, uint64(len(m.BuilderId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *GetJobResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *GetJobResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GetJobResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Ok {
+ i--
+ if m.Ok {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.Job != nil {
+ {
+ size, err := m.Job.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintBlockbuilder(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *CompleteJobRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CompleteJobRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CompleteJobRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Job != nil {
+ {
+ size, err := m.Job.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintBlockbuilder(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.BuilderId) > 0 {
+ i -= len(m.BuilderId)
+ copy(dAtA[i:], m.BuilderId)
+ i = encodeVarintBlockbuilder(dAtA, i, uint64(len(m.BuilderId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *CompleteJobResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CompleteJobResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CompleteJobResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ return len(dAtA) - i, nil
+}
+
+func (m *SyncJobRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SyncJobRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SyncJobRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Job != nil {
+ {
+ size, err := m.Job.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintBlockbuilder(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.BuilderId) > 0 {
+ i -= len(m.BuilderId)
+ copy(dAtA[i:], m.BuilderId)
+ i = encodeVarintBlockbuilder(dAtA, i, uint64(len(m.BuilderId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *SyncJobResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SyncJobResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SyncJobResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ return len(dAtA) - i, nil
+}
+
+func (m *Offsets) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Offsets) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Offsets) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Max != 0 {
+ i = encodeVarintBlockbuilder(dAtA, i, uint64(m.Max))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.Min != 0 {
+ i = encodeVarintBlockbuilder(dAtA, i, uint64(m.Min))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Job) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Job) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Job) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Offsets != nil {
+ {
+ size, err := m.Offsets.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintBlockbuilder(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Partition != 0 {
+ i = encodeVarintBlockbuilder(dAtA, i, uint64(m.Partition))
+ i--
+ dAtA[i] = 0x10
+ }
+ if len(m.Id) > 0 {
+ i -= len(m.Id)
+ copy(dAtA[i:], m.Id)
+ i = encodeVarintBlockbuilder(dAtA, i, uint64(len(m.Id)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintBlockbuilder(dAtA []byte, offset int, v uint64) int {
+ offset -= sovBlockbuilder(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *GetJobRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.BuilderId)
+ if l > 0 {
+ n += 1 + l + sovBlockbuilder(uint64(l))
+ }
+ return n
+}
+
+func (m *GetJobResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Job != nil {
+ l = m.Job.Size()
+ n += 1 + l + sovBlockbuilder(uint64(l))
+ }
+ if m.Ok {
+ n += 2
+ }
+ return n
+}
+
+func (m *CompleteJobRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.BuilderId)
+ if l > 0 {
+ n += 1 + l + sovBlockbuilder(uint64(l))
+ }
+ if m.Job != nil {
+ l = m.Job.Size()
+ n += 1 + l + sovBlockbuilder(uint64(l))
+ }
+ return n
+}
+
+func (m *CompleteJobResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ return n
+}
+
+func (m *SyncJobRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.BuilderId)
+ if l > 0 {
+ n += 1 + l + sovBlockbuilder(uint64(l))
+ }
+ if m.Job != nil {
+ l = m.Job.Size()
+ n += 1 + l + sovBlockbuilder(uint64(l))
+ }
+ return n
+}
+
+func (m *SyncJobResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ return n
+}
+
+func (m *Offsets) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Min != 0 {
+ n += 1 + sovBlockbuilder(uint64(m.Min))
+ }
+ if m.Max != 0 {
+ n += 1 + sovBlockbuilder(uint64(m.Max))
+ }
+ return n
+}
+
+func (m *Job) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Id)
+ if l > 0 {
+ n += 1 + l + sovBlockbuilder(uint64(l))
+ }
+ if m.Partition != 0 {
+ n += 1 + sovBlockbuilder(uint64(m.Partition))
+ }
+ if m.Offsets != nil {
+ l = m.Offsets.Size()
+ n += 1 + l + sovBlockbuilder(uint64(l))
+ }
+ return n
+}
+
+func sovBlockbuilder(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozBlockbuilder(x uint64) (n int) {
+ return sovBlockbuilder(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *GetJobRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&GetJobRequest{`,
+ `BuilderId:` + fmt.Sprintf("%v", this.BuilderId) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *GetJobResponse) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&GetJobResponse{`,
+ `Job:` + strings.Replace(this.Job.String(), "Job", "Job", 1) + `,`,
+ `Ok:` + fmt.Sprintf("%v", this.Ok) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *CompleteJobRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&CompleteJobRequest{`,
+ `BuilderId:` + fmt.Sprintf("%v", this.BuilderId) + `,`,
+ `Job:` + strings.Replace(this.Job.String(), "Job", "Job", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *CompleteJobResponse) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&CompleteJobResponse{`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SyncJobRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&SyncJobRequest{`,
+ `BuilderId:` + fmt.Sprintf("%v", this.BuilderId) + `,`,
+ `Job:` + strings.Replace(this.Job.String(), "Job", "Job", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SyncJobResponse) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&SyncJobResponse{`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Offsets) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Offsets{`,
+ `Min:` + fmt.Sprintf("%v", this.Min) + `,`,
+ `Max:` + fmt.Sprintf("%v", this.Max) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Job) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Job{`,
+ `Id:` + fmt.Sprintf("%v", this.Id) + `,`,
+ `Partition:` + fmt.Sprintf("%v", this.Partition) + `,`,
+ `Offsets:` + strings.Replace(this.Offsets.String(), "Offsets", "Offsets", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func valueToStringBlockbuilder(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
+func (m *GetJobRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowBlockbuilder
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GetJobRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GetJobRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field BuilderId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowBlockbuilder
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthBlockbuilder
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthBlockbuilder
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.BuilderId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipBlockbuilder(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthBlockbuilder
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthBlockbuilder
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *GetJobResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowBlockbuilder
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GetJobResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GetJobResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Job", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowBlockbuilder
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthBlockbuilder
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthBlockbuilder
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Job == nil {
+ m.Job = &Job{}
+ }
+ if err := m.Job.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Ok", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowBlockbuilder
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Ok = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipBlockbuilder(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthBlockbuilder
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthBlockbuilder
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CompleteJobRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowBlockbuilder
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CompleteJobRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CompleteJobRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field BuilderId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowBlockbuilder
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthBlockbuilder
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthBlockbuilder
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.BuilderId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Job", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowBlockbuilder
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthBlockbuilder
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthBlockbuilder
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Job == nil {
+ m.Job = &Job{}
+ }
+ if err := m.Job.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipBlockbuilder(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthBlockbuilder
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthBlockbuilder
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CompleteJobResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowBlockbuilder
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CompleteJobResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CompleteJobResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipBlockbuilder(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthBlockbuilder
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthBlockbuilder
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SyncJobRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowBlockbuilder
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SyncJobRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SyncJobRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field BuilderId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowBlockbuilder
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthBlockbuilder
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthBlockbuilder
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.BuilderId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Job", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowBlockbuilder
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthBlockbuilder
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthBlockbuilder
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Job == nil {
+ m.Job = &Job{}
+ }
+ if err := m.Job.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipBlockbuilder(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthBlockbuilder
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthBlockbuilder
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SyncJobResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowBlockbuilder
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SyncJobResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SyncJobResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipBlockbuilder(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthBlockbuilder
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthBlockbuilder
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Offsets) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowBlockbuilder
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Offsets: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Offsets: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType)
+ }
+ m.Min = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowBlockbuilder
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Min |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType)
+ }
+ m.Max = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowBlockbuilder
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Max |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipBlockbuilder(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthBlockbuilder
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthBlockbuilder
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Job) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowBlockbuilder
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Job: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Job: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowBlockbuilder
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthBlockbuilder
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthBlockbuilder
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Id = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType)
+ }
+ m.Partition = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowBlockbuilder
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Partition |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Offsets", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowBlockbuilder
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthBlockbuilder
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthBlockbuilder
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Offsets == nil {
+ m.Offsets = &Offsets{}
+ }
+ if err := m.Offsets.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipBlockbuilder(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthBlockbuilder
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthBlockbuilder
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipBlockbuilder(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowBlockbuilder
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowBlockbuilder
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowBlockbuilder
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthBlockbuilder
+ }
+ iNdEx += length
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthBlockbuilder
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowBlockbuilder
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipBlockbuilder(dAtA[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthBlockbuilder
+ }
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
+var (
+ ErrInvalidLengthBlockbuilder = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowBlockbuilder = fmt.Errorf("proto: integer overflow")
+)
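
Every generated `Unmarshal` method above shares one decoding step: a base-128 varint loop (`wire |= uint64(b&0x7F) << shift`) whose result is split into a field number (`wire >> 3`) and a wire type (`wire & 0x7`). A minimal standalone sketch of that varint step; the `decodeVarint` helper here is illustrative only and is not part of the generated file:

```go
package main

import (
	"errors"
	"fmt"
)

// decodeVarint mirrors the shift-accumulate loop in the generated
// Unmarshal methods: each byte contributes its low 7 bits, and a set
// high bit means more bytes follow.
func decodeVarint(b []byte) (v uint64, n int, err error) {
	for shift := uint(0); shift < 64; shift += 7 {
		if n >= len(b) {
			return 0, 0, errors.New("unexpected EOF")
		}
		c := b[n]
		n++
		v |= uint64(c&0x7F) << shift
		if c < 0x80 {
			return v, n, nil
		}
	}
	return 0, 0, errors.New("varint overflows 64 bits")
}

func main() {
	// 300 encodes as 0xAC 0x02: 0x2C | (0x02 << 7) = 44 + 256 = 300.
	v, n, err := decodeVarint([]byte{0xAC, 0x02})
	fmt.Println(v, n, err) // 300 2 <nil>
}
```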
diff --git a/pkg/blockbuilder/types/proto/blockbuilder.proto b/pkg/blockbuilder/types/proto/blockbuilder.proto
new file mode 100644
index 0000000000000..89811989b821c
--- /dev/null
+++ b/pkg/blockbuilder/types/proto/blockbuilder.proto
@@ -0,0 +1,57 @@
+syntax = "proto3";
+
+package blockbuilder.types;
+
+option go_package = "github.com/grafana/loki/pkg/blockbuilder/types/proto";
+
+// BlockBuilderService defines the gRPC service for block builder communication
+service BlockBuilderService {
+ // GetJob requests a new job from the scheduler
+ rpc GetJob(GetJobRequest) returns (GetJobResponse) {}
+ // CompleteJob notifies the scheduler that a job has been completed
+ rpc CompleteJob(CompleteJobRequest) returns (CompleteJobResponse) {}
+ // SyncJob syncs job state with the scheduler
+ rpc SyncJob(SyncJobRequest) returns (SyncJobResponse) {}
+}
+
+// GetJobRequest represents a request for a new job
+message GetJobRequest {
+ string builder_id = 1;
+}
+
+// GetJobResponse contains the response for a job request
+message GetJobResponse {
+ Job job = 1;
+ bool ok = 2;
+}
+
+// CompleteJobRequest represents a job completion notification
+message CompleteJobRequest {
+ string builder_id = 1;
+ Job job = 2;
+}
+
+// CompleteJobResponse is an empty response for job completion
+message CompleteJobResponse {}
+
+// SyncJobRequest represents a job sync request
+message SyncJobRequest {
+ string builder_id = 1;
+ Job job = 2;
+}
+
+// SyncJobResponse is an empty response for job sync
+message SyncJobResponse {}
+
+// Offsets represents the start and end offsets for a job
+message Offsets {
+ int64 min = 1;
+ int64 max = 2;
+}
+
+// Job represents a block building job
+message Job {
+ string id = 1;
+ int32 partition = 2;
+ Offsets offsets = 3;
+}
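
The proto definition above pairs with the generated Go types earlier in the diff (`Job`, `Offsets`, and the request/response messages). A round-trip sketch, assuming gogo/protobuf generated the usual `Marshal` alongside the `Unmarshal` methods shown, and assuming the package resolves under the Loki v3 module path:

```go
package main

import (
	"fmt"

	"github.com/grafana/loki/v3/pkg/blockbuilder/types/proto" // assumed import path
)

func main() {
	job := &proto.Job{
		Id:        "job-1",
		Partition: 3,
		Offsets:   &proto.Offsets{Min: 100, Max: 200},
	}

	// Marshal is assumed to be generated alongside the Unmarshal
	// methods shown above, as gogo/protobuf normally does.
	buf, err := job.Marshal()
	if err != nil {
		panic(err)
	}

	var decoded proto.Job
	if err := decoded.Unmarshal(buf); err != nil {
		panic(err)
	}
	fmt.Println(decoded.Id, decoded.Partition, decoded.Offsets.Min, decoded.Offsets.Max)
}
```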
diff --git a/pkg/blockbuilder/builder/transport.go b/pkg/blockbuilder/types/transport.go
similarity index 66%
rename from pkg/blockbuilder/builder/transport.go
rename to pkg/blockbuilder/types/transport.go
index ae498459cb667..5659ffb48a4b4 100644
--- a/pkg/blockbuilder/builder/transport.go
+++ b/pkg/blockbuilder/types/transport.go
@@ -1,58 +1,56 @@
-package builder
+package types
import (
"context"
-
- "github.com/grafana/loki/v3/pkg/blockbuilder/types"
)
var (
- _ types.Transport = unimplementedTransport{}
- _ types.Transport = &MemoryTransport{}
+ _ Transport = unimplementedTransport{}
+ _ Transport = &MemoryTransport{}
)
// unimplementedTransport provides default implementations that panic
type unimplementedTransport struct{}
-func (t unimplementedTransport) SendGetJobRequest(_ context.Context, _ *types.GetJobRequest) (*types.GetJobResponse, error) {
+func (t unimplementedTransport) SendGetJobRequest(_ context.Context, _ *GetJobRequest) (*GetJobResponse, error) {
panic("unimplemented")
}
-func (t unimplementedTransport) SendCompleteJob(_ context.Context, _ *types.CompleteJobRequest) error {
+func (t unimplementedTransport) SendCompleteJob(_ context.Context, _ *CompleteJobRequest) error {
panic("unimplemented")
}
-func (t unimplementedTransport) SendSyncJob(_ context.Context, _ *types.SyncJobRequest) error {
+func (t unimplementedTransport) SendSyncJob(_ context.Context, _ *SyncJobRequest) error {
panic("unimplemented")
}
// MemoryTransport implements Transport interface for in-memory communication
type MemoryTransport struct {
- scheduler types.Scheduler
+ scheduler Scheduler
}
// NewMemoryTransport creates a new in-memory transport instance
-func NewMemoryTransport(scheduler types.Scheduler) *MemoryTransport {
+func NewMemoryTransport(scheduler Scheduler) *MemoryTransport {
return &MemoryTransport{
scheduler: scheduler,
}
}
-func (t *MemoryTransport) SendGetJobRequest(ctx context.Context, req *types.GetJobRequest) (*types.GetJobResponse, error) {
+func (t *MemoryTransport) SendGetJobRequest(ctx context.Context, req *GetJobRequest) (*GetJobResponse, error) {
job, ok, err := t.scheduler.HandleGetJob(ctx, req.BuilderID)
if err != nil {
return nil, err
}
- return &types.GetJobResponse{
+ return &GetJobResponse{
Job: job,
OK: ok,
}, nil
}
-func (t *MemoryTransport) SendCompleteJob(ctx context.Context, req *types.CompleteJobRequest) error {
+func (t *MemoryTransport) SendCompleteJob(ctx context.Context, req *CompleteJobRequest) error {
return t.scheduler.HandleCompleteJob(ctx, req.BuilderID, req.Job)
}
-func (t *MemoryTransport) SendSyncJob(ctx context.Context, req *types.SyncJobRequest) error {
+func (t *MemoryTransport) SendSyncJob(ctx context.Context, req *SyncJobRequest) error {
return t.scheduler.HandleSyncJob(ctx, req.BuilderID, req.Job)
}
|
feat
|
grpc transport (#15218)
|
cdb934ccee38375bdb9cc2263e398e1a7ac56db6
|
2024-03-19 18:50:33
|
Cyril Tovena
|
feat(blooms): Dedupe download queue items to reduce queue size (#12222)
| false
|
diff --git a/go.mod b/go.mod
index 5b4c32ddb214b..00383c41e0c46 100644
--- a/go.mod
+++ b/go.mod
@@ -120,6 +120,7 @@ require (
github.com/IBM/ibm-cos-sdk-go v1.10.0
github.com/axiomhq/hyperloglog v0.0.0-20240124082744-24bca3a5b39b
github.com/d4l3k/messagediff v1.2.1
+ github.com/dolthub/swiss v0.2.1
github.com/efficientgo/core v1.0.0-rc.2
github.com/fsnotify/fsnotify v1.6.0
github.com/gogo/googleapis v1.4.0
@@ -199,6 +200,7 @@ require (
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-metrics v0.0.1 // indirect
github.com/docker/go-units v0.5.0 // indirect
+ github.com/dolthub/maphash v0.1.0 // indirect
github.com/eapache/go-resiliency v1.3.0 // indirect
github.com/eapache/go-xerial-snappy v0.0.0-20230111030713-bf00bc1b83b6 // indirect
github.com/eapache/queue v1.1.0 // indirect
diff --git a/go.sum b/go.sum
index 744c904e823c2..1d7597a61ea96 100644
--- a/go.sum
+++ b/go.sum
@@ -523,6 +523,10 @@ github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/libnetwork v0.8.0-dev.2.0.20181012153825-d7b61745d166/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8=
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
+github.com/dolthub/maphash v0.1.0 h1:bsQ7JsF4FkkWyrP3oCnFJgrCUAFbFf3kOl4L/QxPDyQ=
+github.com/dolthub/maphash v0.1.0/go.mod h1:gkg4Ch4CdCDu5h6PMriVLawB7koZ+5ijb9puGMV50a4=
+github.com/dolthub/swiss v0.2.1 h1:gs2osYs5SJkAaH5/ggVJqXQxRXtWshF6uE0lgR/Y3Gw=
+github.com/dolthub/swiss v0.2.1/go.mod h1:8AhKZZ1HK7g18j7v7k6c5cYIGEZJcPn0ARsai8cUrh0=
github.com/drone/envsubst v1.0.3 h1:PCIBwNDYjs50AsLZPYdfhSATKaRg/FJmDc2D6+C2x8g=
github.com/drone/envsubst v1.0.3/go.mod h1:N2jZmlMufstn1KEqvbHjw40h1KyTmnVzHcSc9bFiJ2g=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
diff --git a/pkg/storage/stores/shipper/bloomshipper/fetcher.go b/pkg/storage/stores/shipper/bloomshipper/fetcher.go
index bf0d200f44719..8a5a1f528fa8d 100644
--- a/pkg/storage/stores/shipper/bloomshipper/fetcher.go
+++ b/pkg/storage/stores/shipper/bloomshipper/fetcher.go
@@ -9,6 +9,7 @@ import (
"sync"
"time"
+ "github.com/dolthub/swiss"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/pkg/errors"
@@ -20,7 +21,7 @@ import (
"github.com/grafana/loki/pkg/util/constants"
)
-var downloadQueueCapacity = 100000
+var downloadQueueCapacity = 10000
type options struct {
ignoreNotFound bool // ignore 404s from object storage; default=true
@@ -205,6 +206,7 @@ func (f *Fetcher) FetchBlocks(ctx context.Context, refs []BlockRef, opts ...Fetc
item: refs[i],
key: key,
idx: i,
+ async: cfg.fetchAsync,
results: responses,
errors: errors,
})
@@ -397,6 +399,7 @@ type downloadRequest[T any, R any] struct {
item T
key string
idx int
+ async bool
results chan<- downloadResponse[R]
errors chan<- error
}
@@ -408,12 +411,14 @@ type downloadResponse[R any] struct {
}
type downloadQueue[T any, R any] struct {
- queue chan downloadRequest[T, R]
- mu keymutex.KeyMutex
- wg sync.WaitGroup
- done chan struct{}
- process processFunc[T, R]
- logger log.Logger
+ queue chan downloadRequest[T, R]
+ enqueued *swiss.Map[string, struct{}]
+ enqueuedMutex sync.Mutex
+ mu keymutex.KeyMutex
+ wg sync.WaitGroup
+ done chan struct{}
+ process processFunc[T, R]
+ logger log.Logger
}
func newDownloadQueue[T any, R any](size, workers int, process processFunc[T, R], logger log.Logger) (*downloadQueue[T, R], error) {
@@ -424,11 +429,12 @@ func newDownloadQueue[T any, R any](size, workers int, process processFunc[T, R]
return nil, errors.New("queue requires at least 1 worker")
}
q := &downloadQueue[T, R]{
- queue: make(chan downloadRequest[T, R], size),
- mu: keymutex.NewHashed(workers),
- done: make(chan struct{}),
- process: process,
- logger: logger,
+ queue: make(chan downloadRequest[T, R], size),
+ enqueued: swiss.NewMap[string, struct{}](uint32(size)),
+ mu: keymutex.NewHashed(workers),
+ done: make(chan struct{}),
+ process: process,
+ logger: logger,
}
for i := 0; i < workers; i++ {
q.wg.Add(1)
@@ -438,7 +444,23 @@ func newDownloadQueue[T any, R any](size, workers int, process processFunc[T, R]
}
func (q *downloadQueue[T, R]) enqueue(t downloadRequest[T, R]) {
- q.queue <- t
+	if !t.async {
+		q.queue <- t
+		return
+	}
+	// for async tasks we attempt to dedupe tasks already in progress.
+ q.enqueuedMutex.Lock()
+ defer q.enqueuedMutex.Unlock()
+ if q.enqueued.Has(t.key) {
+ return
+ }
+ select {
+ case q.queue <- t:
+ q.enqueued.Put(t.key, struct{}{})
+ default:
+ // todo we probably want a metric on dropped items
+ level.Warn(q.logger).Log("msg", "download queue is full, dropping item", "key", t.key)
+ return
+ }
}
func (q *downloadQueue[T, R]) runWorker() {
@@ -464,6 +486,11 @@ func (q *downloadQueue[T, R]) do(ctx context.Context, task downloadRequest[T, R]
if err != nil {
level.Error(q.logger).Log("msg", "failed to unlock key in block lock", "key", task.key, "err", err)
}
+ if task.async {
+ q.enqueuedMutex.Lock()
+ _ = q.enqueued.Delete(task.key)
+ q.enqueuedMutex.Unlock()
+ }
}()
q.process(ctx, task)
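
The change above dedupes async downloads by tracking in-flight keys in a mutex-guarded set: `enqueue` skips keys already present, and `do` removes the key once processing finishes. A self-contained sketch of the same pattern, simplified to a plain `map` and string keys rather than the swiss map and keyed locks used in the diff:

```go
package main

import (
	"fmt"
	"sync"
)

// dedupeQueue drops async enqueues whose key is already in flight:
// the set is checked under a mutex before sending on the channel,
// and the key is removed once a worker finishes processing it.
type dedupeQueue struct {
	queue    chan string
	mu       sync.Mutex
	enqueued map[string]struct{}
}

func newDedupeQueue(size int) *dedupeQueue {
	return &dedupeQueue{
		queue:    make(chan string, size),
		enqueued: make(map[string]struct{}, size),
	}
}

func (q *dedupeQueue) enqueue(key string) {
	q.mu.Lock()
	defer q.mu.Unlock()
	if _, ok := q.enqueued[key]; ok {
		return // already in flight: dedupe
	}
	select {
	case q.queue <- key:
		q.enqueued[key] = struct{}{}
	default:
		fmt.Println("queue full, dropping", key)
	}
}

func (q *dedupeQueue) done(key string) {
	q.mu.Lock()
	delete(q.enqueued, key)
	q.mu.Unlock()
}

func main() {
	q := newDedupeQueue(10)
	q.enqueue("block-a")
	q.enqueue("block-a")      // deduped: no second entry
	fmt.Println(len(q.queue)) // 1
}
```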
diff --git a/vendor/github.com/dolthub/maphash/.gitignore b/vendor/github.com/dolthub/maphash/.gitignore
new file mode 100644
index 0000000000000..977a7cadfa203
--- /dev/null
+++ b/vendor/github.com/dolthub/maphash/.gitignore
@@ -0,0 +1,2 @@
+*.idea
+*.test
\ No newline at end of file
diff --git a/vendor/github.com/dolthub/maphash/LICENSE b/vendor/github.com/dolthub/maphash/LICENSE
new file mode 100644
index 0000000000000..261eeb9e9f8b2
--- /dev/null
+++ b/vendor/github.com/dolthub/maphash/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/dolthub/maphash/README.md b/vendor/github.com/dolthub/maphash/README.md
new file mode 100644
index 0000000000000..d91530f99f918
--- /dev/null
+++ b/vendor/github.com/dolthub/maphash/README.md
@@ -0,0 +1,4 @@
+# maphash
+
+Hash any `comparable` type using Golang's fast runtime hash.
+Uses [AES](https://en.wikipedia.org/wiki/AES_instruction_set) instructions when available.
\ No newline at end of file
diff --git a/vendor/github.com/dolthub/maphash/hasher.go b/vendor/github.com/dolthub/maphash/hasher.go
new file mode 100644
index 0000000000000..ef53596a24203
--- /dev/null
+++ b/vendor/github.com/dolthub/maphash/hasher.go
@@ -0,0 +1,48 @@
+// Copyright 2022 Dolthub, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package maphash
+
+import "unsafe"
+
+// Hasher hashes values of type K.
+// Uses runtime AES-based hashing.
+type Hasher[K comparable] struct {
+ hash hashfn
+ seed uintptr
+}
+
+// NewHasher creates a new Hasher[K] with a random seed.
+func NewHasher[K comparable]() Hasher[K] {
+ return Hasher[K]{
+ hash: getRuntimeHasher[K](),
+ seed: newHashSeed(),
+ }
+}
+
+// NewSeed returns a copy of |h| with a new hash seed.
+func NewSeed[K comparable](h Hasher[K]) Hasher[K] {
+ return Hasher[K]{
+ hash: h.hash,
+ seed: newHashSeed(),
+ }
+}
+
+// Hash hashes |key|.
+func (h Hasher[K]) Hash(key K) uint64 {
+ // promise to the compiler that pointer
+ // |p| does not escape the stack.
+ p := noescape(unsafe.Pointer(&key))
+ return uint64(h.hash(p, h.seed))
+}
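
Using the vendored hasher takes only the two functions defined above. A minimal sketch:

```go
package main

import (
	"fmt"

	"github.com/dolthub/maphash"
)

func main() {
	h := maphash.NewHasher[string]()
	// One hasher is self-consistent; two hashers differ because
	// each NewHasher call draws a fresh random seed.
	fmt.Println(h.Hash("foo") == h.Hash("foo")) // true
}
```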
diff --git a/vendor/github.com/dolthub/maphash/runtime.go b/vendor/github.com/dolthub/maphash/runtime.go
new file mode 100644
index 0000000000000..29cd6a8edf889
--- /dev/null
+++ b/vendor/github.com/dolthub/maphash/runtime.go
@@ -0,0 +1,111 @@
+// Copyright 2022 Dolthub, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file incorporates work covered by the following copyright and
+// permission notice:
+//
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.18 || go1.19
+// +build go1.18 go1.19
+
+package maphash
+
+import (
+ "math/rand"
+ "unsafe"
+)
+
+type hashfn func(unsafe.Pointer, uintptr) uintptr
+
+func getRuntimeHasher[K comparable]() (h hashfn) {
+ a := any(make(map[K]struct{}))
+ i := (*mapiface)(unsafe.Pointer(&a))
+ h = i.typ.hasher
+ return
+}
+
+func newHashSeed() uintptr {
+ return uintptr(rand.Int())
+}
+
+// noescape hides a pointer from escape analysis. It is the identity function
+// but escape analysis doesn't think the output depends on the input.
+// noescape is inlined and currently compiles down to zero instructions.
+// USE CAREFULLY!
+// This was copied from the runtime (via pkg "strings"); see issues 23382 and 7921.
+//
+//go:nosplit
+//go:nocheckptr
+func noescape(p unsafe.Pointer) unsafe.Pointer {
+ x := uintptr(p)
+ return unsafe.Pointer(x ^ 0)
+}
+
+type mapiface struct {
+ typ *maptype
+ val *hmap
+}
+
+// go/src/runtime/type.go
+type maptype struct {
+ typ _type
+ key *_type
+ elem *_type
+ bucket *_type
+ // function for hashing keys (ptr to key, seed) -> hash
+ hasher func(unsafe.Pointer, uintptr) uintptr
+ keysize uint8
+ elemsize uint8
+ bucketsize uint16
+ flags uint32
+}
+
+// go/src/runtime/map.go
+type hmap struct {
+ count int
+ flags uint8
+ B uint8
+ noverflow uint16
+ // hash seed
+ hash0 uint32
+ buckets unsafe.Pointer
+ oldbuckets unsafe.Pointer
+ nevacuate uintptr
+ // true type is *mapextra
+ // but we don't need this data
+ extra unsafe.Pointer
+}
+
+// go/src/runtime/type.go
+type tflag uint8
+type nameOff int32
+type typeOff int32
+
+// go/src/runtime/type.go
+type _type struct {
+ size uintptr
+ ptrdata uintptr
+ hash uint32
+ tflag tflag
+ align uint8
+ fieldAlign uint8
+ kind uint8
+ equal func(unsafe.Pointer, unsafe.Pointer) bool
+ gcdata *byte
+ str nameOff
+ ptrToThis typeOff
+}
diff --git a/vendor/github.com/dolthub/swiss/.gitignore b/vendor/github.com/dolthub/swiss/.gitignore
new file mode 100644
index 0000000000000..1f9adf93b65eb
--- /dev/null
+++ b/vendor/github.com/dolthub/swiss/.gitignore
@@ -0,0 +1,5 @@
+**/.idea/
+.vscode
+.run
+venv
+.DS_Store
\ No newline at end of file
diff --git a/vendor/github.com/dolthub/swiss/LICENSE b/vendor/github.com/dolthub/swiss/LICENSE
new file mode 100644
index 0000000000000..261eeb9e9f8b2
--- /dev/null
+++ b/vendor/github.com/dolthub/swiss/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/dolthub/swiss/README.md b/vendor/github.com/dolthub/swiss/README.md
new file mode 100644
index 0000000000000..71c6f7dd08c18
--- /dev/null
+++ b/vendor/github.com/dolthub/swiss/README.md
@@ -0,0 +1,54 @@
+# SwissMap
+
+SwissMap is a hash table adapted from the "SwissTable" family of hash tables from [Abseil](https://abseil.io/blog/20180927-swisstables). It uses [AES](https://github.com/dolthub/maphash) instructions for fast-hashing and performs key lookups in parallel using [SSE](https://en.wikipedia.org/wiki/Streaming_SIMD_Extensions) instructions. Because of these optimizations, SwissMap is faster and more memory efficient than Golang's built-in `map`. If you'd like to learn more about its design and implementation, check out this [blog post](https://www.dolthub.com/blog/2023-03-28-swiss-map/) announcing its release.
+
+
+## Example
+
+SwissMap exposes the same interface as the built-in `map`. Give it a try using this [Go playground](https://go.dev/play/p/JPDC5WhYN7g).
+
+```go
+package main
+
+import "github.com/dolthub/swiss"
+
+func main() {
+ m := swiss.NewMap[string, int](42)
+
+ m.Put("foo", 1)
+ m.Put("bar", 2)
+
+ m.Iter(func(k string, v int) (stop bool) {
+ println("iter", k, v)
+ return false // continue
+ })
+
+ if x, ok := m.Get("foo"); ok {
+ println(x)
+ }
+ if m.Has("bar") {
+ x, _ := m.Get("bar")
+ println(x)
+ }
+
+ m.Put("foo", -1)
+ m.Delete("bar")
+
+ if x, ok := m.Get("foo"); ok {
+ println(x)
+ }
+ if m.Has("bar") {
+ x, _ := m.Get("bar")
+ println(x)
+ }
+
+ m.Clear()
+
+ // Output:
+ // iter foo 1
+ // iter bar 2
+ // 1
+ // 2
+ // -1
+}
+```
diff --git a/vendor/github.com/dolthub/swiss/bits.go b/vendor/github.com/dolthub/swiss/bits.go
new file mode 100644
index 0000000000000..f435b6dc914c4
--- /dev/null
+++ b/vendor/github.com/dolthub/swiss/bits.go
@@ -0,0 +1,58 @@
+// Copyright 2023 Dolthub, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !amd64 || nosimd
+
+package swiss
+
+import (
+ "math/bits"
+ "unsafe"
+)
+
+const (
+ groupSize = 8
+ maxAvgGroupLoad = 7
+
+ loBits uint64 = 0x0101010101010101
+ hiBits uint64 = 0x8080808080808080
+)
+
+type bitset uint64
+
+func metaMatchH2(m *metadata, h h2) bitset {
+ // https://graphics.stanford.edu/~seander/bithacks.html##ValueInWord
+ return hasZeroByte(castUint64(m) ^ (loBits * uint64(h)))
+}
+
+func metaMatchEmpty(m *metadata) bitset {
+ return hasZeroByte(castUint64(m) ^ hiBits)
+}
+
+func nextMatch(b *bitset) uint32 {
+ s := uint32(bits.TrailingZeros64(uint64(*b)))
+ *b &= ^(1 << s) // clear bit |s|
+ return s >> 3 // div by 8
+}
+
+func hasZeroByte(x uint64) bitset {
+ return bitset(((x - loBits) & ^(x)) & hiBits)
+}
+
+func castUint64(m *metadata) uint64 {
+ return *(*uint64)((unsafe.Pointer)(m))
+}
+
+//go:linkname fastrand runtime.fastrand
+func fastrand() uint32
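
The portable fallback above finds matching control bytes with the classic "word has a zero byte" bit trick: the XOR in `metaMatchH2` zeroes matching lanes, and `hasZeroByte` then flags zero lanes by setting their high bits (the result is nonzero iff some lane is zero). A standalone check of that identity on one sample word:

```go
package main

import "fmt"

const (
	loBits uint64 = 0x0101010101010101
	hiBits uint64 = 0x8080808080808080
)

// hasZeroByte flags zero byte lanes of x, per the Stanford
// bit-hacks identity used in bits.go above: the result is
// nonzero exactly when x contains a zero byte.
func hasZeroByte(x uint64) uint64 {
	return ((x - loBits) & ^x) & hiBits
}

func main() {
	// Lane 5 (bits 40..47) of x is 0x00, so only that lane's
	// high bit survives: the result is 0x0000800000000000.
	x := uint64(0x1122004455667788)
	fmt.Printf("%016x\n", hasZeroByte(x)) // 0000800000000000
}
```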
diff --git a/vendor/github.com/dolthub/swiss/bits_amd64.go b/vendor/github.com/dolthub/swiss/bits_amd64.go
new file mode 100644
index 0000000000000..8b91f57cf2725
--- /dev/null
+++ b/vendor/github.com/dolthub/swiss/bits_amd64.go
@@ -0,0 +1,50 @@
+// Copyright 2023 Dolthub, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build amd64 && !nosimd
+
+package swiss
+
+import (
+ "math/bits"
+ _ "unsafe"
+
+ "github.com/dolthub/swiss/simd"
+)
+
+const (
+ groupSize = 16
+ maxAvgGroupLoad = 14
+)
+
+type bitset uint16
+
+func metaMatchH2(m *metadata, h h2) bitset {
+ b := simd.MatchMetadata((*[16]int8)(m), int8(h))
+ return bitset(b)
+}
+
+func metaMatchEmpty(m *metadata) bitset {
+ b := simd.MatchMetadata((*[16]int8)(m), empty)
+ return bitset(b)
+}
+
+func nextMatch(b *bitset) (s uint32) {
+ s = uint32(bits.TrailingZeros16(uint16(*b)))
+ *b &= ^(1 << s) // clear bit |s|
+ return
+}
+
+//go:linkname fastrand runtime.fastrand
+func fastrand() uint32
diff --git a/vendor/github.com/dolthub/swiss/map.go b/vendor/github.com/dolthub/swiss/map.go
new file mode 100644
index 0000000000000..e5ad203866bd9
--- /dev/null
+++ b/vendor/github.com/dolthub/swiss/map.go
@@ -0,0 +1,359 @@
+// Copyright 2023 Dolthub, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package swiss
+
+import (
+ "github.com/dolthub/maphash"
+)
+
+const (
+ maxLoadFactor = float32(maxAvgGroupLoad) / float32(groupSize)
+)
+
+// Map is an open-addressing hash map
+// based on Abseil's flat_hash_map.
+type Map[K comparable, V any] struct {
+ ctrl []metadata
+ groups []group[K, V]
+ hash maphash.Hasher[K]
+ resident uint32
+ dead uint32
+ limit uint32
+}
+
+// metadata is the h2 metadata array for a group.
+// find operations first probe the controls bytes
+// to filter candidates before matching keys
+type metadata [groupSize]int8
+
+// group is a group of 16 key-value pairs
+type group[K comparable, V any] struct {
+ keys [groupSize]K
+ values [groupSize]V
+}
+
+const (
+ h1Mask uint64 = 0xffff_ffff_ffff_ff80
+ h2Mask uint64 = 0x0000_0000_0000_007f
+ empty int8 = -128 // 0b1000_0000
+ tombstone int8 = -2 // 0b1111_1110
+)
+
+// h1 is a 57 bit hash prefix
+type h1 uint64
+
+// h2 is a 7 bit hash suffix
+type h2 int8
+
+// NewMap constructs a Map.
+func NewMap[K comparable, V any](sz uint32) (m *Map[K, V]) {
+ groups := numGroups(sz)
+ m = &Map[K, V]{
+ ctrl: make([]metadata, groups),
+ groups: make([]group[K, V], groups),
+ hash: maphash.NewHasher[K](),
+ limit: groups * maxAvgGroupLoad,
+ }
+ for i := range m.ctrl {
+ m.ctrl[i] = newEmptyMetadata()
+ }
+ return
+}
+
+// Has returns true if |key| is present in |m|.
+func (m *Map[K, V]) Has(key K) (ok bool) {
+ hi, lo := splitHash(m.hash.Hash(key))
+ g := probeStart(hi, len(m.groups))
+ for { // inlined find loop
+ matches := metaMatchH2(&m.ctrl[g], lo)
+ for matches != 0 {
+ s := nextMatch(&matches)
+ if key == m.groups[g].keys[s] {
+ ok = true
+ return
+ }
+ }
+ // |key| is not in group |g|,
+ // stop probing if we see an empty slot
+ matches = metaMatchEmpty(&m.ctrl[g])
+ if matches != 0 {
+ ok = false
+ return
+ }
+ g += 1 // linear probing
+ if g >= uint32(len(m.groups)) {
+ g = 0
+ }
+ }
+}
+
+// Get returns the |value| mapped by |key| if one exists.
+func (m *Map[K, V]) Get(key K) (value V, ok bool) {
+ hi, lo := splitHash(m.hash.Hash(key))
+ g := probeStart(hi, len(m.groups))
+ for { // inlined find loop
+ matches := metaMatchH2(&m.ctrl[g], lo)
+ for matches != 0 {
+ s := nextMatch(&matches)
+ if key == m.groups[g].keys[s] {
+ value, ok = m.groups[g].values[s], true
+ return
+ }
+ }
+ // |key| is not in group |g|,
+ // stop probing if we see an empty slot
+ matches = metaMatchEmpty(&m.ctrl[g])
+ if matches != 0 {
+ ok = false
+ return
+ }
+ g += 1 // linear probing
+ if g >= uint32(len(m.groups)) {
+ g = 0
+ }
+ }
+}
+
+// Put attempts to insert |key| and |value|
+func (m *Map[K, V]) Put(key K, value V) {
+ if m.resident >= m.limit {
+ m.rehash(m.nextSize())
+ }
+ hi, lo := splitHash(m.hash.Hash(key))
+ g := probeStart(hi, len(m.groups))
+ for { // inlined find loop
+ matches := metaMatchH2(&m.ctrl[g], lo)
+ for matches != 0 {
+ s := nextMatch(&matches)
+ if key == m.groups[g].keys[s] { // update
+ m.groups[g].keys[s] = key
+ m.groups[g].values[s] = value
+ return
+ }
+ }
+ // |key| is not in group |g|,
+ // stop probing if we see an empty slot
+ matches = metaMatchEmpty(&m.ctrl[g])
+ if matches != 0 { // insert
+ s := nextMatch(&matches)
+ m.groups[g].keys[s] = key
+ m.groups[g].values[s] = value
+ m.ctrl[g][s] = int8(lo)
+ m.resident++
+ return
+ }
+ g += 1 // linear probing
+ if g >= uint32(len(m.groups)) {
+ g = 0
+ }
+ }
+}
+
+// Delete attempts to remove |key|, returns true on success.
+func (m *Map[K, V]) Delete(key K) (ok bool) {
+ hi, lo := splitHash(m.hash.Hash(key))
+ g := probeStart(hi, len(m.groups))
+ for {
+ matches := metaMatchH2(&m.ctrl[g], lo)
+ for matches != 0 {
+ s := nextMatch(&matches)
+ if key == m.groups[g].keys[s] {
+ ok = true
+ // optimization: if |m.ctrl[g]| contains any empty
+ // metadata bytes, we can physically delete |key|
+ // rather than placing a tombstone.
+ // The observation is that any probes into group |g|
+ // would already be terminated by the existing empty
+ // slot, and therefore reclaiming slot |s| will not
+ // cause premature termination of probes into |g|.
+ if metaMatchEmpty(&m.ctrl[g]) != 0 {
+ m.ctrl[g][s] = empty
+ m.resident--
+ } else {
+ m.ctrl[g][s] = tombstone
+ m.dead++
+ }
+ var k K
+ var v V
+ m.groups[g].keys[s] = k
+ m.groups[g].values[s] = v
+ return
+ }
+ }
+ // |key| is not in group |g|,
+ // stop probing if we see an empty slot
+ matches = metaMatchEmpty(&m.ctrl[g])
+ if matches != 0 { // |key| absent
+ ok = false
+ return
+ }
+ g += 1 // linear probing
+ if g >= uint32(len(m.groups)) {
+ g = 0
+ }
+ }
+}
+
+// Iter iterates the elements of the Map, passing them to the callback.
+// It guarantees that any key in the Map will be visited only once, and
+// for un-mutated Maps, every key will be visited once. If the Map is
+// mutated during iteration, mutations will be reflected on return from
+// Iter, but the set of keys visited by Iter is non-deterministic.
+func (m *Map[K, V]) Iter(cb func(k K, v V) (stop bool)) {
+ // take a consistent view of the table in case
+ // we rehash during iteration
+ ctrl, groups := m.ctrl, m.groups
+ // pick a random starting group
+ g := randIntN(len(groups))
+ for n := 0; n < len(groups); n++ {
+ for s, c := range ctrl[g] {
+ if c == empty || c == tombstone {
+ continue
+ }
+ k, v := groups[g].keys[s], groups[g].values[s]
+ if stop := cb(k, v); stop {
+ return
+ }
+ }
+ g++
+ if g >= uint32(len(groups)) {
+ g = 0
+ }
+ }
+}
+
+// Clear removes all elements from the Map.
+func (m *Map[K, V]) Clear() {
+ for i, c := range m.ctrl {
+ for j := range c {
+ m.ctrl[i][j] = empty
+ }
+ }
+ var k K
+ var v V
+ for i := range m.groups {
+ g := &m.groups[i]
+ for i := range g.keys {
+ g.keys[i] = k
+ g.values[i] = v
+ }
+ }
+ m.resident, m.dead = 0, 0
+}
+
+// Count returns the number of elements in the Map.
+func (m *Map[K, V]) Count() int {
+ return int(m.resident - m.dead)
+}
+
+// Capacity returns the number of additional elements
+// that can be added to the Map before resizing.
+func (m *Map[K, V]) Capacity() int {
+ return int(m.limit - m.resident)
+}
+
+// find returns the location of |key| if present, or its insertion location if absent.
+// for performance, find is manually inlined into public methods.
+func (m *Map[K, V]) find(key K, hi h1, lo h2) (g, s uint32, ok bool) {
+ g = probeStart(hi, len(m.groups))
+ for {
+ matches := metaMatchH2(&m.ctrl[g], lo)
+ for matches != 0 {
+ s = nextMatch(&matches)
+ if key == m.groups[g].keys[s] {
+ return g, s, true
+ }
+ }
+ // |key| is not in group |g|,
+ // stop probing if we see an empty slot
+ matches = metaMatchEmpty(&m.ctrl[g])
+ if matches != 0 {
+ s = nextMatch(&matches)
+ return g, s, false
+ }
+ g += 1 // linear probing
+ if g >= uint32(len(m.groups)) {
+ g = 0
+ }
+ }
+}
+
+func (m *Map[K, V]) nextSize() (n uint32) {
+ n = uint32(len(m.groups)) * 2
+ if m.dead >= (m.resident / 2) {
+ n = uint32(len(m.groups))
+ }
+ return
+}
+
+func (m *Map[K, V]) rehash(n uint32) {
+ groups, ctrl := m.groups, m.ctrl
+ m.groups = make([]group[K, V], n)
+ m.ctrl = make([]metadata, n)
+ for i := range m.ctrl {
+ m.ctrl[i] = newEmptyMetadata()
+ }
+ m.hash = maphash.NewSeed(m.hash)
+ m.limit = n * maxAvgGroupLoad
+ m.resident, m.dead = 0, 0
+ for g := range ctrl {
+ for s := range ctrl[g] {
+ c := ctrl[g][s]
+ if c == empty || c == tombstone {
+ continue
+ }
+ m.Put(groups[g].keys[s], groups[g].values[s])
+ }
+ }
+}
+
+func (m *Map[K, V]) loadFactor() float32 {
+ slots := float32(len(m.groups) * groupSize)
+ return float32(m.resident-m.dead) / slots
+}
+
+// numGroups returns the minimum number of groups needed to store |n| elems.
+func numGroups(n uint32) (groups uint32) {
+ groups = (n + maxAvgGroupLoad - 1) / maxAvgGroupLoad
+ if groups == 0 {
+ groups = 1
+ }
+ return
+}
+
+func newEmptyMetadata() (meta metadata) {
+ for i := range meta {
+ meta[i] = empty
+ }
+ return
+}
+
+func splitHash(h uint64) (h1, h2) {
+ return h1((h & h1Mask) >> 7), h2(h & h2Mask)
+}
+
+func probeStart(hi h1, groups int) uint32 {
+ return fastModN(uint32(hi), uint32(groups))
+}
+
+// lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
+func fastModN(x, n uint32) uint32 {
+ return uint32((uint64(x) * uint64(n)) >> 32)
+}
+
+// randIntN returns a random number in the interval [0, n).
+func randIntN(n int) uint32 {
+ return fastModN(fastrand(), uint32(n))
+}
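The two hash helpers above reward a worked example. splitHash follows the
SwissTable convention (assuming the usual mask definitions earlier in the
file): the low 7 bits of the hash become the per-slot control byte (h2), and
the remaining bits, shifted down, select the group (h1). fastModN is Lemire's
multiply-shift reduction, mapping a 32-bit value into [0, n) without a
division. A standalone sketch:

	// (uint64(x) * uint64(n)) >> 32 scales x from [0, 2^32) into [0, n).
	fmt.Println(fastModN(1<<31, 6)) // 3: the midpoint of the 32-bit input
	                                // range lands at the midpoint of [0, 6)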
diff --git a/vendor/github.com/dolthub/swiss/simd/match.s b/vendor/github.com/dolthub/swiss/simd/match.s
new file mode 100644
index 0000000000000..4ae29e77b9abc
--- /dev/null
+++ b/vendor/github.com/dolthub/swiss/simd/match.s
@@ -0,0 +1,19 @@
+// Code generated by command: go run asm.go -out match.s -stubs match_amd64.go. DO NOT EDIT.
+
+//go:build amd64
+
+#include "textflag.h"
+
+// func MatchMetadata(metadata *[16]int8, hash int8) uint16
+// Requires: SSE2, SSSE3
+TEXT ·MatchMetadata(SB), NOSPLIT, $0-18
+ MOVQ metadata+0(FP), AX
+ MOVBLSX hash+8(FP), CX
+ MOVD CX, X0
+ PXOR X1, X1
+ PSHUFB X1, X0
+ MOVOU (AX), X1
+ PCMPEQB X1, X0
+ PMOVMSKB X0, AX
+ MOVW AX, ret+16(FP)
+ RET
diff --git a/vendor/github.com/dolthub/swiss/simd/match_amd64.go b/vendor/github.com/dolthub/swiss/simd/match_amd64.go
new file mode 100644
index 0000000000000..538c8e1248fbd
--- /dev/null
+++ b/vendor/github.com/dolthub/swiss/simd/match_amd64.go
@@ -0,0 +1,9 @@
+// Code generated by command: go run asm.go -out match.s -stubs match_amd64.go. DO NOT EDIT.
+
+//go:build amd64
+
+package simd
+
+// MatchMetadata performs a 16-way probe of |metadata| using SSE instructions
+// nb: |metadata| must be an aligned pointer
+func MatchMetadata(metadata *[16]int8, hash int8) uint16
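A plausible pure-Go rendering of what the SSE sequence computes: broadcast
the byte (PSHUFB), compare all 16 lanes at once (PCMPEQB), and gather the
high bit of each lane into a mask (PMOVMSKB). A sketch for reference only,
not the package's actual fallback:

	func matchMetadataGeneric(metadata *[16]int8, hash int8) (mask uint16) {
		for i := 0; i < 16; i++ {
			if metadata[i] == hash { // one "lane" of the byte comparison
				mask |= 1 << uint(i) // one bit per matching slot
			}
		}
		return mask
	}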
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 2992d1e44075e..d9c9bd146e7b3 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -545,6 +545,13 @@ github.com/docker/go-plugins-helpers/sdk
# github.com/docker/go-units v0.5.0
## explicit
github.com/docker/go-units
+# github.com/dolthub/maphash v0.1.0
+## explicit; go 1.18
+github.com/dolthub/maphash
+# github.com/dolthub/swiss v0.2.1
+## explicit; go 1.18
+github.com/dolthub/swiss
+github.com/dolthub/swiss/simd
# github.com/drone/envsubst v1.0.3
## explicit; go 1.13
github.com/drone/envsubst
|
feat
|
Dedupe download queue items to reduce queue size (#12222)
|
edc6b0bff76714ff584c20d7ff0235461a4f4a88
|
2023-03-31 17:57:23
|
Ed Welch
|
loki: Add a limit for the [range] value on range queries (#8343)
| false
|
diff --git a/docs/sources/configuration/_index.md b/docs/sources/configuration/_index.md
index 7e76a5ef62ff5..42cd103b9e690 100644
--- a/docs/sources/configuration/_index.md
+++ b/docs/sources/configuration/_index.md
@@ -2281,6 +2281,11 @@ The `limits_config` block configures global and per-tenant limits in Loki.
# CLI flag: -store.max-query-length
[max_query_length: <duration> | default = 30d1h]
+# Limit the length of the [range] inside a range query. Default is 0 or
+# unlimited
+# CLI flag: -querier.max-query-range
+[max_query_range: <duration> | default = 0s]
+
# Maximum number of queries that will be scheduled in parallel by the frontend.
# CLI flag: -querier.max-query-parallelism
[max_query_parallelism: <int> | default = 32]
diff --git a/pkg/logcli/client/file.go b/pkg/logcli/client/file.go
index 97082db507c69..229b625804a65 100644
--- a/pkg/logcli/client/file.go
+++ b/pkg/logcli/client/file.go
@@ -190,6 +190,10 @@ func (l *limiter) MaxQuerySeries(ctx context.Context, userID string) int {
return l.n
}
+func (l *limiter) MaxQueryRange(ctx context.Context, userID string) time.Duration {
+ return 0 * time.Second
+}
+
func (l *limiter) QueryTimeout(ctx context.Context, userID string) time.Duration {
return time.Minute * 5
}
diff --git a/pkg/logql/engine.go b/pkg/logql/engine.go
index b51898c3b47b8..fb3b8a99f7b95 100644
--- a/pkg/logql/engine.go
+++ b/pkg/logql/engine.go
@@ -17,6 +17,7 @@ import (
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
+ "github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/promql"
promql_parser "github.com/prometheus/prometheus/promql/parser"
@@ -320,21 +321,31 @@ func (q *query) evalSample(ctx context.Context, expr syntax.SampleExpr) (promql_
return q.evalVector(ctx, vec)
}
- expr, err := optimizeSampleExpr(expr)
+ tenantIDs, err := tenant.TenantIDs(ctx)
if err != nil {
return nil, err
}
- stepEvaluator, err := q.evaluator.StepEvaluator(ctx, q.evaluator, expr, q.params)
+ maxIntervalCapture := func(id string) time.Duration { return q.limits.MaxQueryRange(ctx, id) }
+ maxQueryInterval := validation.SmallestPositiveNonZeroDurationPerTenant(tenantIDs, maxIntervalCapture)
+ if maxQueryInterval != 0 {
+ err = q.checkIntervalLimit(expr, maxQueryInterval)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ expr, err = optimizeSampleExpr(expr)
if err != nil {
return nil, err
}
- defer util.LogErrorWithContext(ctx, "closing SampleExpr", stepEvaluator.Close)
- tenantIDs, err := tenant.TenantIDs(ctx)
+ stepEvaluator, err := q.evaluator.StepEvaluator(ctx, q.evaluator, expr, q.params)
if err != nil {
return nil, err
}
+ defer util.LogErrorWithContext(ctx, "closing SampleExpr", stepEvaluator.Close)
+
maxSeriesCapture := func(id string) int { return q.limits.MaxQuerySeries(ctx, id) }
maxSeries := validation.SmallestPositiveIntPerTenant(tenantIDs, maxSeriesCapture)
seriesIndex := map[uint64]*promql.Series{}
@@ -406,6 +417,20 @@ func (q *query) evalSample(ctx context.Context, expr syntax.SampleExpr) (promql_
return result, stepEvaluator.Error()
}
+func (q *query) checkIntervalLimit(expr syntax.SampleExpr, limit time.Duration) error {
+ var err error
+ expr.Walk(func(e interface{}) {
+ switch e := e.(type) {
+ case *syntax.RangeAggregationExpr:
+ if e.Left == nil || e.Left.Interval <= limit {
+ return
+ }
+ err = fmt.Errorf("%w: [%s] > [%s]", logqlmodel.ErrIntervalLimit, model.Duration(e.Left.Interval), model.Duration(limit))
+ }
+ })
+ return err
+}
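+// For example (an illustrative sketch reusing the names above): with a 24h
+// limit, `topk(1,rate({app="foo"}[2d]))` fails because Walk reaches a
+// RangeAggregationExpr whose Left.Interval is 48h, yielding
+// "[interval] value exceeds limit: [2d] > [1d]".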
+
func (q *query) evalLiteral(_ context.Context, expr *syntax.LiteralExpr) (promql_parser.Value, error) {
value, err := expr.Value()
if err != nil {
diff --git a/pkg/logql/engine_test.go b/pkg/logql/engine_test.go
index 52830d83c582d..1ba08343b6999 100644
--- a/pkg/logql/engine_test.go
+++ b/pkg/logql/engine_test.go
@@ -2444,6 +2444,38 @@ func TestEngine_MaxSeries(t *testing.T) {
}
}
+func TestEngine_MaxRangeInterval(t *testing.T) {
+ eng := NewEngine(EngineOpts{}, getLocalQuerier(100000), &fakeLimits{rangeLimit: 24 * time.Hour, maxSeries: 100000}, log.NewNopLogger())
+
+ for _, test := range []struct {
+ qs string
+ direction logproto.Direction
+ expectLimitErr bool
+ }{
+ {`topk(1,rate(({app=~"foo|bar"})[2d]))`, logproto.FORWARD, true},
+ {`topk(1,rate(({app=~"foo|bar"})[1d]))`, logproto.FORWARD, false},
+ {`topk(1,rate({app=~"foo|bar"}[12h]) / (rate({app="baz"}[23h]) + rate({app="fiz"}[25h])))`, logproto.FORWARD, true},
+ } {
+ t.Run(test.qs, func(t *testing.T) {
+ q := eng.Query(LiteralParams{
+ qs: test.qs,
+ start: time.Unix(0, 0),
+ end: time.Unix(100000, 0),
+ step: 60 * time.Second,
+ direction: test.direction,
+ limit: 1000,
+ })
+ _, err := q.Exec(user.InjectOrgID(context.Background(), "fake"))
+ if test.expectLimitErr {
+ require.Error(t, err)
+ require.ErrorIs(t, err, logqlmodel.ErrIntervalLimit)
+ } else {
+ require.NoError(t, err)
+ }
+ })
+ }
+}
+
// go test -mod=vendor ./pkg/logql/ -bench=. -benchmem -memprofile memprofile.out -cpuprofile cpuprofile.out
func BenchmarkRangeQuery100000(b *testing.B) {
benchmarkRangeQuery(int64(100000), b)
diff --git a/pkg/logql/limits.go b/pkg/logql/limits.go
index 8267070c746f7..df45707e51fb2 100644
--- a/pkg/logql/limits.go
+++ b/pkg/logql/limits.go
@@ -15,6 +15,7 @@ var (
// Limits allow the engine to fetch limits for a given users.
type Limits interface {
MaxQuerySeries(context.Context, string) int
+ MaxQueryRange(ctx context.Context, userID string) time.Duration
QueryTimeout(context.Context, string) time.Duration
BlockedQueries(context.Context, string) []*validation.BlockedQuery
}
@@ -23,6 +24,7 @@ type fakeLimits struct {
maxSeries int
timeout time.Duration
blockedQueries []*validation.BlockedQuery
+ rangeLimit time.Duration
requiredLabels []string
}
@@ -30,6 +32,10 @@ func (f fakeLimits) MaxQuerySeries(ctx context.Context, userID string) int {
return f.maxSeries
}
+func (f fakeLimits) MaxQueryRange(ctx context.Context, userID string) time.Duration {
+ return f.rangeLimit
+}
+
func (f fakeLimits) QueryTimeout(ctx context.Context, userID string) time.Duration {
return f.timeout
}
diff --git a/pkg/logql/log/filter.go b/pkg/logql/log/filter.go
index 385bb77c50b4a..0e044b23f339f 100644
--- a/pkg/logql/log/filter.go
+++ b/pkg/logql/log/filter.go
@@ -3,10 +3,11 @@ package log
import (
"bytes"
"fmt"
- "github.com/grafana/loki/pkg/util"
"unicode"
"unicode/utf8"
+ "github.com/grafana/loki/pkg/util"
+
"github.com/grafana/regexp"
"github.com/grafana/regexp/syntax"
diff --git a/pkg/logql/log/parser_hints_test.go b/pkg/logql/log/parser_hints_test.go
index 2c5b75e8bd54e..433b1be04565d 100644
--- a/pkg/logql/log/parser_hints_test.go
+++ b/pkg/logql/log/parser_hints_test.go
@@ -2,9 +2,10 @@
package log_test
import (
- "github.com/grafana/loki/pkg/logql/log"
"testing"
+ "github.com/grafana/loki/pkg/logql/log"
+
"github.com/prometheus/prometheus/model/labels"
"github.com/stretchr/testify/require"
diff --git a/pkg/logql/shardmapper.go b/pkg/logql/shardmapper.go
index 8422c1f7c0e6b..52e8d73966fcd 100644
--- a/pkg/logql/shardmapper.go
+++ b/pkg/logql/shardmapper.go
@@ -4,10 +4,11 @@ import (
"fmt"
"github.com/go-kit/log/level"
- "github.com/grafana/loki/pkg/util/math"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
+ "github.com/grafana/loki/pkg/util/math"
+
"github.com/grafana/loki/pkg/logql/syntax"
"github.com/grafana/loki/pkg/querier/astmapper"
"github.com/grafana/loki/pkg/storage/stores/index/stats"
diff --git a/pkg/logql/syntax/ast.go b/pkg/logql/syntax/ast.go
index 515d6f6835894..0e35b9d7ea62f 100644
--- a/pkg/logql/syntax/ast.go
+++ b/pkg/logql/syntax/ast.go
@@ -2,22 +2,24 @@ package syntax
import (
"fmt"
- "github.com/grafana/loki/pkg/util"
"math"
"regexp"
"strconv"
"strings"
"time"
+ "github.com/grafana/loki/pkg/util"
+
"github.com/pkg/errors"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/promql"
+ "github.com/grafana/regexp/syntax"
+
"github.com/grafana/loki/pkg/logql/log"
"github.com/grafana/loki/pkg/logql/log/logfmt"
"github.com/grafana/loki/pkg/logqlmodel"
- "github.com/grafana/regexp/syntax"
)
// Expr is the root expression which can be a SampleExpr or LogSelectorExpr
diff --git a/pkg/logqlmodel/error.go b/pkg/logqlmodel/error.go
index 498adf6a341f8..9491a8f3342c1 100644
--- a/pkg/logqlmodel/error.go
+++ b/pkg/logqlmodel/error.go
@@ -13,6 +13,7 @@ var (
ErrParse = errors.New("failed to parse the log query")
ErrPipeline = errors.New("failed execute pipeline")
ErrLimit = errors.New("limit reached while evaluating the query")
+ ErrIntervalLimit = errors.New("[interval] value exceeds limit")
ErrBlocked = errors.New("query blocked by policy")
ErrorLabel = "__error__"
PreserveErrorLabel = "__preserve_error__"
diff --git a/pkg/querier/queryrange/roundtrip_test.go b/pkg/querier/queryrange/roundtrip_test.go
index f5516ebb6b8d6..8d8689af24102 100644
--- a/pkg/querier/queryrange/roundtrip_test.go
+++ b/pkg/querier/queryrange/roundtrip_test.go
@@ -922,6 +922,10 @@ func (f fakeLimits) MaxQueryLength(context.Context, string) time.Duration {
return f.maxQueryLength
}
+func (f fakeLimits) MaxQueryRange(context.Context, string) time.Duration {
+ return time.Second
+}
+
func (f fakeLimits) MaxQueryParallelism(context.Context, string) int {
return f.maxQueryParallelism
}
diff --git a/pkg/util/querylimits/limiter.go b/pkg/util/querylimits/limiter.go
index 71ede2af7b1b5..051e31270f137 100644
--- a/pkg/util/querylimits/limiter.go
+++ b/pkg/util/querylimits/limiter.go
@@ -46,6 +46,17 @@ func (l *Limiter) MaxQueryLookback(ctx context.Context, userID string) time.Dura
return time.Duration(requestLimits.MaxQueryLookback)
}
+// MaxQueryRange returns the max query range/interval of a query.
+func (l *Limiter) MaxQueryRange(ctx context.Context, userID string) time.Duration {
+ original := l.CombinedLimits.MaxQueryRange(ctx, userID)
+ requestLimits := ExtractQueryLimitsContext(ctx)
+ if requestLimits == nil || requestLimits.MaxQueryRange == 0 || time.Duration(requestLimits.MaxQueryRange) > original {
+ return original
+ }
+ level.Debug(logutil.WithContext(ctx, l.logger)).Log("msg", "using request limit", "limit", "MaxQueryRange", "tenant", userID, "query-limit", time.Duration(requestLimits.MaxQueryRange), "original-limit", original)
+ return time.Duration(requestLimits.MaxQueryRange)
+}
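+// Resolution sketch: the request-scoped limit only wins when it is set and
+// stricter than the per-tenant one, e.g. tenant 24h + header 1h -> 1h,
+// tenant 24h + header 48h -> 24h, tenant 24h + no header -> 24h.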
+
// MaxEntriesLimitPerQuery returns the limit to number of entries the querier should return per query.
func (l *Limiter) MaxEntriesLimitPerQuery(ctx context.Context, userID string) int {
original := l.CombinedLimits.MaxEntriesLimitPerQuery(ctx, userID)
diff --git a/pkg/util/querylimits/limiter_test.go b/pkg/util/querylimits/limiter_test.go
index a963dba801f6e..ad80fa34ec186 100644
--- a/pkg/util/querylimits/limiter_test.go
+++ b/pkg/util/querylimits/limiter_test.go
@@ -37,6 +37,7 @@ func TestLimiter_Defaults(t *testing.T) {
QueryTimeout: model.Duration(30 * time.Second),
MaxQueryLookback: model.Duration(30 * time.Second),
MaxQueryLength: model.Duration(30 * time.Second),
+ MaxQueryRange: model.Duration(30 * time.Second),
MaxEntriesLimitPerQuery: 10,
RequiredLabels: []string{"foo", "bar"},
RequiredNumberLabels: 10,
@@ -50,6 +51,7 @@ func TestLimiter_Defaults(t *testing.T) {
expectedLimits := QueryLimits{
MaxQueryLength: model.Duration(30 * time.Second),
MaxQueryLookback: model.Duration(30 * time.Second),
+ MaxQueryRange: model.Duration(30 * time.Second),
MaxEntriesLimitPerQuery: 10,
QueryTimeout: model.Duration(30 * time.Second),
MaxQueryBytesRead: 10,
@@ -69,12 +71,17 @@ func TestLimiter_Defaults(t *testing.T) {
require.Equal(t, expectedLimits.MaxQueryBytesRead.Val(), maxQueryBytesRead)
requiredNumberLabels := l.RequiredNumberLabels(ctx, "fake")
require.Equal(t, expectedLimits.RequiredNumberLabels, requiredNumberLabels)
+ maxQueryRange := l.MaxQueryRange(ctx, "fake")
+ require.Equal(t, time.Duration(expectedLimits.MaxQueryRange), maxQueryRange)
- var limits QueryLimits
+ // Deserialized with defaults
+ limits, err := UnmarshalQueryLimits([]byte(`{}`))
+ require.NoError(t, err)
expectedLimits2 := QueryLimits{
MaxQueryLength: model.Duration(30 * time.Second),
MaxQueryLookback: model.Duration(30 * time.Second),
+ MaxQueryRange: model.Duration(30 * time.Second),
MaxEntriesLimitPerQuery: 10,
QueryTimeout: model.Duration(29 * time.Second),
RequiredLabels: []string{"foo", "bar"},
@@ -82,7 +89,7 @@ func TestLimiter_Defaults(t *testing.T) {
MaxQueryBytesRead: 10,
}
{
- ctx2 := InjectQueryLimitsContext(context.Background(), limits)
+ ctx2 := InjectQueryLimitsContext(context.Background(), *limits)
queryLookback := l.MaxQueryLookback(ctx2, "fake")
require.Equal(t, time.Duration(expectedLimits2.MaxQueryLookback), queryLookback)
queryLength := l.MaxQueryLength(ctx2, "fake")
@@ -95,6 +102,8 @@ func TestLimiter_Defaults(t *testing.T) {
require.Equal(t, expectedLimits2.MaxQueryBytesRead.Val(), maxQueryBytesRead)
requiredNumberLabels := l.RequiredNumberLabels(ctx2, "fake")
require.Equal(t, expectedLimits2.RequiredNumberLabels, requiredNumberLabels)
+ maxQueryRange := l.MaxQueryRange(ctx, "fake")
+ require.Equal(t, time.Duration(expectedLimits2.MaxQueryRange), maxQueryRange)
}
}
@@ -105,6 +114,7 @@ func TestLimiter_RejectHighLimits(t *testing.T) {
tLimits["fake"] = &validation.Limits{
MaxQueryLookback: model.Duration(30 * time.Second),
MaxQueryLength: model.Duration(30 * time.Second),
+ MaxQueryRange: model.Duration(30 * time.Second),
MaxEntriesLimitPerQuery: 10,
QueryTimeout: model.Duration(30 * time.Second),
RequiredNumberLabels: 10,
@@ -148,6 +158,7 @@ func TestLimiter_AcceptLowerLimits(t *testing.T) {
tLimits["fake"] = &validation.Limits{
MaxQueryLookback: model.Duration(30 * time.Second),
MaxQueryLength: model.Duration(30 * time.Second),
+ MaxQueryRange: model.Duration(2 * 24 * time.Hour),
MaxEntriesLimitPerQuery: 10,
QueryTimeout: model.Duration(30 * time.Second),
RequiredNumberLabels: 10,
@@ -160,6 +171,7 @@ func TestLimiter_AcceptLowerLimits(t *testing.T) {
limits := QueryLimits{
MaxQueryLength: model.Duration(29 * time.Second),
MaxQueryLookback: model.Duration(29 * time.Second),
+ MaxQueryRange: model.Duration(30 * time.Second),
MaxEntriesLimitPerQuery: 9,
QueryTimeout: model.Duration(29 * time.Second),
MaxQueryBytesRead: 9,
@@ -175,6 +187,7 @@ func TestLimiter_AcceptLowerLimits(t *testing.T) {
require.Equal(t, time.Duration(limits.QueryTimeout), l.QueryTimeout(ctx, "fake"))
require.Equal(t, limits.MaxQueryBytesRead.Val(), l.MaxQueryBytesRead(ctx, "fake"))
require.Equal(t, limits.RequiredNumberLabels, l.RequiredNumberLabels(ctx, "fake"))
+ require.Equal(t, time.Duration(limits.MaxQueryRange), l.MaxQueryRange(ctx, "fake"))
}
func TestLimiter_MergeLimits(t *testing.T) {
diff --git a/pkg/util/querylimits/middleware_test.go b/pkg/util/querylimits/middleware_test.go
index 8080fa23dcd24..2ba1b1608031d 100644
--- a/pkg/util/querylimits/middleware_test.go
+++ b/pkg/util/querylimits/middleware_test.go
@@ -27,6 +27,7 @@ func Test_MiddlewareWithoutHeader(t *testing.T) {
func Test_MiddlewareWithHeader(t *testing.T) {
limits := QueryLimits{
+ model.Duration(1 * time.Second),
model.Duration(1 * time.Second),
model.Duration(1 * time.Second),
1,
diff --git a/pkg/util/querylimits/propagation.go b/pkg/util/querylimits/propagation.go
index 724ecacd89cbd..95e0182f62f9b 100644
--- a/pkg/util/querylimits/propagation.go
+++ b/pkg/util/querylimits/propagation.go
@@ -5,8 +5,9 @@ import (
"encoding/json"
"net/http"
- "github.com/grafana/loki/pkg/util/flagext"
"github.com/prometheus/common/model"
+
+ "github.com/grafana/loki/pkg/util/flagext"
)
// Context key type used to avoid collisions
@@ -22,6 +23,7 @@ const (
// to support user-friendly duration format (e.g: "1h30m45s") in JSON value.
type QueryLimits struct {
MaxQueryLength model.Duration `json:"maxQueryLength,omitempty"`
+ MaxQueryRange model.Duration `json:"maxQueryInterval,omitempty"`
MaxQueryLookback model.Duration `json:"maxQueryLookback,omitempty"`
MaxEntriesLimitPerQuery int `json:"maxEntriesLimitPerQuery,omitempty"`
QueryTimeout model.Duration `json:"queryTimeout,omitempty"`
diff --git a/pkg/validation/limits.go b/pkg/validation/limits.go
index d1d2aac9fce6e..93989f0078bcc 100644
--- a/pkg/validation/limits.go
+++ b/pkg/validation/limits.go
@@ -85,6 +85,7 @@ type Limits struct {
MaxQuerySeries int `yaml:"max_query_series" json:"max_query_series"`
MaxQueryLookback model.Duration `yaml:"max_query_lookback" json:"max_query_lookback"`
MaxQueryLength model.Duration `yaml:"max_query_length" json:"max_query_length"`
+ MaxQueryRange model.Duration `yaml:"max_query_range" json:"max_query_range"`
MaxQueryParallelism int `yaml:"max_query_parallelism" json:"max_query_parallelism"`
TSDBMaxQueryParallelism int `yaml:"tsdb_max_query_parallelism" json:"tsdb_max_query_parallelism"`
CardinalityLimit int `yaml:"cardinality_limit" json:"cardinality_limit"`
@@ -221,6 +222,8 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) {
_ = l.MaxQueryLength.Set("721h")
f.Var(&l.MaxQueryLength, "store.max-query-length", "The limit to length of chunk store queries. 0 to disable.")
f.IntVar(&l.MaxQuerySeries, "querier.max-query-series", 500, "Limit the maximum of unique series that is returned by a metric query. When the limit is reached an error is returned.")
+ _ = l.MaxQueryRange.Set("0s")
+ f.Var(&l.MaxQueryRange, "querier.max-query-range", "Limit the length of the [range] inside a range query. Default is 0 or unlimited")
_ = l.QueryTimeout.Set(DefaultPerTenantQueryTimeout)
f.Var(&l.QueryTimeout, "querier.query-timeout", "Timeout when querying backends (ingesters or storage) during the execution of a query request. If a specific per-tenant timeout is used, this timeout is ignored.")
@@ -442,6 +445,11 @@ func (o *Overrides) MaxQuerySeries(ctx context.Context, userID string) int {
return o.getOverridesForUser(userID).MaxQuerySeries
}
+// MaxQueryRange returns the limit for the max [range] value that can be in a range query
+func (o *Overrides) MaxQueryRange(ctx context.Context, userID string) time.Duration {
+ return time.Duration(o.getOverridesForUser(userID).MaxQueryRange)
+}
+
// MaxQueriersPerUser returns the maximum number of queriers that can handle requests for this user.
func (o *Overrides) MaxQueriersPerUser(userID string) int {
return o.getOverridesForUser(userID).MaxQueriersPerTenant
|
loki
|
Add a limit for the [range] value on range queries (#8343)
|
75e1d97d85aa389a030aec732f5a9598a139ed77
|
2022-06-15 21:01:59
|
Karen Miller
|
docs: revise changelog entry for PR 5626 (#6406)
| false
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0bbbf592373c3..a32e7179baaf7 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -27,7 +27,7 @@
* [5662](https://github.com/grafana/loki/pull/5662) **ssncferreira** **chaudum** Improve performance of instant queries by splitting range into multiple subqueries that are executed in parallel.
* [5685](https://github.com/grafana/loki/pull/5625) **chaudum** Fix bug in push request parser that allowed users to send arbitrary non-string data as "log line".
* [5707](https://github.com/grafana/loki/pull/5707) **franzwong** Rename Promtail configuration parameter from `limit_config` to `limits_config`.
-* [5626](https://github.com/grafana/loki/pull/5626) **jeschkies** Support multi-tenant select logs and samples queries.
+* [5626](https://github.com/grafana/loki/pull/5626) **jeschkies** Apply query limits to multi-tenant queries by choosing the most restrictive limit from the set of tenant limits.
* [5622](https://github.com/grafana/loki/pull/5622) **chaudum**: Fixed a bug in the query splitter that caused the `interval` query parameter to be ignored and therefore returning more logs than expected.
* [5521](https://github.com/grafana/loki/pull/5521) **cstyan**: Moved stream lag configuration to the top-level clients configuration structure, and refactored stream lag metric. This resolves a bug with duplicate metric collection when a single Promtail binary is running multiple Promtail clients.
* [5568](https://github.com/grafana/loki/pull/5568) **afayngelerindbx**: Fix Loki Canary panics that were due to concurrent execution of `confirmMissing`.
|
docs
|
revise changelog entry for PR 5626 (#6406)
|
b05c4f728839f0de1b8386394c99d54c5fc03cc1
|
2024-05-13 12:11:17
|
Kaviraj Kanagaraj
|
chore(logging): Add entry's timestamp when rejected with `too far behind` (#12933)
| false
|
diff --git a/pkg/chunkenc/interface.go b/pkg/chunkenc/interface.go
index b96d9f705d092..8d6f5e1e8dd60 100644
--- a/pkg/chunkenc/interface.go
+++ b/pkg/chunkenc/interface.go
@@ -24,6 +24,10 @@ var (
)
type errTooFarBehind struct {
+ // original timestamp of the entry itself.
+ entryTs time.Time
+
+ // cutoff is the oldest acceptable timestamp of the `stream` that the entry belongs to.
cutoff time.Time
}
@@ -32,12 +36,12 @@ func IsErrTooFarBehind(err error) bool {
return ok
}
-func ErrTooFarBehind(cutoff time.Time) error {
- return &errTooFarBehind{cutoff: cutoff}
+func ErrTooFarBehind(entryTs, cutoff time.Time) error {
+ return &errTooFarBehind{entryTs: entryTs, cutoff: cutoff}
}
func (m *errTooFarBehind) Error() string {
- return "entry too far behind, oldest acceptable timestamp is: " + m.cutoff.Format(time.RFC3339)
+ return fmt.Sprintf("entry too far behind, entry timestamp is: %s, oldest acceptable timestamp is: %s", m.entryTs.Format(time.RFC3339), m.cutoff.Format(time.RFC3339))
}
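// With both fields captured, the rendered message pins down each side of the
// comparison; a quick sketch of the new format:
//
//	err := ErrTooFarBehind(time.Unix(0, 0).UTC(), time.Unix(3600, 0).UTC())
//	fmt.Println(err)
//	// entry too far behind, entry timestamp is: 1970-01-01T00:00:00Z,
//	// oldest acceptable timestamp is: 1970-01-01T01:00:00Z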
func IsOutOfOrderErr(err error) bool {
diff --git a/pkg/chunkenc/interface_test.go b/pkg/chunkenc/interface_test.go
index daea36cb38e72..ed81c4d3604e4 100644
--- a/pkg/chunkenc/interface_test.go
+++ b/pkg/chunkenc/interface_test.go
@@ -31,7 +31,9 @@ func TestParseEncoding(t *testing.T) {
}
func TestIsOutOfOrderErr(t *testing.T) {
- for _, err := range []error{ErrOutOfOrder, ErrTooFarBehind(time.Now())} {
+ now := time.Now()
+
+ for _, err := range []error{ErrOutOfOrder, ErrTooFarBehind(now, now)} {
require.Equal(t, true, IsOutOfOrderErr(err))
}
}
diff --git a/pkg/ingester/stream.go b/pkg/ingester/stream.go
index d7a29b73e802d..6bf75dfa1ac54 100644
--- a/pkg/ingester/stream.go
+++ b/pkg/ingester/stream.go
@@ -394,7 +394,7 @@ func (s *stream) validateEntries(entries []logproto.Entry, isReplay, rateLimitWh
// The validity window for unordered writes is the highest timestamp present minus 1/2 * max-chunk-age.
cutoff := highestTs.Add(-s.cfg.MaxChunkAge / 2)
if !isReplay && s.unorderedWrites && !highestTs.IsZero() && cutoff.After(entries[i].Timestamp) {
- failedEntriesWithError = append(failedEntriesWithError, entryWithError{&entries[i], chunkenc.ErrTooFarBehind(cutoff)})
+ failedEntriesWithError = append(failedEntriesWithError, entryWithError{&entries[i], chunkenc.ErrTooFarBehind(entries[i].Timestamp, cutoff)})
s.writeFailures.Log(s.tenant, fmt.Errorf("%w for stream %s", failedEntriesWithError[len(failedEntriesWithError)-1].e, s.labels))
outOfOrderSamples++
outOfOrderBytes += lineBytes
diff --git a/pkg/ingester/stream_test.go b/pkg/ingester/stream_test.go
index 26eef4e3a7936..af877bf88da9e 100644
--- a/pkg/ingester/stream_test.go
+++ b/pkg/ingester/stream_test.go
@@ -84,8 +84,9 @@ func TestMaxReturnedStreamsErrors(t *testing.T) {
var expected bytes.Buffer
for i := 0; i < tc.expectErrs; i++ {
fmt.Fprintf(&expected,
- "entry with timestamp %s ignored, reason: 'entry too far behind, oldest acceptable timestamp is: %s',\n",
+ "entry with timestamp %s ignored, reason: 'entry too far behind, entry timestamp is: %s, oldest acceptable timestamp is: %s',\n",
time.Unix(int64(i), 0).String(),
+ newLines[i].Timestamp.Format(time.RFC3339),
time.Unix(int64(numLogs), 0).Format(time.RFC3339),
)
}
|
chore
|
Add entry's timestamp when rejected with `too far behind` (#12933)
|
98e7da0a388009a634a521c333bc28d73023e6d9
|
2024-12-10 03:34:14
|
renovate[bot]
|
fix(deps): update module github.com/baidubce/bce-sdk-go to v0.9.206 (#15324)
| false
|
diff --git a/go.mod b/go.mod
index bac9587705b5e..9f89284834934 100644
--- a/go.mod
+++ b/go.mod
@@ -21,7 +21,7 @@ require (
github.com/alicebob/miniredis/v2 v2.33.0
github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible
github.com/aws/aws-sdk-go v1.55.5
- github.com/baidubce/bce-sdk-go v0.9.205
+ github.com/baidubce/bce-sdk-go v0.9.206
github.com/bmatcuk/doublestar/v4 v4.7.1
github.com/c2h5oh/datasize v0.0.0-20231215233829-aa82cc1e6500
github.com/cespare/xxhash/v2 v2.3.0
diff --git a/go.sum b/go.sum
index f366e1c40596b..59c8f36a6b101 100644
--- a/go.sum
+++ b/go.sum
@@ -1008,8 +1008,8 @@ github.com/aws/smithy-go v1.11.1 h1:IQ+lPZVkSM3FRtyaDox41R8YS6iwPMYIreejOgPW49g=
github.com/aws/smithy-go v1.11.1/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM=
github.com/axiomhq/hyperloglog v0.2.1 h1:z+rouIlYdpZ+DVfnQigBimhQL6OKHIL3e8+hMiud5/c=
github.com/axiomhq/hyperloglog v0.2.1/go.mod h1:WCdOZ8PNJKNcBw3xFZ7iHlnUn1nDVHK/XToLjjmySh4=
-github.com/baidubce/bce-sdk-go v0.9.205 h1:9cx93gC4FSu3W3G4NkDfFl0XMUycCpvQN+nB3doNmvg=
-github.com/baidubce/bce-sdk-go v0.9.205/go.mod h1:zbYJMQwE4IZuyrJiFO8tO8NbtYiKTFTbwh4eIsqjVdg=
+github.com/baidubce/bce-sdk-go v0.9.206 h1:1nmKLHWCkPzpmVATiC15+4q/lYkx4PdXd2qKfYUzTes=
+github.com/baidubce/bce-sdk-go v0.9.206/go.mod h1:zbYJMQwE4IZuyrJiFO8tO8NbtYiKTFTbwh4eIsqjVdg=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0=
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
diff --git a/vendor/github.com/baidubce/bce-sdk-go/bce/config.go b/vendor/github.com/baidubce/bce-sdk-go/bce/config.go
index dde687a70cc0d..90cbb873f3992 100644
--- a/vendor/github.com/baidubce/bce-sdk-go/bce/config.go
+++ b/vendor/github.com/baidubce/bce-sdk-go/bce/config.go
@@ -26,7 +26,7 @@ import (
// Constants and default values for the package bce
const (
- SDK_VERSION = "0.9.205"
+ SDK_VERSION = "0.9.206"
URI_PREFIX = "/" // now support uri without prefix "v1" so just set root path
DEFAULT_DOMAIN = "baidubce.com"
DEFAULT_PROTOCOL = "http"
diff --git a/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/bucket.go b/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/bucket.go
index 3e05fcd0a05ea..c6aa312ab4f60 100644
--- a/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/bucket.go
+++ b/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/bucket.go
@@ -19,6 +19,7 @@ package api
import (
"encoding/json"
+ "fmt"
"strconv"
"github.com/baidubce/bce-sdk-go/bce"
@@ -1253,3 +1254,48 @@ func DeleteBucketTag(cli bce.Client, bucket string, ctx *BosContext) error {
defer func() { resp.Body().Close() }()
return nil
}
+
+func GetBosShareLink(cli bce.Client, bucket, prefix, shareCode string, duration int) (string, error) {
+ req := &bce.BceRequest{}
+ req.SetEndpoint(BOS_SHARE_ENDPOINT)
+ req.SetParam("action", "")
+ req.SetMethod(http.POST)
+ if len(shareCode) != 0 && len(shareCode) != 6 {
+ return "", fmt.Errorf("shareCode length must be 0 or 6")
+ }
+ if duration < 60 || duration > 64800 {
+ return "", fmt.Errorf("duration must between 1 minute and 18 hours")
+ }
+ bosShareReqBody := &BosShareLinkArgs{
+ Bucket: bucket,
+ Endpoint: cli.GetBceClientConfig().Endpoint,
+ Prefix: prefix,
+ ShareCode: shareCode,
+ DurationSeconds: int64(duration),
+ }
+ jsonBytes, jsonErr := json.Marshal(bosShareReqBody)
+ if jsonErr != nil {
+ return "", jsonErr
+ }
+ body, err := bce.NewBodyFromBytes(jsonBytes)
+ if err != nil {
+ return "", err
+ }
+ req.SetBody(body)
+ resp := &bce.BceResponse{}
+ if err = cli.SendRequest(req, resp); err != nil {
+ return "", err
+ }
+ if resp.IsFail() {
+ return "", resp.ServiceError()
+ }
+ bosShareResBody := &BosShareResBody{}
+ if err := resp.ParseJsonBody(bosShareResBody); err != nil {
+ return "", err
+ }
+ jsonData, err := json.Marshal(bosShareResBody)
+ if err != nil {
+ return "", err
+ }
+ return string(jsonData), nil
+}
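A hedged usage sketch of the new entry point, via the client method wired up
later in this diff (BosShareLinkGet); the bucket name and values are
illustrative:

	// An empty share code lets the service pick one; duration must fall in
	// [60, 64800] seconds per the validation above.
	link, err := cli.BosShareLinkGet("my-bucket", "logs/", "", 3600)
	if err != nil {
		return err
	}
	fmt.Println(link) // JSON string carrying shareUrl, linkExpireTime, shareCode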
diff --git a/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/model.go b/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/model.go
index 7d4007e030ecc..c59ee8f6bf06d 100644
--- a/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/model.go
+++ b/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/model.go
@@ -670,3 +670,17 @@ type ObjectTag struct {
Key string `json:"key"`
Value string `json:"value"`
}
+
+type BosShareLinkArgs struct {
+ Bucket string `json:"bucket"`
+ Endpoint string `json:"endpoint"`
+ Prefix string `json:"prefix"`
+ ShareCode string `json:"shareCode"`
+ DurationSeconds int64 `json:"durationSeconds"`
+}
+
+type BosShareResBody struct {
+ ShareUrl string `json:"shareUrl"`
+ LinkExpireTime int64 `json:"linkExpireTime"`
+ ShareCode string `json:"shareCode"`
+}
\ No newline at end of file
diff --git a/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/util.go b/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/util.go
index b2fa1e2e88394..e4226c37b925c 100644
--- a/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/util.go
+++ b/vendor/github.com/baidubce/bce-sdk-go/services/bos/api/util.go
@@ -66,6 +66,7 @@ const (
NAMESPACE_BUCKET = "namespace"
BOS_CONFIG_PREFIX = "bos://"
+ BOS_SHARE_ENDPOINT = "bos-share.baidubce.com"
)
var DEFAULT_CNAME_LIKE_LIST = []string{
diff --git a/vendor/github.com/baidubce/bce-sdk-go/services/bos/client.go b/vendor/github.com/baidubce/bce-sdk-go/services/bos/client.go
index a3bd985e7a1e1..9230a0231424a 100644
--- a/vendor/github.com/baidubce/bce-sdk-go/services/bos/client.go
+++ b/vendor/github.com/baidubce/bce-sdk-go/services/bos/client.go
@@ -2310,3 +2310,7 @@ func (c *Client) GetObjectTag(bucket string, object string) (map[string]interfac
func (c *Client) DeleteObjectTag(bucket string, object string) error {
return api.DeleteObjectTag(c, bucket, object, c.BosContext)
}
+
+func (c *Client) BosShareLinkGet(bucket string, prefix string, shareCode string, duration int) (string, error) {
+ return api.GetBosShareLink(c, bucket, prefix, shareCode, duration)
+}
\ No newline at end of file
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 7810d1ce504bf..0ae3d624eb6ef 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -465,7 +465,7 @@ github.com/aws/smithy-go/transport/http/internal/io
# github.com/axiomhq/hyperloglog v0.2.1
## explicit; go 1.23
github.com/axiomhq/hyperloglog
-# github.com/baidubce/bce-sdk-go v0.9.205
+# github.com/baidubce/bce-sdk-go v0.9.206
## explicit; go 1.11
github.com/baidubce/bce-sdk-go/auth
github.com/baidubce/bce-sdk-go/bce
|
fix
|
update module github.com/baidubce/bce-sdk-go to v0.9.206 (#15324)
|
b7b5c76743dd0141ab8e1c3800843a6936851855
|
2025-03-01 06:44:38
|
renovate[bot]
|
fix(deps): update module github.com/aws/aws-sdk-go-v2/service/s3 to v1.78.0 (main) (#16513)
| false
|
diff --git a/tools/lambda-promtail/go.mod b/tools/lambda-promtail/go.mod
index d3056d0a73aea..328bdaf01db84 100644
--- a/tools/lambda-promtail/go.mod
+++ b/tools/lambda-promtail/go.mod
@@ -8,7 +8,7 @@ require (
github.com/aws/aws-lambda-go v1.47.0
github.com/aws/aws-sdk-go-v2 v1.36.3
github.com/aws/aws-sdk-go-v2/config v1.29.8
- github.com/aws/aws-sdk-go-v2/service/s3 v1.77.1
+ github.com/aws/aws-sdk-go-v2/service/s3 v1.78.0
github.com/go-kit/log v0.2.1
github.com/gogo/protobuf v1.3.2
github.com/golang/snappy v0.0.4
@@ -34,11 +34,11 @@ require (
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
- github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.33 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 // indirect
- github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.6.1 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.6.2 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 // indirect
- github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.14 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.25.0 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.29.0 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.33.16 // indirect
diff --git a/tools/lambda-promtail/go.sum b/tools/lambda-promtail/go.sum
index 5dec44db8f5a3..0c7e03ad21a02 100644
--- a/tools/lambda-promtail/go.sum
+++ b/tools/lambda-promtail/go.sum
@@ -71,18 +71,18 @@ github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 h1:SZwFm17ZUNNg5Np0io
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34/go.mod h1:dFZsC0BLo346mvKQLWmoJxT+Sjp+qcVR1tRVHQGOH9Q=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo=
-github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.33 h1:/frG8aV09yhCVSOEC2pzktflJJO48NwY3xntHBwxHiA=
-github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.33/go.mod h1:8vwASlAcV366M+qxZnjNzCjeastk1Rt1bpSRaGZanGU=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 h1:ZNTqv4nIdE/DiBfUUfXcLZ/Spcuz+RjeziUtNJackkM=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34/go.mod h1:zf7Vcd1ViW7cPqYWEHLHJkS50X0JS2IKz9Cgaj6ugrs=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 h1:eAh2A4b5IzM/lum78bZ590jy36+d/aFLgKF/4Vd1xPE=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3/go.mod h1:0yKJC/kb8sAnmlYa6Zs3QVYqaC8ug2AbnNChv5Ox3uA=
-github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.6.1 h1:7SuukGpyIgF5EiAbf1dZRxP+xSnY1WjiHBjL08fjJeE=
-github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.6.1/go.mod h1:k+Vce/8R28tSozjdWphkrNhK8zLmdS9RgiDNZl6p8Rw=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.6.2 h1:t/gZFyrijKuSU0elA5kRngP/oU3mc0I+Dvp8HwRE4c0=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.6.2/go.mod h1:iu6FSzgt+M2/x3Dk8zhycdIcHjEFb36IS8HVUVFoMg0=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 h1:dM9/92u2F1JbDaGooxTq18wmmFzbJRfXfVfy96/1CXM=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15/go.mod h1:SwFBy2vjtA0vZbjjaFtfN045boopadnoVPhu4Fv66vY=
-github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.14 h1:fgdkfsxTehqPcIQa24G/Omwv9RocTq2UcONNX/OnrZI=
-github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.14/go.mod h1:wMxQ3OE8fiM8z2YRAeb2J8DLTTWMvRyYYuQOs26AbTQ=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.77.1 h1:5bI9tJL2Z0FGFtp/LPDv0eyliFBHCn7LAhqpQuL+7kk=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.77.1/go.mod h1:njj3tSJONkfdLt4y6X8pyqeM6sJLNZxmzctKKV+n1GM=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 h1:moLQUoVq91LiqT1nbvzDukyqAlCv89ZmwaHw/ZFlFZg=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15/go.mod h1:ZH34PJUc8ApjBIfgQCFvkWcUDBtl/WTD+uiYHjd8igA=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.78.0 h1:EBm8lXevBWe+kK9VOU/IBeOI189WPRwPUc3LvJK9GOs=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.78.0/go.mod h1:4qzsZSzB/KiX2EzDjs9D7A8rI/WGJxZceVJIHqtJjIU=
github.com/aws/aws-sdk-go-v2/service/sso v1.25.0 h1:2U9sF8nKy7UgyEeLiZTRg6ShBS22z8UnYpV6aRFL0is=
github.com/aws/aws-sdk-go-v2/service/sso v1.25.0/go.mod h1:qs4a9T5EMLl/Cajiw2TcbNt2UNo/Hqlyp+GiuG4CFDI=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.29.0 h1:wjAdc85cXdQR5uLx5FwWvGIHm4OPJhTyzUHU8craXtE=
|
fix
|
update module github.com/aws/aws-sdk-go-v2/service/s3 to v1.78.0 (main) (#16513)
|
47e2c5884f443667e64764f3fc3948f8f11abbb8
|
2023-10-12 18:30:48
|
Đỗ Trọng Hải
|
fix(promtail): prevent panic due to duplicate metric registration after reloaded (#10798)
| false
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 985fea97acb6a..2b9fc397ef83f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -50,6 +50,7 @@
##### Fixes
* [10631](https://github.com/grafana/loki/pull/10631) **thampiotr**: Fix race condition in cleaning up metrics when stopping to tail files.
+* [10798](https://github.com/grafana/loki/pull/10798) **hainenber**: Fix agent panicking after reload due to duplicate metric collector registration.
#### LogCLI
diff --git a/clients/pkg/promtail/targets/lokipush/pushtarget.go b/clients/pkg/promtail/targets/lokipush/pushtarget.go
index a204227df529b..7bd63e47d6de0 100644
--- a/clients/pkg/promtail/targets/lokipush/pushtarget.go
+++ b/clients/pkg/promtail/targets/lokipush/pushtarget.go
@@ -82,6 +82,12 @@ func (t *PushTarget) run() error {
serverCfg := &t.config.Server
serverCfg.Log = util_log.InitLogger(serverCfg, prometheus.NewRegistry(), true, false)
+ // Set a new registry for the upcoming metric server.
+ // Otherwise, it'll likely panic when the tool gets reloaded.
+ if t.config.Server.Registerer == nil {
+ t.config.Server.Registerer = prometheus.NewRegistry()
+ }
+
srv, err := server.New(t.config.Server)
if err != nil {
return err
diff --git a/clients/pkg/promtail/wal/watcher_metrics.go b/clients/pkg/promtail/wal/watcher_metrics.go
index 34b74786e15b9..7fd15c7af2306 100644
--- a/clients/pkg/promtail/wal/watcher_metrics.go
+++ b/clients/pkg/promtail/wal/watcher_metrics.go
@@ -1,6 +1,10 @@
package wal
-import "github.com/prometheus/client_golang/prometheus"
+import (
+ "errors"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
type WatcherMetrics struct {
recordsRead *prometheus.CounterVec
@@ -69,13 +73,45 @@ func NewWatcherMetrics(reg prometheus.Registerer) *WatcherMetrics {
),
}
+ // Collectors will be re-registered to the registry if it gets reloaded.
+ // Reuse the old collectors instead of panicking.
if reg != nil {
- reg.MustRegister(m.recordsRead)
- reg.MustRegister(m.recordDecodeFails)
- reg.MustRegister(m.droppedWriteNotifications)
- reg.MustRegister(m.segmentRead)
- reg.MustRegister(m.currentSegment)
- reg.MustRegister(m.watchersRunning)
+ if err := reg.Register(m.recordsRead); err != nil {
+ are := &prometheus.AlreadyRegisteredError{}
+ if errors.As(err, are) {
+ m.recordsRead = are.ExistingCollector.(*prometheus.CounterVec)
+ }
+ }
+ if err := reg.Register(m.recordDecodeFails); err != nil {
+ are := &prometheus.AlreadyRegisteredError{}
+ if errors.As(err, are) {
+ m.recordDecodeFails = are.ExistingCollector.(*prometheus.CounterVec)
+ }
+ }
+ if err := reg.Register(m.droppedWriteNotifications); err != nil {
+ are := &prometheus.AlreadyRegisteredError{}
+ if errors.As(err, are) {
+ m.droppedWriteNotifications = are.ExistingCollector.(*prometheus.CounterVec)
+ }
+ }
+ if err := reg.Register(m.segmentRead); err != nil {
+ are := &prometheus.AlreadyRegisteredError{}
+ if errors.As(err, are) {
+ m.segmentRead = are.ExistingCollector.(*prometheus.CounterVec)
+ }
+ }
+ if err := reg.Register(m.currentSegment); err != nil {
+ are := &prometheus.AlreadyRegisteredError{}
+ if errors.As(err, are) {
+ m.currentSegment = are.ExistingCollector.(*prometheus.GaugeVec)
+ }
+ }
+ if err := reg.Register(m.watchersRunning); err != nil {
+ are := &prometheus.AlreadyRegisteredError{}
+ if errors.As(err, are) {
+ m.watchersRunning = are.ExistingCollector.(*prometheus.GaugeVec)
+ }
+ }
}
return m
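The six register-or-reuse stanzas above all follow one pattern; with Go
generics it could collapse into a helper along these lines (a sketch, not
part of the change; assumes the errors and prometheus imports above):

	func registerOrExisting[C prometheus.Collector](reg prometheus.Registerer, c C) C {
		if err := reg.Register(c); err != nil {
			are := &prometheus.AlreadyRegisteredError{}
			if errors.As(err, are) {
				return are.ExistingCollector.(C) // reuse the collector from the first registration
			}
		}
		return c
	}

Each stanza would then read m.recordsRead = registerOrExisting(reg, m.recordsRead).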
|
fix
|
prevent panic due to duplicate metric registration after reloaded (#10798)
|
a248e3e97067b99e29511d9bd3285b4a789e5d20
|
2023-06-02 13:55:39
|
Danny Kopping
|
querier: configurable writeback queue bytes size (#9604)
| false
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5101bf8acd40f..93f60c4263d5d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -32,6 +32,7 @@
* [9357](https://github.com/grafana/loki/pull/9357) **Indransh**: Add HTTP API to change the log level at runtime
* [9431](https://github.com/grafana/loki/pull/9431) **dannykopping**: Add more buckets to `loki_memcache_request_duration_seconds` metric; latencies can increase if using memcached with NVMe
* [8684](https://github.com/grafana/loki/pull/8684) **oleksii-boiko-ua**: Helm: Add hpa templates for read, write and backend components.
+* [9604](https://github.com/grafana/loki/pull/9604) **dannykopping**: Querier: configurable writeback queue bytes size
##### Fixes
diff --git a/docs/sources/configuration/_index.md b/docs/sources/configuration/_index.md
index acd730b088508..db19320dfa93f 100644
--- a/docs/sources/configuration/_index.md
+++ b/docs/sources/configuration/_index.md
@@ -3753,6 +3753,10 @@ background:
# CLI flag: -<prefix>.background.write-back-buffer
[writeback_buffer: <int> | default = 10000]
+ # Size limit in bytes for background write-back.
+ # CLI flag: -<prefix>.background.write-back-size-limit
+ [writeback_size_limit: <int> | default = 1GB]
+
memcached:
# How long keys stay in the memcache.
# CLI flag: -<prefix>.memcached.expiration
diff --git a/pkg/storage/chunk/cache/background.go b/pkg/storage/chunk/cache/background.go
index 0140f1313f270..1705fc388d8db 100644
--- a/pkg/storage/chunk/cache/background.go
+++ b/pkg/storage/chunk/cache/background.go
@@ -10,32 +10,41 @@ import (
otlog "github.com/opentracing/opentracing-go/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
+ "go.uber.org/atomic"
+ "github.com/grafana/loki/pkg/util/flagext"
util_log "github.com/grafana/loki/pkg/util/log"
)
// BackgroundConfig is config for a Background Cache.
type BackgroundConfig struct {
- WriteBackGoroutines int `yaml:"writeback_goroutines"`
- WriteBackBuffer int `yaml:"writeback_buffer"`
+ WriteBackGoroutines int `yaml:"writeback_goroutines"`
+ WriteBackBuffer int `yaml:"writeback_buffer"`
+ WriteBackSizeLimit flagext.ByteSize `yaml:"writeback_size_limit"`
}
// RegisterFlagsWithPrefix adds the flags required to config this to the given FlagSet
func (cfg *BackgroundConfig) RegisterFlagsWithPrefix(prefix string, description string, f *flag.FlagSet) {
f.IntVar(&cfg.WriteBackGoroutines, prefix+"background.write-back-concurrency", 10, description+"At what concurrency to write back to cache.")
f.IntVar(&cfg.WriteBackBuffer, prefix+"background.write-back-buffer", 10000, description+"How many key batches to buffer for background write-back.")
+ _ = cfg.WriteBackSizeLimit.Set("1GB")
+ f.Var(&cfg.WriteBackSizeLimit, prefix+"background.write-back-size-limit", description+"Size limit in bytes for background write-back.")
}
type backgroundCache struct {
Cache
- wg sync.WaitGroup
- quit chan struct{}
- bgWrites chan backgroundWrite
- name string
-
- droppedWriteBack prometheus.Counter
- queueLength prometheus.Gauge
+ wg sync.WaitGroup
+ quit chan struct{}
+ bgWrites chan backgroundWrite
+ name string
+ size atomic.Int64
+ sizeLimit int
+
+ droppedWriteBack prometheus.Counter
+ droppedWriteBackBytes prometheus.Counter
+ queueLength prometheus.Gauge
+ queueBytes prometheus.Gauge
}
type backgroundWrite struct {
@@ -43,24 +52,49 @@ type backgroundWrite struct {
bufs [][]byte
}
+func (b *backgroundWrite) size() int {
+ var sz int
+
+ for _, buf := range b.bufs {
+ sz += len(buf)
+ }
+
+ return sz
+}
+
// NewBackground returns a new Cache that performs stores on background goroutines.
func NewBackground(name string, cfg BackgroundConfig, cache Cache, reg prometheus.Registerer) Cache {
c := &backgroundCache{
- Cache: cache,
- quit: make(chan struct{}),
- bgWrites: make(chan backgroundWrite, cfg.WriteBackBuffer),
- name: name,
+ Cache: cache,
+ quit: make(chan struct{}),
+ bgWrites: make(chan backgroundWrite, cfg.WriteBackBuffer),
+ name: name,
+ sizeLimit: cfg.WriteBackSizeLimit.Val(),
+
droppedWriteBack: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Namespace: "loki",
Name: "cache_dropped_background_writes_total",
Help: "Total count of dropped write backs to cache.",
ConstLabels: prometheus.Labels{"name": name},
}),
+ droppedWriteBackBytes: promauto.With(reg).NewCounter(prometheus.CounterOpts{
+ Namespace: "loki",
+ Name: "cache_dropped_background_writes_bytes_total",
+ Help: "Amount of data dropped in write backs to cache.",
+ ConstLabels: prometheus.Labels{"name": name},
+ }),
queueLength: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Namespace: "loki",
Name: "cache_background_queue_length",
- Help: "Length of the cache background write queue.",
+ Help: "Length of the cache background writeback queue.",
+ ConstLabels: prometheus.Labels{"name": name},
+ }),
+
+ queueBytes: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
+ Namespace: "loki",
+ Name: "cache_background_queue_bytes",
+ Help: "Amount of data in the background writeback queue.",
ConstLabels: prometheus.Labels{"name": name},
}),
}
@@ -95,15 +129,21 @@ func (c *backgroundCache) Store(ctx context.Context, keys []string, bufs [][]byt
keys: keys[:num],
bufs: bufs[:num],
}
+
+ size := bgWrite.size()
+ newSize := c.size.Load() + int64(size)
+ if newSize > int64(c.sizeLimit) {
+ c.failStore(ctx, size, num, "queue at byte size limit")
+ return nil
+ }
+
select {
case c.bgWrites <- bgWrite:
+ c.size.Add(int64(size))
+ c.queueBytes.Set(float64(c.size.Load()))
c.queueLength.Add(float64(num))
default:
- c.droppedWriteBack.Add(float64(num))
- sp := opentracing.SpanFromContext(ctx)
- if sp != nil {
- sp.LogFields(otlog.Int("dropped", num))
- }
+ c.failStore(ctx, size, num, "queue at full capacity")
return nil // queue is full; give up
}
keys = keys[num:]
@@ -112,6 +152,15 @@ func (c *backgroundCache) Store(ctx context.Context, keys []string, bufs [][]byt
return nil
}
+func (c *backgroundCache) failStore(ctx context.Context, size int, num int, reason string) {
+ c.droppedWriteBackBytes.Add(float64(size))
+ c.droppedWriteBack.Add(float64(num))
+ sp := opentracing.SpanFromContext(ctx)
+ if sp != nil {
+ sp.LogFields(otlog.String("reason", reason), otlog.Int("dropped", num), otlog.Int("dropped_bytes", size))
+ }
+}
+
func (c *backgroundCache) writeBackLoop() {
defer c.wg.Done()
@@ -121,7 +170,10 @@ func (c *backgroundCache) writeBackLoop() {
if !ok {
return
}
+ c.size.Sub(int64(bgWrite.size()))
+
c.queueLength.Sub(float64(len(bgWrite.keys)))
+ c.queueBytes.Set(float64(c.size.Load()))
err := c.Cache.Store(context.Background(), bgWrite.keys, bgWrite.bufs)
if err != nil {
level.Warn(util_log.Logger).Log("msg", "backgroundCache writeBackLoop Cache.Store fail", "err", err)
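A hedged sketch of exercising the new limit programmatically, mirroring the
`-<prefix>.background.write-back-size-limit` flag registered above (the cache
name and values are illustrative):

	var cfg cache.BackgroundConfig
	cfg.WriteBackGoroutines = 10
	cfg.WriteBackBuffer = 10000
	_ = cfg.WriteBackSizeLimit.Set("500MB") // flagext.ByteSize parses human-readable sizes
	c := cache.NewBackground("chunks", cfg, cache.NewMockCache(), nil)
	// writes beyond 500MB of queued data are now dropped and counted in
	// loki_cache_dropped_background_writes_bytes_total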
diff --git a/pkg/storage/chunk/cache/background_test.go b/pkg/storage/chunk/cache/background_test.go
index ab75f6715c60a..af9827865ee7a 100644
--- a/pkg/storage/chunk/cache/background_test.go
+++ b/pkg/storage/chunk/cache/background_test.go
@@ -1,16 +1,27 @@
package cache_test
import (
+ "context"
+ "crypto/rand"
"testing"
+ "github.com/dustin/go-humanize"
+ "github.com/stretchr/testify/require"
+
"github.com/grafana/loki/pkg/storage/chunk/cache"
"github.com/grafana/loki/pkg/storage/config"
+ "github.com/grafana/loki/pkg/util/flagext"
)
func TestBackground(t *testing.T) {
+ // irrelevant in this test, set very high
+ limit, err := humanize.ParseBytes("5GB")
+ require.NoError(t, err)
+
c := cache.NewBackground("mock", cache.BackgroundConfig{
WriteBackGoroutines: 1,
WriteBackBuffer: 100,
+ WriteBackSizeLimit: flagext.ByteSize(limit),
}, cache.NewMockCache(), nil)
s := config.SchemaConfig{
@@ -30,3 +41,32 @@ func TestBackground(t *testing.T) {
testCacheMultiple(t, c, keys, chunks)
testCacheMiss(t, c)
}
+
+func TestBackgroundSizeLimit(t *testing.T) {
+ limit, err := humanize.ParseBytes("15KB")
+ require.NoError(t, err)
+
+ c := cache.NewBackground("mock", cache.BackgroundConfig{
+ WriteBackGoroutines: 1,
+ WriteBackBuffer: 100,
+ WriteBackSizeLimit: flagext.ByteSize(limit),
+ }, cache.NewMockCache(), nil)
+
+ ctx := context.Background()
+
+ const firstKey = "first"
+ const secondKey = "second"
+ first := make([]byte, 10e3) // 10KB
+ second := make([]byte, 10e3) // 10KB
+ _, _ = rand.Read(first)
+ _, _ = rand.Read(second)
+
+ // store the first 10KB
+ require.NoError(t, c.Store(ctx, []string{firstKey}, [][]byte{first}))
+ // second key will not be stored because it will exceed the 15KB limit
+ require.NoError(t, c.Store(ctx, []string{secondKey}, [][]byte{second}))
+ cache.Flush(c)
+
+ found, _, _, _ := c.Fetch(ctx, []string{firstKey, secondKey})
+ require.Equal(t, []string{firstKey}, found)
+}
|
querier
|
configurable writeback queue bytes size (#9604)
|
e65f26d30f9742d407fc6aa1e32dba3320952620
|
2022-04-07 19:17:50
|
Tat Chiu Leung
|
storage: make Azure blobID chunk delimiter configurable (#5777)
| false
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 406ae6051bee2..4b19e02113a68 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,6 @@
## Main
* [5780](https://github.com/grafana/loki/pull/5780) **simonswine**: Update alpine image to 3.15.4.
+* [5777](https://github.com/grafana/loki/pull/5777) **tatchiuleung** storage: make Azure blobID chunk delimiter configurable.
* [5715](https://github.com/grafana/loki/pull/5715) **chaudum** Add option to push RFC5424 syslog messages from Promtail in syslog scrape target.
* [5696](https://github.com/grafana/loki/pull/5696) **paullryan** don't block scraping of new logs from cloudflare within promtail if an error is received from cloudflare about too early logs.
* [5685](https://github.com/grafana/loki/pull/5625) **chaudum** Fix bug in push request parser that allowed users to send arbitrary non-string data as "log line".
diff --git a/docs/sources/configuration/_index.md b/docs/sources/configuration/_index.md
index ea08d3d6888b9..33612a53ca375 100644
--- a/docs/sources/configuration/_index.md
+++ b/docs/sources/configuration/_index.md
@@ -743,6 +743,10 @@ The `azure_storage_config` configures Azure as a general storage for different d
# CLI flag: -<prefix>.azure.account-key
[account_key: <string> | default = ""]
+# Chunk delimiter to build the blobID
+# CLI flag: -<prefix>.azure.chunk-delimiter
+[chunk_delimiter: <string> | default = "-"]
+
# Preallocated buffer size for downloads.
# CLI flag: -<prefix>.azure.download-buffer-size
[download_buffer_size: <int> | default = 512000]
diff --git a/pkg/storage/chunk/azure/blob_storage_client.go b/pkg/storage/chunk/azure/blob_storage_client.go
index ce5807103ec84..f475136c169c6 100644
--- a/pkg/storage/chunk/azure/blob_storage_client.go
+++ b/pkg/storage/chunk/azure/blob_storage_client.go
@@ -85,6 +85,7 @@ type BlobStorageConfig struct {
Environment string `yaml:"environment"`
ContainerName string `yaml:"container_name"`
AccountName string `yaml:"account_name"`
+ ChunkDelimiter string `yaml:"chunk_delimiter"`
AccountKey flagext.Secret `yaml:"account_key"`
DownloadBufferSize int `yaml:"download_buffer_size"`
UploadBufferSize int `yaml:"upload_buffer_size"`
@@ -106,6 +107,7 @@ func (c *BlobStorageConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagS
f.StringVar(&c.Environment, prefix+"azure.environment", azureGlobal, fmt.Sprintf("Azure Cloud environment. Supported values are: %s.", strings.Join(supportedEnvironments, ", ")))
f.StringVar(&c.ContainerName, prefix+"azure.container-name", "cortex", "Name of the blob container used to store chunks. This container must be created before running cortex.")
f.StringVar(&c.AccountName, prefix+"azure.account-name", "", "The Microsoft Azure account name to be used")
+ f.StringVar(&c.ChunkDelimiter, prefix+"azure.chunk-delimiter", "-", "Chunk delimiter for blob ID to be used")
f.Var(&c.AccountKey, prefix+"azure.account-key", "The Microsoft Azure account key to use.")
f.DurationVar(&c.RequestTimeout, prefix+"azure.request-timeout", 30*time.Second, "Timeout for requests made against azure blob storage.")
f.IntVar(&c.DownloadBufferSize, prefix+"azure.download-buffer-size", 512000, "Preallocated buffer size for downloads.")
@@ -251,7 +253,7 @@ func (b *BlobStorage) PutObject(ctx context.Context, objectKey string, object io
}
func (b *BlobStorage) getBlobURL(blobID string, hedging bool) (azblob.BlockBlobURL, error) {
- blobID = strings.Replace(blobID, ":", "-", -1)
+ blobID = strings.Replace(blobID, ":", b.cfg.ChunkDelimiter, -1)
// generate url for new chunk blob
u, err := url.Parse(fmt.Sprintf(b.selectBlobURLFmt(), b.cfg.AccountName, b.cfg.ContainerName, blobID))
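An illustration of the substitution above (chunk external keys look like
<user>/<fingerprint>:<from>:<through>:<checksum>; the values here are made
up). With the default "-" delimiter:

	blobID := strings.Replace("fake/17b8c9e7:16d58207:16d58293:ebfb1c26", ":", "-", -1)
	// -> "fake/17b8c9e7-16d58207-16d58293-ebfb1c26"
	// configuring chunk_delimiter: "_" would instead yield
	// "fake/17b8c9e7_16d58207_16d58293_ebfb1c26"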
|
storage
|
make Azure blobID chunk delimiter configurable (#5777)
|
275b768bda68af4278901cc45f10fceb97123851
|
2024-11-07 21:53:13
|
loki-gh-app[bot]
|
chore: release loki helm chart 6.19.0 (#14570)
| false
|
diff --git a/.github/workflows/helm-ci.yml b/.github/workflows/helm-ci.yml
index f056c14bcb83e..e31cf593f8b10 100644
--- a/.github/workflows/helm-ci.yml
+++ b/.github/workflows/helm-ci.yml
@@ -31,7 +31,7 @@ jobs:
- name: Lint Code Base
uses: docker://github/super-linter:v3.17.2
env:
- FILTER_REGEX_EXCLUDE: .*(README\.md|Chart\.yaml|NOTES.txt).*
+ FILTER_REGEX_EXCLUDE: .*(CHANGELOG\.md|README\.md|Chart\.yaml|NOTES.txt).*
FILTER_REGEX_INCLUDE: .*production/helm/.*
VALIDATE_ALL_CODEBASE: false
VALIDATE_KUBERNETES_KUBEVAL: false
diff --git a/_shared-workflows-dockerhub-login b/_shared-workflows-dockerhub-login
new file mode 160000
index 0000000000000..e34b275767e9a
--- /dev/null
+++ b/_shared-workflows-dockerhub-login
@@ -0,0 +1 @@
+Subproject commit e34b275767e9a075ed07c25ef0173dacf5fd4ca6
diff --git a/production/helm/loki/CHANGELOG.md b/production/helm/loki/CHANGELOG.md
index 73af7d78fde02..77e1971531421 100644
--- a/production/helm/loki/CHANGELOG.md
+++ b/production/helm/loki/CHANGELOG.md
@@ -13,6 +13,8 @@ Entries should include a reference to the pull request that introduced the chang
[//]: # (<AUTOMATED_UPDATES_LOCATOR> : do not remove this line. This locator is used by the CI pipeline to automatically create a changelog entry for each new Loki release. Add other chart versions and respective changelog entries bellow this line.)
+## 6.19.0
+
## 6.18.0
- [CHANGE] Added automated weekly releases, which created this release.
diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml
index 273bdbbb7db13..18c290080715d 100644
--- a/production/helm/loki/Chart.yaml
+++ b/production/helm/loki/Chart.yaml
@@ -3,7 +3,7 @@ name: loki
description: Helm chart for Grafana Loki and Grafana Enterprise Logs supporting both simple, scalable and distributed modes.
type: application
appVersion: 3.2.0
-version: 6.18.0
+version: 6.19.0
home: https://grafana.github.io/helm-charts
sources:
- https://github.com/grafana/loki
diff --git a/production/helm/loki/README.md b/production/helm/loki/README.md
index f78406625ac47..9247bce74289a 100644
--- a/production/helm/loki/README.md
+++ b/production/helm/loki/README.md
@@ -1,6 +1,6 @@
# loki
-  
+  
Helm chart for Grafana Loki and Grafana Enterprise Logs supporting both simple, scalable and distributed modes.
|
chore
|
release loki helm chart 6.19.0 (#14570)
|
5824e3d35cd1273ccd1a63d7381098617a7697dd
|
2024-10-25 19:46:43
|
Joao Marcal
|
feat(storage): Azure backend using thanos.io/objstore (#11315)
| false
|
diff --git a/pkg/storage/bucket/azure/bucket_client.go b/pkg/storage/bucket/azure/bucket_client.go
index c7d1e580bcca2..0cd5e6b3bacff 100644
--- a/pkg/storage/bucket/azure/bucket_client.go
+++ b/pkg/storage/bucket/azure/bucket_client.go
@@ -1,39 +1,37 @@
package azure
import (
+ "net/http"
+
"github.com/go-kit/log"
- "github.com/prometheus/common/model"
"github.com/thanos-io/objstore"
"github.com/thanos-io/objstore/providers/azure"
- yaml "gopkg.in/yaml.v2"
)
func NewBucketClient(cfg Config, name string, logger log.Logger) (objstore.Bucket, error) {
- bucketConfig := azure.Config{
- StorageAccountName: cfg.StorageAccountName,
- StorageAccountKey: cfg.StorageAccountKey.String(),
- StorageConnectionString: cfg.ConnectionString.String(),
- ContainerName: cfg.ContainerName,
- Endpoint: cfg.EndpointSuffix,
- MaxRetries: cfg.MaxRetries,
- HTTPConfig: azure.HTTPConfig{
- IdleConnTimeout: model.Duration(cfg.IdleConnTimeout),
- ResponseHeaderTimeout: model.Duration(cfg.ResponseHeaderTimeout),
- InsecureSkipVerify: cfg.InsecureSkipVerify,
- TLSHandshakeTimeout: model.Duration(cfg.TLSHandshakeTimeout),
- ExpectContinueTimeout: model.Duration(cfg.ExpectContinueTimeout),
- MaxIdleConns: cfg.MaxIdleConns,
- MaxIdleConnsPerHost: cfg.MaxIdleConnsPerHost,
- MaxConnsPerHost: cfg.MaxConnsPerHost,
- },
+ return newBucketClient(cfg, name, logger, azure.NewBucketWithConfig)
+}
+
+func newBucketClient(cfg Config, name string, logger log.Logger, factory func(log.Logger, azure.Config, string, http.RoundTripper) (*azure.Bucket, error)) (objstore.Bucket, error) {
+ // Start with default config to make sure that all parameters are set to sensible values, especially
+ // HTTP Config field.
+ bucketConfig := azure.DefaultConfig
+ bucketConfig.StorageAccountName = cfg.StorageAccountName
+ bucketConfig.StorageAccountKey = cfg.StorageAccountKey.String()
+ bucketConfig.StorageConnectionString = cfg.StorageConnectionString.String()
+ bucketConfig.ContainerName = cfg.ContainerName
+ bucketConfig.MaxRetries = cfg.MaxRetries
+ bucketConfig.UserAssignedID = cfg.UserAssignedID
+
+ if cfg.Endpoint != "" {
+ // azure.DefaultConfig has the default Endpoint, overwrite it only if a different one was explicitly provided.
+ bucketConfig.Endpoint = cfg.Endpoint
}
- // Thanos currently doesn't support passing the config as is, but expects a YAML,
- // so we're going to serialize it.
- serialized, err := yaml.Marshal(bucketConfig)
- if err != nil {
- return nil, err
+ var rt http.RoundTripper
+ if cfg.Transport != nil {
+ rt = cfg.Transport
}
- return azure.NewBucket(logger, serialized, name, nil)
+ return factory(logger, bucketConfig, name, rt)
}
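
Passing the bucket constructor in as `factory` is what keeps `newBucketClient` unit-testable: a test can inject a stub that records the assembled `azure.Config` instead of dialing Azure. A rough sketch of such a test, using the signatures from the hunk above (the test name and assertion values are illustrative):

```go
package azure

import (
	"net/http"
	"testing"

	"github.com/go-kit/log"
	"github.com/stretchr/testify/require"
	"github.com/thanos-io/objstore/providers/azure"
)

func TestNewBucketClientConfig(t *testing.T) {
	cfg := Config{
		StorageAccountName: "acct",
		ContainerName:      "chunks",
		Endpoint:           "blob.core.windows.net",
	}

	// Recording stub: captures the Thanos config instead of creating a bucket.
	var captured azure.Config
	stub := func(_ log.Logger, c azure.Config, _ string, _ http.RoundTripper) (*azure.Bucket, error) {
		captured = c
		return nil, nil
	}

	_, err := newBucketClient(cfg, "test", log.NewNopLogger(), stub)
	require.NoError(t, err)
	require.Equal(t, "acct", captured.StorageAccountName)
	require.Equal(t, "blob.core.windows.net", captured.Endpoint)
}
```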
diff --git a/pkg/storage/bucket/azure/config.go b/pkg/storage/bucket/azure/config.go
index 928503190d931..ac8037b6b7819 100644
--- a/pkg/storage/bucket/azure/config.go
+++ b/pkg/storage/bucket/azure/config.go
@@ -2,22 +2,23 @@ package azure
import (
"flag"
+ "net/http"
"github.com/grafana/dskit/flagext"
-
- "github.com/grafana/loki/v3/pkg/storage/bucket/http"
)
// Config holds the config options for an Azure backend
type Config struct {
- StorageAccountName string `yaml:"account_name"`
- StorageAccountKey flagext.Secret `yaml:"account_key"`
- ConnectionString flagext.Secret `yaml:"connection_string"`
- ContainerName string `yaml:"container_name"`
- EndpointSuffix string `yaml:"endpoint_suffix"`
- MaxRetries int `yaml:"max_retries"`
+ StorageAccountName string `yaml:"account_name"`
+ StorageAccountKey flagext.Secret `yaml:"account_key"`
+ StorageConnectionString flagext.Secret `yaml:"connection_string"`
+ ContainerName string `yaml:"container_name"`
+ Endpoint string `yaml:"endpoint_suffix"`
+ MaxRetries int `yaml:"max_retries"`
+ UserAssignedID string `yaml:"user_assigned_id"`
- http.Config `yaml:"http"`
+ // Allow upstream callers to inject a round tripper
+ Transport http.RoundTripper `yaml:"-"`
}
// RegisterFlags registers the flags for Azure storage
@@ -28,10 +29,10 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
// RegisterFlagsWithPrefix registers the flags for Azure storage
func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
f.StringVar(&cfg.StorageAccountName, prefix+"azure.account-name", "", "Azure storage account name")
- f.Var(&cfg.StorageAccountKey, prefix+"azure.account-key", "Azure storage account key")
- f.Var(&cfg.ConnectionString, prefix+"azure.connection-string", "If `connection-string` is set, the values of `account-name` and `endpoint-suffix` values will not be used. Use this method over `account-key` if you need to authenticate via a SAS token. Or if you use the Azurite emulator.")
- f.StringVar(&cfg.ContainerName, prefix+"azure.container-name", "loki", "Azure storage container name")
- f.StringVar(&cfg.EndpointSuffix, prefix+"azure.endpoint-suffix", "", "Azure storage endpoint suffix without schema. The account name will be prefixed to this value to create the FQDN")
+ f.Var(&cfg.StorageAccountKey, prefix+"azure.account-key", "Azure storage account key. If unset, Azure managed identities will be used for authentication instead.")
+ f.Var(&cfg.StorageConnectionString, prefix+"azure.connection-string", "If `connection-string` is set, the value of `endpoint-suffix` will not be used. Use this method over `account-key` if you need to authenticate via a SAS token. Or if you use the Azurite emulator.")
+ f.StringVar(&cfg.ContainerName, prefix+"azure.container-name", "", "Azure storage container name")
+ f.StringVar(&cfg.Endpoint, prefix+"azure.endpoint-suffix", "", "Azure storage endpoint suffix without schema. The account name will be prefixed to this value to create the FQDN. If set to empty string, default endpoint suffix is used.")
f.IntVar(&cfg.MaxRetries, prefix+"azure.max-retries", 20, "Number of retries for recoverable errors")
- cfg.Config.RegisterFlagsWithPrefix(prefix+"azure.", f)
+ f.StringVar(&cfg.UserAssignedID, prefix+"azure.user-assigned-id", "", "User assigned managed identity. If empty, then System assigned identity is used.")
}
diff --git a/pkg/storage/bucket/azure/config_test.go b/pkg/storage/bucket/azure/config_test.go
deleted file mode 100644
index 82357faa147e4..0000000000000
--- a/pkg/storage/bucket/azure/config_test.go
+++ /dev/null
@@ -1,98 +0,0 @@
-package azure
-
-import (
- "testing"
- "time"
-
- "github.com/grafana/dskit/flagext"
- "github.com/stretchr/testify/require"
- yaml "gopkg.in/yaml.v2"
-
- "github.com/grafana/loki/v3/pkg/storage/bucket/http"
-)
-
-// defaultConfig should match the default flag values defined in RegisterFlagsWithPrefix.
-var defaultConfig = Config{
- ContainerName: "loki",
- MaxRetries: 20,
- Config: http.Config{
- IdleConnTimeout: 90 * time.Second,
- ResponseHeaderTimeout: 2 * time.Minute,
- InsecureSkipVerify: false,
- TLSHandshakeTimeout: 10 * time.Second,
- ExpectContinueTimeout: 1 * time.Second,
- MaxIdleConns: 100,
- MaxIdleConnsPerHost: 100,
- MaxConnsPerHost: 0,
- },
-}
-
-func TestConfig(t *testing.T) {
- t.Parallel()
-
- tests := map[string]struct {
- config string
- expectedConfig Config
- expectedErr error
- }{
- "default config": {
- config: "",
- expectedConfig: defaultConfig,
- expectedErr: nil,
- },
- "custom config": {
- config: `
-account_name: test-account-name
-account_key: test-account-key
-connection_string: test-connection-string
-container_name: test-container-name
-endpoint_suffix: test-endpoint-suffix
-max_retries: 1
-http:
- idle_conn_timeout: 2s
- response_header_timeout: 3s
- insecure_skip_verify: true
- tls_handshake_timeout: 4s
- expect_continue_timeout: 5s
- max_idle_connections: 6
- max_idle_connections_per_host: 7
- max_connections_per_host: 8
-`,
- expectedConfig: Config{
- StorageAccountName: "test-account-name",
- StorageAccountKey: flagext.SecretWithValue("test-account-key"),
- ConnectionString: flagext.SecretWithValue("test-connection-string"),
- ContainerName: "test-container-name",
- EndpointSuffix: "test-endpoint-suffix",
- MaxRetries: 1,
- Config: http.Config{
- IdleConnTimeout: 2 * time.Second,
- ResponseHeaderTimeout: 3 * time.Second,
- InsecureSkipVerify: true,
- TLSHandshakeTimeout: 4 * time.Second,
- ExpectContinueTimeout: 5 * time.Second,
- MaxIdleConns: 6,
- MaxIdleConnsPerHost: 7,
- MaxConnsPerHost: 8,
- },
- },
- expectedErr: nil,
- },
- "invalid type": {
- config: `max_retries: foo`,
- expectedConfig: defaultConfig,
- expectedErr: &yaml.TypeError{Errors: []string{"line 1: cannot unmarshal !!str `foo` into int"}},
- },
- }
-
- for testName, testData := range tests {
- t.Run(testName, func(t *testing.T) {
- cfg := Config{}
- flagext.DefaultValues(&cfg)
-
- err := yaml.Unmarshal([]byte(testData.config), &cfg)
- require.Equal(t, testData.expectedErr, err)
- require.Equal(t, testData.expectedConfig, cfg)
- })
- }
-}
diff --git a/pkg/storage/bucket/object_client_adapter.go b/pkg/storage/bucket/object_client_adapter.go
new file mode 100644
index 0000000000000..094f0ad2ea7ac
--- /dev/null
+++ b/pkg/storage/bucket/object_client_adapter.go
@@ -0,0 +1,150 @@
+package bucket
+
+import (
+ "context"
+ "io"
+ "strings"
+
+ "github.com/go-kit/log"
+ "github.com/go-kit/log/level"
+ "github.com/pkg/errors"
+ "github.com/thanos-io/objstore"
+
+ "github.com/grafana/loki/v3/pkg/storage/chunk/client"
+)
+
+type ObjectClientAdapter struct {
+ bucket, hedgedBucket objstore.Bucket
+ logger log.Logger
+ isRetryableErr func(err error) bool
+}
+
+func NewObjectClientAdapter(bucket, hedgedBucket objstore.Bucket, logger log.Logger, opts ...ClientOptions) *ObjectClientAdapter {
+ if hedgedBucket == nil {
+ hedgedBucket = bucket
+ }
+
+ o := &ObjectClientAdapter{
+ bucket: bucket,
+ hedgedBucket: hedgedBucket,
+ logger: log.With(logger, "component", "bucket_to_object_client_adapter"),
+ // default to no retryable errors. Override with WithRetryableErrFunc
+ isRetryableErr: func(_ error) bool {
+ return false
+ },
+ }
+
+ for _, opt := range opts {
+ opt(o)
+ }
+
+ return o
+}
+
+type ClientOptions func(*ObjectClientAdapter)
+
+func WithRetryableErrFunc(f func(err error) bool) ClientOptions {
+ return func(o *ObjectClientAdapter) {
+ o.isRetryableErr = f
+ }
+}
+
+func (o *ObjectClientAdapter) Stop() {
+}
+
+// ObjectExists checks if a given objectKey exists in the bucket
+func (o *ObjectClientAdapter) ObjectExists(ctx context.Context, objectKey string) (bool, error) {
+ return o.bucket.Exists(ctx, objectKey)
+}
+
+// GetAttributes returns the attributes of the specified object key from the configured bucket.
+func (o *ObjectClientAdapter) GetAttributes(ctx context.Context, objectKey string) (client.ObjectAttributes, error) {
+ attr := client.ObjectAttributes{}
+ thanosAttr, err := o.hedgedBucket.Attributes(ctx, objectKey)
+ if err != nil {
+ return attr, err
+ }
+
+ attr.Size = thanosAttr.Size
+ return attr, nil
+}
+
+// PutObject puts the specified bytes into the configured bucket at the provided key
+func (o *ObjectClientAdapter) PutObject(ctx context.Context, objectKey string, object io.Reader) error {
+ return o.bucket.Upload(ctx, objectKey, object)
+}
+
+// GetObject returns a reader and the size for the specified object key from the configured bucket.
+// size is set to -1 if it cannot be successfully determined; it is up to the caller to check this value before using it.
+func (o *ObjectClientAdapter) GetObject(ctx context.Context, objectKey string) (io.ReadCloser, int64, error) {
+ reader, err := o.hedgedBucket.Get(ctx, objectKey)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ size, err := objstore.TryToGetSize(reader)
+ if err != nil {
+ size = -1
+ level.Warn(o.logger).Log("msg", "failed to get size of object", "err", err)
+ }
+
+	return reader, size, nil // the TryToGetSize failure was handled above; don't fail the read
+}
+
+func (o *ObjectClientAdapter) GetObjectRange(ctx context.Context, objectKey string, offset, length int64) (io.ReadCloser, error) {
+ return o.hedgedBucket.GetRange(ctx, objectKey, offset, length)
+}
+
+// List objects with given prefix.
+func (o *ObjectClientAdapter) List(ctx context.Context, prefix, delimiter string) ([]client.StorageObject, []client.StorageCommonPrefix, error) {
+ var storageObjects []client.StorageObject
+ var commonPrefixes []client.StorageCommonPrefix
+ var iterParams []objstore.IterOption
+
+ // If delimiter is empty we want to list all files
+ if delimiter == "" {
+ iterParams = append(iterParams, objstore.WithRecursiveIter)
+ }
+
+ err := o.bucket.Iter(ctx, prefix, func(objectKey string) error {
+ // CommonPrefixes are keys that have the prefix and have the delimiter
+ // as a suffix
+ if delimiter != "" && strings.HasSuffix(objectKey, delimiter) {
+ commonPrefixes = append(commonPrefixes, client.StorageCommonPrefix(objectKey))
+ return nil
+ }
+
+		// TODO: remove this once thanos supports IterWithAttributes
+ attr, err := o.bucket.Attributes(ctx, objectKey)
+ if err != nil {
+ return errors.Wrapf(err, "failed to get attributes for %s", objectKey)
+ }
+
+ storageObjects = append(storageObjects, client.StorageObject{
+ Key: objectKey,
+ ModifiedAt: attr.LastModified,
+ })
+
+ return nil
+ }, iterParams...)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return storageObjects, commonPrefixes, nil
+}
+
+// DeleteObject deletes the specified object key from the configured bucket.
+func (o *ObjectClientAdapter) DeleteObject(ctx context.Context, objectKey string) error {
+ return o.bucket.Delete(ctx, objectKey)
+}
+
+// IsObjectNotFoundErr returns true if error means that object is not found. Relevant to GetObject and DeleteObject operations.
+func (o *ObjectClientAdapter) IsObjectNotFoundErr(err error) bool {
+ return o.bucket.IsObjNotFoundErr(err)
+}
+
+// IsRetryableErr returns true if the request failed due to some retryable server-side scenario
+func (o *ObjectClientAdapter) IsRetryableErr(err error) bool {
+ return o.isRetryableErr(err)
+}
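
Everything above is provider-agnostic: the adapter turns any `objstore.Bucket` into a `client.ObjectClient`. A minimal usage sketch against objstore's in-memory bucket (paths and payload are illustrative):

```go
package main

import (
	"bytes"
	"context"
	"fmt"

	"github.com/go-kit/log"
	"github.com/thanos-io/objstore"

	"github.com/grafana/loki/v3/pkg/storage/bucket"
)

func main() {
	ctx := context.Background()

	// nil hedged bucket: reads fall back to the primary bucket; with no
	// WithRetryableErrFunc option every error is treated as permanent.
	oc := bucket.NewObjectClientAdapter(objstore.NewInMemBucket(), nil, log.NewNopLogger())

	if err := oc.PutObject(ctx, "index/table/object", bytes.NewReader([]byte("payload"))); err != nil {
		panic(err)
	}

	// Non-empty delimiter: "index/table/" comes back as a common prefix.
	objects, prefixes, err := oc.List(ctx, "index/", "/")
	fmt.Println(objects, prefixes, err)
}
```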
diff --git a/pkg/storage/chunk/client/gcp/gcs_thanos_object_client_test.go b/pkg/storage/bucket/object_client_adapter_test.go
similarity index 92%
rename from pkg/storage/chunk/client/gcp/gcs_thanos_object_client_test.go
rename to pkg/storage/bucket/object_client_adapter_test.go
index d8b824a6e5d04..1ce6de26856bf 100644
--- a/pkg/storage/chunk/client/gcp/gcs_thanos_object_client_test.go
+++ b/pkg/storage/bucket/object_client_adapter_test.go
@@ -1,4 +1,4 @@
-package gcp
+package bucket
import (
"bytes"
@@ -12,7 +12,7 @@ import (
"github.com/grafana/loki/v3/pkg/storage/chunk/client"
)
-func TestGCSThanosObjStore_List(t *testing.T) {
+func TestObjectClientAdapter_List(t *testing.T) {
tests := []struct {
name string
prefix string
@@ -95,10 +95,10 @@ func TestGCSThanosObjStore_List(t *testing.T) {
require.NoError(t, newBucket.Upload(context.Background(), "depply/nested/folder/b", buff))
require.NoError(t, newBucket.Upload(context.Background(), "depply/nested/folder/c", buff))
- gcpClient := &GCSThanosObjectClient{}
- gcpClient.client = newBucket
+ client := NewObjectClientAdapter(newBucket, nil, nil)
+ client.bucket = newBucket
- storageObj, storageCommonPref, err := gcpClient.List(context.Background(), tt.prefix, tt.delimiter)
+ storageObj, storageCommonPref, err := client.List(context.Background(), tt.prefix, tt.delimiter)
if tt.wantErr != nil {
require.Equal(t, tt.wantErr.Error(), err.Error())
continue
diff --git a/pkg/storage/chunk/client/azure/blob_storage_thanos_object_client.go b/pkg/storage/chunk/client/azure/blob_storage_thanos_object_client.go
new file mode 100644
index 0000000000000..4bf2137433064
--- /dev/null
+++ b/pkg/storage/chunk/client/azure/blob_storage_thanos_object_client.go
@@ -0,0 +1,44 @@
+package azure
+
+import (
+ "context"
+
+ "github.com/go-kit/log"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/thanos-io/objstore"
+
+ "github.com/grafana/loki/v3/pkg/storage/bucket"
+ "github.com/grafana/loki/v3/pkg/storage/chunk/client"
+ "github.com/grafana/loki/v3/pkg/storage/chunk/client/hedging"
+)
+
+// NewBlobStorageThanosObjectClient makes a new BlobStorage-backed ObjectClient using thanos.io/objstore.
+func NewBlobStorageThanosObjectClient(ctx context.Context, cfg bucket.Config, component string, logger log.Logger, hedgingCfg hedging.Config) (client.ObjectClient, error) {
+ b, err := newBlobStorageThanosObjClient(ctx, cfg, component, logger, false, hedgingCfg)
+ if err != nil {
+ return nil, err
+ }
+
+ var hedged objstore.Bucket
+ if hedgingCfg.At != 0 {
+ hedged, err = newBlobStorageThanosObjClient(ctx, cfg, component, logger, true, hedgingCfg)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return bucket.NewObjectClientAdapter(b, hedged, logger), nil
+}
+
+func newBlobStorageThanosObjClient(ctx context.Context, cfg bucket.Config, component string, logger log.Logger, hedging bool, hedgingCfg hedging.Config) (objstore.Bucket, error) {
+ if hedging {
+		hedgedTransport, err := hedgingCfg.RoundTripperWithRegisterer(nil, prometheus.WrapRegistererWithPrefix("loki_", prometheus.DefaultRegisterer))
+		if err != nil {
+			return nil, err
+		}
+
+		cfg.Azure.Transport = hedgedTransport
+ }
+
+ return bucket.NewClient(ctx, bucket.Azure, cfg, component, logger)
+}
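
The `hedgingCfg.At != 0` gate above makes hedging strictly opt-in, and only the second bucket gets the hedged round tripper; the adapter then routes reads through it while writes and deletes stay on the primary bucket. A hedged sketch of the wiring from a caller's side (the `hedging.Config` field values are illustrative, and the field names are assumed from their use above):

```go
package storage // illustrative package

import (
	"context"
	"time"

	"github.com/go-kit/log"

	"github.com/grafana/loki/v3/pkg/storage/bucket"
	"github.com/grafana/loki/v3/pkg/storage/chunk/client"
	"github.com/grafana/loki/v3/pkg/storage/chunk/client/azure"
	"github.com/grafana/loki/v3/pkg/storage/chunk/client/hedging"
)

func newAzureObjectClient(ctx context.Context, cfg bucket.Config, logger log.Logger) (client.ObjectClient, error) {
	hedgeCfg := hedging.Config{
		At:           250 * time.Millisecond, // issue a second request if the first is this slow
		UpTo:         2,                      // at most one extra request per call
		MaxPerSecond: 5,                      // global hedge budget
	}
	return azure.NewBlobStorageThanosObjectClient(ctx, cfg, "chunks", logger, hedgeCfg)
}
```

Duplicating writes would spend the hedge budget without improving tail latency, which is presumably why only `GetObject`, `GetObjectRange`, and `GetAttributes` go through the hedged bucket.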
diff --git a/pkg/storage/chunk/client/gcp/gcs_object_client.go b/pkg/storage/chunk/client/gcp/gcs_object_client.go
index 9b05b57404c49..1d44659b3f3cc 100644
--- a/pkg/storage/chunk/client/gcp/gcs_object_client.go
+++ b/pkg/storage/chunk/client/gcp/gcs_object_client.go
@@ -279,7 +279,7 @@ func isContextErr(err error) bool {
}
// IsStorageTimeoutErr returns true if error means that object cannot be retrieved right now due to server-side timeouts.
-func (s *GCSObjectClient) IsStorageTimeoutErr(err error) bool {
+func IsStorageTimeoutErr(err error) bool {
// TODO(dannyk): move these out to be generic
// context errors are all client-side
if isContextErr(err) {
@@ -315,7 +315,7 @@ func (s *GCSObjectClient) IsStorageTimeoutErr(err error) bool {
}
// IsStorageThrottledErr returns true if error means that object cannot be retrieved right now due to throttling.
-func (s *GCSObjectClient) IsStorageThrottledErr(err error) bool {
+func IsStorageThrottledErr(err error) bool {
if gerr, ok := err.(*googleapi.Error); ok {
// https://cloud.google.com/storage/docs/retry-strategy
return gerr.Code == http.StatusTooManyRequests ||
@@ -325,9 +325,14 @@ func (s *GCSObjectClient) IsStorageThrottledErr(err error) bool {
return false
}
+// IsRetryableErr returns true if the request failed due to some retryable server-side scenario
+func IsRetryableErr(err error) bool {
+ return IsStorageTimeoutErr(err) || IsStorageThrottledErr(err)
+}
+
// IsRetryableErr returns true if the request failed due to some retryable server-side scenario
func (s *GCSObjectClient) IsRetryableErr(err error) bool {
- return s.IsStorageTimeoutErr(err) || s.IsStorageThrottledErr(err)
+ return IsRetryableErr(err)
}
func gcsTransport(ctx context.Context, scope string, insecure bool, http2 bool, serviceAccount flagext.Secret) (http.RoundTripper, error) {
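
Promoting the classifiers from methods to package-level functions lets code outside the client reuse them. A hedged sketch of the kind of retry loop that enables (retry count and backoff are illustrative):

```go
package retries // illustrative package

import (
	"context"
	"io"
	"time"

	"github.com/grafana/loki/v3/pkg/storage/chunk/client"
	"github.com/grafana/loki/v3/pkg/storage/chunk/client/gcp"
)

func getWithRetries(ctx context.Context, oc client.ObjectClient, key string) (io.ReadCloser, int64, error) {
	var lastErr error
	for attempt := 0; attempt < 3; attempt++ {
		rc, size, err := oc.GetObject(ctx, key)
		if err == nil {
			return rc, size, nil
		}
		if !gcp.IsRetryableErr(err) {
			return nil, 0, err // permanent failure: surface it immediately
		}
		lastErr = err
		time.Sleep(time.Duration(attempt+1) * 100 * time.Millisecond) // crude linear backoff
	}
	return nil, 0, lastErr
}
```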
diff --git a/pkg/storage/chunk/client/gcp/gcs_object_client_test.go b/pkg/storage/chunk/client/gcp/gcs_object_client_test.go
index c885c4c1d780c..a0e6313f7ce43 100644
--- a/pkg/storage/chunk/client/gcp/gcs_object_client_test.go
+++ b/pkg/storage/chunk/client/gcp/gcs_object_client_test.go
@@ -147,8 +147,8 @@ func TestUpstreamRetryableErrs(t *testing.T) {
require.NoError(t, err)
_, _, err = cli.GetObject(ctx, "foo")
- require.Equal(t, tc.isThrottledErr, cli.IsStorageThrottledErr(err))
- require.Equal(t, tc.isTimeoutErr, cli.IsStorageTimeoutErr(err))
+ require.Equal(t, tc.isThrottledErr, IsStorageThrottledErr(err))
+ require.Equal(t, tc.isTimeoutErr, IsStorageTimeoutErr(err))
})
}
}
@@ -229,7 +229,7 @@ func TestTCPErrs(t *testing.T) {
_, _, err = cli.GetObject(ctx, "foo")
require.Error(t, err)
- require.Equal(t, tc.retryable, cli.IsStorageTimeoutErr(err))
+ require.Equal(t, tc.retryable, IsStorageTimeoutErr(err))
})
}
}
diff --git a/pkg/storage/chunk/client/gcp/gcs_thanos_object_client.go b/pkg/storage/chunk/client/gcp/gcs_thanos_object_client.go
index af0ae55b82cc2..b4190be2d6943 100644
--- a/pkg/storage/chunk/client/gcp/gcs_thanos_object_client.go
+++ b/pkg/storage/chunk/client/gcp/gcs_thanos_object_client.go
@@ -2,54 +2,32 @@ package gcp
import (
"context"
- "io"
- "net"
- "net/http"
- "strings"
"github.com/go-kit/log"
- "github.com/go-kit/log/level"
- "github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/thanos-io/objstore"
- "google.golang.org/api/googleapi"
- amnet "k8s.io/apimachinery/pkg/util/net"
"github.com/grafana/loki/v3/pkg/storage/bucket"
"github.com/grafana/loki/v3/pkg/storage/chunk/client"
"github.com/grafana/loki/v3/pkg/storage/chunk/client/hedging"
)
-type GCSThanosObjectClient struct {
- client objstore.Bucket
- hedgedClient objstore.Bucket
- logger log.Logger
-}
-
-func NewGCSThanosObjectClient(ctx context.Context, cfg bucket.Config, component string, logger log.Logger, hedgingCfg hedging.Config) (*GCSThanosObjectClient, error) {
- client, err := newGCSThanosObjectClient(ctx, cfg, component, logger, false, hedgingCfg)
+func NewGCSThanosObjectClient(ctx context.Context, cfg bucket.Config, component string, logger log.Logger, hedgingCfg hedging.Config) (client.ObjectClient, error) {
+ b, err := newGCSThanosObjectClient(ctx, cfg, component, logger, false, hedgingCfg)
if err != nil {
return nil, err
}
- if hedgingCfg.At == 0 {
- return &GCSThanosObjectClient{
- client: client,
- hedgedClient: client,
- logger: logger,
- }, nil
- }
-
- hedgedClient, err := newGCSThanosObjectClient(ctx, cfg, component, logger, true, hedgingCfg)
- if err != nil {
- return nil, err
+ var hedged objstore.Bucket
+ if hedgingCfg.At != 0 {
+ hedged, err = newGCSThanosObjectClient(ctx, cfg, component, logger, true, hedgingCfg)
+ if err != nil {
+ return nil, err
+ }
}
- return &GCSThanosObjectClient{
- client: client,
- hedgedClient: hedgedClient,
- logger: logger,
- }, nil
+ o := bucket.NewObjectClientAdapter(b, hedged, logger, bucket.WithRetryableErrFunc(IsRetryableErr))
+ return o, nil
}
func newGCSThanosObjectClient(ctx context.Context, cfg bucket.Config, component string, logger log.Logger, hedging bool, hedgingCfg hedging.Config) (objstore.Bucket, error) {
@@ -64,150 +42,3 @@ func newGCSThanosObjectClient(ctx context.Context, cfg bucket.Config, component
return bucket.NewClient(ctx, bucket.GCS, cfg, component, logger)
}
-
-func (s *GCSThanosObjectClient) Stop() {
-}
-
-// ObjectExists checks if a given objectKey exists in the GCS bucket
-func (s *GCSThanosObjectClient) ObjectExists(ctx context.Context, objectKey string) (bool, error) {
- return s.client.Exists(ctx, objectKey)
-}
-
-// GetAttributes returns the attributes of the specified object key from the configured GCS bucket.
-func (s *GCSThanosObjectClient) GetAttributes(ctx context.Context, objectKey string) (client.ObjectAttributes, error) {
- attr := client.ObjectAttributes{}
- thanosAttr, err := s.hedgedClient.Attributes(ctx, objectKey)
- if err != nil {
- return attr, err
- }
-
- attr.Size = thanosAttr.Size
- return attr, nil
-}
-
-// PutObject puts the specified bytes into the configured GCS bucket at the provided key
-func (s *GCSThanosObjectClient) PutObject(ctx context.Context, objectKey string, object io.Reader) error {
- return s.client.Upload(ctx, objectKey, object)
-}
-
-// GetObject returns a reader and the size for the specified object key from the configured GCS bucket.
-// size is set to -1 if it cannot be succefully determined, it is up to the caller to check this value before using it.
-func (s *GCSThanosObjectClient) GetObject(ctx context.Context, objectKey string) (io.ReadCloser, int64, error) {
- reader, err := s.hedgedClient.Get(ctx, objectKey)
- if err != nil {
- return nil, 0, err
- }
-
- size, err := objstore.TryToGetSize(reader)
- if err != nil {
- size = -1
- level.Warn(s.logger).Log("msg", "failed to get size of object", "err", err)
- }
-
- return reader, size, err
-}
-
-func (s *GCSThanosObjectClient) GetObjectRange(ctx context.Context, objectKey string, offset, length int64) (io.ReadCloser, error) {
- return s.hedgedClient.GetRange(ctx, objectKey, offset, length)
-}
-
-// List objects with given prefix.
-func (s *GCSThanosObjectClient) List(ctx context.Context, prefix, delimiter string) ([]client.StorageObject, []client.StorageCommonPrefix, error) {
- var storageObjects []client.StorageObject
- var commonPrefixes []client.StorageCommonPrefix
- var iterParams []objstore.IterOption
-
- // If delimiter is empty we want to list all files
- if delimiter == "" {
- iterParams = append(iterParams, objstore.WithRecursiveIter)
- }
-
- err := s.client.Iter(ctx, prefix, func(objectKey string) error {
- // CommonPrefixes are keys that have the prefix and have the delimiter
- // as a suffix
- if delimiter != "" && strings.HasSuffix(objectKey, delimiter) {
- commonPrefixes = append(commonPrefixes, client.StorageCommonPrefix(objectKey))
- return nil
- }
-
- // TODO: remove this once thanos support IterWithAttributes
- attr, err := s.client.Attributes(ctx, objectKey)
- if err != nil {
- return errors.Wrapf(err, "failed to get attributes for %s", objectKey)
- }
-
- storageObjects = append(storageObjects, client.StorageObject{
- Key: objectKey,
- ModifiedAt: attr.LastModified,
- })
-
- return nil
- }, iterParams...)
- if err != nil {
- return nil, nil, err
- }
-
- return storageObjects, commonPrefixes, nil
-}
-
-// DeleteObject deletes the specified object key from the configured GCS bucket.
-func (s *GCSThanosObjectClient) DeleteObject(ctx context.Context, objectKey string) error {
- return s.client.Delete(ctx, objectKey)
-}
-
-// IsObjectNotFoundErr returns true if error means that object is not found. Relevant to GetObject and DeleteObject operations.
-func (s *GCSThanosObjectClient) IsObjectNotFoundErr(err error) bool {
- return s.client.IsObjNotFoundErr(err)
-}
-
-// IsStorageTimeoutErr returns true if error means that object cannot be retrieved right now due to server-side timeouts.
-func (s *GCSThanosObjectClient) IsStorageTimeoutErr(err error) bool {
- // TODO(dannyk): move these out to be generic
- // context errors are all client-side
- if isContextErr(err) {
- // Go 1.23 changed the type of the error returned by the http client when a timeout occurs
- // while waiting for headers. This is a server side timeout.
- return strings.Contains(err.Error(), "Client.Timeout")
- }
-
- // connection misconfiguration, or writing on a closed connection
- // do NOT retry; this is not a server-side issue
- if errors.Is(err, net.ErrClosed) || amnet.IsConnectionRefused(err) {
- return false
- }
-
- // this is a server-side timeout
- if isTimeoutError(err) {
- return true
- }
-
- // connection closed (closed before established) or reset (closed after established)
- // this is a server-side issue
- if errors.Is(err, io.EOF) || amnet.IsConnectionReset(err) {
- return true
- }
-
- if gerr, ok := err.(*googleapi.Error); ok {
- // https://cloud.google.com/storage/docs/retry-strategy
- return gerr.Code == http.StatusRequestTimeout ||
- gerr.Code == http.StatusGatewayTimeout
- }
-
- return false
-}
-
-// IsStorageThrottledErr returns true if error means that object cannot be retrieved right now due to throttling.
-func (s *GCSThanosObjectClient) IsStorageThrottledErr(err error) bool {
- if gerr, ok := err.(*googleapi.Error); ok {
- // https://cloud.google.com/storage/docs/retry-strategy
- return gerr.Code == http.StatusTooManyRequests ||
- (gerr.Code/100 == 5) // all 5xx errors are retryable
- }
-
- return false
-}
-
-// IsRetryableErr returns true if the request failed due to some retryable server-side scenario
-func (s *GCSThanosObjectClient) IsRetryableErr(err error) bool {
- return s.IsStorageTimeoutErr(err) || s.IsStorageThrottledErr(err)
-}
diff --git a/pkg/storage/factory.go b/pkg/storage/factory.go
index 69a7693fbe83e..79135abd26d00 100644
--- a/pkg/storage/factory.go
+++ b/pkg/storage/factory.go
@@ -697,6 +697,9 @@ func internalNewObjectClient(storeName, component string, cfg Config, clientMetr
}
azureCfg = (azure.BlobStorageConfig)(nsCfg)
}
+ if cfg.UseThanosObjstore {
+ return azure.NewBlobStorageThanosObjectClient(context.Background(), cfg.ObjectStore, component, util_log.Logger, cfg.Hedging)
+ }
return azure.NewBlobStorage(&azureCfg, clientMetrics.AzureMetrics, cfg.Hedging)
case types.StorageTypeSwift:
|
feat
|
Azure backend using thanos.io/objstore (#11315)
|
8bb615c2cce383890cbe94502c52c5a5d7ec5095
|
2023-10-11 10:00:31
|
Ashwanth
|
config: loki better defaults (#10793)
| false
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0423b302991cf..dca902898e6d4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -23,6 +23,7 @@
* [10709](https://github.com/grafana/loki/pull/10709) **chaudum**/**salvacorts** Remove `ingester.max-transfer-retries` configuration option in favor of using the WAL.
* [10736](https://github.com/grafana/loki/pull/10736) **ashwanthgoli** Deprecate write dedupe cache as this is not required by the newer single store indexes (tsdb and boltdb-shipper).
* [10693](https://github.com/grafana/loki/pull/10693) **ashwanthgoli** Embedded cache: Updates the metric prefix from `querier_cache_` to `loki_embeddedcache_` and removes duplicate metrics.
+* [10793](https://github.com/grafana/loki/pull/10793) **ashwanthgoli** Config: Better configuration defaults to provide a better experience for users out of the box.
* [10785](https://github.com/grafana/loki/pull/10785) **ashwanthgoli** Config: Removes `querier.worker-parallelism` and updates default value of `querier.max-concurrent` to 4.
##### Fixes
diff --git a/docs/sources/configure/_index.md b/docs/sources/configure/_index.md
index 24ba529e201a9..9868a58e38459 100644
--- a/docs/sources/configure/_index.md
+++ b/docs/sources/configure/_index.md
@@ -577,7 +577,7 @@ The `query_scheduler` block configures the Loki query scheduler. When configured
# In-flight requests above this limit will fail with HTTP response status code
# 429.
# CLI flag: -query-scheduler.max-outstanding-requests-per-tenant
-[max_outstanding_requests_per_tenant: <int> | default = 100]
+[max_outstanding_requests_per_tenant: <int> | default = 32000]
# Maximum number of levels of nesting of hierarchical queues. 0 means that
# hierarchical queues are disabled.
@@ -757,7 +757,7 @@ The `frontend` block configures the Loki query-frontend.
# Compress HTTP responses.
# CLI flag: -querier.compress-http-responses
-[compress_responses: <boolean> | default = false]
+[compress_responses: <boolean> | default = true]
# URL of downstream Loki.
# CLI flag: -frontend.downstream-url
@@ -1521,11 +1521,11 @@ lifecycler:
# utilization isn't high enough (e.g. less than 50% when sync_min_utilization is
# set to 0.5), then this chunk rollover doesn't happen.
# CLI flag: -ingester.sync-period
-[sync_period: <duration> | default = 0s]
+[sync_period: <duration> | default = 1h]
# Minimum utilization of chunk when doing synchronization.
# CLI flag: -ingester.sync-min-utilization
-[sync_min_utilization: <float> | default = 0]
+[sync_min_utilization: <float> | default = 0.1]
# The maximum number of errors a stream will report to the user when a push
# fails. 0 to make unlimited.
@@ -2220,7 +2220,7 @@ The `compactor` block configures the compactor component, which compacts index s
# delete_max_interval is input, the request is sharded into smaller requests of
# no more than delete_max_interval
# CLI flag: -compactor.delete-max-interval
-[delete_max_interval: <duration> | default = 0s]
+[delete_max_interval: <duration> | default = 24h]
# Maximum number of tables to compact in parallel. While increasing this value,
# please make sure compactor has enough disk space allocated to be able to store
@@ -2375,7 +2375,7 @@ The `limits_config` block configures global and per-tenant limits in Loki.
# Maximum number of label names per series.
# CLI flag: -validation.max-label-names-per-series
-[max_label_names_per_series: <int> | default = 30]
+[max_label_names_per_series: <int> | default = 15]
# Whether or not old samples will be rejected.
# CLI flag: -validation.reject-old-samples
@@ -2399,7 +2399,7 @@ The `limits_config` block configures global and per-tenant limits in Loki.
# set, in which case it is truncated instead of discarded. There is no limit
# when unset or set to 0.
# CLI flag: -distributor.max-line-size
-[max_line_size: <int> | default = 0B]
+[max_line_size: <int> | default = 256KB]
# Whether to truncate lines that exceed max_line_size.
# CLI flag: -distributor.max-line-size-truncate
@@ -2476,7 +2476,7 @@ The `limits_config` block configures global and per-tenant limits in Loki.
# Maximum number of queries that will be scheduled in parallel by the frontend
# for TSDB schemas.
# CLI flag: -querier.tsdb-max-query-parallelism
-[tsdb_max_query_parallelism: <int> | default = 512]
+[tsdb_max_query_parallelism: <int> | default = 128]
# Maximum number of bytes assigned to a single sharded query. Also expressible
# in human readable forms (1GB, etc).
@@ -2502,12 +2502,12 @@ The `limits_config` block configures global and per-tenant limits in Loki.
# Most recent allowed cacheable result per-tenant, to prevent caching very
# recent results that might still be in flux.
# CLI flag: -frontend.max-cache-freshness
-[max_cache_freshness_per_query: <duration> | default = 1m]
+[max_cache_freshness_per_query: <duration> | default = 10m]
# Do not cache requests with an end time that falls within Now minus this
# duration. 0 disables this feature.
# CLI flag: -frontend.max-stats-cache-freshness
-[max_stats_cache_freshness: <duration> | default = 0s]
+[max_stats_cache_freshness: <duration> | default = 10m]
# Maximum number of queriers that can handle requests for a single tenant. If
# set to 0 or value higher than number of available queriers, *all* queriers
@@ -2534,7 +2534,7 @@ The `limits_config` block configures global and per-tenant limits in Loki.
# splitting by time. This also determines how cache keys are chosen when result
# caching is enabled.
# CLI flag: -querier.split-queries-by-interval
-[split_queries_by_interval: <duration> | default = 30m]
+[split_queries_by_interval: <duration> | default = 1h]
# Limit queries that can be sharded. Queries within the time range of now and
# now minus this sharding lookback are not sharded. The default value of 0s
@@ -2551,7 +2551,7 @@ The `limits_config` block configures global and per-tenant limits in Loki.
# in log and metric queries only when TSDB is used. A value of 0 disables this
# limit.
# CLI flag: -frontend.max-querier-bytes-read
-[max_querier_bytes_read: <int> | default = 0B]
+[max_querier_bytes_read: <int> | default = 150GB]
# Enable log-volume endpoints.
[volume_enabled: <boolean>]
@@ -3956,11 +3956,11 @@ memcached:
# How many keys to fetch in each batch.
# CLI flag: -<prefix>.memcached.batchsize
- [batch_size: <int> | default = 1024]
+ [batch_size: <int> | default = 256]
# Maximum active requests to memcache.
# CLI flag: -<prefix>.memcached.parallelism
- [parallelism: <int> | default = 100]
+ [parallelism: <int> | default = 10]
memcached_client:
# Hostname for memcached service to use. If empty and if addresses is unset,
diff --git a/docs/sources/setup/upgrade/_index.md b/docs/sources/setup/upgrade/_index.md
index 36371dbda679d..dd489caeae850 100644
--- a/docs/sources/setup/upgrade/_index.md
+++ b/docs/sources/setup/upgrade/_index.md
@@ -78,9 +78,26 @@ This new metric will provide a more clear signal that there is an issue with ing
#### Changes to default configuration values
-1. `querier.max-concurrent` now defaults to 4. Consider increasing this if queriers have access to more CPU resources.
- Note that you risk running into out of memory errors if you set this to a very high value.
-1. `frontend.embedded-cache.max-size-mb` Embedded results cache size now defaults to 100MB.
+{{% responsive-table %}}
+| configuration | new default | old default | notes |
+| ------------------------------------------------------ | ----------- | ----------- | -------- |
+| `compactor.delete-max-interval` | 24h | 0 | splits the delete requests into intervals no longer than `delete_max_interval` |
+| `distributor.max-line-size` | 256KB | 0 | - |
+| `ingester.sync-period` | 1h | 0 | ensures that the chunk cuts for a given stream are synchronized across the ingesters in the replication set. Helps with deduplicating chunks. |
+| `ingester.sync-min-utilization` | 0.1 | 0 | - |
+| `frontend.max-querier-bytes-read` | 150GB | 0 | - |
+| `frontend.max-cache-freshness` | 10m | 1m | - |
+| `frontend.max-stats-cache-freshness` | 10m | 0 | - |
+| `frontend.embedded-cache.max-size-mb` | 100MB | 1GB | embedded results cache size now defaults to 100MB |
+| `memcached.batchsize` | 256 | 1024 | - |
+| `memcached.parallelism` | 10 | 100 | - |
+| `querier.compress-http-responses` | true | false | compress response if the request accepts gzip encoding |
+| `querier.max-concurrent` | 4 | 10 | Consider increasing this if queriers have access to more CPU resources. Note that you risk running into out of memory errors if you set this to a very high value. |
+| `querier.split-queries-by-interval` | 1h | 30m | - |
+| `querier.tsdb-max-query-parallelism` | 128 | 512 | - |
+| `query-scheduler.max-outstanding-requests-per-tenant` | 32000 | 100 | - |
+| `validation.max-label-names-per-series` | 15 | 30 | - |
+{{% /responsive-table %}}
#### Write dedupe cache is deprecated
Write dedupe cache is deprecated because it is not required by the newer single store indexes ([TSDB]({{< relref "../../operations/storage/tsdb" >}}) and [boltdb-shipper]({{< relref "../../operations/storage/boltdb-shipper" >}})).
diff --git a/integration/loki_micro_services_delete_test.go b/integration/loki_micro_services_delete_test.go
index 4643d3054e86b..05dfb021ee4f5 100644
--- a/integration/loki_micro_services_delete_test.go
+++ b/integration/loki_micro_services_delete_test.go
@@ -40,11 +40,12 @@ func TestMicroServicesDeleteRequest(t *testing.T) {
tCompactor = clu.AddComponent(
"compactor",
"-target=compactor",
- "-boltdb.shipper.compactor.compaction-interval=1s",
- "-boltdb.shipper.compactor.retention-delete-delay=1s",
+ "-compactor.compaction-interval=1s",
+ "-compactor.retention-delete-delay=1s",
// By default, a minute is added to the delete request start time. This compensates for that.
- "-boltdb.shipper.compactor.delete-request-cancel-period=-60s",
+ "-compactor.delete-request-cancel-period=-60s",
"-compactor.deletion-mode=filter-only",
+ "-compactor.delete-max-interval=0",
"-limits.per-user-override-period=1s",
)
tDistributor = clu.AddComponent(
diff --git a/integration/loki_micro_services_test.go b/integration/loki_micro_services_test.go
index 33323e2af0696..67648701678b1 100644
--- a/integration/loki_micro_services_test.go
+++ b/integration/loki_micro_services_test.go
@@ -652,7 +652,6 @@ func TestQueryTSDB_WithCachedPostings(t *testing.T) {
require.NoError(t, err)
assertCacheState(t, igwMetrics, &expectedCacheState{
cacheName: "store.index-cache-read.embedded-cache",
- gets: 0,
misses: 0,
added: 0,
})
@@ -685,7 +684,6 @@ func TestQueryTSDB_WithCachedPostings(t *testing.T) {
require.NoError(t, err)
assertCacheState(t, igwMetrics, &expectedCacheState{
cacheName: "store.index-cache-read.embedded-cache",
- gets: 50,
misses: 1,
added: 1,
})
@@ -745,18 +743,16 @@ func assertCacheState(t *testing.T, metrics string, e *expectedCacheState) {
},
}
- mf, found = mfs["loki_cache_fetched_keys"]
+ gets, found := mfs["loki_cache_fetched_keys"]
require.True(t, found)
- require.Equal(t, e.gets, getValueFromMF(mf, lbs))
- mf, found = mfs["loki_cache_hits"]
+ hits, found := mfs["loki_cache_hits"]
require.True(t, found)
- require.Equal(t, e.gets-e.misses, getValueFromMF(mf, lbs))
+ require.Equal(t, e.misses, getValueFromMF(gets, lbs)-getValueFromMF(hits, lbs))
}
type expectedCacheState struct {
cacheName string
- gets float64
misses float64
added float64
}
diff --git a/pkg/compactor/compactor.go b/pkg/compactor/compactor.go
index d3fae20f9ab9f..b1f78d46dafcb 100644
--- a/pkg/compactor/compactor.go
+++ b/pkg/compactor/compactor.go
@@ -114,7 +114,7 @@ func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
f.StringVar(&cfg.DeleteRequestStore, prefix+"compactor.delete-request-store", "", deprecated+"Store used for managing delete requests. Defaults to -compactor.shared-store.")
f.IntVar(&cfg.DeleteBatchSize, prefix+"compactor.delete-batch-size", 70, deprecated+"The max number of delete requests to run per compaction cycle.")
f.DurationVar(&cfg.DeleteRequestCancelPeriod, prefix+"compactor.delete-request-cancel-period", 24*time.Hour, deprecated+"Allow cancellation of delete request until duration after they are created. Data would be deleted only after delete requests have been older than this duration. Ideally this should be set to at least 24h.")
- f.DurationVar(&cfg.DeleteMaxInterval, prefix+"compactor.delete-max-interval", 0, deprecated+"Constrain the size of any single delete request. When a delete request > delete_max_interval is input, the request is sharded into smaller requests of no more than delete_max_interval")
+ f.DurationVar(&cfg.DeleteMaxInterval, prefix+"compactor.delete-max-interval", 24*time.Hour, deprecated+"Constrain the size of any single delete request. When a delete request > delete_max_interval is input, the request is sharded into smaller requests of no more than delete_max_interval")
f.DurationVar(&cfg.RetentionTableTimeout, prefix+"compactor.retention-table-timeout", 0, deprecated+"The maximum amount of time to spend running retention and deletion on any given table in the index.")
f.IntVar(&cfg.MaxCompactionParallelism, prefix+"compactor.max-compaction-parallelism", 1, deprecated+"Maximum number of tables to compact in parallel. While increasing this value, please make sure compactor has enough disk space allocated to be able to store and compact as many tables.")
f.IntVar(&cfg.UploadParallelism, prefix+"compactor.upload-parallelism", 10, deprecated+"Number of upload/remove operations to execute in parallel when finalizing a compaction. NOTE: This setting is per compaction operation, which can be executed in parallel. The upper bound on the number of concurrent uploads is upload_parallelism * max_compaction_parallelism.")
diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go
index 54fbf73e44665..e8e8f5ff20687 100644
--- a/pkg/ingester/ingester.go
+++ b/pkg/ingester/ingester.go
@@ -126,8 +126,8 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
f.IntVar(&cfg.BlockSize, "ingester.chunks-block-size", 256*1024, "The targeted _uncompressed_ size in bytes of a chunk block. When this threshold is exceeded, the head block will be cut and compressed inside the chunk.")
f.IntVar(&cfg.TargetChunkSize, "ingester.chunk-target-size", 1572864, "A target _compressed_ size in bytes for chunks. This is a desired size, not an exact size; chunks may be slightly bigger or significantly smaller if they get flushed for other reasons (e.g. chunk_idle_period). A value of 0 creates chunks with a fixed 10 blocks, a non-zero value will create chunks with a variable number of blocks to meet the target size.") // 1.5 MB
f.StringVar(&cfg.ChunkEncoding, "ingester.chunk-encoding", chunkenc.EncGZIP.String(), fmt.Sprintf("The algorithm to use for compressing chunk. (%s)", chunkenc.SupportedEncoding()))
- f.DurationVar(&cfg.SyncPeriod, "ingester.sync-period", 0, "Parameters used to synchronize ingesters to cut chunks at the same moment. Sync period is used to roll over incoming entry to a new chunk. If chunk's utilization isn't high enough (eg. less than 50% when sync_min_utilization is set to 0.5), then this chunk rollover doesn't happen.")
- f.Float64Var(&cfg.SyncMinUtilization, "ingester.sync-min-utilization", 0, "Minimum utilization of chunk when doing synchronization.")
+	f.DurationVar(&cfg.SyncPeriod, "ingester.sync-period", 1*time.Hour, "Parameters used to synchronize ingesters to cut chunks at the same moment. Sync period is used to roll over an incoming entry to a new chunk. If the chunk's utilization isn't high enough (e.g. less than 50% when sync_min_utilization is set to 0.5), then this chunk rollover doesn't happen.")
+ f.Float64Var(&cfg.SyncMinUtilization, "ingester.sync-min-utilization", 0.1, "Minimum utilization of chunk when doing synchronization.")
f.IntVar(&cfg.MaxReturnedErrors, "ingester.max-ignored-stream-errors", 10, "The maximum number of errors a stream will report to the user when a push fails. 0 to make unlimited.")
f.DurationVar(&cfg.MaxChunkAge, "ingester.max-chunk-age", 2*time.Hour, "The maximum duration of a timeseries chunk in memory. If a timeseries runs for longer than this, the current chunk will be flushed to the store and a new chunk created.")
f.DurationVar(&cfg.QueryStoreMaxLookBackPeriod, "ingester.query-store-max-look-back-period", 0, "How far back should an ingester be allowed to query the store for data, for use only with boltdb-shipper/tsdb index and filesystem object store. -1 for infinite.")
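
The two sync flags work together: the period decides when a chunk is eligible to be cut at a synchronized boundary, and the minimum utilization vetoes the cut for nearly empty chunks so low-volume streams don't produce a flood of tiny chunks. A toy sketch of that predicate (an assumed shape, not Loki's exact cut logic):

```go
package main

import (
	"fmt"
	"time"
)

// shouldSyncCut approximates the behavior the flag help describes: cut at the
// sync boundary only when the chunk is full enough.
func shouldSyncCut(age, syncPeriod time.Duration, utilization, minUtilization float64) bool {
	return age >= syncPeriod && utilization >= minUtilization
}

func main() {
	// With the new defaults (1h period, 0.1 minimum utilization), a 90-minute-old
	// chunk at 5% utilization keeps growing, while one at 25% is cut.
	fmt.Println(shouldSyncCut(90*time.Minute, time.Hour, 0.05, 0.1)) // false
	fmt.Println(shouldSyncCut(90*time.Minute, time.Hour, 0.25, 0.1)) // true
}
```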
diff --git a/pkg/lokifrontend/config.go b/pkg/lokifrontend/config.go
index 2d0a3f4c4e679..30ab5cd29fecc 100644
--- a/pkg/lokifrontend/config.go
+++ b/pkg/lokifrontend/config.go
@@ -29,7 +29,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
cfg.FrontendV2.RegisterFlags(f)
cfg.TLS.RegisterFlagsWithPrefix("frontend.tail-tls-config", f)
- f.BoolVar(&cfg.CompressResponses, "querier.compress-http-responses", false, "Compress HTTP responses.")
+ f.BoolVar(&cfg.CompressResponses, "querier.compress-http-responses", true, "Compress HTTP responses.")
f.StringVar(&cfg.DownstreamURL, "frontend.downstream-url", "", "URL of downstream Loki.")
f.StringVar(&cfg.TailProxyURL, "frontend.tail-proxy-url", "", "URL of querier for tail proxy.")
}
diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go
index 938fc96d88dc8..905cb327e34a9 100644
--- a/pkg/scheduler/scheduler.go
+++ b/pkg/scheduler/scheduler.go
@@ -105,7 +105,7 @@ type Config struct {
}
func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
- f.IntVar(&cfg.MaxOutstandingPerTenant, "query-scheduler.max-outstanding-requests-per-tenant", 100, "Maximum number of outstanding requests per tenant per query-scheduler. In-flight requests above this limit will fail with HTTP response status code 429.")
+ f.IntVar(&cfg.MaxOutstandingPerTenant, "query-scheduler.max-outstanding-requests-per-tenant", 32000, "Maximum number of outstanding requests per tenant per query-scheduler. In-flight requests above this limit will fail with HTTP response status code 429.")
f.IntVar(&cfg.MaxQueueHierarchyLevels, "query-scheduler.max-queue-hierarchy-levels", 3, "Maximum number of levels of nesting of hierarchical queues. 0 means that hierarchical queues are disabled.")
f.DurationVar(&cfg.QuerierForgetDelay, "query-scheduler.querier-forget-delay", 0, "If a querier disconnects without sending notification about graceful shutdown, the query-scheduler will keep the querier in the tenant's shard until the forget delay has passed. This feature is useful to reduce the blast radius when shuffle-sharding is enabled.")
cfg.GRPCClientConfig.RegisterFlagsWithPrefix("query-scheduler.grpc-client-config", f)
diff --git a/pkg/storage/chunk/cache/memcached.go b/pkg/storage/chunk/cache/memcached.go
index 172bb3d19df03..764c1b0abf798 100644
--- a/pkg/storage/chunk/cache/memcached.go
+++ b/pkg/storage/chunk/cache/memcached.go
@@ -29,8 +29,8 @@ type MemcachedConfig struct {
// RegisterFlagsWithPrefix adds the flags required to config this to the given FlagSet
func (cfg *MemcachedConfig) RegisterFlagsWithPrefix(prefix, description string, f *flag.FlagSet) {
f.DurationVar(&cfg.Expiration, prefix+"memcached.expiration", 0, description+"How long keys stay in the memcache.")
- f.IntVar(&cfg.BatchSize, prefix+"memcached.batchsize", 1024, description+"How many keys to fetch in each batch.")
- f.IntVar(&cfg.Parallelism, prefix+"memcached.parallelism", 100, description+"Maximum active requests to memcache.")
+ f.IntVar(&cfg.BatchSize, prefix+"memcached.batchsize", 256, description+"How many keys to fetch in each batch.")
+ f.IntVar(&cfg.Parallelism, prefix+"memcached.parallelism", 10, description+"Maximum active requests to memcache.")
}
// Memcached type caches chunks in memcached
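
The two memcached knobs bound work per request and concurrency respectively, so the lowered defaults trade peak throughput for gentler load on memcached. A quick back-of-envelope with the new values:

```go
package main

import "fmt"

func main() {
	// batch_size caps keys per request; parallelism caps in-flight requests.
	const keys, batchSize, parallelism = 10000, 256, 10
	batches := (keys + batchSize - 1) / batchSize // ceil division
	fmt.Printf("a %d-key fetch becomes %d batches over at most %d in-flight requests\n",
		keys, batches, parallelism)
}
```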
diff --git a/pkg/validation/limits.go b/pkg/validation/limits.go
index 54343f6ed160f..edec616249538 100644
--- a/pkg/validation/limits.go
+++ b/pkg/validation/limits.go
@@ -207,11 +207,13 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) {
f.StringVar(&l.IngestionRateStrategy, "distributor.ingestion-rate-limit-strategy", "global", "Whether the ingestion rate limit should be applied individually to each distributor instance (local), or evenly shared across the cluster (global). The ingestion rate strategy cannot be overridden on a per-tenant basis.\n- local: enforces the limit on a per distributor basis. The actual effective rate limit will be N times higher, where N is the number of distributor replicas.\n- global: enforces the limit globally, configuring a per-distributor local rate limiter as 'ingestion_rate / N', where N is the number of distributor replicas (it's automatically adjusted if the number of replicas change). The global strategy requires the distributors to form their own ring, which is used to keep track of the current number of healthy distributor replicas.")
f.Float64Var(&l.IngestionRateMB, "distributor.ingestion-rate-limit-mb", 4, "Per-user ingestion rate limit in sample size per second. Units in MB.")
f.Float64Var(&l.IngestionBurstSizeMB, "distributor.ingestion-burst-size-mb", 6, "Per-user allowed ingestion burst size (in sample size). Units in MB. The burst size refers to the per-distributor local rate limiter even in the case of the 'global' strategy, and should be set at least to the maximum logs size expected in a single push request.")
+
+ _ = l.MaxLineSize.Set("256KB")
f.Var(&l.MaxLineSize, "distributor.max-line-size", "Maximum line size on ingestion path. Example: 256kb. Any log line exceeding this limit will be discarded unless `distributor.max-line-size-truncate` is set, in which case it is truncated instead of discarded. There is no limit when unset or set to 0.")
f.BoolVar(&l.MaxLineSizeTruncate, "distributor.max-line-size-truncate", false, "Whether to truncate lines that exceed max_line_size.")
f.IntVar(&l.MaxLabelNameLength, "validation.max-length-label-name", 1024, "Maximum length accepted for label names.")
f.IntVar(&l.MaxLabelValueLength, "validation.max-length-label-value", 2048, "Maximum length accepted for label value. This setting also applies to the metric name.")
- f.IntVar(&l.MaxLabelNamesPerSeries, "validation.max-label-names-per-series", 30, "Maximum number of label names per series.")
+ f.IntVar(&l.MaxLabelNamesPerSeries, "validation.max-label-names-per-series", 15, "Maximum number of label names per series.")
f.BoolVar(&l.RejectOldSamples, "validation.reject-old-samples", true, "Whether or not old samples will be rejected.")
f.BoolVar(&l.IncrementDuplicateTimestamp, "validation.increment-duplicate-timestamps", false, "Alter the log line timestamp during ingestion when the timestamp is the same as the previous entry for the same stream. When enabled, if a log line in a push request has the same timestamp as the previous line for the same stream, one nanosecond is added to the log line. This will preserve the received order of log lines with the exact same timestamp when they are queried, by slightly altering their stored timestamp. NOTE: This is imperfect, because Loki accepts out of order writes, and another push request for the same stream could contain duplicate timestamps to existing entries and they will not be incremented.")
@@ -246,7 +248,7 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) {
_ = l.MaxQueryLookback.Set("0s")
f.Var(&l.MaxQueryLookback, "querier.max-query-lookback", "Limit how far back in time series data and metadata can be queried, up until lookback duration ago. This limit is enforced in the query frontend, the querier and the ruler. If the requested time range is outside the allowed range, the request will not fail, but will be modified to only query data within the allowed time range. The default value of 0 does not set a limit.")
f.IntVar(&l.MaxQueryParallelism, "querier.max-query-parallelism", 32, "Maximum number of queries that will be scheduled in parallel by the frontend.")
- f.IntVar(&l.TSDBMaxQueryParallelism, "querier.tsdb-max-query-parallelism", 512, "Maximum number of queries will be scheduled in parallel by the frontend for TSDB schemas.")
+	f.IntVar(&l.TSDBMaxQueryParallelism, "querier.tsdb-max-query-parallelism", 128, "Maximum number of queries that will be scheduled in parallel by the frontend for TSDB schemas.")
_ = l.TSDBMaxBytesPerShard.Set(strconv.Itoa(DefaultTSDBMaxBytesPerShard))
f.Var(&l.TSDBMaxBytesPerShard, "querier.tsdb-max-bytes-per-shard", "Maximum number of bytes assigned to a single sharded query. Also expressible in human readable forms (1GB, etc).")
f.IntVar(&l.CardinalityLimit, "store.cardinality-limit", 1e5, "Cardinality limit for index queries.")
@@ -257,11 +259,14 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) {
f.Var(&l.MinShardingLookback, "frontend.min-sharding-lookback", "Limit queries that can be sharded. Queries within the time range of now and now minus this sharding lookback are not sharded. The default value of 0s disables the lookback, causing sharding of all queries at all times.")
f.Var(&l.MaxQueryBytesRead, "frontend.max-query-bytes-read", "Max number of bytes a query can fetch. Enforced in log and metric queries only when TSDB is used. The default value of 0 disables this limit.")
+
+ _ = l.MaxQuerierBytesRead.Set("150GB")
f.Var(&l.MaxQuerierBytesRead, "frontend.max-querier-bytes-read", "Max number of bytes a query can fetch after splitting and sharding. Enforced in log and metric queries only when TSDB is used. A value of 0 disables this limit.")
- _ = l.MaxCacheFreshness.Set("1m")
+ _ = l.MaxCacheFreshness.Set("10m")
f.Var(&l.MaxCacheFreshness, "frontend.max-cache-freshness", "Most recent allowed cacheable result per-tenant, to prevent caching very recent results that might still be in flux.")
+ _ = l.MaxStatsCacheFreshness.Set("10m")
f.Var(&l.MaxStatsCacheFreshness, "frontend.max-stats-cache-freshness", "Do not cache requests with an end time that falls within Now minus this duration. 0 disables this feature.")
f.IntVar(&l.MaxQueriersPerTenant, "frontend.max-queriers-per-tenant", 0, "Maximum number of queriers that can handle requests for a single tenant. If set to 0 or value higher than number of available queriers, *all* queriers will handle requests for the tenant. Each frontend (or query-scheduler, if used) will select the same set of queriers for the same tenant (given that all queriers are connected to all frontends / query-schedulers). This option only works with queriers connecting to the query-frontend / query-scheduler, not when using downstream URL.")
@@ -281,7 +286,7 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) {
_ = l.PerTenantOverridePeriod.Set("10s")
f.Var(&l.PerTenantOverridePeriod, "limits.per-user-override-period", "Feature renamed to 'runtime configuration'; flag deprecated in favor of -runtime-config.reload-period (runtime_config.period in YAML).")
- _ = l.QuerySplitDuration.Set("30m")
+ _ = l.QuerySplitDuration.Set("1h")
f.Var(&l.QuerySplitDuration, "querier.split-queries-by-interval", "Split queries by a time interval and execute in parallel. The value 0 disables splitting by time. This also determines how cache keys are chosen when result caching is enabled.")
f.StringVar(&l.DeletionMode, "compactor.deletion-mode", "filter-and-delete", "Deletion mode. Can be one of 'disabled', 'filter-only', or 'filter-and-delete'. When set to 'filter-only' or 'filter-and-delete', and if retention_enabled is true, then the log entry deletion API endpoints are available.")
|
config
|
loki better defaults (#10793)
|
e54a1b21c3e02e4f10fda6b1023ca02e683db6c6
|
2019-04-25 20:44:07
|
Goutham Veeramachaneni
|
vendor: update cortex (#512)
| false
|
diff --git a/Gopkg.lock b/Gopkg.lock
index c8a65645eac1f..bc716a09b7cf4 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -187,7 +187,7 @@
[[projects]]
branch = "lazy-load-chunks"
- digest = "1:ec8e0308d1e557f50317a6437073a7a859d73e4cf8e4c20a60d7009e352353c6"
+ digest = "1:bf1fa66c54722bc8664f1465e427cd6fe7df52f2b6fd5ab996baf37601687b70"
name = "github.com/cortexproject/cortex"
packages = [
"pkg/chunk",
@@ -211,10 +211,9 @@
"pkg/util/middleware",
"pkg/util/spanlogger",
"pkg/util/validation",
- "pkg/util/wire",
]
pruneopts = "UT"
- revision = "161f6716cba9a32f07f359c4f9f8578e0c5d5ae8"
+ revision = "95a3f308e95617732b76e337874e83ccf173cf14"
source = "https://github.com/grafana/cortex"
[[projects]]
@@ -1367,7 +1366,6 @@
"github.com/cortexproject/cortex/pkg/util",
"github.com/cortexproject/cortex/pkg/util/flagext",
"github.com/cortexproject/cortex/pkg/util/validation",
- "github.com/cortexproject/cortex/pkg/util/wire",
"github.com/fatih/color",
"github.com/go-kit/kit/log",
"github.com/go-kit/kit/log/level",
@@ -1391,10 +1389,12 @@
"github.com/prometheus/prometheus/discovery/targetgroup",
"github.com/prometheus/prometheus/pkg/labels",
"github.com/prometheus/prometheus/pkg/relabel",
+ "github.com/prometheus/prometheus/pkg/textparse",
"github.com/prometheus/prometheus/relabel",
"github.com/stretchr/testify/assert",
"github.com/stretchr/testify/require",
"github.com/weaveworks/common/httpgrpc",
+ "github.com/weaveworks/common/httpgrpc/server",
"github.com/weaveworks/common/middleware",
"github.com/weaveworks/common/server",
"github.com/weaveworks/common/tracing",
diff --git a/pkg/ingester/flush.go b/pkg/ingester/flush.go
index 1cff642f9294c..55a777f7d1916 100644
--- a/pkg/ingester/flush.go
+++ b/pkg/ingester/flush.go
@@ -184,7 +184,7 @@ func (i *Ingester) flushUserSeries(userID string, fp model.Fingerprint, immediat
return nil
}
-func (i *Ingester) collectChunksToFlush(instance *instance, fp model.Fingerprint, immediate bool) ([]*chunkDesc, []client.LabelPair) {
+func (i *Ingester) collectChunksToFlush(instance *instance, fp model.Fingerprint, immediate bool) ([]*chunkDesc, []client.LabelAdapter) {
instance.streamsMtx.Lock()
defer instance.streamsMtx.Unlock()
@@ -234,18 +234,18 @@ func (i *Ingester) removeFlushedChunks(instance *instance, stream *stream) {
if len(stream.chunks) == 0 {
delete(instance.streams, stream.fp)
- instance.index.Delete(client.FromLabelPairsToLabels(stream.labels), stream.fp)
+ instance.index.Delete(client.FromLabelAdaptersToLabels(stream.labels), stream.fp)
instance.streamsRemovedTotal.Inc()
}
}
-func (i *Ingester) flushChunks(ctx context.Context, fp model.Fingerprint, labelPairs []client.LabelPair, cs []*chunkDesc) error {
+func (i *Ingester) flushChunks(ctx context.Context, fp model.Fingerprint, labelPairs []client.LabelAdapter, cs []*chunkDesc) error {
userID, err := user.ExtractOrgID(ctx)
if err != nil {
return err
}
- metric := fromLabelPairs(labelPairs)
+ metric := client.FromLabelAdaptersToMetric(labelPairs)
metric[nameLabel] = logsValue
wireChunks := make([]chunk.Chunk, 0, len(cs))
@@ -288,11 +288,3 @@ func (i *Ingester) flushChunks(ctx context.Context, fp model.Fingerprint, labelP
return nil
}
-
-func fromLabelPairs(ls []client.LabelPair) model.Metric {
- m := make(model.Metric, len(ls))
- for _, l := range ls {
- m[model.LabelName(l.Name)] = model.LabelValue(l.Value)
- }
- return m
-}
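The deleted fromLabelPairs helper is not lost functionality: its call site now uses client.FromLabelAdaptersToMetric, which (as the compat.go hunks further down in this diff show) performs the same map-building walk. A minimal equivalence sketch, with an illustrative name and the usual imports assumed ("github.com/cortexproject/cortex/pkg/ingester/client", "github.com/prometheus/common/model"):

func metricFromAdapters(ls []client.LabelAdapter) model.Metric {
	// Copy each name/value pair into a model.Metric; fine off the hot path,
	// per the upstream doc comment on FromLabelAdaptersToMetric.
	m := make(model.Metric, len(ls))
	for _, l := range ls {
		m[model.LabelName(l.Name)] = model.LabelValue(l.Value)
	}
	return m
}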
diff --git a/pkg/ingester/stream.go b/pkg/ingester/stream.go
index f2a6529ac202c..93d8453c581d8 100644
--- a/pkg/ingester/stream.go
+++ b/pkg/ingester/stream.go
@@ -47,7 +47,7 @@ type stream struct {
// Not thread-safe; assume accesses to this are locked by caller.
chunks []chunkDesc
fp model.Fingerprint
- labels []client.LabelPair
+ labels []client.LabelAdapter
}
type chunkDesc struct {
@@ -58,7 +58,7 @@ type chunkDesc struct {
lastUpdated time.Time
}
-func newStream(fp model.Fingerprint, labels []client.LabelPair) *stream {
+func newStream(fp model.Fingerprint, labels []client.LabelAdapter) *stream {
return &stream{
fp: fp,
labels: labels,
@@ -96,7 +96,7 @@ func (s *stream) Push(_ context.Context, entries []logproto.Entry) error {
}
if appendErr == chunkenc.ErrOutOfOrder {
- return httpgrpc.Errorf(http.StatusBadRequest, "entry out of order for stream: %s", client.FromLabelPairsToLabels(s.labels).String())
+ return httpgrpc.Errorf(http.StatusBadRequest, "entry out of order for stream: %s", client.FromLabelAdaptersToLabels(s.labels).String())
}
return appendErr
@@ -121,5 +121,5 @@ func (s *stream) Iterator(from, through time.Time, direction logproto.Direction)
}
}
- return iter.NewNonOverlappingIterator(iterators, client.FromLabelPairsToLabels(s.labels).String()), nil
+ return iter.NewNonOverlappingIterator(iterators, client.FromLabelAdaptersToLabels(s.labels).String()), nil
}
diff --git a/pkg/logproto/dep.go b/pkg/logproto/dep.go
index 68eb69c94481e..34d7bc3d65d17 100644
--- a/pkg/logproto/dep.go
+++ b/pkg/logproto/dep.go
@@ -2,6 +2,6 @@ package logproto
import (
// trick dep into including this, needed by the generated code.
- _ "github.com/cortexproject/cortex/pkg/util/wire"
+ _ "github.com/cortexproject/cortex/pkg/chunk/storage"
_ "github.com/gogo/protobuf/types"
)
diff --git a/pkg/util/conv.go b/pkg/util/conv.go
index b44bed5ce77ac..77dce97f31551 100644
--- a/pkg/util/conv.go
+++ b/pkg/util/conv.go
@@ -2,22 +2,21 @@ package util
import (
"github.com/cortexproject/cortex/pkg/ingester/client"
- "github.com/cortexproject/cortex/pkg/util/wire"
"github.com/grafana/loki/pkg/parser"
)
// ToClientLabels parses the labels and converts them to the Cortex type.
-func ToClientLabels(labels string) ([]client.LabelPair, error) {
+func ToClientLabels(labels string) ([]client.LabelAdapter, error) {
ls, err := parser.Labels(labels)
if err != nil {
return nil, err
}
- pairs := make([]client.LabelPair, 0, len(ls))
+ pairs := make([]client.LabelAdapter, 0, len(ls))
for i := 0; i < len(ls); i++ {
- pairs = append(pairs, client.LabelPair{
- Name: wire.Bytes(ls[i].Name),
- Value: wire.Bytes(ls[i].Value),
+ pairs = append(pairs, client.LabelAdapter{
+ Name: ls[i].Name,
+ Value: ls[i].Value,
})
}
return pairs, nil
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go
index 67111722cd4d9..b3a82eb960ca6 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go
@@ -20,6 +20,7 @@ import (
"github.com/cortexproject/cortex/pkg/chunk/cache"
"github.com/cortexproject/cortex/pkg/util"
"github.com/cortexproject/cortex/pkg/util/extract"
+ "github.com/cortexproject/cortex/pkg/util/flagext"
"github.com/cortexproject/cortex/pkg/util/spanlogger"
"github.com/cortexproject/cortex/pkg/util/validation"
"github.com/weaveworks/common/httpgrpc"
@@ -58,25 +59,21 @@ type StoreConfig struct {
ChunkCacheConfig cache.Config
WriteDedupeCacheConfig cache.Config
- MinChunkAge time.Duration
- CardinalityCacheSize int
- CardinalityCacheValidity time.Duration
- CardinalityLimit int
-
+ MinChunkAge time.Duration
CacheLookupsOlderThan time.Duration
}
// RegisterFlags adds the flags required to config this to the given FlagSet
func (cfg *StoreConfig) RegisterFlags(f *flag.FlagSet) {
cfg.ChunkCacheConfig.RegisterFlagsWithPrefix("", "Cache config for chunks. ", f)
-
cfg.WriteDedupeCacheConfig.RegisterFlagsWithPrefix("store.index-cache-write.", "Cache config for index entry writing. ", f)
f.DurationVar(&cfg.MinChunkAge, "store.min-chunk-age", 0, "Minimum time between chunk update and being saved to the store.")
- f.IntVar(&cfg.CardinalityCacheSize, "store.cardinality-cache-size", 0, "Size of in-memory cardinality cache, 0 to disable.")
- f.DurationVar(&cfg.CardinalityCacheValidity, "store.cardinality-cache-validity", 1*time.Hour, "Period for which entries in the cardinality cache are valid.")
- f.IntVar(&cfg.CardinalityLimit, "store.cardinality-limit", 1e5, "Cardinality limit for index queries.")
f.DurationVar(&cfg.CacheLookupsOlderThan, "store.cache-lookups-older-than", 0, "Cache index entries older than this period. 0 to disable.")
+
+ // Deprecated.
+ flagext.DeprecatedFlag(f, "store.cardinality-cache-size", "DEPRECATED. Use store.index-cache-size.enable-fifocache and store.cardinality-cache.fifocache.size instead.")
+ flagext.DeprecatedFlag(f, "store.cardinality-cache-validity", "DEPRECATED. Use store.index-cache-size.enable-fifocache and store.cardinality-cache.fifocache.duration instead.")
}
// store implements Store
@@ -211,7 +208,7 @@ func (c *store) validateQuery(ctx context.Context, from model.Time, through *mod
maxQueryLength := c.limits.MaxQueryLength(userID)
if maxQueryLength > 0 && (*through).Sub(from) > maxQueryLength {
- return "", nil, false, httpgrpc.Errorf(http.StatusBadRequest, "invalid query, length > limit (%s > %s)", (*through).Sub(from), maxQueryLength)
+ return "", nil, false, httpgrpc.Errorf(http.StatusBadRequest, validation.ErrQueryTooLong, (*through).Sub(from), maxQueryLength)
}
now := model.Now()
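Replacing the inline "invalid query, length > limit (%s > %s)" literal with validation.ErrQueryTooLong lets other components match the message against a single definition instead of a copied string. A sketch of the constant's assumed shape, inferred from the two format arguments still passed above (this is an assumption, not confirmed by the diff):

// Assumption: the upstream constant carries the same two-verb format string
// that the removed literal did.
const ErrQueryTooLong = "invalid query, length > limit (%s > %s)"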
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/bigchunk.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/bigchunk.go
index 29746f87e5670..0282c08e99763 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/bigchunk.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/bigchunk.go
@@ -161,8 +161,9 @@ func (b *bigchunk) Len() int {
}
func (b *bigchunk) Size() int {
- sum := 0
+ sum := 2 // For the number of sub chunks.
for _, c := range b.chunks {
+ sum += 2 // For the length of the sub chunk.
sum += len(c.Bytes())
}
return sum
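The two hard-coded 2s in the new Size() mirror the bigchunk wire framing suggested by the comments: a 16-bit sub-chunk count up front, then each sub chunk prefixed by its own 16-bit length. A worked sketch of that arithmetic (the helper name is illustrative, and the 16-bit framing is an assumption read off the comments):

// marshalledSize mirrors the corrected bigchunk.Size(): 2 bytes for the
// sub-chunk count, plus a 2-byte length prefix and the payload bytes for
// every sub chunk.
func marshalledSize(chunkLens []int) int {
	sum := 2
	for _, n := range chunkLens {
		sum += 2 + n
	}
	return sum
}

For example, three 1024-byte sub chunks yield 2 + 3*(2+1024) = 3080 bytes, where the old Size() would have reported only 3072.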
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_object_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_object_client.go
index a43980bde1ac0..adf155f6c5e5f 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_object_client.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_object_client.go
@@ -114,7 +114,7 @@ func (s *bigtableObjectClient) GetChunks(ctx context.Context, input []chunk.Chun
decodeContext := chunk.NewDecodeContext()
var processingErr error
- var recievedChunks = 0
+ var receivedChunks = 0
// rows are returned in key order, not order in row list
err := table.ReadRows(ctx, page, func(row bigtable.Row) bool {
@@ -130,7 +130,7 @@ func (s *bigtableObjectClient) GetChunks(ctx context.Context, input []chunk.Chun
return false
}
- recievedChunks++
+ receivedChunks++
outs <- chunk
return true
})
@@ -139,8 +139,8 @@ func (s *bigtableObjectClient) GetChunks(ctx context.Context, input []chunk.Chun
errs <- processingErr
} else if err != nil {
errs <- errors.WithStack(err)
- } else if recievedChunks < len(page) {
- errs <- errors.WithStack(fmt.Errorf("Asked for %d chunks for Bigtable, received %d", len(page), recievedChunks))
+ } else if receivedChunks < len(page) {
+ errs <- errors.WithStack(fmt.Errorf("Asked for %d chunks for Bigtable, received %d", len(page), receivedChunks))
}
}(page)
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/series_store.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/series_store.go
index 93f36ab0f7a46..4a0b742e6af28 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/series_store.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/series_store.go
@@ -23,7 +23,9 @@ import (
)
var (
- errCardinalityExceeded = errors.New("cardinality limit exceeded")
+ // ErrCardinalityExceeded is returned when the user reads a row that
+ // is too large.
+ ErrCardinalityExceeded = errors.New("cardinality limit exceeded")
indexLookupsPerQuery = promauto.NewHistogram(prometheus.HistogramOpts{
Namespace: "cortex",
@@ -57,8 +59,6 @@ var (
// seriesStore implements Store
type seriesStore struct {
store
- cardinalityCache *cache.FifoCache
-
writeDedupeCache cache.Cache
}
@@ -89,10 +89,6 @@ func newSeriesStore(cfg StoreConfig, schema Schema, index IndexClient, chunks Ob
limits: limits,
Fetcher: fetcher,
},
- cardinalityCache: cache.NewFifoCache("cardinality", cache.FifoCacheConfig{
- Size: cfg.CardinalityCacheSize,
- Validity: cfg.CardinalityCacheValidity,
- }),
writeDedupeCache: writeDedupeCache,
}, nil
}
@@ -229,15 +225,21 @@ func (c *seriesStore) lookupSeriesByMetricNameMatchers(ctx context.Context, from
ids = intersectStrings(ids, incoming)
}
case err := <-incomingErrors:
- if err == errCardinalityExceeded {
+ // The idea is that if we have 2 matchers, and one returns a lot of
+ // series while the other returns only 10 (a few), we don't look up the first one at all.
+ // We just manually filter through the 10 series again using "filterChunksByMatchers",
+ // saving us from looking up and intersecting a lot of series.
+ if err == ErrCardinalityExceeded {
cardinalityExceededErrors++
} else {
lastErr = err
}
}
}
+
+ // But if every single matcher returns a lot of series, then it makes sense to abort the query.
if cardinalityExceededErrors == len(matchers) {
- return nil, errCardinalityExceeded
+ return nil, ErrCardinalityExceeded
} else if lastErr != nil {
return nil, lastErr
}
@@ -270,36 +272,12 @@ func (c *seriesStore) lookupSeriesByMetricNameMatcher(ctx context.Context, from,
}
level.Debug(log).Log("queries", len(queries))
- for _, query := range queries {
- value, ok := c.cardinalityCache.Get(ctx, query.HashValue)
- if !ok {
- continue
- }
- cardinality := value.(int)
- if cardinality > c.cfg.CardinalityLimit {
- return nil, errCardinalityExceeded
- }
- }
-
entries, err := c.lookupEntriesByQueries(ctx, queries)
if err != nil {
return nil, err
}
level.Debug(log).Log("entries", len(entries))
- // TODO This is not correct, will overcount for queries > 24hrs
- keys := make([]string, 0, len(queries))
- values := make([]interface{}, 0, len(queries))
- for _, query := range queries {
- keys = append(keys, query.HashValue)
- values = append(values, len(entries))
- }
- c.cardinalityCache.Put(ctx, keys, values)
-
- if len(entries) > c.cfg.CardinalityLimit {
- return nil, errCardinalityExceeded
- }
-
ids, err := c.parseIndexEntries(ctx, entries, matcher)
if err != nil {
return nil, err
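The comments added in this hunk describe an asymmetric abort rule, condensed below: per-matcher cardinality overflows are tolerated as long as at least one matcher stays under the limit (its small result set can still be filtered via filterChunksByMatchers), and only when every matcher overflows is the query rejected. A sketch of that rule, assuming one error (possibly nil) per matcher; the helper name is illustrative, not upstream's:

func resolveMatcherErrors(errs []error) error {
	// Count cardinality overflows separately from hard failures.
	exceeded := 0
	var lastErr error
	for _, err := range errs {
		switch {
		case err == chunk.ErrCardinalityExceeded:
			exceeded++
		case err != nil:
			lastErr = err
		}
	}
	// Abort only when no matcher produced a usable (small) result set.
	if len(errs) > 0 && exceeded == len(errs) {
		return chunk.ErrCardinalityExceeded
	}
	return lastErr
}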
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/wire/bytes.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/bytes.go
similarity index 97%
rename from vendor/github.com/cortexproject/cortex/pkg/util/wire/bytes.go
rename to vendor/github.com/cortexproject/cortex/pkg/chunk/storage/bytes.go
index dfabadd8e061f..c4804995ff82a 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/wire/bytes.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/bytes.go
@@ -1,4 +1,4 @@
-package wire
+package storage
import (
"bytes"
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_fixtures.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_fixtures.go
index cc36d5cb27d8f..1469e92d75ed5 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_fixtures.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_fixtures.go
@@ -3,6 +3,9 @@ package storage
import (
"time"
+ "github.com/cortexproject/cortex/pkg/util/flagext"
+ "github.com/cortexproject/cortex/pkg/util/validation"
+
"github.com/cortexproject/cortex/pkg/chunk/cache"
"github.com/cortexproject/cortex/pkg/chunk/gcp"
@@ -16,11 +19,15 @@ type fixture struct {
func (f fixture) Name() string { return "caching-store" }
func (f fixture) Clients() (chunk.IndexClient, chunk.ObjectClient, chunk.TableClient, chunk.SchemaConfig, error) {
+ limits, err := defaultLimits()
+ if err != nil {
+ return nil, nil, nil, chunk.SchemaConfig{}, err
+ }
indexClient, objectClient, tableClient, schemaConfig, err := f.fixture.Clients()
indexClient = newCachingIndexClient(indexClient, cache.NewFifoCache("index-fifo", cache.FifoCacheConfig{
Size: 500,
Validity: 5 * time.Minute,
- }), 5*time.Minute)
+ }), 5*time.Minute, limits)
return indexClient, objectClient, tableClient, schemaConfig, err
}
func (f fixture) Teardown() error { return f.fixture.Teardown() }
@@ -29,3 +36,9 @@ func (f fixture) Teardown() error { return f.fixture.Teardown() }
var Fixtures = []testutils.Fixture{
fixture{gcp.Fixtures[0]},
}
+
+func defaultLimits() (*validation.Overrides, error) {
+ var defaults validation.Limits
+ flagext.DefaultValues(&defaults)
+ return validation.NewOverrides(defaults)
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.go
index fcb9a02204a1d..c4df850b885b0 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.go
@@ -5,15 +5,18 @@ import (
"sync"
"time"
+ "github.com/go-kit/kit/log/level"
+ proto "github.com/golang/protobuf/proto"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promauto"
+ "github.com/weaveworks/common/user"
+
"github.com/cortexproject/cortex/pkg/chunk"
"github.com/cortexproject/cortex/pkg/chunk/cache"
chunk_util "github.com/cortexproject/cortex/pkg/chunk/util"
"github.com/cortexproject/cortex/pkg/util"
"github.com/cortexproject/cortex/pkg/util/spanlogger"
- "github.com/go-kit/kit/log/level"
- proto "github.com/golang/protobuf/proto"
- "github.com/prometheus/client_golang/prometheus"
- "github.com/prometheus/client_golang/prometheus/promauto"
+ "github.com/cortexproject/cortex/pkg/util/validation"
)
var (
@@ -43,9 +46,10 @@ type cachingIndexClient struct {
chunk.IndexClient
cache cache.Cache
validity time.Duration
+ limits *validation.Overrides
}
-func newCachingIndexClient(client chunk.IndexClient, c cache.Cache, validity time.Duration) chunk.IndexClient {
+func newCachingIndexClient(client chunk.IndexClient, c cache.Cache, validity time.Duration, limits *validation.Overrides) chunk.IndexClient {
if c == nil {
return client
}
@@ -54,6 +58,7 @@ func newCachingIndexClient(client chunk.IndexClient, c cache.Cache, validity tim
IndexClient: client,
cache: cache.NewSnappy(c),
validity: validity,
+ limits: limits,
}
}
@@ -65,6 +70,12 @@ func (s *cachingIndexClient) QueryPages(ctx context.Context, queries []chunk.Ind
// We cache the entire row, so filter client side.
callback = chunk_util.QueryFilter(callback)
+ userID, err := user.ExtractOrgID(ctx)
+ if err != nil {
+ return err
+ }
+ cardinalityLimit := int32(s.limits.CardinalityLimit(userID))
+
// Build list of keys to lookup in the cache.
keys := make([]string, 0, len(queries))
queriesByKey := make(map[string][]chunk.IndexQuery, len(queries))
@@ -76,6 +87,10 @@ func (s *cachingIndexClient) QueryPages(ctx context.Context, queries []chunk.Ind
batches, misses := s.cacheFetch(ctx, keys)
for _, batch := range batches {
+ if cardinalityLimit > 0 && batch.Cardinality > cardinalityLimit {
+ return chunk.ErrCardinalityExceeded
+ }
+
queries := queriesByKey[batch.Key]
for _, query := range queries {
callback(query, batch)
@@ -115,7 +130,7 @@ func (s *cachingIndexClient) QueryPages(ctx context.Context, queries []chunk.Ind
results[key] = rb
}
- err := s.IndexClient.QueryPages(ctx, cacheableMissed, func(cacheableQuery chunk.IndexQuery, r chunk.ReadBatch) bool {
+ err = s.IndexClient.QueryPages(ctx, cacheableMissed, func(cacheableQuery chunk.IndexQuery, r chunk.ReadBatch) bool {
resultsMtx.Lock()
defer resultsMtx.Unlock()
key := queryKey(cacheableQuery)
@@ -135,9 +150,20 @@ func (s *cachingIndexClient) QueryPages(ctx context.Context, queries []chunk.Ind
defer resultsMtx.Unlock()
keys := make([]string, 0, len(results))
batches := make([]ReadBatch, 0, len(results))
+ var cardinalityErr error
for key, batch := range results {
+ cardinality := int32(len(batch.Entries))
+ if cardinalityLimit > 0 && cardinality > cardinalityLimit {
+ batch.Cardinality = cardinality
+ batch.Entries = nil
+ cardinalityErr = chunk.ErrCardinalityExceeded
+ }
+
keys = append(keys, key)
batches = append(batches, batch)
+ if cardinalityErr != nil {
+ continue
+ }
queries := queriesByKey[key]
for _, query := range queries {
@@ -145,8 +171,8 @@ func (s *cachingIndexClient) QueryPages(ctx context.Context, queries []chunk.Ind
}
}
s.cacheStore(ctx, keys, batches)
+ return cardinalityErr
}
- return nil
}
// Iterator implements chunk.ReadBatch.
@@ -250,7 +276,6 @@ func (s *cachingIndexClient) cacheFetch(ctx context.Context, keys []string) (bat
}
if readBatch.Expiry != 0 && time.Now().After(time.Unix(0, readBatch.Expiry)) {
- level.Debug(log).Log("msg", "dropping index cache entry due to expiration", "key", key, "readBatch.Key", readBatch.Key, "expiry", time.Unix(0, readBatch.Expiry))
continue
}
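Taken together, these hunks enforce the per-tenant limit on both paths: a cache hit is rejected when its stored Cardinality marker exceeds the limit, and a fresh lookup is rejected when its entry count does; in that case the batch is cached with Entries stripped and Cardinality set, so repeat queries fail fast without touching the index again. A condensed sketch of the shared check, using the ReadBatch fields from this diff (helper name illustrative):

func checkCardinality(batch ReadBatch, limit int32) error {
	if limit <= 0 {
		return nil // a zero limit disables the check, matching the guards above
	}
	n := batch.Cardinality // non-zero only for cached overflow markers
	if n == 0 {
		n = int32(len(batch.Entries)) // fresh lookup result
	}
	if n > limit {
		return chunk.ErrCardinalityExceeded
	}
	return nil
}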
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.pb.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.pb.go
index 7aa411fe2cd18..761e22f4b80a2 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.pb.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.pb.go
@@ -3,17 +3,15 @@
package storage
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import _ "github.com/gogo/protobuf/gogoproto"
-
-import github_com_cortexproject_cortex_pkg_util_wire "github.com/cortexproject/cortex/pkg/util/wire"
-
-import strings "strings"
-import reflect "reflect"
-
-import io "io"
+import (
+ fmt "fmt"
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/gogo/protobuf/proto"
+ io "io"
+ math "math"
+ reflect "reflect"
+ strings "strings"
+)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@@ -27,14 +25,14 @@ var _ = math.Inf
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
type Entry struct {
- Column github_com_cortexproject_cortex_pkg_util_wire.Bytes `protobuf:"bytes,1,opt,name=Column,json=column,proto3,customtype=github.com/cortexproject/cortex/pkg/util/wire.Bytes" json:"Column"`
- Value github_com_cortexproject_cortex_pkg_util_wire.Bytes `protobuf:"bytes,2,opt,name=Value,json=value,proto3,customtype=github.com/cortexproject/cortex/pkg/util/wire.Bytes" json:"Value"`
+ Column Bytes `protobuf:"bytes,1,opt,name=Column,json=column,proto3,customtype=Bytes" json:"Column"`
+ Value Bytes `protobuf:"bytes,2,opt,name=Value,json=value,proto3,customtype=Bytes" json:"Value"`
}
func (m *Entry) Reset() { *m = Entry{} }
func (*Entry) ProtoMessage() {}
func (*Entry) Descriptor() ([]byte, []int) {
- return fileDescriptor_caching_index_client_2f4bf220288f700f, []int{0}
+ return fileDescriptor_a60039d4a2d816f6, []int{0}
}
func (m *Entry) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -51,8 +49,8 @@ func (m *Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return b[:n], nil
}
}
-func (dst *Entry) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Entry.Merge(dst, src)
+func (m *Entry) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Entry.Merge(m, src)
}
func (m *Entry) XXX_Size() int {
return m.Size()
@@ -64,16 +62,19 @@ func (m *Entry) XXX_DiscardUnknown() {
var xxx_messageInfo_Entry proto.InternalMessageInfo
type ReadBatch struct {
- Entries []Entry `protobuf:"bytes,1,rep,name=entries" json:"entries"`
+ Entries []Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries"`
Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
// The time at which the key expires.
Expiry int64 `protobuf:"varint,3,opt,name=expiry,proto3" json:"expiry,omitempty"`
+ // The number of entries; used for cardinality limiting.
+ // entries will be empty when this is set.
+ Cardinality int32 `protobuf:"varint,4,opt,name=cardinality,proto3" json:"cardinality,omitempty"`
}
func (m *ReadBatch) Reset() { *m = ReadBatch{} }
func (*ReadBatch) ProtoMessage() {}
func (*ReadBatch) Descriptor() ([]byte, []int) {
- return fileDescriptor_caching_index_client_2f4bf220288f700f, []int{1}
+ return fileDescriptor_a60039d4a2d816f6, []int{1}
}
func (m *ReadBatch) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -90,8 +91,8 @@ func (m *ReadBatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return b[:n], nil
}
}
-func (dst *ReadBatch) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ReadBatch.Merge(dst, src)
+func (m *ReadBatch) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ReadBatch.Merge(m, src)
}
func (m *ReadBatch) XXX_Size() int {
return m.Size()
@@ -123,10 +124,47 @@ func (m *ReadBatch) GetExpiry() int64 {
return 0
}
+func (m *ReadBatch) GetCardinality() int32 {
+ if m != nil {
+ return m.Cardinality
+ }
+ return 0
+}
+
func init() {
proto.RegisterType((*Entry)(nil), "storage.Entry")
proto.RegisterType((*ReadBatch)(nil), "storage.ReadBatch")
}
+
+func init() {
+ proto.RegisterFile("github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.proto", fileDescriptor_a60039d4a2d816f6)
+}
+
+var fileDescriptor_a60039d4a2d816f6 = []byte{
+ // 335 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xb1, 0x4e, 0xc3, 0x30,
+ 0x00, 0x44, 0x63, 0xd2, 0xa4, 0xaa, 0x0b, 0x08, 0x65, 0x40, 0x11, 0x83, 0x1b, 0x15, 0x21, 0x65,
+ 0x21, 0x91, 0x80, 0x2f, 0x08, 0x62, 0x63, 0x0a, 0x12, 0x6b, 0xe5, 0xba, 0x26, 0x31, 0x4d, 0xed,
+ 0xc8, 0x75, 0x50, 0xb3, 0xb1, 0xb1, 0xf2, 0x19, 0x7c, 0x4a, 0xc7, 0x8e, 0x15, 0x43, 0x45, 0xdd,
+ 0x85, 0xb1, 0x9f, 0x80, 0x6a, 0x82, 0xd4, 0x81, 0xed, 0x9e, 0xef, 0x7c, 0x67, 0x19, 0xde, 0x67,
+ 0x4c, 0xe5, 0xd5, 0x30, 0x22, 0x62, 0x12, 0x13, 0x21, 0x15, 0x9d, 0x95, 0x52, 0x3c, 0x53, 0xa2,
+ 0x1a, 0x8a, 0xcb, 0x71, 0x16, 0x93, 0xbc, 0xe2, 0xe3, 0x78, 0xaa, 0x84, 0xc4, 0x19, 0x8d, 0x09,
+ 0x26, 0x39, 0xe3, 0xd9, 0x80, 0xf1, 0x11, 0x9d, 0x0d, 0x48, 0xc1, 0x28, 0x57, 0x51, 0x29, 0x85,
+ 0x12, 0x5e, 0xbb, 0xc9, 0x9c, 0x5d, 0xee, 0xd5, 0x66, 0x22, 0x13, 0xb1, 0xf1, 0x87, 0xd5, 0x93,
+ 0x21, 0x03, 0x46, 0xfd, 0xde, 0xeb, 0x3f, 0x40, 0xe7, 0x8e, 0x2b, 0x59, 0x7b, 0x17, 0xd0, 0xbd,
+ 0x15, 0x45, 0x35, 0xe1, 0x3e, 0x08, 0x40, 0x78, 0x98, 0x1c, 0xcd, 0x57, 0x3d, 0xeb, 0x73, 0xd5,
+ 0x73, 0x92, 0x5a, 0xd1, 0x69, 0xea, 0x12, 0x63, 0x7a, 0xe7, 0xd0, 0x79, 0xc4, 0x45, 0x45, 0xfd,
+ 0x83, 0xff, 0x52, 0xce, 0xcb, 0xce, 0xeb, 0xbf, 0x01, 0xd8, 0x49, 0x29, 0x1e, 0x25, 0x58, 0x91,
+ 0xdc, 0x8b, 0x60, 0x9b, 0x72, 0x25, 0x19, 0x9d, 0xfa, 0x20, 0xb0, 0xc3, 0xee, 0xd5, 0x71, 0xd4,
+ 0x3c, 0x36, 0x32, 0xd3, 0x49, 0x6b, 0x57, 0x92, 0xfe, 0x85, 0xbc, 0x13, 0x68, 0x8f, 0x69, 0x6d,
+ 0x06, 0x3a, 0xe9, 0x4e, 0x7a, 0xa7, 0xd0, 0xa5, 0xb3, 0x92, 0xc9, 0xda, 0xb7, 0x03, 0x10, 0xda,
+ 0x69, 0x43, 0x5e, 0x00, 0xbb, 0x04, 0xcb, 0x11, 0xe3, 0xb8, 0x60, 0xaa, 0xf6, 0x5b, 0x01, 0x08,
+ 0x9d, 0x74, 0xff, 0x28, 0xb9, 0x59, 0xac, 0x91, 0xb5, 0x5c, 0x23, 0x6b, 0xbb, 0x46, 0xe0, 0x55,
+ 0x23, 0xf0, 0xa1, 0x11, 0x98, 0x6b, 0x04, 0x16, 0x1a, 0x81, 0x2f, 0x8d, 0xc0, 0xb7, 0x46, 0xd6,
+ 0x56, 0x23, 0xf0, 0xbe, 0x41, 0xd6, 0x62, 0x83, 0xac, 0xe5, 0x06, 0x59, 0x43, 0xd7, 0xfc, 0xcd,
+ 0xf5, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x4b, 0xd2, 0x5d, 0xd9, 0xa3, 0x01, 0x00, 0x00,
+}
+
func (this *Entry) Equal(that interface{}) bool {
if that == nil {
return this == nil
@@ -187,6 +225,9 @@ func (this *ReadBatch) Equal(that interface{}) bool {
if this.Expiry != that1.Expiry {
return false
}
+ if this.Cardinality != that1.Cardinality {
+ return false
+ }
return true
}
func (this *Entry) GoString() string {
@@ -204,7 +245,7 @@ func (this *ReadBatch) GoString() string {
if this == nil {
return "nil"
}
- s := make([]string, 0, 7)
+ s := make([]string, 0, 8)
s = append(s, "&storage.ReadBatch{")
if this.Entries != nil {
vs := make([]*Entry, len(this.Entries))
@@ -215,6 +256,7 @@ func (this *ReadBatch) GoString() string {
}
s = append(s, "Key: "+fmt.Sprintf("%#v", this.Key)+",\n")
s = append(s, "Expiry: "+fmt.Sprintf("%#v", this.Expiry)+",\n")
+ s = append(s, "Cardinality: "+fmt.Sprintf("%#v", this.Cardinality)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
@@ -244,17 +286,17 @@ func (m *Entry) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa
i++
i = encodeVarintCachingIndexClient(dAtA, i, uint64(m.Column.Size()))
- n1, err := m.Column.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ n1, err1 := m.Column.MarshalTo(dAtA[i:])
+ if err1 != nil {
+ return 0, err1
}
i += n1
dAtA[i] = 0x12
i++
i = encodeVarintCachingIndexClient(dAtA, i, uint64(m.Value.Size()))
- n2, err := m.Value.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ n2, err2 := m.Value.MarshalTo(dAtA[i:])
+ if err2 != nil {
+ return 0, err2
}
i += n2
return i, nil
@@ -298,6 +340,11 @@ func (m *ReadBatch) MarshalTo(dAtA []byte) (int, error) {
i++
i = encodeVarintCachingIndexClient(dAtA, i, uint64(m.Expiry))
}
+ if m.Cardinality != 0 {
+ dAtA[i] = 0x20
+ i++
+ i = encodeVarintCachingIndexClient(dAtA, i, uint64(m.Cardinality))
+ }
return i, nil
}
@@ -342,6 +389,9 @@ func (m *ReadBatch) Size() (n int) {
if m.Expiry != 0 {
n += 1 + sovCachingIndexClient(uint64(m.Expiry))
}
+ if m.Cardinality != 0 {
+ n += 1 + sovCachingIndexClient(uint64(m.Cardinality))
+ }
return n
}
@@ -373,10 +423,16 @@ func (this *ReadBatch) String() string {
if this == nil {
return "nil"
}
+ repeatedStringForEntries := "[]Entry{"
+ for _, f := range this.Entries {
+ repeatedStringForEntries += strings.Replace(strings.Replace(f.String(), "Entry", "Entry", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForEntries += "}"
s := strings.Join([]string{`&ReadBatch{`,
- `Entries:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Entries), "Entry", "Entry", 1), `&`, ``, 1) + `,`,
+ `Entries:` + repeatedStringForEntries + `,`,
`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
`Expiry:` + fmt.Sprintf("%v", this.Expiry) + `,`,
+ `Cardinality:` + fmt.Sprintf("%v", this.Cardinality) + `,`,
`}`,
}, "")
return s
@@ -404,7 +460,7 @@ func (m *Entry) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -432,7 +488,7 @@ func (m *Entry) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- byteLen |= (int(b) & 0x7F) << shift
+ byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -441,6 +497,9 @@ func (m *Entry) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthCachingIndexClient
}
postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthCachingIndexClient
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -462,7 +521,7 @@ func (m *Entry) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- byteLen |= (int(b) & 0x7F) << shift
+ byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -471,6 +530,9 @@ func (m *Entry) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthCachingIndexClient
}
postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthCachingIndexClient
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -487,6 +549,9 @@ func (m *Entry) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthCachingIndexClient
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthCachingIndexClient
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
@@ -514,7 +579,7 @@ func (m *ReadBatch) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -542,7 +607,7 @@ func (m *ReadBatch) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= (int(b) & 0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -551,6 +616,9 @@ func (m *ReadBatch) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthCachingIndexClient
}
postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthCachingIndexClient
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -573,7 +641,7 @@ func (m *ReadBatch) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -583,6 +651,9 @@ func (m *ReadBatch) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthCachingIndexClient
}
postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthCachingIndexClient
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -602,7 +673,26 @@ func (m *ReadBatch) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.Expiry |= (int64(b) & 0x7F) << shift
+ m.Expiry |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Cardinality", wireType)
+ }
+ m.Cardinality = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCachingIndexClient
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Cardinality |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -616,6 +706,9 @@ func (m *ReadBatch) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthCachingIndexClient
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthCachingIndexClient
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
@@ -682,10 +775,13 @@ func skipCachingIndexClient(dAtA []byte) (n int, err error) {
break
}
}
- iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthCachingIndexClient
}
+ iNdEx += length
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthCachingIndexClient
+ }
return iNdEx, nil
case 3:
for {
@@ -714,6 +810,9 @@ func skipCachingIndexClient(dAtA []byte) (n int, err error) {
return 0, err
}
iNdEx = start + next
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthCachingIndexClient
+ }
}
return iNdEx, nil
case 4:
@@ -732,32 +831,3 @@ var (
ErrInvalidLengthCachingIndexClient = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowCachingIndexClient = fmt.Errorf("proto: integer overflow")
)
-
-func init() {
- proto.RegisterFile("github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.proto", fileDescriptor_caching_index_client_2f4bf220288f700f)
-}
-
-var fileDescriptor_caching_index_client_2f4bf220288f700f = []byte{
- // 331 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x90, 0xb1, 0x4e, 0xeb, 0x30,
- 0x14, 0x86, 0xe3, 0x9b, 0xdb, 0x54, 0x35, 0x08, 0xa1, 0x0c, 0x28, 0x62, 0x70, 0xab, 0x4e, 0x5d,
- 0x88, 0x25, 0xca, 0xc6, 0x16, 0xc4, 0xc6, 0x42, 0x90, 0x58, 0xab, 0xd4, 0x3d, 0x24, 0xa6, 0xa9,
- 0x1d, 0xb9, 0x0e, 0x34, 0x1b, 0x8f, 0xc0, 0x63, 0xb0, 0xf1, 0x1a, 0x1d, 0x3b, 0x56, 0x0c, 0x15,
- 0x75, 0x17, 0xc6, 0x3e, 0x02, 0xaa, 0x09, 0x12, 0x23, 0x12, 0xdb, 0xf9, 0xe4, 0xe3, 0xcf, 0xbf,
- 0x7f, 0x7c, 0x95, 0x72, 0x9d, 0x95, 0xc3, 0x90, 0xc9, 0x09, 0x65, 0x52, 0x69, 0x98, 0x15, 0x4a,
- 0xde, 0x03, 0xd3, 0x35, 0xd1, 0x62, 0x9c, 0x52, 0x96, 0x95, 0x62, 0x4c, 0xa7, 0x5a, 0xaa, 0x24,
- 0x05, 0xca, 0x12, 0x96, 0x71, 0x91, 0x0e, 0xb8, 0x18, 0xc1, 0x6c, 0xc0, 0x72, 0x0e, 0x42, 0x87,
- 0x85, 0x92, 0x5a, 0xfa, 0xcd, 0x7a, 0xe7, 0xf8, 0xe4, 0x87, 0x36, 0x95, 0xa9, 0xa4, 0xf6, 0x7c,
- 0x58, 0xde, 0x59, 0xb2, 0x60, 0xa7, 0xaf, 0x7b, 0xdd, 0x57, 0x84, 0x1b, 0x97, 0x42, 0xab, 0xca,
- 0xbf, 0xc1, 0xde, 0x85, 0xcc, 0xcb, 0x89, 0x08, 0x50, 0x07, 0xf5, 0xf6, 0xa3, 0xf3, 0xf9, 0xaa,
- 0xed, 0xbc, 0xad, 0xda, 0xfd, 0xdf, 0xe4, 0x2c, 0x35, 0xcf, 0xe9, 0x23, 0x57, 0x10, 0x46, 0x95,
- 0x86, 0x69, 0xec, 0x31, 0xab, 0xf2, 0xaf, 0x71, 0xe3, 0x36, 0xc9, 0x4b, 0x08, 0xfe, 0xfd, 0xdd,
- 0xd9, 0x78, 0xd8, 0x99, 0xba, 0x80, 0x5b, 0x31, 0x24, 0xa3, 0x28, 0xd1, 0x2c, 0xf3, 0x43, 0xdc,
- 0x04, 0xa1, 0x15, 0x87, 0x69, 0x80, 0x3a, 0x6e, 0x6f, 0xef, 0xf4, 0x20, 0xac, 0x8b, 0x08, 0xed,
- 0xaf, 0xa2, 0xff, 0xbb, 0x17, 0xe3, 0xef, 0x25, 0xff, 0x10, 0xbb, 0x63, 0xa8, 0x6c, 0x9a, 0x56,
- 0xbc, 0x1b, 0xfd, 0x23, 0xec, 0xc1, 0xac, 0xe0, 0xaa, 0x0a, 0xdc, 0x0e, 0xea, 0xb9, 0x71, 0x4d,
- 0xd1, 0xd9, 0x62, 0x4d, 0x9c, 0xe5, 0x9a, 0x38, 0xdb, 0x35, 0x41, 0x4f, 0x86, 0xa0, 0x17, 0x43,
- 0xd0, 0xdc, 0x10, 0xb4, 0x30, 0x04, 0xbd, 0x1b, 0x82, 0x3e, 0x0c, 0x71, 0xb6, 0x86, 0xa0, 0xe7,
- 0x0d, 0x71, 0x16, 0x1b, 0xe2, 0x2c, 0x37, 0xc4, 0x19, 0x7a, 0xb6, 0xd5, 0xfe, 0x67, 0x00, 0x00,
- 0x00, 0xff, 0xff, 0x95, 0x6d, 0x6d, 0xd0, 0xdd, 0x01, 0x00, 0x00,
-}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.proto b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.proto
index 1c22c94c8ab51..22a9d01ffaff4 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.proto
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.proto
@@ -8,8 +8,8 @@ option (gogoproto.marshaler_all) = true;
option (gogoproto.unmarshaler_all) = true;
message Entry {
- bytes Column = 1 [(gogoproto.customtype) = "github.com/cortexproject/cortex/pkg/util/wire.Bytes", (gogoproto.nullable) = false];
- bytes Value = 2 [(gogoproto.customtype) = "github.com/cortexproject/cortex/pkg/util/wire.Bytes", (gogoproto.nullable) = false];
+ bytes Column = 1 [(gogoproto.customtype) = "Bytes", (gogoproto.nullable) = false];
+ bytes Value = 2 [(gogoproto.customtype) = "Bytes", (gogoproto.nullable) = false];
}
message ReadBatch {
@@ -18,4 +18,8 @@ message ReadBatch {
// The time at which the key expires.
int64 expiry = 3;
+
+ // The number of entries; used for cardinality limiting.
+ // entries will be empty when this is set.
+ int32 cardinality = 4;
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go
index d45ed09de02e7..a0fd1b41406ac 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go
@@ -98,7 +98,7 @@ func NewStore(cfg Config, storeCfg chunk.StoreConfig, schemaCfg chunk.SchemaConf
if err != nil {
return nil, errors.Wrap(err, "error creating index client")
}
- index = newCachingIndexClient(index, tieredCache, cfg.IndexCacheValidity)
+ index = newCachingIndexClient(index, tieredCache, cfg.IndexCacheValidity, limits)
objectStoreType := s.ObjectType
if objectStoreType == "" {
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/compat.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/compat.go
index f177eefabf318..f95d1e27877b1 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/compat.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/compat.go
@@ -1,11 +1,11 @@
package client
import (
- "bytes"
stdjson "encoding/json"
"fmt"
"sort"
"strconv"
+ "strings"
"time"
"unsafe"
@@ -16,22 +16,6 @@ import (
var json = jsoniter.ConfigCompatibleWithStandardLibrary
-// FromWriteRequest converts a WriteRequest proto into an array of samples.
-func FromWriteRequest(req *WriteRequest) []model.Sample {
- // Just guess that there is one sample per timeseries
- samples := make([]model.Sample, 0, len(req.Timeseries))
- for _, ts := range req.Timeseries {
- for _, s := range ts.Samples {
- samples = append(samples, model.Sample{
- Metric: FromLabelPairs(ts.Labels),
- Value: model.SampleValue(s.Value),
- Timestamp: model.Time(s.TimestampMs),
- })
- }
- }
- return samples
-}
-
// ToWriteRequest converts an array of samples into a WriteRequest proto.
func ToWriteRequest(samples []model.Sample, source WriteRequest_SourceEnum) *WriteRequest {
req := &WriteRequest{
@@ -42,7 +26,7 @@ func ToWriteRequest(samples []model.Sample, source WriteRequest_SourceEnum) *Wri
for _, s := range samples {
ts := PreallocTimeseries{
TimeSeries: TimeSeries{
- Labels: ToLabelPairs(s.Metric),
+ Labels: FromMetricsToLabelAdapters(s.Metric),
Samples: []Sample{
{
Value: float64(s.Value),
@@ -87,7 +71,7 @@ func ToQueryResponse(matrix model.Matrix) *QueryResponse {
resp := &QueryResponse{}
for _, ss := range matrix {
ts := TimeSeries{
- Labels: ToLabelPairs(ss.Metric),
+ Labels: FromMetricsToLabelAdapters(ss.Metric),
Samples: make([]Sample, 0, len(ss.Values)),
}
for _, s := range ss.Values {
@@ -106,7 +90,7 @@ func FromQueryResponse(resp *QueryResponse) model.Matrix {
m := make(model.Matrix, 0, len(resp.Timeseries))
for _, ts := range resp.Timeseries {
var ss model.SampleStream
- ss.Metric = FromLabelPairs(ts.Labels)
+ ss.Metric = FromLabelAdaptersToMetric(ts.Labels)
ss.Values = make([]model.SamplePair, 0, len(ts.Samples))
for _, s := range ts.Samples {
ss.Values = append(ss.Values, model.SamplePair{
@@ -153,7 +137,7 @@ func FromMetricsForLabelMatchersRequest(req *MetricsForLabelMatchersRequest) (mo
func FromMetricsForLabelMatchersResponse(resp *MetricsForLabelMatchersResponse) []model.Metric {
metrics := []model.Metric{}
for _, m := range resp.Metric {
- metrics = append(metrics, FromLabelPairs(m.Labels))
+ metrics = append(metrics, FromLabelAdaptersToMetric(m.Labels))
}
return metrics
}
@@ -208,70 +192,63 @@ func fromLabelMatchers(matchers []*LabelMatcher) ([]*labels.Matcher, error) {
return result, nil
}
-// ToLabelPairs builds a []LabelPair from a model.Metric
-func ToLabelPairs(metric model.Metric) []LabelPair {
- labelPairs := make([]LabelPair, 0, len(metric))
- for k, v := range metric {
- labelPairs = append(labelPairs, LabelPair{
- Name: []byte(k),
- Value: []byte(v),
- })
- }
- sort.Sort(byLabel(labelPairs)) // The labels should be sorted upon initialisation.
- return labelPairs
+// FromLabelAdaptersToLabels casts []LabelAdapter to labels.Labels.
+// It uses unsafe, but as LabelAdapter == labels.Label this should be safe.
+// This allows us to use labels.Labels directly in protos.
+func FromLabelAdaptersToLabels(ls []LabelAdapter) labels.Labels {
+ return *(*labels.Labels)(unsafe.Pointer(&ls))
}
-type byLabel []LabelPair
-
-func (s byLabel) Len() int { return len(s) }
-func (s byLabel) Less(i, j int) bool { return bytes.Compare(s[i].Name, s[j].Name) < 0 }
-func (s byLabel) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-
-// FromLabelPairs unpack a []LabelPair to a model.Metric
-func FromLabelPairs(labelPairs []LabelPair) model.Metric {
- metric := make(model.Metric, len(labelPairs))
- for _, l := range labelPairs {
- metric[model.LabelName(l.Name)] = model.LabelValue(l.Value)
- }
- return metric
+// FromLabelsToLabelAdapaters casts labels.Labels to []LabelAdapter.
+// It uses unsafe, but as LabelAdapter == labels.Label this should be safe.
+// This allows us to use labels.Labels directly in protos.
+func FromLabelsToLabelAdapaters(ls labels.Labels) []LabelAdapter {
+ return *(*[]LabelAdapter)(unsafe.Pointer(&ls))
}
-// FromLabelPairsToLabels unpack a []LabelPair to a labels.Labels
-func FromLabelPairsToLabels(labelPairs []LabelPair) labels.Labels {
- ls := make(labels.Labels, 0, len(labelPairs))
- for _, l := range labelPairs {
- ls = append(ls, labels.Label{
- Name: string(l.Name),
- Value: string(l.Value),
- })
+// FromLabelAdaptersToMetric converts []LabelAdapter to a model.Metric.
+// Don't do this on any performance sensitive paths.
+func FromLabelAdaptersToMetric(ls []LabelAdapter) model.Metric {
+ result := make(model.Metric, len(ls))
+ for _, l := range ls {
+ result[model.LabelName(l.Name)] = model.LabelValue(l.Value)
}
- return ls
+ return result
}
-// FromLabelsToLabelPairs converts labels.Labels to []LabelPair
-func FromLabelsToLabelPairs(s labels.Labels) []LabelPair {
- labelPairs := make([]LabelPair, 0, len(s))
- for _, v := range s {
- labelPairs = append(labelPairs, LabelPair{
- Name: []byte(v.Name),
- Value: []byte(v.Value),
+// FromMetricsToLabelAdapters converts model.Metric to []LabelAdapter.
+// Don't do this on any performance sensitive paths.
+// The result is sorted.
+func FromMetricsToLabelAdapters(metric model.Metric) []LabelAdapter {
+ result := make([]LabelAdapter, 0, len(metric))
+ for k, v := range metric {
+ result = append(result, LabelAdapter{
+ Name: string(k),
+ Value: string(v),
})
}
- return labelPairs // note already sorted
+ sort.Sort(byLabel(result)) // The labels should be sorted upon initialisation.
+ return result
}
+type byLabel []LabelAdapter
+
+func (s byLabel) Len() int { return len(s) }
+func (s byLabel) Less(i, j int) bool { return strings.Compare(s[i].Name, s[j].Name) < 0 }
+func (s byLabel) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
// FastFingerprint runs the same algorithm as Prometheus labelSetToFastFingerprint()
-func FastFingerprint(labelPairs []LabelPair) model.Fingerprint {
- if len(labelPairs) == 0 {
+func FastFingerprint(ls []LabelAdapter) model.Fingerprint {
+ if len(ls) == 0 {
return model.Metric(nil).FastFingerprint()
}
var result uint64
- for _, pair := range labelPairs {
+ for _, l := range ls {
sum := hashNew()
- sum = hashAdd(sum, pair.Name)
+ sum = hashAdd(sum, l.Name)
sum = hashAddByte(sum, model.SeparatorByte)
- sum = hashAdd(sum, pair.Value)
+ sum = hashAdd(sum, l.Value)
result ^= sum
}
return model.Fingerprint(result)
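One property of the rewritten FastFingerprint worth noting: the per-label FNV sums are combined with XOR, so the resulting fingerprint does not depend on label order. A tiny illustration against the function above (the helper name is ours, not upstream's):

// fingerprintIsOrderIndependent always returns true, because XOR is
// commutative; it exists purely to illustrate the property.
func fingerprintIsOrderIndependent(ls []client.LabelAdapter) bool {
	reversed := make([]client.LabelAdapter, len(ls))
	for i, l := range ls {
		reversed[len(ls)-1-i] = l
	}
	return client.FastFingerprint(ls) == client.FastFingerprint(reversed)
}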
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.pb.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.pb.go
index a7d5490231d1a..b7a9689fb025b 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.pb.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.pb.go
@@ -3,29 +3,21 @@
package client
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import _ "github.com/gogo/protobuf/gogoproto"
-
-import github_com_cortexproject_cortex_pkg_util_wire "github.com/cortexproject/cortex/pkg/util/wire"
-
-import strconv "strconv"
-
-import bytes "bytes"
-
-import strings "strings"
-import reflect "reflect"
-
import (
- context "golang.org/x/net/context"
+ bytes "bytes"
+ context "context"
+ encoding_binary "encoding/binary"
+ fmt "fmt"
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/gogo/protobuf/proto"
grpc "google.golang.org/grpc"
+ io "io"
+ math "math"
+ reflect "reflect"
+ strconv "strconv"
+ strings "strings"
)
-import encoding_binary "encoding/binary"
-
-import io "io"
-
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
@@ -52,6 +44,7 @@ var MatchType_name = map[int32]string{
2: "REGEX_MATCH",
3: "REGEX_NO_MATCH",
}
+
var MatchType_value = map[string]int32{
"EQUAL": 0,
"NOT_EQUAL": 1,
@@ -60,7 +53,7 @@ var MatchType_value = map[string]int32{
}
func (MatchType) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_cortex_dc30309a17c87a98, []int{0}
+ return fileDescriptor_db0f8a1e534b119a, []int{0}
}
type WriteRequest_SourceEnum int32
@@ -74,24 +67,25 @@ var WriteRequest_SourceEnum_name = map[int32]string{
0: "API",
1: "RULE",
}
+
var WriteRequest_SourceEnum_value = map[string]int32{
"API": 0,
"RULE": 1,
}
func (WriteRequest_SourceEnum) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_cortex_dc30309a17c87a98, []int{0, 0}
+ return fileDescriptor_db0f8a1e534b119a, []int{0, 0}
}
type WriteRequest struct {
- Timeseries []PreallocTimeseries `protobuf:"bytes,1,rep,name=timeseries,customtype=PreallocTimeseries" json:"timeseries"`
+ Timeseries []PreallocTimeseries `protobuf:"bytes,1,rep,name=timeseries,proto3,customtype=PreallocTimeseries" json:"timeseries"`
Source WriteRequest_SourceEnum `protobuf:"varint,2,opt,name=Source,json=source,proto3,enum=cortex.WriteRequest_SourceEnum" json:"Source,omitempty"`
}
func (m *WriteRequest) Reset() { *m = WriteRequest{} }
func (*WriteRequest) ProtoMessage() {}
func (*WriteRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_cortex_dc30309a17c87a98, []int{0}
+ return fileDescriptor_db0f8a1e534b119a, []int{0}
}
func (m *WriteRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -108,8 +102,8 @@ func (m *WriteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
return b[:n], nil
}
}
-func (dst *WriteRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_WriteRequest.Merge(dst, src)
+func (m *WriteRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_WriteRequest.Merge(m, src)
}
func (m *WriteRequest) XXX_Size() int {
return m.Size()
@@ -133,7 +127,7 @@ type WriteResponse struct {
func (m *WriteResponse) Reset() { *m = WriteResponse{} }
func (*WriteResponse) ProtoMessage() {}
func (*WriteResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_cortex_dc30309a17c87a98, []int{1}
+ return fileDescriptor_db0f8a1e534b119a, []int{1}
}
func (m *WriteResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -150,8 +144,8 @@ func (m *WriteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error
return b[:n], nil
}
}
-func (dst *WriteResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_WriteResponse.Merge(dst, src)
+func (m *WriteResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_WriteResponse.Merge(m, src)
}
func (m *WriteResponse) XXX_Size() int {
return m.Size()
@@ -163,13 +157,13 @@ func (m *WriteResponse) XXX_DiscardUnknown() {
var xxx_messageInfo_WriteResponse proto.InternalMessageInfo
type ReadRequest struct {
- Queries []*QueryRequest `protobuf:"bytes,1,rep,name=queries" json:"queries,omitempty"`
+ Queries []*QueryRequest `protobuf:"bytes,1,rep,name=queries,proto3" json:"queries,omitempty"`
}
func (m *ReadRequest) Reset() { *m = ReadRequest{} }
func (*ReadRequest) ProtoMessage() {}
func (*ReadRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_cortex_dc30309a17c87a98, []int{2}
+ return fileDescriptor_db0f8a1e534b119a, []int{2}
}
func (m *ReadRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -186,8 +180,8 @@ func (m *ReadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
return b[:n], nil
}
}
-func (dst *ReadRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ReadRequest.Merge(dst, src)
+func (m *ReadRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ReadRequest.Merge(m, src)
}
func (m *ReadRequest) XXX_Size() int {
return m.Size()
@@ -206,13 +200,13 @@ func (m *ReadRequest) GetQueries() []*QueryRequest {
}
type ReadResponse struct {
- Results []*QueryResponse `protobuf:"bytes,1,rep,name=results" json:"results,omitempty"`
+ Results []*QueryResponse `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"`
}
func (m *ReadResponse) Reset() { *m = ReadResponse{} }
func (*ReadResponse) ProtoMessage() {}
func (*ReadResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_cortex_dc30309a17c87a98, []int{3}
+ return fileDescriptor_db0f8a1e534b119a, []int{3}
}
func (m *ReadResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -229,8 +223,8 @@ func (m *ReadResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
return b[:n], nil
}
}
-func (dst *ReadResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ReadResponse.Merge(dst, src)
+func (m *ReadResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ReadResponse.Merge(m, src)
}
func (m *ReadResponse) XXX_Size() int {
return m.Size()
@@ -251,13 +245,13 @@ func (m *ReadResponse) GetResults() []*QueryResponse {
type QueryRequest struct {
StartTimestampMs int64 `protobuf:"varint,1,opt,name=start_timestamp_ms,json=startTimestampMs,proto3" json:"start_timestamp_ms,omitempty"`
EndTimestampMs int64 `protobuf:"varint,2,opt,name=end_timestamp_ms,json=endTimestampMs,proto3" json:"end_timestamp_ms,omitempty"`
- Matchers []*LabelMatcher `protobuf:"bytes,3,rep,name=matchers" json:"matchers,omitempty"`
+ Matchers []*LabelMatcher `protobuf:"bytes,3,rep,name=matchers,proto3" json:"matchers,omitempty"`
}
func (m *QueryRequest) Reset() { *m = QueryRequest{} }
func (*QueryRequest) ProtoMessage() {}
func (*QueryRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_cortex_dc30309a17c87a98, []int{4}
+ return fileDescriptor_db0f8a1e534b119a, []int{4}
}
func (m *QueryRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -274,8 +268,8 @@ func (m *QueryRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
return b[:n], nil
}
}
-func (dst *QueryRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_QueryRequest.Merge(dst, src)
+func (m *QueryRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryRequest.Merge(m, src)
}
func (m *QueryRequest) XXX_Size() int {
return m.Size()
@@ -308,13 +302,13 @@ func (m *QueryRequest) GetMatchers() []*LabelMatcher {
}
type QueryResponse struct {
- Timeseries []TimeSeries `protobuf:"bytes,1,rep,name=timeseries" json:"timeseries"`
+ Timeseries []TimeSeries `protobuf:"bytes,1,rep,name=timeseries,proto3" json:"timeseries"`
}
func (m *QueryResponse) Reset() { *m = QueryResponse{} }
func (*QueryResponse) ProtoMessage() {}
func (*QueryResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_cortex_dc30309a17c87a98, []int{5}
+ return fileDescriptor_db0f8a1e534b119a, []int{5}
}
func (m *QueryResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -331,8 +325,8 @@ func (m *QueryResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error
return b[:n], nil
}
}
-func (dst *QueryResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_QueryResponse.Merge(dst, src)
+func (m *QueryResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryResponse.Merge(m, src)
}
func (m *QueryResponse) XXX_Size() int {
return m.Size()
@@ -352,13 +346,13 @@ func (m *QueryResponse) GetTimeseries() []TimeSeries {
// QueryStreamResponse contains a batch of timeseries chunks.
type QueryStreamResponse struct {
- Timeseries []TimeSeriesChunk `protobuf:"bytes,1,rep,name=timeseries" json:"timeseries"`
+ Timeseries []TimeSeriesChunk `protobuf:"bytes,1,rep,name=timeseries,proto3" json:"timeseries"`
}
func (m *QueryStreamResponse) Reset() { *m = QueryStreamResponse{} }
func (*QueryStreamResponse) ProtoMessage() {}
func (*QueryStreamResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_cortex_dc30309a17c87a98, []int{6}
+ return fileDescriptor_db0f8a1e534b119a, []int{6}
}
func (m *QueryStreamResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -375,8 +369,8 @@ func (m *QueryStreamResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte,
return b[:n], nil
}
}
-func (dst *QueryStreamResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_QueryStreamResponse.Merge(dst, src)
+func (m *QueryStreamResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryStreamResponse.Merge(m, src)
}
func (m *QueryStreamResponse) XXX_Size() int {
return m.Size()
@@ -401,7 +395,7 @@ type LabelValuesRequest struct {
func (m *LabelValuesRequest) Reset() { *m = LabelValuesRequest{} }
func (*LabelValuesRequest) ProtoMessage() {}
func (*LabelValuesRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_cortex_dc30309a17c87a98, []int{7}
+ return fileDescriptor_db0f8a1e534b119a, []int{7}
}
func (m *LabelValuesRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -418,8 +412,8 @@ func (m *LabelValuesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte,
return b[:n], nil
}
}
-func (dst *LabelValuesRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LabelValuesRequest.Merge(dst, src)
+func (m *LabelValuesRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LabelValuesRequest.Merge(m, src)
}
func (m *LabelValuesRequest) XXX_Size() int {
return m.Size()
@@ -438,13 +432,13 @@ func (m *LabelValuesRequest) GetLabelName() string {
}
type LabelValuesResponse struct {
- LabelValues []string `protobuf:"bytes,1,rep,name=label_values,json=labelValues" json:"label_values,omitempty"`
+ LabelValues []string `protobuf:"bytes,1,rep,name=label_values,json=labelValues,proto3" json:"label_values,omitempty"`
}
func (m *LabelValuesResponse) Reset() { *m = LabelValuesResponse{} }
func (*LabelValuesResponse) ProtoMessage() {}
func (*LabelValuesResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_cortex_dc30309a17c87a98, []int{8}
+ return fileDescriptor_db0f8a1e534b119a, []int{8}
}
func (m *LabelValuesResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -461,8 +455,8 @@ func (m *LabelValuesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte,
return b[:n], nil
}
}
-func (dst *LabelValuesResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LabelValuesResponse.Merge(dst, src)
+func (m *LabelValuesResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LabelValuesResponse.Merge(m, src)
}
func (m *LabelValuesResponse) XXX_Size() int {
return m.Size()
@@ -486,7 +480,7 @@ type LabelNamesRequest struct {
func (m *LabelNamesRequest) Reset() { *m = LabelNamesRequest{} }
func (*LabelNamesRequest) ProtoMessage() {}
func (*LabelNamesRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_cortex_dc30309a17c87a98, []int{9}
+ return fileDescriptor_db0f8a1e534b119a, []int{9}
}
func (m *LabelNamesRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -503,8 +497,8 @@ func (m *LabelNamesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, e
return b[:n], nil
}
}
-func (dst *LabelNamesRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LabelNamesRequest.Merge(dst, src)
+func (m *LabelNamesRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LabelNamesRequest.Merge(m, src)
}
func (m *LabelNamesRequest) XXX_Size() int {
return m.Size()
@@ -516,13 +510,13 @@ func (m *LabelNamesRequest) XXX_DiscardUnknown() {
var xxx_messageInfo_LabelNamesRequest proto.InternalMessageInfo
type LabelNamesResponse struct {
- LabelNames []string `protobuf:"bytes,1,rep,name=label_names,json=labelNames" json:"label_names,omitempty"`
+ LabelNames []string `protobuf:"bytes,1,rep,name=label_names,json=labelNames,proto3" json:"label_names,omitempty"`
}
func (m *LabelNamesResponse) Reset() { *m = LabelNamesResponse{} }
func (*LabelNamesResponse) ProtoMessage() {}
func (*LabelNamesResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_cortex_dc30309a17c87a98, []int{10}
+ return fileDescriptor_db0f8a1e534b119a, []int{10}
}
func (m *LabelNamesResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -539,8 +533,8 @@ func (m *LabelNamesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte,
return b[:n], nil
}
}
-func (dst *LabelNamesResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LabelNamesResponse.Merge(dst, src)
+func (m *LabelNamesResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LabelNamesResponse.Merge(m, src)
}
func (m *LabelNamesResponse) XXX_Size() int {
return m.Size()
@@ -564,7 +558,7 @@ type UserStatsRequest struct {
func (m *UserStatsRequest) Reset() { *m = UserStatsRequest{} }
func (*UserStatsRequest) ProtoMessage() {}
func (*UserStatsRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_cortex_dc30309a17c87a98, []int{11}
+ return fileDescriptor_db0f8a1e534b119a, []int{11}
}
func (m *UserStatsRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -581,8 +575,8 @@ func (m *UserStatsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, er
return b[:n], nil
}
}
-func (dst *UserStatsRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_UserStatsRequest.Merge(dst, src)
+func (m *UserStatsRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UserStatsRequest.Merge(m, src)
}
func (m *UserStatsRequest) XXX_Size() int {
return m.Size()
@@ -603,7 +597,7 @@ type UserStatsResponse struct {
func (m *UserStatsResponse) Reset() { *m = UserStatsResponse{} }
func (*UserStatsResponse) ProtoMessage() {}
func (*UserStatsResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_cortex_dc30309a17c87a98, []int{12}
+ return fileDescriptor_db0f8a1e534b119a, []int{12}
}
func (m *UserStatsResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -620,8 +614,8 @@ func (m *UserStatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, e
return b[:n], nil
}
}
-func (dst *UserStatsResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_UserStatsResponse.Merge(dst, src)
+func (m *UserStatsResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UserStatsResponse.Merge(m, src)
}
func (m *UserStatsResponse) XXX_Size() int {
return m.Size()
@@ -662,13 +656,13 @@ func (m *UserStatsResponse) GetRuleIngestionRate() float64 {
type UserIDStatsResponse struct {
UserId string `protobuf:"bytes,1,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"`
- Data *UserStatsResponse `protobuf:"bytes,2,opt,name=data" json:"data,omitempty"`
+ Data *UserStatsResponse `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
}
func (m *UserIDStatsResponse) Reset() { *m = UserIDStatsResponse{} }
func (*UserIDStatsResponse) ProtoMessage() {}
func (*UserIDStatsResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_cortex_dc30309a17c87a98, []int{13}
+ return fileDescriptor_db0f8a1e534b119a, []int{13}
}
func (m *UserIDStatsResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -685,8 +679,8 @@ func (m *UserIDStatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte,
return b[:n], nil
}
}
-func (dst *UserIDStatsResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_UserIDStatsResponse.Merge(dst, src)
+func (m *UserIDStatsResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UserIDStatsResponse.Merge(m, src)
}
func (m *UserIDStatsResponse) XXX_Size() int {
return m.Size()
@@ -712,13 +706,13 @@ func (m *UserIDStatsResponse) GetData() *UserStatsResponse {
}
type UsersStatsResponse struct {
- Stats []*UserIDStatsResponse `protobuf:"bytes,1,rep,name=stats" json:"stats,omitempty"`
+ Stats []*UserIDStatsResponse `protobuf:"bytes,1,rep,name=stats,proto3" json:"stats,omitempty"`
}
func (m *UsersStatsResponse) Reset() { *m = UsersStatsResponse{} }
func (*UsersStatsResponse) ProtoMessage() {}
func (*UsersStatsResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_cortex_dc30309a17c87a98, []int{14}
+ return fileDescriptor_db0f8a1e534b119a, []int{14}
}
func (m *UsersStatsResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -735,8 +729,8 @@ func (m *UsersStatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte,
return b[:n], nil
}
}
-func (dst *UsersStatsResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_UsersStatsResponse.Merge(dst, src)
+func (m *UsersStatsResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UsersStatsResponse.Merge(m, src)
}
func (m *UsersStatsResponse) XXX_Size() int {
return m.Size()
@@ -757,13 +751,13 @@ func (m *UsersStatsResponse) GetStats() []*UserIDStatsResponse {
type MetricsForLabelMatchersRequest struct {
StartTimestampMs int64 `protobuf:"varint,1,opt,name=start_timestamp_ms,json=startTimestampMs,proto3" json:"start_timestamp_ms,omitempty"`
EndTimestampMs int64 `protobuf:"varint,2,opt,name=end_timestamp_ms,json=endTimestampMs,proto3" json:"end_timestamp_ms,omitempty"`
- MatchersSet []*LabelMatchers `protobuf:"bytes,3,rep,name=matchers_set,json=matchersSet" json:"matchers_set,omitempty"`
+ MatchersSet []*LabelMatchers `protobuf:"bytes,3,rep,name=matchers_set,json=matchersSet,proto3" json:"matchers_set,omitempty"`
}
func (m *MetricsForLabelMatchersRequest) Reset() { *m = MetricsForLabelMatchersRequest{} }
func (*MetricsForLabelMatchersRequest) ProtoMessage() {}
func (*MetricsForLabelMatchersRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_cortex_dc30309a17c87a98, []int{15}
+ return fileDescriptor_db0f8a1e534b119a, []int{15}
}
func (m *MetricsForLabelMatchersRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -780,8 +774,8 @@ func (m *MetricsForLabelMatchersRequest) XXX_Marshal(b []byte, deterministic boo
return b[:n], nil
}
}
-func (dst *MetricsForLabelMatchersRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MetricsForLabelMatchersRequest.Merge(dst, src)
+func (m *MetricsForLabelMatchersRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MetricsForLabelMatchersRequest.Merge(m, src)
}
func (m *MetricsForLabelMatchersRequest) XXX_Size() int {
return m.Size()
@@ -814,13 +808,13 @@ func (m *MetricsForLabelMatchersRequest) GetMatchersSet() []*LabelMatchers {
}
type MetricsForLabelMatchersResponse struct {
- Metric []*Metric `protobuf:"bytes,1,rep,name=metric" json:"metric,omitempty"`
+ Metric []*Metric `protobuf:"bytes,1,rep,name=metric,proto3" json:"metric,omitempty"`
}
func (m *MetricsForLabelMatchersResponse) Reset() { *m = MetricsForLabelMatchersResponse{} }
func (*MetricsForLabelMatchersResponse) ProtoMessage() {}
func (*MetricsForLabelMatchersResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_cortex_dc30309a17c87a98, []int{16}
+ return fileDescriptor_db0f8a1e534b119a, []int{16}
}
func (m *MetricsForLabelMatchersResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -837,8 +831,8 @@ func (m *MetricsForLabelMatchersResponse) XXX_Marshal(b []byte, deterministic bo
return b[:n], nil
}
}
-func (dst *MetricsForLabelMatchersResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MetricsForLabelMatchersResponse.Merge(dst, src)
+func (m *MetricsForLabelMatchersResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MetricsForLabelMatchersResponse.Merge(m, src)
}
func (m *MetricsForLabelMatchersResponse) XXX_Size() int {
return m.Size()
@@ -857,16 +851,16 @@ func (m *MetricsForLabelMatchersResponse) GetMetric() []*Metric {
}
type TimeSeriesChunk struct {
- FromIngesterId string `protobuf:"bytes,1,opt,name=from_ingester_id,json=fromIngesterId,proto3" json:"from_ingester_id,omitempty"`
- UserId string `protobuf:"bytes,2,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"`
- Labels []LabelPair `protobuf:"bytes,3,rep,name=labels" json:"labels"`
- Chunks []Chunk `protobuf:"bytes,4,rep,name=chunks" json:"chunks"`
+ FromIngesterId string `protobuf:"bytes,1,opt,name=from_ingester_id,json=fromIngesterId,proto3" json:"from_ingester_id,omitempty"`
+ UserId string `protobuf:"bytes,2,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"`
+ Labels []LabelAdapter `protobuf:"bytes,3,rep,name=labels,proto3,customtype=LabelAdapter" json:"labels"`
+ Chunks []Chunk `protobuf:"bytes,4,rep,name=chunks,proto3" json:"chunks"`
}
func (m *TimeSeriesChunk) Reset() { *m = TimeSeriesChunk{} }
func (*TimeSeriesChunk) ProtoMessage() {}
func (*TimeSeriesChunk) Descriptor() ([]byte, []int) {
- return fileDescriptor_cortex_dc30309a17c87a98, []int{17}
+ return fileDescriptor_db0f8a1e534b119a, []int{17}
}
func (m *TimeSeriesChunk) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -883,8 +877,8 @@ func (m *TimeSeriesChunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, err
return b[:n], nil
}
}
-func (dst *TimeSeriesChunk) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TimeSeriesChunk.Merge(dst, src)
+func (m *TimeSeriesChunk) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TimeSeriesChunk.Merge(m, src)
}
func (m *TimeSeriesChunk) XXX_Size() int {
return m.Size()
@@ -909,13 +903,6 @@ func (m *TimeSeriesChunk) GetUserId() string {
return ""
}
-func (m *TimeSeriesChunk) GetLabels() []LabelPair {
- if m != nil {
- return m.Labels
- }
- return nil
-}
-
func (m *TimeSeriesChunk) GetChunks() []Chunk {
if m != nil {
return m.Chunks
@@ -933,7 +920,7 @@ type Chunk struct {
func (m *Chunk) Reset() { *m = Chunk{} }
func (*Chunk) ProtoMessage() {}
func (*Chunk) Descriptor() ([]byte, []int) {
- return fileDescriptor_cortex_dc30309a17c87a98, []int{18}
+ return fileDescriptor_db0f8a1e534b119a, []int{18}
}
func (m *Chunk) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -950,8 +937,8 @@ func (m *Chunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return b[:n], nil
}
}
-func (dst *Chunk) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Chunk.Merge(dst, src)
+func (m *Chunk) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Chunk.Merge(m, src)
}
func (m *Chunk) XXX_Size() int {
return m.Size()
@@ -996,7 +983,7 @@ type TransferChunksResponse struct {
func (m *TransferChunksResponse) Reset() { *m = TransferChunksResponse{} }
func (*TransferChunksResponse) ProtoMessage() {}
func (*TransferChunksResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_cortex_dc30309a17c87a98, []int{19}
+ return fileDescriptor_db0f8a1e534b119a, []int{19}
}
func (m *TransferChunksResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1013,8 +1000,8 @@ func (m *TransferChunksResponse) XXX_Marshal(b []byte, deterministic bool) ([]by
return b[:n], nil
}
}
-func (dst *TransferChunksResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TransferChunksResponse.Merge(dst, src)
+func (m *TransferChunksResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TransferChunksResponse.Merge(m, src)
}
func (m *TransferChunksResponse) XXX_Size() int {
return m.Size()
@@ -1026,15 +1013,15 @@ func (m *TransferChunksResponse) XXX_DiscardUnknown() {
var xxx_messageInfo_TransferChunksResponse proto.InternalMessageInfo
type TimeSeries struct {
- Labels []LabelPair `protobuf:"bytes,1,rep,name=labels" json:"labels"`
+ Labels []LabelAdapter `protobuf:"bytes,1,rep,name=labels,proto3,customtype=LabelAdapter" json:"labels"`
// Sorted by time, oldest sample first.
- Samples []Sample `protobuf:"bytes,2,rep,name=samples" json:"samples"`
+ Samples []Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples"`
}
func (m *TimeSeries) Reset() { *m = TimeSeries{} }
func (*TimeSeries) ProtoMessage() {}
func (*TimeSeries) Descriptor() ([]byte, []int) {
- return fileDescriptor_cortex_dc30309a17c87a98, []int{20}
+ return fileDescriptor_db0f8a1e534b119a, []int{20}
}
func (m *TimeSeries) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1051,8 +1038,8 @@ func (m *TimeSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return b[:n], nil
}
}
-func (dst *TimeSeries) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TimeSeries.Merge(dst, src)
+func (m *TimeSeries) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TimeSeries.Merge(m, src)
}
func (m *TimeSeries) XXX_Size() int {
return m.Size()
@@ -1063,13 +1050,6 @@ func (m *TimeSeries) XXX_DiscardUnknown() {
var xxx_messageInfo_TimeSeries proto.InternalMessageInfo
-func (m *TimeSeries) GetLabels() []LabelPair {
- if m != nil {
- return m.Labels
- }
- return nil
-}
-
func (m *TimeSeries) GetSamples() []Sample {
if m != nil {
return m.Samples
@@ -1078,14 +1058,14 @@ func (m *TimeSeries) GetSamples() []Sample {
}
type LabelPair struct {
- Name github_com_cortexproject_cortex_pkg_util_wire.Bytes `protobuf:"bytes,1,opt,name=name,proto3,customtype=github.com/cortexproject/cortex/pkg/util/wire.Bytes" json:"name"`
- Value github_com_cortexproject_cortex_pkg_util_wire.Bytes `protobuf:"bytes,2,opt,name=value,proto3,customtype=github.com/cortexproject/cortex/pkg/util/wire.Bytes" json:"value"`
+ Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
}
func (m *LabelPair) Reset() { *m = LabelPair{} }
func (*LabelPair) ProtoMessage() {}
func (*LabelPair) Descriptor() ([]byte, []int) {
- return fileDescriptor_cortex_dc30309a17c87a98, []int{21}
+ return fileDescriptor_db0f8a1e534b119a, []int{21}
}
func (m *LabelPair) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1102,8 +1082,8 @@ func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return b[:n], nil
}
}
-func (dst *LabelPair) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LabelPair.Merge(dst, src)
+func (m *LabelPair) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LabelPair.Merge(m, src)
}
func (m *LabelPair) XXX_Size() int {
return m.Size()
@@ -1114,6 +1094,20 @@ func (m *LabelPair) XXX_DiscardUnknown() {
var xxx_messageInfo_LabelPair proto.InternalMessageInfo
+func (m *LabelPair) GetName() []byte {
+ if m != nil {
+ return m.Name
+ }
+ return nil
+}
+
+func (m *LabelPair) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
type Sample struct {
Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
TimestampMs int64 `protobuf:"varint,2,opt,name=timestamp_ms,json=timestampMs,proto3" json:"timestamp_ms,omitempty"`
@@ -1122,7 +1116,7 @@ type Sample struct {
func (m *Sample) Reset() { *m = Sample{} }
func (*Sample) ProtoMessage() {}
func (*Sample) Descriptor() ([]byte, []int) {
- return fileDescriptor_cortex_dc30309a17c87a98, []int{22}
+ return fileDescriptor_db0f8a1e534b119a, []int{22}
}
func (m *Sample) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1139,8 +1133,8 @@ func (m *Sample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return b[:n], nil
}
}
-func (dst *Sample) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Sample.Merge(dst, src)
+func (m *Sample) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Sample.Merge(m, src)
}
func (m *Sample) XXX_Size() int {
return m.Size()
@@ -1166,13 +1160,13 @@ func (m *Sample) GetTimestampMs() int64 {
}
type LabelMatchers struct {
- Matchers []*LabelMatcher `protobuf:"bytes,1,rep,name=matchers" json:"matchers,omitempty"`
+ Matchers []*LabelMatcher `protobuf:"bytes,1,rep,name=matchers,proto3" json:"matchers,omitempty"`
}
func (m *LabelMatchers) Reset() { *m = LabelMatchers{} }
func (*LabelMatchers) ProtoMessage() {}
func (*LabelMatchers) Descriptor() ([]byte, []int) {
- return fileDescriptor_cortex_dc30309a17c87a98, []int{23}
+ return fileDescriptor_db0f8a1e534b119a, []int{23}
}
func (m *LabelMatchers) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1189,8 +1183,8 @@ func (m *LabelMatchers) XXX_Marshal(b []byte, deterministic bool) ([]byte, error
return b[:n], nil
}
}
-func (dst *LabelMatchers) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LabelMatchers.Merge(dst, src)
+func (m *LabelMatchers) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LabelMatchers.Merge(m, src)
}
func (m *LabelMatchers) XXX_Size() int {
return m.Size()
@@ -1209,13 +1203,13 @@ func (m *LabelMatchers) GetMatchers() []*LabelMatcher {
}
type Metric struct {
- Labels []LabelPair `protobuf:"bytes,1,rep,name=labels" json:"labels"`
+ Labels []LabelAdapter `protobuf:"bytes,1,rep,name=labels,proto3,customtype=LabelAdapter" json:"labels"`
}
func (m *Metric) Reset() { *m = Metric{} }
func (*Metric) ProtoMessage() {}
func (*Metric) Descriptor() ([]byte, []int) {
- return fileDescriptor_cortex_dc30309a17c87a98, []int{24}
+ return fileDescriptor_db0f8a1e534b119a, []int{24}
}
func (m *Metric) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1232,8 +1226,8 @@ func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return b[:n], nil
}
}
-func (dst *Metric) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Metric.Merge(dst, src)
+func (m *Metric) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Metric.Merge(m, src)
}
func (m *Metric) XXX_Size() int {
return m.Size()
@@ -1244,13 +1238,6 @@ func (m *Metric) XXX_DiscardUnknown() {
var xxx_messageInfo_Metric proto.InternalMessageInfo
-func (m *Metric) GetLabels() []LabelPair {
- if m != nil {
- return m.Labels
- }
- return nil
-}
-
type LabelMatcher struct {
Type MatchType `protobuf:"varint,1,opt,name=type,proto3,enum=cortex.MatchType" json:"type,omitempty"`
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
@@ -1260,7 +1247,7 @@ type LabelMatcher struct {
func (m *LabelMatcher) Reset() { *m = LabelMatcher{} }
func (*LabelMatcher) ProtoMessage() {}
func (*LabelMatcher) Descriptor() ([]byte, []int) {
- return fileDescriptor_cortex_dc30309a17c87a98, []int{25}
+ return fileDescriptor_db0f8a1e534b119a, []int{25}
}
func (m *LabelMatcher) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1277,8 +1264,8 @@ func (m *LabelMatcher) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
return b[:n], nil
}
}
-func (dst *LabelMatcher) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LabelMatcher.Merge(dst, src)
+func (m *LabelMatcher) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LabelMatcher.Merge(m, src)
}
func (m *LabelMatcher) XXX_Size() int {
return m.Size()
@@ -1311,6 +1298,8 @@ func (m *LabelMatcher) GetValue() string {
}
func init() {
+ proto.RegisterEnum("cortex.MatchType", MatchType_name, MatchType_value)
+ proto.RegisterEnum("cortex.WriteRequest_SourceEnum", WriteRequest_SourceEnum_name, WriteRequest_SourceEnum_value)
proto.RegisterType((*WriteRequest)(nil), "cortex.WriteRequest")
proto.RegisterType((*WriteResponse)(nil), "cortex.WriteResponse")
proto.RegisterType((*ReadRequest)(nil), "cortex.ReadRequest")
@@ -1337,9 +1326,93 @@ func init() {
proto.RegisterType((*LabelMatchers)(nil), "cortex.LabelMatchers")
proto.RegisterType((*Metric)(nil), "cortex.Metric")
proto.RegisterType((*LabelMatcher)(nil), "cortex.LabelMatcher")
- proto.RegisterEnum("cortex.MatchType", MatchType_name, MatchType_value)
- proto.RegisterEnum("cortex.WriteRequest_SourceEnum", WriteRequest_SourceEnum_name, WriteRequest_SourceEnum_value)
}
+
+func init() {
+ proto.RegisterFile("github.com/cortexproject/cortex/pkg/ingester/client/cortex.proto", fileDescriptor_db0f8a1e534b119a)
+}
+
+var fileDescriptor_db0f8a1e534b119a = []byte{
+ // 1231 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0xcf, 0x6f, 0x1b, 0xc5,
+ 0x17, 0xdf, 0x8d, 0x7f, 0x24, 0x7e, 0x76, 0x5c, 0x67, 0x92, 0x7e, 0x9b, 0xba, 0xfa, 0x6e, 0xca,
+ 0x48, 0x2d, 0x11, 0x50, 0xa7, 0xa4, 0x2a, 0xf4, 0x40, 0x55, 0x9c, 0x36, 0x6d, 0x8d, 0x92, 0x34,
+ 0x1d, 0xbb, 0x80, 0x90, 0xd0, 0x6a, 0x63, 0x4f, 0x9d, 0xa5, 0xfb, 0xc3, 0x9d, 0x99, 0x45, 0xf4,
+ 0x80, 0xc4, 0x7f, 0x00, 0x47, 0xf8, 0x0f, 0x38, 0x73, 0x81, 0x33, 0xa7, 0x1e, 0x7b, 0xac, 0x38,
+ 0x54, 0xd4, 0xbd, 0x70, 0xec, 0x9f, 0x80, 0x76, 0x66, 0x76, 0xbd, 0xeb, 0xda, 0xa2, 0x02, 0xf5,
+ 0xe6, 0x79, 0xef, 0xf3, 0x3e, 0xf3, 0xe6, 0xfd, 0x5c, 0xc3, 0xc7, 0x43, 0x57, 0x1c, 0x47, 0x47,
+ 0xad, 0x7e, 0xe8, 0x6f, 0xf5, 0x43, 0x26, 0xe8, 0x37, 0x23, 0x16, 0x7e, 0x45, 0xfb, 0x42, 0x9f,
+ 0xb6, 0x46, 0x0f, 0x86, 0x5b, 0x6e, 0x30, 0xa4, 0x5c, 0x50, 0xb6, 0xd5, 0xf7, 0x5c, 0x1a, 0x24,
+ 0xaa, 0xd6, 0x88, 0x85, 0x22, 0x44, 0x65, 0x75, 0x6a, 0x5e, 0xc8, 0x30, 0x0d, 0xc3, 0x61, 0xb8,
+ 0x25, 0xd5, 0x47, 0xd1, 0x7d, 0x79, 0x92, 0x07, 0xf9, 0x4b, 0x99, 0xe1, 0xdf, 0x4c, 0xa8, 0x7d,
+ 0xc6, 0x5c, 0x41, 0x09, 0x7d, 0x18, 0x51, 0x2e, 0xd0, 0x01, 0x80, 0x70, 0x7d, 0xca, 0x29, 0x73,
+ 0x29, 0x5f, 0x37, 0xcf, 0x16, 0x36, 0xab, 0xdb, 0xa8, 0xa5, 0xaf, 0xea, 0xb9, 0x3e, 0xed, 0x4a,
+ 0xcd, 0x4e, 0xf3, 0xf1, 0xb3, 0x0d, 0xe3, 0x8f, 0x67, 0x1b, 0xe8, 0x90, 0x51, 0xc7, 0xf3, 0xc2,
+ 0x7e, 0x2f, 0xb5, 0x22, 0x19, 0x06, 0xf4, 0x21, 0x94, 0xbb, 0x61, 0xc4, 0xfa, 0x74, 0x7d, 0xe1,
+ 0xac, 0xb9, 0x59, 0xdf, 0xde, 0x48, 0xb8, 0xb2, 0xb7, 0xb6, 0x14, 0x64, 0x37, 0x88, 0x7c, 0x52,
+ 0xe6, 0xf2, 0x37, 0xde, 0x00, 0x98, 0x48, 0xd1, 0x22, 0x14, 0xda, 0x87, 0x9d, 0x86, 0x81, 0x96,
+ 0xa0, 0x48, 0xee, 0xed, 0xed, 0x36, 0x4c, 0x7c, 0x02, 0x96, 0x35, 0x07, 0x1f, 0x85, 0x01, 0xa7,
+ 0xf8, 0x2a, 0x54, 0x09, 0x75, 0x06, 0xc9, 0x4b, 0x5a, 0xb0, 0xf8, 0x30, 0xca, 0x3e, 0x63, 0x2d,
+ 0xb9, 0xfa, 0x6e, 0x44, 0xd9, 0x23, 0x0d, 0x23, 0x09, 0x08, 0x5f, 0x83, 0x9a, 0x32, 0x57, 0x74,
+ 0x68, 0x0b, 0x16, 0x19, 0xe5, 0x91, 0x27, 0x12, 0xfb, 0x93, 0x53, 0xf6, 0x0a, 0x47, 0x12, 0x14,
+ 0xfe, 0xd1, 0x84, 0x5a, 0x96, 0x1a, 0xbd, 0x07, 0x88, 0x0b, 0x87, 0x09, 0x5b, 0xc6, 0x43, 0x38,
+ 0xfe, 0xc8, 0xf6, 0x63, 0x32, 0x73, 0xb3, 0x40, 0x1a, 0x52, 0xd3, 0x4b, 0x14, 0xfb, 0x1c, 0x6d,
+ 0x42, 0x83, 0x06, 0x83, 0x3c, 0x76, 0x41, 0x62, 0xeb, 0x34, 0x18, 0x64, 0x91, 0x17, 0x61, 0xc9,
+ 0x77, 0x44, 0xff, 0x98, 0x32, 0xbe, 0x5e, 0xc8, 0x3f, 0x6d, 0xcf, 0x39, 0xa2, 0xde, 0xbe, 0x52,
+ 0x92, 0x14, 0x85, 0x3b, 0xb0, 0x9c, 0x73, 0x1a, 0x5d, 0x79, 0xcd, 0x34, 0x17, 0xe3, 0x34, 0x67,
+ 0x13, 0x8a, 0x7b, 0xb0, 0x2a, 0xa9, 0xba, 0x82, 0x51, 0xc7, 0x4f, 0x09, 0xaf, 0xce, 0x20, 0x3c,
+ 0xf5, 0x2a, 0xe1, 0xf5, 0xe3, 0x28, 0x78, 0x30, 0x83, 0xf5, 0x12, 0x20, 0xe9, 0xfa, 0xa7, 0x8e,
+ 0x17, 0x51, 0x9e, 0x04, 0xf0, 0xff, 0x00, 0x5e, 0x2c, 0xb5, 0x03, 0xc7, 0xa7, 0x32, 0x70, 0x15,
+ 0x52, 0x91, 0x92, 0x03, 0xc7, 0xa7, 0xf8, 0x0a, 0xac, 0xe6, 0x8c, 0xb4, 0x2b, 0x6f, 0x41, 0x4d,
+ 0x59, 0x7d, 0x2d, 0xe5, 0xd2, 0x99, 0x0a, 0xa9, 0x7a, 0x13, 0x28, 0x5e, 0x85, 0x95, 0xbd, 0x84,
+ 0x26, 0xb9, 0x0d, 0x5f, 0xd6, 0x3e, 0x68, 0xa1, 0x66, 0xdb, 0x80, 0xea, 0xc4, 0x87, 0x84, 0x0c,
+ 0x52, 0x27, 0x38, 0x46, 0xd0, 0xb8, 0xc7, 0x29, 0xeb, 0x0a, 0x47, 0xa4, 0x54, 0xbf, 0x9a, 0xb0,
+ 0x92, 0x11, 0x6a, 0xaa, 0x73, 0x50, 0x57, 0x3d, 0xec, 0x86, 0x81, 0xcd, 0x1c, 0xa1, 0x9e, 0x64,
+ 0x92, 0xe5, 0x54, 0x4a, 0x1c, 0x41, 0xe3, 0x57, 0x07, 0x91, 0x6f, 0xeb, 0x50, 0xc6, 0x25, 0x50,
+ 0x24, 0x95, 0x20, 0xf2, 0x55, 0x04, 0xe3, 0xaa, 0x72, 0x46, 0xae, 0x3d, 0xc5, 0x54, 0x90, 0x4c,
+ 0x0d, 0x67, 0xe4, 0x76, 0x72, 0x64, 0x2d, 0x58, 0x65, 0x91, 0x47, 0xa7, 0xe1, 0x45, 0x09, 0x5f,
+ 0x89, 0x55, 0x39, 0x3c, 0xfe, 0x12, 0x56, 0x63, 0xc7, 0x3b, 0x37, 0xf2, 0xae, 0x9f, 0x82, 0xc5,
+ 0x88, 0x53, 0x66, 0xbb, 0x03, 0x9d, 0x86, 0x72, 0x7c, 0xec, 0x0c, 0xd0, 0x05, 0x28, 0x0e, 0x1c,
+ 0xe1, 0x48, 0x37, 0xab, 0xdb, 0xa7, 0x93, 0x8c, 0xbf, 0xf2, 0x78, 0x22, 0x61, 0xf8, 0x16, 0xa0,
+ 0x58, 0xc5, 0xf3, 0xec, 0xef, 0x43, 0x89, 0xc7, 0x02, 0x5d, 0x37, 0x67, 0xb2, 0x2c, 0x53, 0x9e,
+ 0x10, 0x85, 0xc4, 0xbf, 0x98, 0x60, 0xed, 0x53, 0xc1, 0xdc, 0x3e, 0xbf, 0x19, 0xb2, 0x6c, 0xd9,
+ 0xf3, 0x37, 0xdd, 0x7e, 0x57, 0xa0, 0x96, 0x34, 0x96, 0xcd, 0xa9, 0xd0, 0x2d, 0x78, 0x72, 0x56,
+ 0x0b, 0x72, 0x52, 0x4d, 0xa0, 0x5d, 0x2a, 0x70, 0x07, 0x36, 0xe6, 0xfa, 0xac, 0x43, 0x71, 0x1e,
+ 0xca, 0xbe, 0x84, 0xe8, 0x58, 0xd4, 0x13, 0x5a, 0x65, 0x48, 0xb4, 0x16, 0xff, 0x6e, 0xc2, 0x89,
+ 0xa9, 0xb6, 0x8a, 0x9f, 0x70, 0x9f, 0x85, 0xbe, 0x9d, 0x2c, 0x8a, 0x49, 0xb6, 0xea, 0xb1, 0xbc,
+ 0xa3, 0xc5, 0x9d, 0x41, 0x36, 0x9d, 0x0b, 0xb9, 0x74, 0x5e, 0x83, 0xb2, 0x2c, 0xed, 0x64, 0xb0,
+ 0xac, 0xe4, 0x5e, 0x75, 0xe8, 0xb8, 0x6c, 0x67, 0x4d, 0x4f, 0xfe, 0x9a, 0x14, 0xb5, 0x07, 0xce,
+ 0x48, 0x50, 0x46, 0xb4, 0x19, 0x7a, 0x17, 0xca, 0xfd, 0xd8, 0x19, 0xbe, 0x5e, 0x94, 0x04, 0xcb,
+ 0x09, 0x41, 0xb6, 0xf3, 0x35, 0x04, 0x7f, 0x6f, 0x42, 0x49, 0xb9, 0xfe, 0xa6, 0x72, 0xd5, 0x84,
+ 0x25, 0x1a, 0xf4, 0xc3, 0x81, 0x1b, 0x0c, 0x65, 0x8b, 0x94, 0x48, 0x7a, 0x46, 0x48, 0x97, 0x6e,
+ 0xdc, 0x0b, 0x35, 0x5d, 0x9f, 0xeb, 0xf0, 0xbf, 0x1e, 0x73, 0x02, 0x7e, 0x9f, 0x32, 0xe9, 0x58,
+ 0x9a, 0x18, 0xfc, 0x2d, 0xc0, 0x24, 0xde, 0x99, 0x38, 0x99, 0xff, 0x2e, 0x4e, 0x2d, 0x58, 0xe4,
+ 0x8e, 0x3f, 0xf2, 0x64, 0x87, 0xe7, 0x12, 0xdd, 0x95, 0x62, 0x1d, 0xa9, 0x04, 0x84, 0x2f, 0x43,
+ 0x25, 0xa5, 0x8e, 0x3d, 0x4f, 0x27, 0x62, 0x8d, 0xc8, 0xdf, 0x68, 0x0d, 0x4a, 0x72, 0xde, 0xc9,
+ 0x40, 0xd4, 0x88, 0x3a, 0xe0, 0x36, 0x94, 0x15, 0xdf, 0x44, 0xaf, 0x66, 0x8e, 0x3a, 0xc4, 0xb3,
+ 0x72, 0x46, 0x14, 0xab, 0x62, 0x12, 0x42, 0xdc, 0x86, 0xe5, 0x5c, 0xa9, 0xe6, 0xd6, 0x8f, 0xf9,
+ 0x9a, 0xeb, 0xa7, 0xac, 0xca, 0xf7, 0x3f, 0xc7, 0x0d, 0xdb, 0x50, 0xcb, 0x5e, 0x82, 0xce, 0x41,
+ 0x51, 0x3c, 0x1a, 0xa9, 0x57, 0xd5, 0x27, 0x74, 0x52, 0xdd, 0x7b, 0x34, 0xa2, 0x44, 0xaa, 0xd3,
+ 0x88, 0xa9, 0x6a, 0x9f, 0x8a, 0x58, 0x41, 0x0a, 0xd5, 0xe1, 0x9d, 0x4f, 0xa0, 0x92, 0x1a, 0xa3,
+ 0x0a, 0x94, 0x76, 0xef, 0xde, 0x6b, 0xef, 0x35, 0x0c, 0xb4, 0x0c, 0x95, 0x83, 0x3b, 0x3d, 0x5b,
+ 0x1d, 0x4d, 0x74, 0x02, 0xaa, 0x64, 0xf7, 0xd6, 0xee, 0xe7, 0xf6, 0x7e, 0xbb, 0x77, 0xfd, 0x76,
+ 0x63, 0x01, 0x21, 0xa8, 0x2b, 0xc1, 0xc1, 0x1d, 0x2d, 0x2b, 0x6c, 0xff, 0x54, 0x82, 0xa5, 0xa4,
+ 0xeb, 0xd0, 0x65, 0x28, 0x1e, 0x46, 0xfc, 0x18, 0xad, 0xcd, 0xfa, 0x02, 0x6a, 0x9e, 0x9c, 0x92,
+ 0xea, 0xaa, 0x33, 0xd0, 0x07, 0x50, 0x92, 0xfb, 0x16, 0xcd, 0xfc, 0x7c, 0x69, 0xce, 0xfe, 0x28,
+ 0xc1, 0x06, 0xba, 0x01, 0xd5, 0xcc, 0x9e, 0x9e, 0x63, 0x7d, 0x26, 0x27, 0xcd, 0xaf, 0x74, 0x6c,
+ 0x5c, 0x34, 0xd1, 0x6d, 0xa8, 0x66, 0x56, 0x2c, 0x6a, 0xe6, 0xd2, 0x95, 0x5b, 0xd6, 0x13, 0xae,
+ 0x19, 0x3b, 0x19, 0x1b, 0x68, 0x17, 0x60, 0xb2, 0x5d, 0xd1, 0xe9, 0x1c, 0x38, 0xbb, 0x86, 0x9b,
+ 0xcd, 0x59, 0xaa, 0x94, 0x66, 0x07, 0x2a, 0xe9, 0x6e, 0x41, 0xeb, 0x33, 0xd6, 0x8d, 0x22, 0x99,
+ 0xbf, 0x88, 0xb0, 0x81, 0x6e, 0x42, 0xad, 0xed, 0x79, 0xaf, 0x43, 0xd3, 0xcc, 0x6a, 0xf8, 0x34,
+ 0x8f, 0x07, 0xa7, 0xe6, 0x8c, 0x73, 0x74, 0x3e, 0x3f, 0xb6, 0xe7, 0xed, 0xa8, 0xe6, 0xdb, 0xff,
+ 0x88, 0x4b, 0x6f, 0xdb, 0x87, 0x7a, 0x7e, 0x34, 0xa1, 0x79, 0xdf, 0x57, 0x4d, 0x2b, 0x55, 0xcc,
+ 0x9e, 0x65, 0xc6, 0xa6, 0xb9, 0xf3, 0xd1, 0x93, 0xe7, 0x96, 0xf1, 0xf4, 0xb9, 0x65, 0xbc, 0x7c,
+ 0x6e, 0x99, 0xdf, 0x8d, 0x2d, 0xf3, 0xe7, 0xb1, 0x65, 0x3e, 0x1e, 0x5b, 0xe6, 0x93, 0xb1, 0x65,
+ 0xfe, 0x39, 0xb6, 0xcc, 0xbf, 0xc6, 0x96, 0xf1, 0x72, 0x6c, 0x99, 0x3f, 0xbc, 0xb0, 0x8c, 0x27,
+ 0x2f, 0x2c, 0xe3, 0xe9, 0x0b, 0xcb, 0xf8, 0xa2, 0xac, 0xfe, 0x7b, 0x1c, 0x95, 0xe5, 0xdf, 0x87,
+ 0x4b, 0x7f, 0x07, 0x00, 0x00, 0xff, 0xff, 0x95, 0x27, 0x3b, 0x4e, 0xb9, 0x0c, 0x00, 0x00,
+}
+
func (x MatchType) String() string {
s, ok := MatchType_name[int32(x)]
if ok {
@@ -1864,7 +1937,7 @@ func (this *TimeSeriesChunk) Equal(that interface{}) bool {
return false
}
for i := range this.Labels {
- if !this.Labels[i].Equal(&that1.Labels[i]) {
+ if !this.Labels[i].Equal(that1.Labels[i]) {
return false
}
}
@@ -1955,7 +2028,7 @@ func (this *TimeSeries) Equal(that interface{}) bool {
return false
}
for i := range this.Labels {
- if !this.Labels[i].Equal(&that1.Labels[i]) {
+ if !this.Labels[i].Equal(that1.Labels[i]) {
return false
}
}
@@ -1988,10 +2061,10 @@ func (this *LabelPair) Equal(that interface{}) bool {
} else if this == nil {
return false
}
- if !this.Name.Equal(that1.Name) {
+ if !bytes.Equal(this.Name, that1.Name) {
return false
}
- if !this.Value.Equal(that1.Value) {
+ if !bytes.Equal(this.Value, that1.Value) {
return false
}
return true
@@ -2075,7 +2148,7 @@ func (this *Metric) Equal(that interface{}) bool {
return false
}
for i := range this.Labels {
- if !this.Labels[i].Equal(&that1.Labels[i]) {
+ if !this.Labels[i].Equal(that1.Labels[i]) {
return false
}
}
@@ -2321,13 +2394,7 @@ func (this *TimeSeriesChunk) GoString() string {
s = append(s, "&client.TimeSeriesChunk{")
s = append(s, "FromIngesterId: "+fmt.Sprintf("%#v", this.FromIngesterId)+",\n")
s = append(s, "UserId: "+fmt.Sprintf("%#v", this.UserId)+",\n")
- if this.Labels != nil {
- vs := make([]*LabelPair, len(this.Labels))
- for i := range vs {
- vs[i] = &this.Labels[i]
- }
- s = append(s, "Labels: "+fmt.Sprintf("%#v", vs)+",\n")
- }
+ s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n")
if this.Chunks != nil {
vs := make([]*Chunk, len(this.Chunks))
for i := range vs {
@@ -2366,13 +2433,7 @@ func (this *TimeSeries) GoString() string {
}
s := make([]string, 0, 6)
s = append(s, "&client.TimeSeries{")
- if this.Labels != nil {
- vs := make([]*LabelPair, len(this.Labels))
- for i := range vs {
- vs[i] = &this.Labels[i]
- }
- s = append(s, "Labels: "+fmt.Sprintf("%#v", vs)+",\n")
- }
+ s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n")
if this.Samples != nil {
vs := make([]*Sample, len(this.Samples))
for i := range vs {
@@ -2423,13 +2484,7 @@ func (this *Metric) GoString() string {
}
s := make([]string, 0, 5)
s = append(s, "&client.Metric{")
- if this.Labels != nil {
- vs := make([]*LabelPair, len(this.Labels))
- for i := range vs {
- vs[i] = &this.Labels[i]
- }
- s = append(s, "Labels: "+fmt.Sprintf("%#v", vs)+",\n")
- }
+ s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
@@ -3259,9 +3314,9 @@ func (m *UserIDStatsResponse) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x12
i++
i = encodeVarintCortex(dAtA, i, uint64(m.Data.Size()))
- n1, err := m.Data.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ n1, err1 := m.Data.MarshalTo(dAtA[i:])
+ if err1 != nil {
+ return 0, err1
}
i += n1
}
@@ -3536,22 +3591,18 @@ func (m *LabelPair) MarshalTo(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
- dAtA[i] = 0xa
- i++
- i = encodeVarintCortex(dAtA, i, uint64(m.Name.Size()))
- n2, err := m.Name.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ if len(m.Name) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintCortex(dAtA, i, uint64(len(m.Name)))
+ i += copy(dAtA[i:], m.Name)
}
- i += n2
- dAtA[i] = 0x12
- i++
- i = encodeVarintCortex(dAtA, i, uint64(m.Value.Size()))
- n3, err := m.Value.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ if len(m.Value) > 0 {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintCortex(dAtA, i, uint64(len(m.Value)))
+ i += copy(dAtA[i:], m.Value)
}
- i += n3
return i, nil
}
@@ -4033,10 +4084,14 @@ func (m *LabelPair) Size() (n int) {
}
var l int
_ = l
- l = m.Name.Size()
- n += 1 + l + sovCortex(uint64(l))
- l = m.Value.Size()
- n += 1 + l + sovCortex(uint64(l))
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovCortex(uint64(l))
+ }
+ l = len(m.Value)
+ if l > 0 {
+ n += 1 + l + sovCortex(uint64(l))
+ }
return n
}
@@ -4142,8 +4197,13 @@ func (this *ReadRequest) String() string {
if this == nil {
return "nil"
}
+ repeatedStringForQueries := "[]*QueryRequest{"
+ for _, f := range this.Queries {
+ repeatedStringForQueries += strings.Replace(f.String(), "QueryRequest", "QueryRequest", 1) + ","
+ }
+ repeatedStringForQueries += "}"
s := strings.Join([]string{`&ReadRequest{`,
- `Queries:` + strings.Replace(fmt.Sprintf("%v", this.Queries), "QueryRequest", "QueryRequest", 1) + `,`,
+ `Queries:` + repeatedStringForQueries + `,`,
`}`,
}, "")
return s
@@ -4152,8 +4212,13 @@ func (this *ReadResponse) String() string {
if this == nil {
return "nil"
}
+ repeatedStringForResults := "[]*QueryResponse{"
+ for _, f := range this.Results {
+ repeatedStringForResults += strings.Replace(f.String(), "QueryResponse", "QueryResponse", 1) + ","
+ }
+ repeatedStringForResults += "}"
s := strings.Join([]string{`&ReadResponse{`,
- `Results:` + strings.Replace(fmt.Sprintf("%v", this.Results), "QueryResponse", "QueryResponse", 1) + `,`,
+ `Results:` + repeatedStringForResults + `,`,
`}`,
}, "")
return s
@@ -4162,10 +4227,15 @@ func (this *QueryRequest) String() string {
if this == nil {
return "nil"
}
+ repeatedStringForMatchers := "[]*LabelMatcher{"
+ for _, f := range this.Matchers {
+ repeatedStringForMatchers += strings.Replace(f.String(), "LabelMatcher", "LabelMatcher", 1) + ","
+ }
+ repeatedStringForMatchers += "}"
s := strings.Join([]string{`&QueryRequest{`,
`StartTimestampMs:` + fmt.Sprintf("%v", this.StartTimestampMs) + `,`,
`EndTimestampMs:` + fmt.Sprintf("%v", this.EndTimestampMs) + `,`,
- `Matchers:` + strings.Replace(fmt.Sprintf("%v", this.Matchers), "LabelMatcher", "LabelMatcher", 1) + `,`,
+ `Matchers:` + repeatedStringForMatchers + `,`,
`}`,
}, "")
return s
@@ -4174,8 +4244,13 @@ func (this *QueryResponse) String() string {
if this == nil {
return "nil"
}
+ repeatedStringForTimeseries := "[]TimeSeries{"
+ for _, f := range this.Timeseries {
+ repeatedStringForTimeseries += strings.Replace(strings.Replace(f.String(), "TimeSeries", "TimeSeries", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForTimeseries += "}"
s := strings.Join([]string{`&QueryResponse{`,
- `Timeseries:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timeseries), "TimeSeries", "TimeSeries", 1), `&`, ``, 1) + `,`,
+ `Timeseries:` + repeatedStringForTimeseries + `,`,
`}`,
}, "")
return s
@@ -4184,8 +4259,13 @@ func (this *QueryStreamResponse) String() string {
if this == nil {
return "nil"
}
+ repeatedStringForTimeseries := "[]TimeSeriesChunk{"
+ for _, f := range this.Timeseries {
+ repeatedStringForTimeseries += strings.Replace(strings.Replace(f.String(), "TimeSeriesChunk", "TimeSeriesChunk", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForTimeseries += "}"
s := strings.Join([]string{`&QueryStreamResponse{`,
- `Timeseries:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timeseries), "TimeSeriesChunk", "TimeSeriesChunk", 1), `&`, ``, 1) + `,`,
+ `Timeseries:` + repeatedStringForTimeseries + `,`,
`}`,
}, "")
return s
@@ -4257,7 +4337,7 @@ func (this *UserIDStatsResponse) String() string {
}
s := strings.Join([]string{`&UserIDStatsResponse{`,
`UserId:` + fmt.Sprintf("%v", this.UserId) + `,`,
- `Data:` + strings.Replace(fmt.Sprintf("%v", this.Data), "UserStatsResponse", "UserStatsResponse", 1) + `,`,
+ `Data:` + strings.Replace(this.Data.String(), "UserStatsResponse", "UserStatsResponse", 1) + `,`,
`}`,
}, "")
return s
@@ -4266,8 +4346,13 @@ func (this *UsersStatsResponse) String() string {
if this == nil {
return "nil"
}
+ repeatedStringForStats := "[]*UserIDStatsResponse{"
+ for _, f := range this.Stats {
+ repeatedStringForStats += strings.Replace(f.String(), "UserIDStatsResponse", "UserIDStatsResponse", 1) + ","
+ }
+ repeatedStringForStats += "}"
s := strings.Join([]string{`&UsersStatsResponse{`,
- `Stats:` + strings.Replace(fmt.Sprintf("%v", this.Stats), "UserIDStatsResponse", "UserIDStatsResponse", 1) + `,`,
+ `Stats:` + repeatedStringForStats + `,`,
`}`,
}, "")
return s
@@ -4276,10 +4361,15 @@ func (this *MetricsForLabelMatchersRequest) String() string {
if this == nil {
return "nil"
}
+ repeatedStringForMatchersSet := "[]*LabelMatchers{"
+ for _, f := range this.MatchersSet {
+ repeatedStringForMatchersSet += strings.Replace(f.String(), "LabelMatchers", "LabelMatchers", 1) + ","
+ }
+ repeatedStringForMatchersSet += "}"
s := strings.Join([]string{`&MetricsForLabelMatchersRequest{`,
`StartTimestampMs:` + fmt.Sprintf("%v", this.StartTimestampMs) + `,`,
`EndTimestampMs:` + fmt.Sprintf("%v", this.EndTimestampMs) + `,`,
- `MatchersSet:` + strings.Replace(fmt.Sprintf("%v", this.MatchersSet), "LabelMatchers", "LabelMatchers", 1) + `,`,
+ `MatchersSet:` + repeatedStringForMatchersSet + `,`,
`}`,
}, "")
return s
@@ -4288,8 +4378,13 @@ func (this *MetricsForLabelMatchersResponse) String() string {
if this == nil {
return "nil"
}
+ repeatedStringForMetric := "[]*Metric{"
+ for _, f := range this.Metric {
+ repeatedStringForMetric += strings.Replace(f.String(), "Metric", "Metric", 1) + ","
+ }
+ repeatedStringForMetric += "}"
s := strings.Join([]string{`&MetricsForLabelMatchersResponse{`,
- `Metric:` + strings.Replace(fmt.Sprintf("%v", this.Metric), "Metric", "Metric", 1) + `,`,
+ `Metric:` + repeatedStringForMetric + `,`,
`}`,
}, "")
return s
@@ -4298,11 +4393,16 @@ func (this *TimeSeriesChunk) String() string {
if this == nil {
return "nil"
}
+ repeatedStringForChunks := "[]Chunk{"
+ for _, f := range this.Chunks {
+ repeatedStringForChunks += strings.Replace(strings.Replace(f.String(), "Chunk", "Chunk", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForChunks += "}"
s := strings.Join([]string{`&TimeSeriesChunk{`,
`FromIngesterId:` + fmt.Sprintf("%v", this.FromIngesterId) + `,`,
`UserId:` + fmt.Sprintf("%v", this.UserId) + `,`,
- `Labels:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Labels), "LabelPair", "LabelPair", 1), `&`, ``, 1) + `,`,
- `Chunks:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Chunks), "Chunk", "Chunk", 1), `&`, ``, 1) + `,`,
+ `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`,
+ `Chunks:` + repeatedStringForChunks + `,`,
`}`,
}, "")
return s
@@ -4333,9 +4433,14 @@ func (this *TimeSeries) String() string {
if this == nil {
return "nil"
}
+ repeatedStringForSamples := "[]Sample{"
+ for _, f := range this.Samples {
+ repeatedStringForSamples += strings.Replace(strings.Replace(f.String(), "Sample", "Sample", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForSamples += "}"
s := strings.Join([]string{`&TimeSeries{`,
- `Labels:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Labels), "LabelPair", "LabelPair", 1), `&`, ``, 1) + `,`,
- `Samples:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Samples), "Sample", "Sample", 1), `&`, ``, 1) + `,`,
+ `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`,
+ `Samples:` + repeatedStringForSamples + `,`,
`}`,
}, "")
return s
@@ -4366,8 +4471,13 @@ func (this *LabelMatchers) String() string {
if this == nil {
return "nil"
}
+ repeatedStringForMatchers := "[]*LabelMatcher{"
+ for _, f := range this.Matchers {
+ repeatedStringForMatchers += strings.Replace(f.String(), "LabelMatcher", "LabelMatcher", 1) + ","
+ }
+ repeatedStringForMatchers += "}"
s := strings.Join([]string{`&LabelMatchers{`,
- `Matchers:` + strings.Replace(fmt.Sprintf("%v", this.Matchers), "LabelMatcher", "LabelMatcher", 1) + `,`,
+ `Matchers:` + repeatedStringForMatchers + `,`,
`}`,
}, "")
return s
@@ -4377,7 +4487,7 @@ func (this *Metric) String() string {
return "nil"
}
s := strings.Join([]string{`&Metric{`,
- `Labels:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Labels), "LabelPair", "LabelPair", 1), `&`, ``, 1) + `,`,
+ `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`,
`}`,
}, "")
return s
@@ -4417,7 +4527,7 @@ func (m *WriteRequest) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -4445,7 +4555,7 @@ func (m *WriteRequest) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= (int(b) & 0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -4454,6 +4564,9 @@ func (m *WriteRequest) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthCortex
}
postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthCortex
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -4476,7 +4589,7 @@ func (m *WriteRequest) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.Source |= (WriteRequest_SourceEnum(b) & 0x7F) << shift
+ m.Source |= WriteRequest_SourceEnum(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -4490,6 +4603,9 @@ func (m *WriteRequest) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthCortex
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthCortex
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
@@ -4517,7 +4633,7 @@ func (m *WriteResponse) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -4540,6 +4656,9 @@ func (m *WriteResponse) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthCortex
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthCortex
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
@@ -4567,7 +4686,7 @@ func (m *ReadRequest) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -4595,7 +4714,7 @@ func (m *ReadRequest) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= (int(b) & 0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -4604,6 +4723,9 @@ func (m *ReadRequest) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthCortex
}
postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthCortex
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -4621,6 +4743,9 @@ func (m *ReadRequest) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthCortex
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthCortex
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
@@ -4648,7 +4773,7 @@ func (m *ReadResponse) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -4676,7 +4801,7 @@ func (m *ReadResponse) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= (int(b) & 0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -4685,6 +4810,9 @@ func (m *ReadResponse) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthCortex
}
postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthCortex
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -4702,6 +4830,9 @@ func (m *ReadResponse) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthCortex
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthCortex
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
@@ -4729,7 +4860,7 @@ func (m *QueryRequest) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -4757,7 +4888,7 @@ func (m *QueryRequest) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.StartTimestampMs |= (int64(b) & 0x7F) << shift
+ m.StartTimestampMs |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -4776,7 +4907,7 @@ func (m *QueryRequest) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.EndTimestampMs |= (int64(b) & 0x7F) << shift
+ m.EndTimestampMs |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -4795,7 +4926,7 @@ func (m *QueryRequest) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= (int(b) & 0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -4804,6 +4935,9 @@ func (m *QueryRequest) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthCortex
}
postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthCortex
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -4821,6 +4955,9 @@ func (m *QueryRequest) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthCortex
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthCortex
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
@@ -4848,7 +4985,7 @@ func (m *QueryResponse) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -4876,7 +5013,7 @@ func (m *QueryResponse) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= (int(b) & 0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -4885,6 +5022,9 @@ func (m *QueryResponse) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthCortex
}
postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthCortex
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -4902,6 +5042,9 @@ func (m *QueryResponse) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthCortex
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthCortex
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
@@ -4929,7 +5072,7 @@ func (m *QueryStreamResponse) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -4957,7 +5100,7 @@ func (m *QueryStreamResponse) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= (int(b) & 0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -4966,6 +5109,9 @@ func (m *QueryStreamResponse) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthCortex
}
postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthCortex
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -4983,6 +5129,9 @@ func (m *QueryStreamResponse) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthCortex
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthCortex
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
@@ -5010,7 +5159,7 @@ func (m *LabelValuesRequest) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -5038,7 +5187,7 @@ func (m *LabelValuesRequest) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -5048,6 +5197,9 @@ func (m *LabelValuesRequest) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthCortex
}
postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthCortex
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -5062,6 +5214,9 @@ func (m *LabelValuesRequest) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthCortex
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthCortex
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
@@ -5089,7 +5244,7 @@ func (m *LabelValuesResponse) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -5117,7 +5272,7 @@ func (m *LabelValuesResponse) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -5127,6 +5282,9 @@ func (m *LabelValuesResponse) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthCortex
}
postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthCortex
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -5141,6 +5299,9 @@ func (m *LabelValuesResponse) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthCortex
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthCortex
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
@@ -5168,7 +5329,7 @@ func (m *LabelNamesRequest) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -5191,6 +5352,9 @@ func (m *LabelNamesRequest) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthCortex
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthCortex
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
@@ -5218,7 +5382,7 @@ func (m *LabelNamesResponse) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -5246,7 +5410,7 @@ func (m *LabelNamesResponse) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -5256,6 +5420,9 @@ func (m *LabelNamesResponse) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthCortex
}
postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthCortex
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -5270,6 +5437,9 @@ func (m *LabelNamesResponse) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthCortex
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthCortex
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
@@ -5297,7 +5467,7 @@ func (m *UserStatsRequest) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -5320,6 +5490,9 @@ func (m *UserStatsRequest) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthCortex
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthCortex
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
@@ -5347,7 +5520,7 @@ func (m *UserStatsResponse) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -5386,7 +5559,7 @@ func (m *UserStatsResponse) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.NumSeries |= (uint64(b) & 0x7F) << shift
+ m.NumSeries |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -5422,6 +5595,9 @@ func (m *UserStatsResponse) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthCortex
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthCortex
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
@@ -5449,7 +5625,7 @@ func (m *UserIDStatsResponse) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -5477,7 +5653,7 @@ func (m *UserIDStatsResponse) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -5487,6 +5663,9 @@ func (m *UserIDStatsResponse) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthCortex
}
postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthCortex
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -5506,7 +5685,7 @@ func (m *UserIDStatsResponse) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= (int(b) & 0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -5515,6 +5694,9 @@ func (m *UserIDStatsResponse) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthCortex
}
postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthCortex
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -5534,6 +5716,9 @@ func (m *UserIDStatsResponse) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthCortex
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthCortex
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
@@ -5561,7 +5746,7 @@ func (m *UsersStatsResponse) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -5589,7 +5774,7 @@ func (m *UsersStatsResponse) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= (int(b) & 0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -5598,6 +5783,9 @@ func (m *UsersStatsResponse) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthCortex
}
postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthCortex
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -5615,6 +5803,9 @@ func (m *UsersStatsResponse) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthCortex
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthCortex
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
@@ -5642,7 +5833,7 @@ func (m *MetricsForLabelMatchersRequest) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -5670,7 +5861,7 @@ func (m *MetricsForLabelMatchersRequest) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.StartTimestampMs |= (int64(b) & 0x7F) << shift
+ m.StartTimestampMs |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -5689,7 +5880,7 @@ func (m *MetricsForLabelMatchersRequest) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.EndTimestampMs |= (int64(b) & 0x7F) << shift
+ m.EndTimestampMs |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -5708,7 +5899,7 @@ func (m *MetricsForLabelMatchersRequest) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= (int(b) & 0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -5717,6 +5908,9 @@ func (m *MetricsForLabelMatchersRequest) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthCortex
}
postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthCortex
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -5734,6 +5928,9 @@ func (m *MetricsForLabelMatchersRequest) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthCortex
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthCortex
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
@@ -5761,7 +5958,7 @@ func (m *MetricsForLabelMatchersResponse) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -5789,7 +5986,7 @@ func (m *MetricsForLabelMatchersResponse) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= (int(b) & 0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -5798,6 +5995,9 @@ func (m *MetricsForLabelMatchersResponse) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthCortex
}
postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthCortex
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -5815,6 +6015,9 @@ func (m *MetricsForLabelMatchersResponse) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthCortex
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthCortex
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
@@ -5842,7 +6045,7 @@ func (m *TimeSeriesChunk) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -5870,7 +6073,7 @@ func (m *TimeSeriesChunk) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -5880,6 +6083,9 @@ func (m *TimeSeriesChunk) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthCortex
}
postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthCortex
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -5899,7 +6105,7 @@ func (m *TimeSeriesChunk) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -5909,6 +6115,9 @@ func (m *TimeSeriesChunk) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthCortex
}
postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthCortex
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -5928,7 +6137,7 @@ func (m *TimeSeriesChunk) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= (int(b) & 0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -5937,10 +6146,13 @@ func (m *TimeSeriesChunk) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthCortex
}
postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthCortex
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Labels = append(m.Labels, LabelPair{})
+ m.Labels = append(m.Labels, LabelAdapter{})
if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
@@ -5959,7 +6171,7 @@ func (m *TimeSeriesChunk) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= (int(b) & 0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -5968,6 +6180,9 @@ func (m *TimeSeriesChunk) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthCortex
}
postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthCortex
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -5985,6 +6200,9 @@ func (m *TimeSeriesChunk) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthCortex
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthCortex
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
@@ -6012,7 +6230,7 @@ func (m *Chunk) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -6040,7 +6258,7 @@ func (m *Chunk) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.StartTimestampMs |= (int64(b) & 0x7F) << shift
+ m.StartTimestampMs |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -6059,7 +6277,7 @@ func (m *Chunk) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.EndTimestampMs |= (int64(b) & 0x7F) << shift
+ m.EndTimestampMs |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -6078,7 +6296,7 @@ func (m *Chunk) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.Encoding |= (int32(b) & 0x7F) << shift
+ m.Encoding |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -6097,7 +6315,7 @@ func (m *Chunk) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- byteLen |= (int(b) & 0x7F) << shift
+ byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -6106,6 +6324,9 @@ func (m *Chunk) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthCortex
}
postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthCortex
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -6123,6 +6344,9 @@ func (m *Chunk) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthCortex
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthCortex
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
@@ -6150,7 +6374,7 @@ func (m *TransferChunksResponse) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -6173,6 +6397,9 @@ func (m *TransferChunksResponse) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthCortex
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthCortex
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
@@ -6200,7 +6427,7 @@ func (m *TimeSeries) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -6228,7 +6455,7 @@ func (m *TimeSeries) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= (int(b) & 0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -6237,10 +6464,13 @@ func (m *TimeSeries) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthCortex
}
postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthCortex
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Labels = append(m.Labels, LabelPair{})
+ m.Labels = append(m.Labels, LabelAdapter{})
if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
@@ -6259,7 +6489,7 @@ func (m *TimeSeries) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= (int(b) & 0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -6268,6 +6498,9 @@ func (m *TimeSeries) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthCortex
}
postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthCortex
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -6285,6 +6518,9 @@ func (m *TimeSeries) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthCortex
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthCortex
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
@@ -6312,7 +6548,7 @@ func (m *LabelPair) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -6340,7 +6576,7 @@ func (m *LabelPair) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- byteLen |= (int(b) & 0x7F) << shift
+ byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -6349,11 +6585,15 @@ func (m *LabelPair) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthCortex
}
postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthCortex
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := m.Name.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
+ m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...)
+ if m.Name == nil {
+ m.Name = []byte{}
}
iNdEx = postIndex
case 2:
@@ -6370,7 +6610,7 @@ func (m *LabelPair) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- byteLen |= (int(b) & 0x7F) << shift
+ byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -6379,11 +6619,15 @@ func (m *LabelPair) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthCortex
}
postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthCortex
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
+ m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
+ if m.Value == nil {
+ m.Value = []byte{}
}
iNdEx = postIndex
default:
@@ -6395,6 +6639,9 @@ func (m *LabelPair) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthCortex
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthCortex
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
@@ -6422,7 +6669,7 @@ func (m *Sample) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -6461,7 +6708,7 @@ func (m *Sample) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.TimestampMs |= (int64(b) & 0x7F) << shift
+ m.TimestampMs |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -6475,6 +6722,9 @@ func (m *Sample) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthCortex
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthCortex
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
@@ -6502,7 +6752,7 @@ func (m *LabelMatchers) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -6530,7 +6780,7 @@ func (m *LabelMatchers) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= (int(b) & 0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -6539,6 +6789,9 @@ func (m *LabelMatchers) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthCortex
}
postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthCortex
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -6556,6 +6809,9 @@ func (m *LabelMatchers) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthCortex
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthCortex
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
@@ -6583,7 +6839,7 @@ func (m *Metric) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -6611,7 +6867,7 @@ func (m *Metric) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= (int(b) & 0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -6620,10 +6876,13 @@ func (m *Metric) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthCortex
}
postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthCortex
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Labels = append(m.Labels, LabelPair{})
+ m.Labels = append(m.Labels, LabelAdapter{})
if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
@@ -6637,6 +6896,9 @@ func (m *Metric) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthCortex
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthCortex
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
@@ -6664,7 +6926,7 @@ func (m *LabelMatcher) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -6692,7 +6954,7 @@ func (m *LabelMatcher) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.Type |= (MatchType(b) & 0x7F) << shift
+ m.Type |= MatchType(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -6711,7 +6973,7 @@ func (m *LabelMatcher) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -6721,6 +6983,9 @@ func (m *LabelMatcher) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthCortex
}
postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthCortex
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -6740,7 +7005,7 @@ func (m *LabelMatcher) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -6750,6 +7015,9 @@ func (m *LabelMatcher) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthCortex
}
postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthCortex
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -6764,6 +7032,9 @@ func (m *LabelMatcher) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthCortex
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthCortex
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
@@ -6830,10 +7101,13 @@ func skipCortex(dAtA []byte) (n int, err error) {
break
}
}
- iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthCortex
}
+ iNdEx += length
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthCortex
+ }
return iNdEx, nil
case 3:
for {
@@ -6862,6 +7136,9 @@ func skipCortex(dAtA []byte) (n int, err error) {
return 0, err
}
iNdEx = start + next
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthCortex
+ }
}
return iNdEx, nil
case 4:
@@ -6880,89 +7157,3 @@ var (
ErrInvalidLengthCortex = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowCortex = fmt.Errorf("proto: integer overflow")
)
-
-func init() {
- proto.RegisterFile("github.com/cortexproject/cortex/pkg/ingester/client/cortex.proto", fileDescriptor_cortex_dc30309a17c87a98)
-}
-
-var fileDescriptor_cortex_dc30309a17c87a98 = []byte{
- // 1247 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0x4f, 0x6f, 0x1b, 0x45,
- 0x14, 0xdf, 0x8d, 0xff, 0x24, 0x7e, 0x76, 0x5c, 0x67, 0xd2, 0xd2, 0xd4, 0x15, 0xeb, 0x32, 0x52,
- 0x4b, 0x04, 0xd4, 0x2e, 0xa9, 0x0a, 0x45, 0x50, 0x81, 0xd3, 0xba, 0xad, 0x51, 0x92, 0xa6, 0x6b,
- 0x17, 0x10, 0x12, 0x5a, 0x6d, 0xec, 0xa9, 0xb3, 0x74, 0xff, 0xb8, 0x33, 0xb3, 0x40, 0x6e, 0x7c,
- 0x03, 0x38, 0xc2, 0x37, 0xe0, 0x86, 0xc4, 0x05, 0x3e, 0x42, 0x8f, 0x3d, 0x56, 0x1c, 0x2a, 0xea,
- 0x5e, 0x38, 0xf6, 0x23, 0xa0, 0x9d, 0x99, 0x5d, 0xef, 0xba, 0xb6, 0x08, 0x42, 0xbd, 0x79, 0xde,
- 0xfb, 0xbd, 0xdf, 0xbe, 0xbf, 0xf3, 0xc6, 0xf0, 0xc9, 0xc8, 0xe1, 0x87, 0xe1, 0x41, 0x73, 0x10,
- 0x78, 0xad, 0x41, 0x40, 0x39, 0xf9, 0x6e, 0x4c, 0x83, 0xaf, 0xc9, 0x80, 0xab, 0x53, 0x6b, 0xfc,
- 0x60, 0xd4, 0x72, 0xfc, 0x11, 0x61, 0x9c, 0xd0, 0xd6, 0xc0, 0x75, 0x88, 0x1f, 0xab, 0x9a, 0x63,
- 0x1a, 0xf0, 0x00, 0x15, 0xe5, 0xa9, 0x7e, 0x31, 0xc5, 0x34, 0x0a, 0x46, 0x41, 0x4b, 0xa8, 0x0f,
- 0xc2, 0xfb, 0xe2, 0x24, 0x0e, 0xe2, 0x97, 0x34, 0xc3, 0x7f, 0xe8, 0x50, 0xf9, 0x9c, 0x3a, 0x9c,
- 0x98, 0xe4, 0x61, 0x48, 0x18, 0x47, 0x7b, 0x00, 0xdc, 0xf1, 0x08, 0x23, 0xd4, 0x21, 0x6c, 0x43,
- 0x3f, 0x97, 0xdb, 0x2c, 0x6f, 0xa1, 0xa6, 0xfa, 0x54, 0xdf, 0xf1, 0x48, 0x4f, 0x68, 0xb6, 0xeb,
- 0x8f, 0x9e, 0x36, 0xb4, 0x3f, 0x9f, 0x36, 0xd0, 0x3e, 0x25, 0xb6, 0xeb, 0x06, 0x83, 0x7e, 0x62,
- 0x65, 0xa6, 0x18, 0xd0, 0xfb, 0x50, 0xec, 0x05, 0x21, 0x1d, 0x90, 0x8d, 0xa5, 0x73, 0xfa, 0x66,
- 0x75, 0xab, 0x11, 0x73, 0xa5, 0xbf, 0xda, 0x94, 0x90, 0x8e, 0x1f, 0x7a, 0x66, 0x91, 0x89, 0xdf,
- 0xb8, 0x01, 0x30, 0x95, 0xa2, 0x65, 0xc8, 0xb5, 0xf7, 0xbb, 0x35, 0x0d, 0xad, 0x40, 0xde, 0xbc,
- 0xb7, 0xd3, 0xa9, 0xe9, 0xf8, 0x04, 0xac, 0x2a, 0x0e, 0x36, 0x0e, 0x7c, 0x46, 0xf0, 0x35, 0x28,
- 0x9b, 0xc4, 0x1e, 0xc6, 0x91, 0x34, 0x61, 0xf9, 0x61, 0x98, 0x0e, 0xe3, 0x64, 0xfc, 0xe9, 0xbb,
- 0x21, 0xa1, 0x47, 0x0a, 0x66, 0xc6, 0x20, 0xfc, 0x31, 0x54, 0xa4, 0xb9, 0xa4, 0x43, 0x2d, 0x58,
- 0xa6, 0x84, 0x85, 0x2e, 0x8f, 0xed, 0x4f, 0xcd, 0xd8, 0x4b, 0x9c, 0x19, 0xa3, 0xf0, 0x4f, 0x3a,
- 0x54, 0xd2, 0xd4, 0xe8, 0x1d, 0x40, 0x8c, 0xdb, 0x94, 0x5b, 0x22, 0x1f, 0xdc, 0xf6, 0xc6, 0x96,
- 0x17, 0x91, 0xe9, 0x9b, 0x39, 0xb3, 0x26, 0x34, 0xfd, 0x58, 0xb1, 0xcb, 0xd0, 0x26, 0xd4, 0x88,
- 0x3f, 0xcc, 0x62, 0x97, 0x04, 0xb6, 0x4a, 0xfc, 0x61, 0x1a, 0x79, 0x09, 0x56, 0x3c, 0x9b, 0x0f,
- 0x0e, 0x09, 0x65, 0x1b, 0xb9, 0x6c, 0x68, 0x3b, 0xf6, 0x01, 0x71, 0x77, 0xa5, 0xd2, 0x4c, 0x50,
- 0xb8, 0x0b, 0xab, 0x19, 0xa7, 0xd1, 0xd5, 0x63, 0x96, 0x39, 0x1f, 0x95, 0x39, 0x5d, 0x50, 0xdc,
- 0x87, 0x75, 0x41, 0xd5, 0xe3, 0x94, 0xd8, 0x5e, 0x42, 0x78, 0x6d, 0x0e, 0xe1, 0xe9, 0x97, 0x09,
- 0xaf, 0x1f, 0x86, 0xfe, 0x83, 0x39, 0xac, 0x97, 0x01, 0x09, 0xd7, 0x3f, 0xb3, 0xdd, 0x90, 0xb0,
- 0x38, 0x81, 0xaf, 0x03, 0xb8, 0x91, 0xd4, 0xf2, 0x6d, 0x8f, 0x88, 0xc4, 0x95, 0xcc, 0x92, 0x90,
- 0xec, 0xd9, 0x1e, 0xc1, 0x57, 0x61, 0x3d, 0x63, 0xa4, 0x5c, 0x79, 0x03, 0x2a, 0xd2, 0xea, 0x1b,
- 0x21, 0x17, 0xce, 0x94, 0xcc, 0xb2, 0x3b, 0x85, 0xe2, 0x75, 0x58, 0xdb, 0x89, 0x69, 0xe2, 0xaf,
- 0xe1, 0x2b, 0xca, 0x07, 0x25, 0x54, 0x6c, 0x0d, 0x28, 0x4f, 0x7d, 0x88, 0xc9, 0x20, 0x71, 0x82,
- 0x61, 0x04, 0xb5, 0x7b, 0x8c, 0xd0, 0x1e, 0xb7, 0x79, 0x42, 0xf5, 0xbb, 0x0e, 0x6b, 0x29, 0xa1,
- 0xa2, 0x3a, 0x0f, 0x55, 0x39, 0xc3, 0x4e, 0xe0, 0x5b, 0xd4, 0xe6, 0x32, 0x24, 0xdd, 0x5c, 0x4d,
- 0xa4, 0xa6, 0xcd, 0x49, 0x14, 0xb5, 0x1f, 0x7a, 0x96, 0x4a, 0x65, 0xd4, 0x02, 0x79, 0xb3, 0xe4,
- 0x87, 0x9e, 0xcc, 0x60, 0xd4, 0x55, 0xf6, 0xd8, 0xb1, 0x66, 0x98, 0x72, 0x82, 0xa9, 0x66, 0x8f,
- 0x9d, 0x6e, 0x86, 0xac, 0x09, 0xeb, 0x34, 0x74, 0xc9, 0x2c, 0x3c, 0x2f, 0xe0, 0x6b, 0x91, 0x2a,
- 0x83, 0xc7, 0x5f, 0xc1, 0x7a, 0xe4, 0x78, 0xf7, 0x46, 0xd6, 0xf5, 0xd3, 0xb0, 0x1c, 0x32, 0x42,
- 0x2d, 0x67, 0xa8, 0xca, 0x50, 0x8c, 0x8e, 0xdd, 0x21, 0xba, 0x08, 0xf9, 0xa1, 0xcd, 0x6d, 0xe1,
- 0x66, 0x79, 0xeb, 0x4c, 0x5c, 0xf1, 0x97, 0x82, 0x37, 0x05, 0x0c, 0xdf, 0x02, 0x14, 0xa9, 0x58,
- 0x96, 0xfd, 0x5d, 0x28, 0xb0, 0x48, 0xa0, 0xfa, 0xe6, 0x6c, 0x9a, 0x65, 0xc6, 0x13, 0x53, 0x22,
- 0xf1, 0x6f, 0x3a, 0x18, 0xbb, 0x84, 0x53, 0x67, 0xc0, 0x6e, 0x06, 0x34, 0xdd, 0xf6, 0xec, 0x55,
- 0x8f, 0xdf, 0x55, 0xa8, 0xc4, 0x83, 0x65, 0x31, 0xc2, 0xd5, 0x08, 0x9e, 0x9a, 0x37, 0x82, 0xcc,
- 0x2c, 0xc7, 0xd0, 0x1e, 0xe1, 0xb8, 0x0b, 0x8d, 0x85, 0x3e, 0xab, 0x54, 0x5c, 0x80, 0xa2, 0x27,
- 0x20, 0x2a, 0x17, 0xd5, 0x98, 0x56, 0x1a, 0x9a, 0x4a, 0x1b, 0xc5, 0x7f, 0x62, 0x66, 0xac, 0xa2,
- 0x10, 0xee, 0xd3, 0xc0, 0xb3, 0xe2, 0x45, 0x31, 0xad, 0x56, 0x35, 0x92, 0x77, 0x95, 0xb8, 0x3b,
- 0x4c, 0x97, 0x73, 0x29, 0x53, 0xce, 0x16, 0x14, 0x45, 0x6b, 0xc7, 0x17, 0xcb, 0x5a, 0x26, 0xaa,
- 0x7d, 0xdb, 0xa1, 0x6a, 0x78, 0x15, 0x0c, 0xbd, 0x0d, 0xc5, 0x41, 0xf4, 0x71, 0xb6, 0x91, 0x17,
- 0x06, 0xab, 0xb1, 0x41, 0x7a, 0xd2, 0x15, 0x04, 0xff, 0xa0, 0x43, 0x41, 0xba, 0xfa, 0xaa, 0x6a,
- 0x53, 0x87, 0x15, 0xe2, 0x0f, 0x82, 0xa1, 0xe3, 0x8f, 0xc4, 0x48, 0x14, 0xcc, 0xe4, 0x8c, 0x90,
- 0x6a, 0xd5, 0xa8, 0xf7, 0x2b, 0xaa, 0x1f, 0x37, 0xe0, 0xb5, 0x3e, 0xb5, 0x7d, 0x76, 0x9f, 0x50,
- 0xe1, 0x58, 0x52, 0x08, 0xec, 0x01, 0x4c, 0xf3, 0x9b, 0xca, 0x8b, 0x7e, 0xbc, 0xbc, 0x34, 0x61,
- 0x99, 0xd9, 0xde, 0xd8, 0x15, 0x13, 0x9c, 0x29, 0x64, 0x4f, 0x88, 0x15, 0x3c, 0x06, 0xe1, 0x5f,
- 0x75, 0x28, 0x25, 0x5c, 0xe8, 0x0e, 0xe4, 0x93, 0x2b, 0xaf, 0xb2, 0xfd, 0xa1, 0xda, 0xb5, 0x97,
- 0x8f, 0xf3, 0x4a, 0x08, 0xb9, 0xe3, 0xb6, 0xbe, 0x75, 0x28, 0x69, 0x6e, 0x1f, 0x71, 0xc2, 0x4c,
- 0x41, 0x84, 0xee, 0x42, 0x41, 0xdc, 0x86, 0x22, 0x6d, 0xff, 0x93, 0x51, 0x32, 0xe1, 0x36, 0x14,
- 0x65, 0x28, 0xe8, 0x64, 0x4c, 0x2e, 0xaf, 0x33, 0x79, 0x88, 0xae, 0xe1, 0x39, 0x05, 0x2b, 0xf3,
- 0x69, 0xb5, 0x70, 0x1b, 0x56, 0x33, 0x53, 0x90, 0xd9, 0x6c, 0xfa, 0xb1, 0x36, 0xdb, 0x07, 0x50,
- 0x94, 0x93, 0xf1, 0x9f, 0x4b, 0x84, 0x2d, 0xa8, 0xa4, 0x49, 0xd1, 0x79, 0xc8, 0xf3, 0xa3, 0xb1,
- 0x8c, 0xa2, 0x3a, 0x35, 0x17, 0xea, 0xfe, 0xd1, 0x98, 0x98, 0x42, 0x1d, 0xb5, 0x91, 0xa8, 0x8d,
- 0x1c, 0x1c, 0x99, 0xde, 0x24, 0x03, 0x39, 0x21, 0x94, 0x87, 0xb7, 0x3e, 0x85, 0x52, 0x62, 0x8c,
- 0x4a, 0x50, 0xe8, 0xdc, 0xbd, 0xd7, 0xde, 0xa9, 0x69, 0x68, 0x15, 0x4a, 0x7b, 0x77, 0xfa, 0x96,
- 0x3c, 0xea, 0xe8, 0x04, 0x94, 0xcd, 0xce, 0xad, 0xce, 0x17, 0xd6, 0x6e, 0xbb, 0x7f, 0xfd, 0x76,
- 0x6d, 0x09, 0x21, 0xa8, 0x4a, 0xc1, 0xde, 0x1d, 0x25, 0xcb, 0x6d, 0xfd, 0x5c, 0x80, 0x95, 0x78,
- 0x80, 0xd1, 0x15, 0xc8, 0xef, 0x87, 0xec, 0x10, 0x9d, 0x9c, 0xf7, 0x98, 0xaa, 0x9f, 0x9a, 0x91,
- 0xaa, 0x86, 0xd6, 0xd0, 0x7b, 0x50, 0x10, 0xab, 0x1b, 0xcd, 0x7d, 0x09, 0xd5, 0xe7, 0xbf, 0x6f,
- 0xb0, 0x86, 0x6e, 0x40, 0x39, 0xb5, 0xf2, 0x17, 0x58, 0x9f, 0xcd, 0x48, 0xb3, 0xaf, 0x03, 0xac,
- 0x5d, 0xd2, 0xd1, 0x6d, 0x28, 0xa7, 0xb6, 0x35, 0xaa, 0x67, 0xca, 0x93, 0xd9, 0xfb, 0x53, 0xae,
- 0x39, 0xeb, 0x1d, 0x6b, 0xa8, 0x03, 0x30, 0x5d, 0xd4, 0xe8, 0x4c, 0x06, 0x9c, 0xde, 0xe8, 0xf5,
- 0xfa, 0x3c, 0x55, 0x42, 0xb3, 0x0d, 0xa5, 0x64, 0x4d, 0xa1, 0x8d, 0x39, 0x9b, 0x4b, 0x92, 0x2c,
- 0xde, 0x69, 0x58, 0x43, 0x37, 0xa1, 0xd2, 0x76, 0xdd, 0xe3, 0xd0, 0xd4, 0xd3, 0x1a, 0x36, 0xcb,
- 0xe3, 0xc2, 0xe9, 0x05, 0x9b, 0x01, 0x5d, 0xc8, 0x6e, 0x80, 0x45, 0xeb, 0xae, 0xfe, 0xe6, 0xbf,
- 0xe2, 0x92, 0xaf, 0xed, 0x42, 0x35, 0x7b, 0xeb, 0xa1, 0x45, 0x4f, 0xb5, 0xba, 0x91, 0x28, 0xe6,
- 0x5f, 0x93, 0xda, 0xa6, 0xbe, 0xfd, 0xd1, 0xe3, 0x67, 0x86, 0xf6, 0xe4, 0x99, 0xa1, 0xbd, 0x78,
- 0x66, 0xe8, 0xdf, 0x4f, 0x0c, 0xfd, 0x97, 0x89, 0xa1, 0x3f, 0x9a, 0x18, 0xfa, 0xe3, 0x89, 0xa1,
- 0xff, 0x35, 0x31, 0xf4, 0xbf, 0x27, 0x86, 0xf6, 0x62, 0x62, 0xe8, 0x3f, 0x3e, 0x37, 0xb4, 0xc7,
- 0xcf, 0x0d, 0xed, 0xc9, 0x73, 0x43, 0xfb, 0xb2, 0x28, 0xff, 0xc6, 0x1c, 0x14, 0xc5, 0x3f, 0x91,
- 0xcb, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x33, 0xbf, 0x53, 0xf9, 0x04, 0x0d, 0x00, 0x00,
-}
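
The churn in the generated unmarshalling code above is mechanical but security-relevant: the varint accumulation is rewritten from `(uint64(b) & 0x7F) << shift` to the equivalent `uint64(b&0x7F) << shift`, and every computed `postIndex` and advanced `iNdEx` now gets an explicit `< 0` check, so a hostile length can no longer overflow the platform int and slip past the existing `postIndex > l` bounds test. A minimal hand-rolled sketch of the same pattern (names and values are illustrative, not from the generated file):

package main

import (
	"errors"
	"fmt"
)

var (
	errInvalidLength = errors.New("proto: negative length found during unmarshaling")
	errIntOverflow   = errors.New("proto: integer overflow")
)

// readVarint decodes a base-128 varint from buf starting at i, with the same
// guards the generated code applies: cap the shift at 64 bits and reject
// truncated input.
func readVarint(buf []byte, i int) (v uint64, next int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errIntOverflow
		}
		if i >= len(buf) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := buf[i]
		i++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			break
		}
	}
	return v, i, nil
}

func main() {
	buf := []byte{0x96, 0x01} // varint encoding of 150
	v, next, _ := readVarint(buf, 0)
	fmt.Println(v, next) // 150 2

	// The length comes from untrusted input, so next+length can wrap
	// negative before the > len(buf) check fires; hence the new guard.
	length := int(v)
	if postIndex := next + length; length < 0 || postIndex < 0 {
		fmt.Println(errInvalidLength) // the added overflow guard
	} else if postIndex > len(buf) {
		fmt.Println("unexpected EOF") // the pre-existing bounds check
	}
}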
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.proto b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.proto
index 02e17e35b8a1f..e1659919920f7 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.proto
+++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.proto
@@ -104,7 +104,7 @@ message MetricsForLabelMatchersResponse {
message TimeSeriesChunk {
string from_ingester_id = 1;
string user_id = 2;
- repeated LabelPair labels = 3 [(gogoproto.nullable) = false];
+ repeated LabelPair labels = 3 [(gogoproto.nullable) = false, (gogoproto.customtype) = "LabelAdapter"];
repeated Chunk chunks = 4 [(gogoproto.nullable) = false];
}
@@ -119,14 +119,14 @@ message TransferChunksResponse {
}
message TimeSeries {
- repeated LabelPair labels = 1 [(gogoproto.nullable) = false];
+ repeated LabelPair labels = 1 [(gogoproto.nullable) = false, (gogoproto.customtype) = "LabelAdapter"];
// Sorted by time, oldest sample first.
repeated Sample samples = 2 [(gogoproto.nullable) = false];
}
message LabelPair {
- bytes name = 1 [(gogoproto.customtype) = "github.com/cortexproject/cortex/pkg/util/wire.Bytes", (gogoproto.nullable) = false];
- bytes value = 2 [(gogoproto.customtype) = "github.com/cortexproject/cortex/pkg/util/wire.Bytes", (gogoproto.nullable) = false];
+ bytes name = 1;
+ bytes value = 2;
}
message Sample {
@@ -139,7 +139,7 @@ message LabelMatchers {
}
message Metric {
- repeated LabelPair labels = 1 [(gogoproto.nullable) = false];
+ repeated LabelPair labels = 1 [(gogoproto.nullable) = false, (gogoproto.customtype) = "LabelAdapter"];
}
enum MatchType {
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/fnv.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/fnv.go
index 41453b10aaa9a..c415085950606 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/fnv.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/fnv.go
@@ -19,6 +19,8 @@ package client
const (
offset64 = 14695981039346656037
prime64 = 1099511628211
+ offset32 = 2166136261
+ prime32 = 16777619
)
// hashNew initializes a new fnv64a hash value.
@@ -27,7 +29,8 @@ func hashNew() uint64 {
}
// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
-func hashAdd(h uint64, s []byte) uint64 {
+// Note this is the same algorithm as Go stdlib `sum64a.Write()`
+func hashAdd(h uint64, s string) uint64 {
for i := 0; i < len(s); i++ {
h ^= uint64(s[i])
h *= prime64
@@ -41,3 +44,18 @@ func hashAddByte(h uint64, b byte) uint64 {
h *= prime64
return h
}
+
+// HashNew32 initializes a new fnv32 hash value.
+func HashNew32() uint32 {
+ return offset32
+}
+
+// HashAdd32 adds a string to a fnv32 hash value, returning the updated hash.
+// Note this is the same algorithm as Go stdlib `sum32.Write()`
+func HashAdd32(h uint32, s string) uint32 {
+ for i := 0; i < len(s); i++ {
+ h *= prime32
+ h ^= uint32(s[i])
+ }
+ return h
+}
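
The two new 32-bit helpers mirror the stdlib: `HashAdd32` multiplies before XORing (FNV-1, the same order as `hash/fnv`'s `New32`), while the 64-bit `hashAdd` XORs first (FNV-1a, matching `sum64a`). Operating on a `string` directly avoids both a hasher allocation and a string-to-bytes copy per call. A quick equivalence check against the stdlib, using the constants from the diff:

package main

import (
	"fmt"
	"hash/fnv"
)

const (
	offset32 = 2166136261
	prime32  = 16777619
)

// hashAdd32 mirrors HashAdd32 above: FNV-1 (multiply, then XOR), the same
// order as the stdlib's fnv.New32, but on a string with no hasher allocation.
func hashAdd32(h uint32, s string) uint32 {
	for i := 0; i < len(s); i++ {
		h *= prime32
		h ^= uint32(s[i])
	}
	return h
}

func main() {
	h := hashAdd32(offset32, "instance")

	ref := fnv.New32()
	ref.Write([]byte("instance"))

	fmt.Println(h == ref.Sum32()) // true: identical digests
}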
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/timeseries.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/timeseries.go
index b5b46b9ab878d..8c7cc40588c8a 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/timeseries.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/timeseries.go
@@ -1,6 +1,14 @@
package client
-import "flag"
+import (
+ "flag"
+ "fmt"
+ "io"
+ "strings"
+ "unsafe"
+
+ "github.com/prometheus/prometheus/pkg/labels"
+)
var (
expectedTimeseries = 100
@@ -37,7 +45,182 @@ type PreallocTimeseries struct {
// Unmarshal implements proto.Message.
func (p *PreallocTimeseries) Unmarshal(dAtA []byte) error {
- p.Labels = make([]LabelPair, 0, expectedLabels)
+ p.Labels = make([]LabelAdapter, 0, expectedLabels)
p.Samples = make([]Sample, 0, expectedSamplesPerSeries)
return p.TimeSeries.Unmarshal(dAtA)
}
+
+// LabelAdapter is a labels.Label that can be marshalled to/from protos.
+type LabelAdapter labels.Label
+
+// Marshal implements proto.Marshaller.
+func (bs *LabelAdapter) Marshal() ([]byte, error) {
+ buf := make([]byte, bs.Size())
+ _, err := bs.MarshalTo(buf)
+ return buf, err
+}
+
+// MarshalTo implements proto.Marshaller.
+func (bs *LabelAdapter) MarshalTo(buf []byte) (n int, err error) {
+ var i int
+ ls := (*labels.Label)(bs)
+
+ buf[i] = 0xa
+ i++
+ i = encodeVarintCortex(buf, i, uint64(len(ls.Name)))
+ i += copy(buf[i:], ls.Name)
+
+ buf[i] = 0x12
+ i++
+ i = encodeVarintCortex(buf, i, uint64(len(ls.Value)))
+ i += copy(buf[i:], ls.Value)
+
+ return i, nil
+}
+
+// Unmarshal a LabelAdapter; implements proto.Unmarshaller.
+// NB this is a copy of the autogenerated code to unmarshal a LabelPair,
+// with the byte copying replaced with a yoloString.
+func (bs *LabelAdapter) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCortex
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LabelPair: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LabelPair: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCortex
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthCortex
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthCortex
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ bs.Name = yoloString(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCortex
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthCortex
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthCortex
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ bs.Value = yoloString(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipCortex(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthCortex
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthCortex
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+
+func yoloString(buf []byte) string {
+ return *((*string)(unsafe.Pointer(&buf)))
+}
+
+// Size implements proto.Sizer.
+func (bs *LabelAdapter) Size() int {
+ ls := (*labels.Label)(bs)
+ var n int
+ l := len(ls.Name)
+ n += 1 + l + sovCortex(uint64(l))
+ l = len(ls.Value)
+ n += 1 + l + sovCortex(uint64(l))
+ return n
+}
+
+// Equal implements proto.Equaler.
+func (bs *LabelAdapter) Equal(other LabelAdapter) bool {
+ return bs.Name == other.Name && bs.Value == other.Value
+}
+
+// Compare implements proto.Comparer.
+func (bs *LabelAdapter) Compare(other LabelAdapter) int {
+ if c := strings.Compare(bs.Name, other.Name); c != 0 {
+ return c
+ }
+ return strings.Compare(bs.Value, other.Value)
+}
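
The key trick in this file is `yoloString`: the unmarshalled label names and values alias the request buffer `dAtA` instead of being copied, so they are only valid for as long as that buffer is. A small stand-alone illustration of the aliasing (demo code, not from the vendored package):

package main

import (
	"fmt"
	"unsafe"
)

// yoloString is the vendored helper verbatim: a zero-copy []byte -> string
// reinterpretation. The result aliases buf's backing array.
func yoloString(buf []byte) string {
	return *((*string)(unsafe.Pointer(&buf)))
}

func main() {
	buf := []byte("up")
	name := yoloString(buf)
	fmt.Println(name) // "up"

	// Mutating the buffer mutates the "immutable" string too. Anything
	// that outlives the request buffer must copy these strings first
	// (see copyString in the inverted index below).
	buf[0] = 'U'
	fmt.Println(name) // "Up"
}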
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/index/index.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/index/index.go
index 32771046379f7..6e072e70335e1 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ingester/index/index.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/index/index.go
@@ -32,7 +32,7 @@ func New() *InvertedIndex {
}
// Add a fingerprint under the specified labels.
-func (ii *InvertedIndex) Add(labels []client.LabelPair, fp model.Fingerprint) labels.Labels {
+func (ii *InvertedIndex) Add(labels []client.LabelAdapter, fp model.Fingerprint) labels.Labels {
shard := &ii.shards[util.HashFP(fp)%indexShards]
return shard.add(labels, fp)
}
@@ -49,7 +49,6 @@ func (ii *InvertedIndex) Lookup(matchers []*labels.Matcher) []model.Fingerprint
result = append(result, fps...)
}
- sort.Sort(fingerprints(result))
return result
}
@@ -105,25 +104,31 @@ type indexShard struct {
pad [cacheLineSize - unsafe.Sizeof(sync.Mutex{}) - unsafe.Sizeof(unlockIndex{})]byte
}
+func copyString(s string) string {
+ return string([]byte(s))
+}
+
// add metric to the index; return all the name/value pairs as strings from the index, sorted
-func (shard *indexShard) add(metric []client.LabelPair, fp model.Fingerprint) labels.Labels {
+func (shard *indexShard) add(metric []client.LabelAdapter, fp model.Fingerprint) labels.Labels {
shard.mtx.Lock()
defer shard.mtx.Unlock()
internedLabels := make(labels.Labels, len(metric))
for i, pair := range metric {
- values, ok := shard.idx[string(pair.Name)]
+ values, ok := shard.idx[pair.Name]
if !ok {
values = indexEntry{
- name: string(pair.Name),
+ name: copyString(pair.Name),
fps: map[string]indexValueEntry{},
}
shard.idx[values.name] = values
}
- fingerprints, ok := values.fps[string(pair.Value)]
+ fingerprints, ok := values.fps[pair.Value]
if !ok {
- fingerprints = indexValueEntry{value: string(pair.Value)}
+ fingerprints = indexValueEntry{
+ value: copyString(pair.Value),
+ }
}
// Insert into the right position to keep fingerprints sorted
j := sort.Search(len(fingerprints.fps), func(i int) bool {
@@ -133,7 +138,7 @@ func (shard *indexShard) add(metric []client.LabelPair, fp model.Fingerprint) la
copy(fingerprints.fps[j+1:], fingerprints.fps[j:])
fingerprints.fps[j] = fp
values.fps[fingerprints.value] = fingerprints
- internedLabels[i] = labels.Label{Name: string(values.name), Value: string(fingerprints.value)}
+ internedLabels[i] = labels.Label{Name: values.name, Value: fingerprints.value}
}
sort.Sort(internedLabels)
return internedLabels
@@ -162,7 +167,7 @@ func (shard *indexShard) lookup(matchers []*labels.Matcher) []model.Fingerprint
// accumulate the matching fingerprints (which are all distinct)
// then sort to maintain the invariant
for value, fps := range values.fps {
- if matcher.Matches(string(value)) {
+ if matcher.Matches(value) {
toIntersect = append(toIntersect, fps.fps...)
}
}
@@ -213,7 +218,7 @@ func (shard *indexShard) delete(labels labels.Labels, fp model.Fingerprint) {
defer shard.mtx.Unlock()
for _, pair := range labels {
- name, value := string(pair.Name), string(pair.Value)
+ name, value := pair.Name, pair.Value
values, ok := shard.idx[name]
if !ok {
continue
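
`copyString` is the counterpart to `yoloString` above: `string([]byte(s))` looks like a no-op, but it forces a fresh allocation, so the names and values the index shard stores as map keys own their memory rather than pinning or aliasing a reused request buffer. A minimal sketch of why the copy matters:

package main

import (
	"fmt"
	"unsafe"
)

// copyString mirrors the vendored helper: round-tripping through []byte
// forces a fresh allocation, detaching the string from any aliased buffer.
func copyString(s string) string {
	return string([]byte(s))
}

func main() {
	buf := []byte("job")
	aliased := *(*string)(unsafe.Pointer(&buf)) // zero-copy, as in yoloString

	owned := copyString(aliased) // what the shard stores in its maps

	buf[0] = 'J'                // the request buffer gets reused
	fmt.Println(aliased, owned) // "Job job": only the copy stays stable
}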
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.pb.go b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.pb.go
index e67bc62a436d5..df54ce305a816 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.pb.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.pb.go
@@ -3,18 +3,17 @@
package ring
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import _ "github.com/gogo/protobuf/gogoproto"
-
-import strconv "strconv"
-
-import strings "strings"
-import reflect "reflect"
-import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
-
-import io "io"
+import (
+ fmt "fmt"
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/gogo/protobuf/proto"
+ github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+ io "io"
+ math "math"
+ reflect "reflect"
+ strconv "strconv"
+ strings "strings"
+)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@@ -42,6 +41,7 @@ var IngesterState_name = map[int32]string{
2: "PENDING",
3: "JOINING",
}
+
var IngesterState_value = map[string]int32{
"ACTIVE": 0,
"LEAVING": 1,
@@ -50,18 +50,18 @@ var IngesterState_value = map[string]int32{
}
func (IngesterState) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_ring_35bba6cb303d16e3, []int{0}
+ return fileDescriptor_7ebe6ffe1686e76b, []int{0}
}
type Desc struct {
- Ingesters map[string]IngesterDesc `protobuf:"bytes,1,rep,name=ingesters" json:"ingesters" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value"`
- Tokens []TokenDesc `protobuf:"bytes,2,rep,name=tokens" json:"tokens"`
+ Ingesters map[string]IngesterDesc `protobuf:"bytes,1,rep,name=ingesters,proto3" json:"ingesters" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ Tokens []TokenDesc `protobuf:"bytes,2,rep,name=tokens,proto3" json:"tokens"`
}
func (m *Desc) Reset() { *m = Desc{} }
func (*Desc) ProtoMessage() {}
func (*Desc) Descriptor() ([]byte, []int) {
- return fileDescriptor_ring_35bba6cb303d16e3, []int{0}
+ return fileDescriptor_7ebe6ffe1686e76b, []int{0}
}
func (m *Desc) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -78,8 +78,8 @@ func (m *Desc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return b[:n], nil
}
}
-func (dst *Desc) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Desc.Merge(dst, src)
+func (m *Desc) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Desc.Merge(m, src)
}
func (m *Desc) XXX_Size() int {
return m.Size()
@@ -108,13 +108,13 @@ type IngesterDesc struct {
Addr string `protobuf:"bytes,1,opt,name=addr,proto3" json:"addr,omitempty"`
Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
State IngesterState `protobuf:"varint,3,opt,name=state,proto3,enum=ring.IngesterState" json:"state,omitempty"`
- Tokens []uint32 `protobuf:"varint,6,rep,packed,name=tokens" json:"tokens,omitempty"`
+ Tokens []uint32 `protobuf:"varint,6,rep,packed,name=tokens,proto3" json:"tokens,omitempty"`
}
func (m *IngesterDesc) Reset() { *m = IngesterDesc{} }
func (*IngesterDesc) ProtoMessage() {}
func (*IngesterDesc) Descriptor() ([]byte, []int) {
- return fileDescriptor_ring_35bba6cb303d16e3, []int{1}
+ return fileDescriptor_7ebe6ffe1686e76b, []int{1}
}
func (m *IngesterDesc) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -131,8 +131,8 @@ func (m *IngesterDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
return b[:n], nil
}
}
-func (dst *IngesterDesc) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IngesterDesc.Merge(dst, src)
+func (m *IngesterDesc) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_IngesterDesc.Merge(m, src)
}
func (m *IngesterDesc) XXX_Size() int {
return m.Size()
@@ -179,7 +179,7 @@ type TokenDesc struct {
func (m *TokenDesc) Reset() { *m = TokenDesc{} }
func (*TokenDesc) ProtoMessage() {}
func (*TokenDesc) Descriptor() ([]byte, []int) {
- return fileDescriptor_ring_35bba6cb303d16e3, []int{2}
+ return fileDescriptor_7ebe6ffe1686e76b, []int{2}
}
func (m *TokenDesc) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -196,8 +196,8 @@ func (m *TokenDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return b[:n], nil
}
}
-func (dst *TokenDesc) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TokenDesc.Merge(dst, src)
+func (m *TokenDesc) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TokenDesc.Merge(m, src)
}
func (m *TokenDesc) XXX_Size() int {
return m.Size()
@@ -223,12 +223,49 @@ func (m *TokenDesc) GetIngester() string {
}
func init() {
+ proto.RegisterEnum("ring.IngesterState", IngesterState_name, IngesterState_value)
proto.RegisterType((*Desc)(nil), "ring.Desc")
proto.RegisterMapType((map[string]IngesterDesc)(nil), "ring.Desc.IngestersEntry")
proto.RegisterType((*IngesterDesc)(nil), "ring.IngesterDesc")
proto.RegisterType((*TokenDesc)(nil), "ring.TokenDesc")
- proto.RegisterEnum("ring.IngesterState", IngesterState_name, IngesterState_value)
}
+
+func init() {
+ proto.RegisterFile("github.com/cortexproject/cortex/pkg/ring/ring.proto", fileDescriptor_7ebe6ffe1686e76b)
+}
+
+var fileDescriptor_7ebe6ffe1686e76b = []byte{
+ // 440 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x92, 0xcf, 0x6e, 0xd3, 0x40,
+ 0x10, 0xc6, 0x77, 0xe2, 0x3f, 0xc4, 0x13, 0x52, 0xac, 0x05, 0x21, 0x13, 0xa1, 0xc5, 0xca, 0xc9,
+ 0x20, 0x35, 0x91, 0x52, 0x0e, 0x08, 0xa9, 0x87, 0x86, 0x46, 0x28, 0x11, 0x0a, 0x95, 0xa9, 0x7a,
+ 0x4f, 0xd2, 0xc5, 0x84, 0x90, 0xac, 0x65, 0x6f, 0x10, 0xbd, 0xf1, 0x06, 0xf0, 0x18, 0x3c, 0x09,
+ 0xea, 0x31, 0xc7, 0x9e, 0x10, 0x71, 0x2e, 0x1c, 0xfb, 0x08, 0x68, 0xd7, 0x76, 0x9a, 0x5c, 0xac,
+ 0xf9, 0xed, 0x37, 0xdf, 0xb7, 0x33, 0xd6, 0xe2, 0x51, 0x34, 0x95, 0x9f, 0x96, 0xe3, 0xd6, 0x44,
+ 0xcc, 0xdb, 0x13, 0x91, 0x48, 0xfe, 0x2d, 0x4e, 0xc4, 0x67, 0x3e, 0x91, 0x05, 0xb5, 0xe3, 0x59,
+ 0xd4, 0x4e, 0xa6, 0x8b, 0xfc, 0xd3, 0x8a, 0x13, 0x21, 0x05, 0x35, 0x55, 0xdd, 0x38, 0xdc, 0xb1,
+ 0x46, 0x22, 0x12, 0x6d, 0x2d, 0x8e, 0x97, 0x1f, 0x35, 0x69, 0xd0, 0x55, 0x6e, 0x6a, 0xfe, 0x06,
+ 0x34, 0x4f, 0x79, 0x3a, 0xa1, 0xc7, 0xe8, 0x4c, 0x17, 0x11, 0x4f, 0x25, 0x4f, 0x52, 0x0f, 0x7c,
+ 0x23, 0xa8, 0x75, 0x9e, 0xb4, 0x74, 0xba, 0x92, 0x5b, 0xfd, 0x52, 0xeb, 0x2d, 0x64, 0x72, 0xd5,
+ 0x35, 0xaf, 0xff, 0x3c, 0x23, 0xe1, 0x9d, 0x83, 0x1e, 0xa2, 0x2d, 0xc5, 0x8c, 0x2f, 0x52, 0xaf,
+ 0xa2, 0xbd, 0x0f, 0x72, 0xef, 0xb9, 0x3a, 0x53, 0x01, 0x85, 0xa3, 0x68, 0x6a, 0x9c, 0xe1, 0xc1,
+ 0x7e, 0x22, 0x75, 0xd1, 0x98, 0xf1, 0x2b, 0x0f, 0x7c, 0x08, 0x9c, 0x50, 0x95, 0x34, 0x40, 0xeb,
+ 0xeb, 0xe8, 0xcb, 0x92, 0x7b, 0x15, 0x1f, 0x82, 0x5a, 0x87, 0xe6, 0x89, 0xa5, 0x4d, 0x85, 0x86,
+ 0x79, 0xc3, 0xeb, 0xca, 0x2b, 0x68, 0xfe, 0x00, 0xbc, 0xbf, 0xab, 0x51, 0x8a, 0xe6, 0xe8, 0xf2,
+ 0x32, 0x29, 0x12, 0x75, 0x4d, 0x9f, 0xa2, 0x23, 0xa7, 0x73, 0x9e, 0xca, 0xd1, 0x3c, 0xd6, 0xb1,
+ 0x46, 0x78, 0x77, 0x40, 0x9f, 0xa3, 0x95, 0xca, 0x91, 0xe4, 0x9e, 0xe1, 0x43, 0x70, 0xd0, 0x79,
+ 0xb8, 0x7f, 0xe1, 0x07, 0x25, 0x85, 0x79, 0x07, 0x7d, 0xbc, 0x5d, 0xd7, 0xf6, 0x8d, 0xa0, 0x5e,
+ 0xee, 0x35, 0x30, 0xab, 0xa6, 0x6b, 0x0d, 0xcc, 0xaa, 0xe5, 0xda, 0xcd, 0x63, 0x74, 0xb6, 0xeb,
+ 0xd3, 0x47, 0x68, 0xe9, 0x16, 0x3d, 0x4e, 0x3d, 0xcc, 0x81, 0x36, 0xb0, 0x5a, 0xfe, 0x42, 0x3d,
+ 0x8e, 0x13, 0x6e, 0xf9, 0x45, 0x17, 0xeb, 0x7b, 0x57, 0x53, 0x44, 0xfb, 0xe4, 0xcd, 0x79, 0xff,
+ 0xa2, 0xe7, 0x12, 0x5a, 0xc3, 0x7b, 0xef, 0x7a, 0x27, 0x17, 0xfd, 0xe1, 0x5b, 0x17, 0x14, 0x9c,
+ 0xf5, 0x86, 0xa7, 0x0a, 0x2a, 0x0a, 0x06, 0xef, 0xfb, 0x43, 0x05, 0x46, 0xf7, 0xe5, 0x6a, 0xcd,
+ 0xc8, 0xcd, 0x9a, 0x91, 0xdb, 0x35, 0x83, 0xef, 0x19, 0x83, 0x5f, 0x19, 0x83, 0xeb, 0x8c, 0xc1,
+ 0x2a, 0x63, 0xf0, 0x37, 0x63, 0xf0, 0x2f, 0x63, 0xe4, 0x36, 0x63, 0xf0, 0x73, 0xc3, 0xc8, 0x6a,
+ 0xc3, 0xc8, 0xcd, 0x86, 0x91, 0xb1, 0xad, 0x9f, 0xc6, 0xd1, 0xff, 0x00, 0x00, 0x00, 0xff, 0xff,
+ 0xab, 0x96, 0x85, 0x85, 0x86, 0x02, 0x00, 0x00,
+}
+
func (x IngesterState) String() string {
s, ok := IngesterState_name[int32(x)]
if ok {
@@ -435,9 +472,9 @@ func (m *Desc) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x12
i++
i = encodeVarintRing(dAtA, i, uint64((&v).Size()))
- n1, err := (&v).MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
+ n1, err1 := (&v).MarshalTo(dAtA[i:])
+ if err1 != nil {
+ return 0, err1
}
i += n1
}
@@ -629,6 +666,11 @@ func (this *Desc) String() string {
if this == nil {
return "nil"
}
+ repeatedStringForTokens := "[]TokenDesc{"
+ for _, f := range this.Tokens {
+ repeatedStringForTokens += strings.Replace(strings.Replace(f.String(), "TokenDesc", "TokenDesc", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForTokens += "}"
keysForIngesters := make([]string, 0, len(this.Ingesters))
for k, _ := range this.Ingesters {
keysForIngesters = append(keysForIngesters, k)
@@ -641,7 +683,7 @@ func (this *Desc) String() string {
mapStringForIngesters += "}"
s := strings.Join([]string{`&Desc{`,
`Ingesters:` + mapStringForIngesters + `,`,
- `Tokens:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Tokens), "TokenDesc", "TokenDesc", 1), `&`, ``, 1) + `,`,
+ `Tokens:` + repeatedStringForTokens + `,`,
`}`,
}, "")
return s
@@ -693,7 +735,7 @@ func (m *Desc) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -721,7 +763,7 @@ func (m *Desc) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= (int(b) & 0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -730,6 +772,9 @@ func (m *Desc) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthRing
}
postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRing
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -750,7 +795,7 @@ func (m *Desc) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -767,7 +812,7 @@ func (m *Desc) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ stringLenmapkey |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -777,6 +822,9 @@ func (m *Desc) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthRing
}
postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthRing
+ }
if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
@@ -793,7 +841,7 @@ func (m *Desc) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- mapmsglen |= (int(b) & 0x7F) << shift
+ mapmsglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -802,7 +850,7 @@ func (m *Desc) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthRing
}
postmsgIndex := iNdEx + mapmsglen
- if mapmsglen < 0 {
+ if postmsgIndex < 0 {
return ErrInvalidLengthRing
}
if postmsgIndex > l {
@@ -844,7 +892,7 @@ func (m *Desc) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= (int(b) & 0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -853,6 +901,9 @@ func (m *Desc) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthRing
}
postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRing
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -870,6 +921,9 @@ func (m *Desc) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthRing
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthRing
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
@@ -897,7 +951,7 @@ func (m *IngesterDesc) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -925,7 +979,7 @@ func (m *IngesterDesc) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -935,6 +989,9 @@ func (m *IngesterDesc) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthRing
}
postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRing
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -954,7 +1011,7 @@ func (m *IngesterDesc) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.Timestamp |= (int64(b) & 0x7F) << shift
+ m.Timestamp |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -973,7 +1030,7 @@ func (m *IngesterDesc) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.State |= (IngesterState(b) & 0x7F) << shift
+ m.State |= IngesterState(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -990,7 +1047,7 @@ func (m *IngesterDesc) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- v |= (uint32(b) & 0x7F) << shift
+ v |= uint32(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1007,7 +1064,7 @@ func (m *IngesterDesc) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- packedLen |= (int(b) & 0x7F) << shift
+ packedLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1016,12 +1073,15 @@ func (m *IngesterDesc) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthRing
}
postIndex := iNdEx + packedLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRing
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
var elementCount int
var count int
- for _, integer := range dAtA {
+ for _, integer := range dAtA[iNdEx:postIndex] {
if integer < 128 {
count++
}
@@ -1041,7 +1101,7 @@ func (m *IngesterDesc) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- v |= (uint32(b) & 0x7F) << shift
+ v |= uint32(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1060,6 +1120,9 @@ func (m *IngesterDesc) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthRing
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthRing
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
@@ -1087,7 +1150,7 @@ func (m *TokenDesc) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
+ wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1115,7 +1178,7 @@ func (m *TokenDesc) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.Token |= (uint32(b) & 0x7F) << shift
+ m.Token |= uint32(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1134,7 +1197,7 @@ func (m *TokenDesc) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -1144,6 +1207,9 @@ func (m *TokenDesc) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthRing
}
postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRing
+ }
if postIndex > l {
return io.ErrUnexpectedEOF
}
@@ -1158,6 +1224,9 @@ func (m *TokenDesc) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthRing
}
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthRing
+ }
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
@@ -1224,10 +1293,13 @@ func skipRing(dAtA []byte) (n int, err error) {
break
}
}
- iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthRing
}
+ iNdEx += length
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthRing
+ }
return iNdEx, nil
case 3:
for {
@@ -1256,6 +1328,9 @@ func skipRing(dAtA []byte) (n int, err error) {
return 0, err
}
iNdEx = start + next
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthRing
+ }
}
return iNdEx, nil
case 4:
@@ -1274,39 +1349,3 @@ var (
ErrInvalidLengthRing = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowRing = fmt.Errorf("proto: integer overflow")
)
-
-func init() {
- proto.RegisterFile("github.com/cortexproject/cortex/pkg/ring/ring.proto", fileDescriptor_ring_35bba6cb303d16e3)
-}
-
-var fileDescriptor_ring_35bba6cb303d16e3 = []byte{
- // 440 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x92, 0xcf, 0x6e, 0xd3, 0x40,
- 0x10, 0xc6, 0x77, 0xe2, 0x3f, 0xc4, 0x13, 0x52, 0xac, 0x05, 0x21, 0x13, 0xa1, 0xc5, 0xca, 0xc9,
- 0x20, 0x35, 0x91, 0x52, 0x0e, 0x08, 0xa9, 0x87, 0x86, 0x46, 0x28, 0x11, 0x0a, 0x95, 0xa9, 0x7a,
- 0x4f, 0xd2, 0xc5, 0x84, 0x90, 0xac, 0x65, 0x6f, 0x10, 0xbd, 0xf1, 0x06, 0xf0, 0x18, 0x3c, 0x09,
- 0xea, 0x31, 0xc7, 0x9e, 0x10, 0x71, 0x2e, 0x1c, 0xfb, 0x08, 0x68, 0xd7, 0x76, 0x9a, 0x5c, 0xac,
- 0xf9, 0xed, 0x37, 0xdf, 0xb7, 0x33, 0xd6, 0xe2, 0x51, 0x34, 0x95, 0x9f, 0x96, 0xe3, 0xd6, 0x44,
- 0xcc, 0xdb, 0x13, 0x91, 0x48, 0xfe, 0x2d, 0x4e, 0xc4, 0x67, 0x3e, 0x91, 0x05, 0xb5, 0xe3, 0x59,
- 0xd4, 0x4e, 0xa6, 0x8b, 0xfc, 0xd3, 0x8a, 0x13, 0x21, 0x05, 0x35, 0x55, 0xdd, 0x38, 0xdc, 0xb1,
- 0x46, 0x22, 0x12, 0x6d, 0x2d, 0x8e, 0x97, 0x1f, 0x35, 0x69, 0xd0, 0x55, 0x6e, 0x6a, 0xfe, 0x06,
- 0x34, 0x4f, 0x79, 0x3a, 0xa1, 0xc7, 0xe8, 0x4c, 0x17, 0x11, 0x4f, 0x25, 0x4f, 0x52, 0x0f, 0x7c,
- 0x23, 0xa8, 0x75, 0x9e, 0xb4, 0x74, 0xba, 0x92, 0x5b, 0xfd, 0x52, 0xeb, 0x2d, 0x64, 0x72, 0xd5,
- 0x35, 0xaf, 0xff, 0x3c, 0x23, 0xe1, 0x9d, 0x83, 0x1e, 0xa2, 0x2d, 0xc5, 0x8c, 0x2f, 0x52, 0xaf,
- 0xa2, 0xbd, 0x0f, 0x72, 0xef, 0xb9, 0x3a, 0x53, 0x01, 0x85, 0xa3, 0x68, 0x6a, 0x9c, 0xe1, 0xc1,
- 0x7e, 0x22, 0x75, 0xd1, 0x98, 0xf1, 0x2b, 0x0f, 0x7c, 0x08, 0x9c, 0x50, 0x95, 0x34, 0x40, 0xeb,
- 0xeb, 0xe8, 0xcb, 0x92, 0x7b, 0x15, 0x1f, 0x82, 0x5a, 0x87, 0xe6, 0x89, 0xa5, 0x4d, 0x85, 0x86,
- 0x79, 0xc3, 0xeb, 0xca, 0x2b, 0x68, 0xfe, 0x00, 0xbc, 0xbf, 0xab, 0x51, 0x8a, 0xe6, 0xe8, 0xf2,
- 0x32, 0x29, 0x12, 0x75, 0x4d, 0x9f, 0xa2, 0x23, 0xa7, 0x73, 0x9e, 0xca, 0xd1, 0x3c, 0xd6, 0xb1,
- 0x46, 0x78, 0x77, 0x40, 0x9f, 0xa3, 0x95, 0xca, 0x91, 0xe4, 0x9e, 0xe1, 0x43, 0x70, 0xd0, 0x79,
- 0xb8, 0x7f, 0xe1, 0x07, 0x25, 0x85, 0x79, 0x07, 0x7d, 0xbc, 0x5d, 0xd7, 0xf6, 0x8d, 0xa0, 0x5e,
- 0xee, 0x35, 0x30, 0xab, 0xa6, 0x6b, 0x0d, 0xcc, 0xaa, 0xe5, 0xda, 0xcd, 0x63, 0x74, 0xb6, 0xeb,
- 0xd3, 0x47, 0x68, 0xe9, 0x16, 0x3d, 0x4e, 0x3d, 0xcc, 0x81, 0x36, 0xb0, 0x5a, 0xfe, 0x42, 0x3d,
- 0x8e, 0x13, 0x6e, 0xf9, 0x45, 0x17, 0xeb, 0x7b, 0x57, 0x53, 0x44, 0xfb, 0xe4, 0xcd, 0x79, 0xff,
- 0xa2, 0xe7, 0x12, 0x5a, 0xc3, 0x7b, 0xef, 0x7a, 0x27, 0x17, 0xfd, 0xe1, 0x5b, 0x17, 0x14, 0x9c,
- 0xf5, 0x86, 0xa7, 0x0a, 0x2a, 0x0a, 0x06, 0xef, 0xfb, 0x43, 0x05, 0x46, 0xf7, 0xe5, 0x6a, 0xcd,
- 0xc8, 0xcd, 0x9a, 0x91, 0xdb, 0x35, 0x83, 0xef, 0x19, 0x83, 0x5f, 0x19, 0x83, 0xeb, 0x8c, 0xc1,
- 0x2a, 0x63, 0xf0, 0x37, 0x63, 0xf0, 0x2f, 0x63, 0xe4, 0x36, 0x63, 0xf0, 0x73, 0xc3, 0xc8, 0x6a,
- 0xc3, 0xc8, 0xcd, 0x86, 0x91, 0xb1, 0xad, 0x9f, 0xc6, 0xd1, 0xff, 0x00, 0x00, 0x00, 0xff, 0xff,
- 0xab, 0x96, 0x85, 0x85, 0x86, 0x02, 0x00, 0x00,
-}
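
Besides the mechanical varint and bounds-check changes, this file picks up one real decoding fix: when pre-counting the elements of the packed `Tokens` field, the old code ranged over all of `dAtA` instead of just `dAtA[iNdEx:postIndex]`, counting terminal varint bytes from unrelated fields and over-allocating the slice. A sketch of the difference:

package main

import "fmt"

// countVarints pre-sizes a packed repeated field by counting terminal
// varint bytes (< 0x80). The fix scopes the scan to the packed payload;
// ranging over the whole buffer counted bytes from later fields too.
func countVarints(payload []byte) int {
	count := 0
	for _, b := range payload {
		if b < 0x80 {
			count++
		}
	}
	return count
}

func main() {
	packed := []byte{0x01, 0xAC, 0x02} // two varints: 1 and 300
	trailing := []byte{0x08, 0x05}     // bytes of some later field

	whole := append(append([]byte{}, packed...), trailing...)
	fmt.Println(countVarints(packed)) // 2: the correct element count
	fmt.Println(countVarints(whole))  // 4: the pre-fix over-count
}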
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/extract/extract.go b/vendor/github.com/cortexproject/cortex/pkg/util/extract/extract.go
index e170382f1834c..8de926d83e2cc 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/extract/extract.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/extract/extract.go
@@ -8,16 +8,14 @@ import (
"github.com/prometheus/prometheus/pkg/labels"
)
-var labelNameBytes = []byte(model.MetricNameLabel)
-
-// MetricNameFromLabelPairs extracts the metric name from a list of LabelPairs.
-func MetricNameFromLabelPairs(labels []client.LabelPair) ([]byte, error) {
+// MetricNameFromLabelAdapters extracts the metric name from a list of LabelAdapters.
+func MetricNameFromLabelAdapters(labels []client.LabelAdapter) (string, error) {
for _, label := range labels {
- if label.Name.Equal(labelNameBytes) {
+ if label.Name == model.MetricNameLabel {
return label.Value, nil
}
}
- return nil, fmt.Errorf("No metric name label")
+ return "", fmt.Errorf("No metric name label")
}
// MetricNameFromMetric extract the metric name from a model.Metric
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/flagext/deprecated.go b/vendor/github.com/cortexproject/cortex/pkg/util/flagext/deprecated.go
new file mode 100644
index 0000000000000..d61c99b2028ec
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/flagext/deprecated.go
@@ -0,0 +1,26 @@
+package flagext
+
+import (
+ "flag"
+
+ "github.com/cortexproject/cortex/pkg/util"
+ "github.com/go-kit/kit/log/level"
+)
+
+type deprecatedFlag struct {
+ name string
+}
+
+func (deprecatedFlag) String() string {
+ return "deprecated"
+}
+
+func (d deprecatedFlag) Set(string) error {
+ level.Warn(util.Logger).Log("msg", "flag disabled", "flag", d.name)
+ return nil
+}
+
+// DeprecatedFlag registers a flag that logs a warning when set.
+func DeprecatedFlag(f *flag.FlagSet, name, message string) {
+ f.Var(deprecatedFlag{name}, name, message)
+}
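
The new `DeprecatedFlag` keeps old command lines parseable: it registers a `flag.Value` whose `Set` only logs, so setting the flag has no effect beyond a warning. Roughly how it behaves, as a stand-alone sketch with a made-up flag name and fmt standing in for go-kit logging:

package main

import (
	"flag"
	"fmt"
)

// deprecatedFlag matches the vendored type: it satisfies flag.Value, so the
// flag still parses, but Set only warns instead of storing anything.
type deprecatedFlag struct{ name string }

func (deprecatedFlag) String() string { return "deprecated" }

func (d deprecatedFlag) Set(string) error {
	fmt.Printf("msg=%q flag=%s\n", "flag disabled", d.name)
	return nil
}

func main() {
	fs := flag.NewFlagSet("example", flag.ContinueOnError)
	fs.Var(deprecatedFlag{name: "store.old-knob"}, "store.old-knob", "DEPRECATED: no effect")

	// An old config keeps working: this parses cleanly and just logs.
	_ = fs.Parse([]string{"-store.old-knob=anything"})
}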
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/hash_fp.go b/vendor/github.com/cortexproject/cortex/pkg/util/hash_fp.go
index ba0a03801e2ed..209b8b45c0646 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/hash_fp.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/hash_fp.go
@@ -9,6 +9,6 @@ import "github.com/prometheus/common/model"
// function we use is prone to only change a few bits for similar metrics. We
// really want to make use of every change in the fingerprint to vary mutex
// selection.)
-func HashFP(fp model.Fingerprint) uint {
- return uint(fp ^ (fp >> 32) ^ (fp >> 16))
+func HashFP(fp model.Fingerprint) uint32 {
+ return uint32(fp ^ (fp >> 32) ^ (fp >> 16))
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/log.go b/vendor/github.com/cortexproject/cortex/pkg/util/log.go
index 6460e2dd14750..908378ab47415 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/log.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/log.go
@@ -44,8 +44,14 @@ func InitLogger(cfg *server.Config) {
panic(err)
}
- Logger = l
- cfg.Log = logging.GoKit(l)
+ // when using util.Logger, skip 3 stack frames.
+ Logger = log.With(l, "caller", log.Caller(3))
+
+ // cfg.Log wraps the log function, so skip 4 stack frames to get caller information.
+ // This works in Go 1.12 but not in earlier versions, which always
+ // show the compiler-generated wrapper function, marked
+ // <autogenerated>, instead.
+ cfg.Log = logging.GoKit(log.With(l, "caller", log.Caller(4)))
}
// PrometheusLogger exposes Prometheus counters for each of go-kit's log levels.
@@ -68,8 +74,8 @@ func NewPrometheusLogger(l logging.Level) (log.Logger, error) {
logger: logger,
}
- // DefaultCaller must be the last wrapper
- logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)
+ // return a Logger without caller information, shouldn't use directly
+ logger = log.With(logger, "ts", log.DefaultTimestampUTC)
return logger, nil
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go b/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go
index a2186c96a901b..0837b86ab1c3a 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go
@@ -3,8 +3,6 @@ package validation
import (
"flag"
"time"
-
- "github.com/cortexproject/cortex/pkg/util/flagext"
)
// Limits describe all the limits for users; can be used to describe global default
@@ -28,8 +26,10 @@ type Limits struct {
MaxSeriesPerMetric int `yaml:"max_series_per_metric"`
// Querier enforced limits.
- MaxChunksPerQuery int `yaml:"max_chunks_per_query"`
- MaxQueryLength time.Duration `yaml:"max_query_length"`
+ MaxChunksPerQuery int `yaml:"max_chunks_per_query"`
+ MaxQueryLength time.Duration `yaml:"max_query_length"`
+ MaxQueryParallelism int `yaml:"max_query_parallelism"`
+ CardinalityLimit int `yaml:"cardinality_limit"`
// Config for overrides, convenient if it goes here.
PerTenantOverrideConfig string
@@ -55,6 +55,8 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) {
f.IntVar(&l.MaxChunksPerQuery, "store.query-chunk-limit", 2e6, "Maximum number of chunks that can be fetched in a single query.")
f.DurationVar(&l.MaxQueryLength, "store.max-query-length", 0, "Limit to length of chunk store queries, 0 to disable.")
+ f.IntVar(&l.MaxQueryParallelism, "querier.max-query-parallelism", 14, "Maximum number of queries that will be scheduled in parallel by the frontend.")
+ f.IntVar(&l.CardinalityLimit, "store.cardinality-limit", 1e5, "Cardinality limit for index queries.")
f.StringVar(&l.PerTenantOverrideConfig, "limits.per-user-override-config", "", "File name of per-user overrides.")
f.DurationVar(&l.PerTenantOverridePeriod, "limits.per-user-override-period", 10*time.Second, "Period with which to reload the overrides.")
@@ -65,7 +67,7 @@ func (l *Limits) UnmarshalYAML(unmarshal func(interface{}) error) error {
// We want to set c to the defaults and then overwrite it with the input.
// To make unmarshal fill the plain data struct rather than calling UnmarshalYAML
// again, we have to hide it using a type indirection. See prometheus/config.
- flagext.DefaultValues(l)
+ *l = defaultLimits
type plain Limits
return unmarshal((*plain)(l))
}
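
The `UnmarshalYAML` change is the standard type-indirection trick with a twist: seed the struct with `defaultLimits` (the flag-derived values captured in override.go below) before decoding, and use a method-less `type plain Limits` alias so the inner `unmarshal` call fills the fields instead of recursing back into `UnmarshalYAML`. A cut-down, runnable sketch of the pattern (the field set and values are illustrative):

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// limits is a cut-down stand-in for the Limits struct above.
type limits struct {
	MaxQueryParallelism int `yaml:"max_query_parallelism"`
	CardinalityLimit    int `yaml:"cardinality_limit"`
}

// defaultLimits plays the role of the package-level global set by
// NewOverrides: the flag-derived values every override file starts from.
var defaultLimits = limits{MaxQueryParallelism: 14, CardinalityLimit: 100000}

func (l *limits) UnmarshalYAML(unmarshal func(interface{}) error) error {
	*l = defaultLimits // seed with the command-line defaults first
	// The alias drops the UnmarshalYAML method, so the inner call fills
	// the plain struct instead of recursing back into this method.
	type plain limits
	return unmarshal((*plain)(l))
}

func main() {
	var l limits
	_ = yaml.Unmarshal([]byte("max_query_parallelism: 2\n"), &l)
	fmt.Println(l.MaxQueryParallelism, l.CardinalityLimit) // 2 100000
}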
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/validation/override.go b/vendor/github.com/cortexproject/cortex/pkg/util/validation/override.go
index ce810e69e9b75..a2553f7de813a 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/validation/override.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/validation/override.go
@@ -18,6 +18,12 @@ var overridesReloadSuccess = promauto.NewGauge(prometheus.GaugeOpts{
Help: "Whether the last overrides reload attempt was successful.",
})
+// When we load YAML from disk, we want the various per-customer limits
+// to default to the values specified on the command line, not to the
+// flags' built-in defaults. This global contains those values. I (Tom) cannot
+// find a nicer way I'm afraid.
+var defaultLimits Limits
+
// Overrides periodically fetch a set of per-user overrides, and provides convenience
// functions for fetching the correct value.
type Overrides struct {
@@ -28,7 +34,12 @@ type Overrides struct {
}
// NewOverrides makes a new Overrides.
+// We store the supplied limits in a global variable to ensure per-tenant limits
+// are defaulted to those values. As such, the last call to NewOverrides will
+// become the new global defaults.
func NewOverrides(defaults Limits) (*Overrides, error) {
+ defaultLimits = defaults
+
if defaults.PerTenantOverrideConfig == "" {
level.Info(util.Logger).Log("msg", "per-tenant overrides disabled")
return &Overrides{
@@ -242,9 +253,24 @@ func (o *Overrides) MaxQueryLength(userID string) time.Duration {
})
}
+// MaxQueryParallelism returns the limit to the number of sub-queries the
+// frontend will process in parallel.
+func (o *Overrides) MaxQueryParallelism(userID string) int {
+ return o.getInt(userID, func(l *Limits) int {
+ return l.MaxQueryParallelism
+ })
+}
+
// EnforceMetricName whether to enforce the presence of a metric name.
func (o *Overrides) EnforceMetricName(userID string) bool {
return o.getBool(userID, func(l *Limits) bool {
return l.EnforceMetricName
})
}
+
+// CardinalityLimit returns the cardinality limit for index queries.
+func (o *Overrides) CardinalityLimit(userID string) int {
+ return o.getInt(userID, func(l *Limits) int {
+ return l.CardinalityLimit
+ })
+}
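
Both new accessors follow the existing Overrides shape: one generic per-tenant lookup with a fallback to the defaults, wrapped by thin typed getters. A stand-alone sketch of that shape (the real `getInt` also sits behind the periodic reload loop):

package main

import "fmt"

type limits struct{ MaxQueryParallelism int }

// overrides is a stand-in for the vendored Overrides: per-tenant limits
// with a fallback to the process-wide defaults.
type overrides struct {
	defaults  limits
	perTenant map[string]*limits
}

// getInt mirrors the accessor pattern: one generic lookup, thin typed wrappers.
func (o *overrides) getInt(userID string, f func(*limits) int) int {
	if l, ok := o.perTenant[userID]; ok {
		return f(l)
	}
	return f(&o.defaults)
}

func (o *overrides) MaxQueryParallelism(userID string) int {
	return o.getInt(userID, func(l *limits) int { return l.MaxQueryParallelism })
}

func main() {
	o := &overrides{
		defaults:  limits{MaxQueryParallelism: 14},
		perTenant: map[string]*limits{"tenant-a": {MaxQueryParallelism: 2}},
	}
	fmt.Println(o.MaxQueryParallelism("tenant-a")) // 2: per-tenant override
	fmt.Println(o.MaxQueryParallelism("tenant-b")) // 14: falls back to defaults
}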
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/validation/validate.go b/vendor/github.com/cortexproject/cortex/pkg/util/validation/validate.go
index 750ceb25b910c..3aff0fa72d7a1 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/validation/validate.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/validation/validate.go
@@ -22,6 +22,9 @@ const (
errTooOld = "sample for '%s' has timestamp too old: %d"
errTooNew = "sample for '%s' has timestamp too new: %d"
+ // ErrQueryTooLong is used in chunk store and query frontend.
+ ErrQueryTooLong = "invalid query, length > limit (%s > %s)"
+
greaterThanMaxSampleAge = "greater_than_max_sample_age"
maxLabelNamesPerSeries = "max_label_names_per_series"
tooFarInFuture = "too_far_in_future"
@@ -48,7 +51,7 @@ func init() {
}
// ValidateSample returns an err if the sample is invalid.
-func (cfg *Overrides) ValidateSample(userID string, metricName []byte, s client.Sample) error {
+func (cfg *Overrides) ValidateSample(userID string, metricName string, s client.Sample) error {
if cfg.RejectOldSamples(userID) && model.Time(s.TimestampMs) < model.Now().Add(-cfg.RejectOldSamplesMaxAge(userID)) {
DiscardedSamples.WithLabelValues(greaterThanMaxSampleAge, userID).Inc()
return httpgrpc.Errorf(http.StatusBadRequest, errTooOld, metricName, model.Time(s.TimestampMs))
@@ -63,8 +66,8 @@ func (cfg *Overrides) ValidateSample(userID string, metricName []byte, s client.
}
// ValidateLabels returns an err if the labels are invalid.
-func (cfg *Overrides) ValidateLabels(userID string, ls []client.LabelPair) error {
- metricName, err := extract.MetricNameFromLabelPairs(ls)
+func (cfg *Overrides) ValidateLabels(userID string, ls []client.LabelAdapter) error {
+ metricName, err := extract.MetricNameFromLabelAdapters(ls)
if cfg.EnforceMetricName(userID) {
if err != nil {
return httpgrpc.Errorf(http.StatusBadRequest, errMissingMetricName)
@@ -78,7 +81,7 @@ func (cfg *Overrides) ValidateLabels(userID string, ls []client.LabelPair) error
numLabelNames := len(ls)
if numLabelNames > cfg.MaxLabelNamesPerSeries(userID) {
DiscardedSamples.WithLabelValues(maxLabelNamesPerSeries, userID).Inc()
- return httpgrpc.Errorf(http.StatusBadRequest, errTooManyLabels, client.FromLabelPairs(ls).String(), numLabelNames, cfg.MaxLabelNamesPerSeries(userID))
+ return httpgrpc.Errorf(http.StatusBadRequest, errTooManyLabels, client.FromLabelAdaptersToMetric(ls).String(), numLabelNames, cfg.MaxLabelNamesPerSeries(userID))
}
maxLabelNameLength := cfg.MaxLabelNameLength(userID)
@@ -102,7 +105,7 @@ func (cfg *Overrides) ValidateLabels(userID string, ls []client.LabelPair) error
}
if errTemplate != "" {
DiscardedSamples.WithLabelValues(reason, userID).Inc()
- return httpgrpc.Errorf(http.StatusBadRequest, errTemplate, cause, client.FromLabelPairs(ls).String())
+ return httpgrpc.Errorf(http.StatusBadRequest, errTemplate, cause, client.FromLabelAdaptersToMetric(ls).String())
}
}
return nil
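
The LabelPair-to-LabelAdapter signature change is mechanical, but the shape of the check is worth a sketch. A self-contained approximation of the label-count validation, where LabelAdapter is a stand-in for the Cortex type (assumed here to carry Name/Value string fields):

package main

import "fmt"

// LabelAdapter mirrors the Name/Value shape used by Cortex's client package
// (an assumption for this sketch, not the vendored type itself).
type LabelAdapter struct {
	Name, Value string
}

// validateLabels sketches the per-tenant length check ValidateLabels performs.
func validateLabels(ls []LabelAdapter, maxLabelNames int) error {
	if len(ls) > maxLabelNames {
		return fmt.Errorf("series has too many labels (actual: %d, limit: %d)", len(ls), maxLabelNames)
	}
	return nil
}

func main() {
	ls := []LabelAdapter{{"__name__", "http_requests_total"}, {"job", "api"}}
	fmt.Println(validateLabels(ls, 30)) // <nil>
}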
|
vendor
|
update cortex (#512)
|
59ade9b10559cdff5a4f42f14a93a7a393737e1b
|
2025-02-04 20:13:50
|
Mateusz Drab
|
docs: add match_first_network to docker_sd_configs docs (#16081)
| false
|
diff --git a/docs/sources/send-data/promtail/configuration.md b/docs/sources/send-data/promtail/configuration.md
index 2d768f22f3667..70696f32bc002 100644
--- a/docs/sources/send-data/promtail/configuration.md
+++ b/docs/sources/send-data/promtail/configuration.md
@@ -2034,6 +2034,11 @@ tls_config:
# The host to use if the container is in host networking mode.
[ host_networking_host: <string> | default = "localhost" ]
+# Sort all non-nil networks in ascending order based on network name and
+# get the first network if the container has multiple networks defined,
+# thus avoiding collecting duplicate targets.
+[ match_first_network: <bool> | default = true ]
+
# Optional filters to limit the discovery process to a subset of available
# resources.
# The available filters are listed in the Docker documentation:
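
A minimal sketch of how the new option sits in a Promtail scrape config; the socket path and job name are placeholders:

scrape_configs:
  - job_name: docker
    docker_sd_configs:
      - host: unix:///var/run/docker.sock
        # Containers attached to several networks would otherwise yield one
        # target per network; keep only the first, sorted by network name.
        match_first_network: true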
|
docs
|
add match_first_network to docker_sd_configs docs (#16081)
|
8395acd0cbd3db9c6f330bd94a22b194fad35a93
|
2024-11-01 18:49:38
|
renovate[bot]
|
fix(deps): update module github.com/ibm/go-sdk-core/v5 to v5.18.1 (#14716)
| false
|
diff --git a/go.mod b/go.mod
index b57bcd68ffef7..a43668e3439dc 100644
--- a/go.mod
+++ b/go.mod
@@ -115,7 +115,7 @@ require (
github.com/Azure/go-autorest/autorest v0.11.29
github.com/DataDog/sketches-go v1.4.6
github.com/DmitriyVTitov/size v1.5.0
- github.com/IBM/go-sdk-core/v5 v5.17.5
+ github.com/IBM/go-sdk-core/v5 v5.18.1
github.com/IBM/ibm-cos-sdk-go v1.11.1
github.com/axiomhq/hyperloglog v0.0.0-20240507144631-af9851f82b27
github.com/buger/jsonparser v1.1.1
diff --git a/go.sum b/go.sum
index a3f541f49a703..79ad0c08c7180 100644
--- a/go.sum
+++ b/go.sum
@@ -262,8 +262,8 @@ github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapp
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1/go.mod h1:viRWSEhtMZqz1rhwmOVKkWl6SwmVowfL9O2YR5gI2PE=
github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM=
github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo=
-github.com/IBM/go-sdk-core/v5 v5.17.5 h1:AjGC7xNee5tgDIjndekBDW5AbypdERHSgib3EZ1KNsA=
-github.com/IBM/go-sdk-core/v5 v5.17.5/go.mod h1:KsAAI7eStAWwQa4F96MLy+whYSh39JzNjklZRbN/8ns=
+github.com/IBM/go-sdk-core/v5 v5.18.1 h1:wdftQO8xejECTWTKF3FGXyW0McKxxDAopH7MKwA187c=
+github.com/IBM/go-sdk-core/v5 v5.18.1/go.mod h1:3ywpylZ41WhWPusqtpJZWopYlt2brebcphV7mA2JncU=
github.com/IBM/ibm-cos-sdk-go v1.11.1 h1:Pye61hmWA4ZVCfOfFLTJBjPka4HIGrLqmpZ2d2KlrCE=
github.com/IBM/ibm-cos-sdk-go v1.11.1/go.mod h1:d8vET3w8wgmGwCsCVs+0y4V8+1hRNT6+pbpGaEHvSCI=
github.com/IBM/sarama v1.43.3 h1:Yj6L2IaNvb2mRBop39N7mmJAHBVY3dTPncr3qGVkxPA=
diff --git a/vendor/github.com/IBM/go-sdk-core/v5/core/authenticator_factory.go b/vendor/github.com/IBM/go-sdk-core/v5/core/authenticator_factory.go
index cd1a63c905b90..586e38d072e48 100644
--- a/vendor/github.com/IBM/go-sdk-core/v5/core/authenticator_factory.go
+++ b/vendor/github.com/IBM/go-sdk-core/v5/core/authenticator_factory.go
@@ -53,6 +53,8 @@ func GetAuthenticatorFromEnvironment(credentialKey string) (authenticator Authen
authenticator, err = newBearerTokenAuthenticatorFromMap(properties)
} else if strings.EqualFold(authType, AUTHTYPE_IAM) {
authenticator, err = newIamAuthenticatorFromMap(properties)
+ } else if strings.EqualFold(authType, AUTHTYPE_IAM_ASSUME) {
+ authenticator, err = newIamAssumeAuthenticatorFromMap(properties)
} else if strings.EqualFold(authType, AUTHTYPE_CONTAINER) {
authenticator, err = newContainerAuthenticatorFromMap(properties)
} else if strings.EqualFold(authType, AUTHTYPE_VPC) {
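
With this dispatch in place, the new authenticator can be selected purely through configuration properties. A hedged sketch, assuming the SDK's usual <SERVICE>_-prefixed property convention and placeholder credentials:

package main

import (
	"fmt"
	"log"

	"github.com/IBM/go-sdk-core/v5/core"
)

func main() {
	// Hypothetical environment for a service whose credential key is "example":
	//   EXAMPLE_AUTH_TYPE=iamAssume
	//   EXAMPLE_APIKEY=my-apikey
	//   EXAMPLE_IAM_PROFILE_ID=Profile-123
	// The factory matches AUTH_TYPE case-insensitively against the
	// AUTHTYPE_* constants and dispatches to the matching constructor.
	authenticator, err := core.GetAuthenticatorFromEnvironment("example")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(authenticator.AuthenticationType()) // "iamAssume"
}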
diff --git a/vendor/github.com/IBM/go-sdk-core/v5/core/base_service.go b/vendor/github.com/IBM/go-sdk-core/v5/core/base_service.go
index 62e362d4103c4..d2607440c2196 100644
--- a/vendor/github.com/IBM/go-sdk-core/v5/core/base_service.go
+++ b/vendor/github.com/IBM/go-sdk-core/v5/core/base_service.go
@@ -64,7 +64,6 @@ type ServiceOptions struct {
// BaseService implements the common functionality shared by generated services
// to manage requests and responses, authenticate outbound requests, etc.
type BaseService struct {
-
// Configuration values for a service.
Options *ServiceOptions
@@ -89,7 +88,7 @@ func NewBaseService(options *ServiceOptions) (*BaseService, error) {
}
if IsNil(options.Authenticator) {
- err := fmt.Errorf(ERRORMSG_NO_AUTHENTICATOR)
+ err := errors.New(ERRORMSG_NO_AUTHENTICATOR)
return nil, SDKErrorf(err, "", "missing-auth", getComponentInfo())
}
@@ -382,7 +381,7 @@ func (service *BaseService) Request(req *http.Request, result interface{}) (deta
// Add authentication to the outbound request.
if IsNil(service.Options.Authenticator) {
- err = fmt.Errorf(ERRORMSG_NO_AUTHENTICATOR)
+ err = errors.New(ERRORMSG_NO_AUTHENTICATOR)
err = SDKErrorf(err, "", "missing-auth", getComponentInfo())
return
}
@@ -423,7 +422,7 @@ func (service *BaseService) Request(req *http.Request, result interface{}) (deta
httpResponse, err = service.Client.Do(req)
if err != nil {
if strings.Contains(err.Error(), SSL_CERTIFICATION_ERROR) {
- err = fmt.Errorf(ERRORMSG_SSL_VERIFICATION_FAILED + "\n" + err.Error())
+ err = errors.New(ERRORMSG_SSL_VERIFICATION_FAILED + "\n" + err.Error())
}
err = SDKErrorf(err, "", "no-connection-made", getComponentInfo())
return
@@ -622,7 +621,6 @@ func decodeAsMap(byteBuffer []byte) (result map[string]interface{}, err error) {
// getErrorMessage: try to retrieve an error message from the decoded response body (map).
func getErrorMessage(responseMap map[string]interface{}, statusCode int) string {
-
// If the response contained the "errors" field, then try to deserialize responseMap
// into an array of Error structs, then return the first entry's "Message" field.
if _, ok := responseMap["errors"]; ok {
@@ -664,7 +662,6 @@ func getErrorMessage(responseMap map[string]interface{}, statusCode int) string
// getErrorCode tries to retrieve an error code from the decoded response body (map).
func getErrorCode(responseMap map[string]interface{}) string {
-
// If the response contained the "errors" field, then try to deserialize responseMap
	// into an array of Error structs, then return the first entry's "Code" field.
if _, ok := responseMap["errors"]; ok {
@@ -784,8 +781,7 @@ func DefaultHTTPClient() *http.Client {
}
// httpLogger is a shim layer used to allow the Go core's logger to be used with the retryablehttp interfaces.
-type httpLogger struct {
-}
+type httpLogger struct{}
func (l *httpLogger) Printf(format string, inserts ...interface{}) {
if GetLogger().IsLogLevelEnabled(LevelDebug) {
@@ -923,7 +919,7 @@ func IBMCloudSDKRetryPolicy(ctx context.Context, resp *http.Response, err error)
return true, nil
}
- GetLogger().Debug("No retry for status code: %d\n")
+ GetLogger().Debug("No retry for status code: %d\n", resp.StatusCode)
return false, nil
}
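
The one-line logging fix above is worth spelling out: the removed call passes a %d verb with no argument, so Go's fmt layer inserts a %!d(MISSING) marker instead of the status code. A tiny illustration:

package main

import "fmt"

func main() {
	// Before the fix: the %d verb has no argument, so fmt flags it
	// (go vet also reports this, but it still compiles).
	fmt.Printf("No retry for status code: %d\n")      // No retry for status code: %!d(MISSING)
	// After the fix: the status code is actually formatted.
	fmt.Printf("No retry for status code: %d\n", 503) // No retry for status code: 503
}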
diff --git a/vendor/github.com/IBM/go-sdk-core/v5/core/basic_authenticator.go b/vendor/github.com/IBM/go-sdk-core/v5/core/basic_authenticator.go
index 4506eb1847ba7..33b81401f56b1 100644
--- a/vendor/github.com/IBM/go-sdk-core/v5/core/basic_authenticator.go
+++ b/vendor/github.com/IBM/go-sdk-core/v5/core/basic_authenticator.go
@@ -15,6 +15,7 @@ package core
// limitations under the License.
import (
+ "errors"
"fmt"
"net/http"
)
@@ -47,7 +48,7 @@ func NewBasicAuthenticator(username string, password string) (*BasicAuthenticato
// from a map.
func newBasicAuthenticatorFromMap(properties map[string]string) (*BasicAuthenticator, error) {
if properties == nil {
- err := fmt.Errorf(ERRORMSG_PROPS_MAP_NIL)
+ err := errors.New(ERRORMSG_PROPS_MAP_NIL)
return nil, SDKErrorf(err, "", "missing-props", getComponentInfo())
}
diff --git a/vendor/github.com/IBM/go-sdk-core/v5/core/bearer_token_authenticator.go b/vendor/github.com/IBM/go-sdk-core/v5/core/bearer_token_authenticator.go
index ce37cb9d678d2..7e035b7141a20 100644
--- a/vendor/github.com/IBM/go-sdk-core/v5/core/bearer_token_authenticator.go
+++ b/vendor/github.com/IBM/go-sdk-core/v5/core/bearer_token_authenticator.go
@@ -15,6 +15,7 @@ package core
// limitations under the License.
import (
+ "errors"
"fmt"
"net/http"
)
@@ -24,7 +25,6 @@ import (
//
// Authorization: Bearer <bearer-token>
type BearerTokenAuthenticator struct {
-
// The bearer token value to be used to authenticate request [required].
BearerToken string
}
@@ -44,7 +44,7 @@ func NewBearerTokenAuthenticator(bearerToken string) (*BearerTokenAuthenticator,
// newBearerTokenAuthenticator : Constructs a new BearerTokenAuthenticator instance from a map.
func newBearerTokenAuthenticatorFromMap(properties map[string]string) (*BearerTokenAuthenticator, error) {
if properties == nil {
- err := fmt.Errorf(ERRORMSG_PROPS_MAP_NIL)
+ err := errors.New(ERRORMSG_PROPS_MAP_NIL)
return nil, SDKErrorf(err, "", "missing-props", getComponentInfo())
}
diff --git a/vendor/github.com/IBM/go-sdk-core/v5/core/constants.go b/vendor/github.com/IBM/go-sdk-core/v5/core/constants.go
index 9be71785ed718..b6f5a7e70ba3c 100644
--- a/vendor/github.com/IBM/go-sdk-core/v5/core/constants.go
+++ b/vendor/github.com/IBM/go-sdk-core/v5/core/constants.go
@@ -20,6 +20,7 @@ const (
AUTHTYPE_BEARER_TOKEN = "bearerToken"
AUTHTYPE_NOAUTH = "noAuth"
AUTHTYPE_IAM = "iam"
+ AUTHTYPE_IAM_ASSUME = "iamAssume"
AUTHTYPE_CP4D = "cp4d"
AUTHTYPE_CONTAINER = "container"
AUTHTYPE_VPC = "vpc"
@@ -52,6 +53,7 @@ const (
PROPNAME_IAM_PROFILE_CRN = "IAM_PROFILE_CRN"
PROPNAME_IAM_PROFILE_NAME = "IAM_PROFILE_NAME"
PROPNAME_IAM_PROFILE_ID = "IAM_PROFILE_ID"
+ PROPNAME_IAM_ACCOUNT_ID = "IAM_ACCOUNT_ID"
// SSL error
SSL_CERTIFICATION_ERROR = "x509: certificate"
@@ -83,6 +85,7 @@ const (
ERRORMSG_IAM_GETTOKEN_ERROR = "IAM 'get token' error, status code %d received from '%s': %s" // #nosec G101
ERRORMSG_UNABLE_RETRIEVE_IITOKEN = "unable to retrieve instance identity token value: %s" // #nosec G101
ERRORMSG_VPCMDS_OPERATION_ERROR = "VPC metadata service error, status code %d received from '%s': %s"
+ ERRORMSG_ACCOUNTID_PROP_ERROR = "IAMAccountID must be specified if and only if IAMProfileName is specified"
// The name of this module - matches the value in the go.mod file.
MODULE_NAME = "github.com/IBM/go-sdk-core/v5"
diff --git a/vendor/github.com/IBM/go-sdk-core/v5/core/container_authenticator.go b/vendor/github.com/IBM/go-sdk-core/v5/core/container_authenticator.go
index 5919889c073f3..ebb6e4edcc9fa 100644
--- a/vendor/github.com/IBM/go-sdk-core/v5/core/container_authenticator.go
+++ b/vendor/github.com/IBM/go-sdk-core/v5/core/container_authenticator.go
@@ -17,6 +17,7 @@ package core
import (
"crypto/tls"
"encoding/json"
+ "errors"
"fmt"
"net/http"
"net/http/httputil"
@@ -35,7 +36,6 @@ import (
//
// Authorization: Bearer <access-token>
type ContainerAuthenticator struct {
-
// [optional] The name of the file containing the injected CR token value (applies to
// IKS-managed compute resources).
// Default value: (1) "/var/run/secrets/tokens/vault-token" or (2) "/var/run/secrets/tokens/sa-token",
@@ -173,7 +173,6 @@ func (builder *ContainerAuthenticatorBuilder) SetClient(client *http.Client) *Co
// Build() returns a validated instance of the ContainerAuthenticator with the config that was set in the builder.
func (builder *ContainerAuthenticatorBuilder) Build() (*ContainerAuthenticator, error) {
-
// Make sure the config is valid.
err := builder.ContainerAuthenticator.Validate()
if err != nil {
@@ -215,7 +214,7 @@ func (authenticator *ContainerAuthenticator) getUserAgent() string {
// configuration properties.
func newContainerAuthenticatorFromMap(properties map[string]string) (authenticator *ContainerAuthenticator, err error) {
if properties == nil {
- err = fmt.Errorf(ERRORMSG_PROPS_MAP_NIL)
+ err := errors.New(ERRORMSG_PROPS_MAP_NIL)
return nil, SDKErrorf(err, "", "missing-props", getComponentInfo())
}
@@ -294,7 +293,6 @@ func (authenticator *ContainerAuthenticator) setTokenData(tokenData *iamTokenDat
// Ensures that one of IAMProfileName or IAMProfileID are specified, and the ClientId and ClientSecret pair are
// mutually inclusive.
func (authenticator *ContainerAuthenticator) Validate() error {
-
// Check to make sure that one of IAMProfileName or IAMProfileID are specified.
if authenticator.IAMProfileName == "" && authenticator.IAMProfileID == "" {
err := fmt.Errorf(ERRORMSG_ATLEAST_ONE_PROP_ERROR, "IAMProfileName", "IAMProfileID")
@@ -483,8 +481,7 @@ func (authenticator *ContainerAuthenticator) RequestToken() (*IamTokenServerResp
iamErrorMsg = string(detailedResponse.RawResult)
}
- authError.Summary =
- fmt.Sprintf(ERRORMSG_IAM_GETTOKEN_ERROR, detailedResponse.StatusCode, builder.URL, iamErrorMsg)
+ authError.Summary = fmt.Sprintf(ERRORMSG_IAM_GETTOKEN_ERROR, detailedResponse.StatusCode, builder.URL, iamErrorMsg)
return nil, authError
}
@@ -499,7 +496,6 @@ func (authenticator *ContainerAuthenticator) RequestToken() (*IamTokenServerResp
// retrieveCRToken tries to read the CR token value from the local file system.
func (authenticator *ContainerAuthenticator) retrieveCRToken() (crToken string, err error) {
-
if authenticator.CRTokenFilename != "" {
// Use the file specified by the user.
crToken, err = authenticator.readFile(authenticator.CRTokenFilename)
diff --git a/vendor/github.com/IBM/go-sdk-core/v5/core/cp4d_authenticator.go b/vendor/github.com/IBM/go-sdk-core/v5/core/cp4d_authenticator.go
index 66c1f1037bc77..fb8e304344407 100644
--- a/vendor/github.com/IBM/go-sdk-core/v5/core/cp4d_authenticator.go
+++ b/vendor/github.com/IBM/go-sdk-core/v5/core/cp4d_authenticator.go
@@ -17,6 +17,7 @@ package core
import (
"crypto/tls"
"encoding/json"
+ "errors"
"fmt"
"net/http"
"net/http/httputil"
@@ -99,7 +100,6 @@ func NewCloudPakForDataAuthenticatorUsingAPIKey(url string, username string, api
func newAuthenticator(url string, username string, password string, apikey string,
disableSSLVerification bool, headers map[string]string) (authenticator *CloudPakForDataAuthenticator, err error) {
-
authenticator = &CloudPakForDataAuthenticator{
Username: username,
Password: password,
@@ -121,7 +121,7 @@ func newAuthenticator(url string, username string, password string, apikey strin
// newCloudPakForDataAuthenticatorFromMap : Constructs a new CloudPakForDataAuthenticator instance from a map.
func newCloudPakForDataAuthenticatorFromMap(properties map[string]string) (*CloudPakForDataAuthenticator, error) {
if properties == nil {
- err := fmt.Errorf(ERRORMSG_PROPS_MAP_NIL)
+ err := errors.New(ERRORMSG_PROPS_MAP_NIL)
return nil, SDKErrorf(err, "", "missing-props", getComponentInfo())
}
@@ -145,7 +145,6 @@ func (*CloudPakForDataAuthenticator) AuthenticationType() string {
// Ensures the username, password, and url are not Nil. Additionally, ensures
// they do not contain invalid characters.
func (authenticator *CloudPakForDataAuthenticator) Validate() error {
-
if authenticator.Username == "" {
err := fmt.Errorf(ERRORMSG_PROP_MISSING, "Username")
return SDKErrorf(err, "", "no-user", getComponentInfo())
@@ -301,7 +300,6 @@ type cp4dRequestBody struct {
// requestToken: fetches a new access token from the token server.
func (authenticator *CloudPakForDataAuthenticator) requestToken() (tokenResponse *cp4dTokenServerResponse, err error) {
-
// Create the request body (only one of APIKey or Password should be set
// on the authenticator so only one of them should end up in the serialized JSON).
body := &cp4dRequestBody{
diff --git a/vendor/github.com/IBM/go-sdk-core/v5/core/iam_assume_authenticator.go b/vendor/github.com/IBM/go-sdk-core/v5/core/iam_assume_authenticator.go
new file mode 100644
index 0000000000000..0132b41f21a54
--- /dev/null
+++ b/vendor/github.com/IBM/go-sdk-core/v5/core/iam_assume_authenticator.go
@@ -0,0 +1,519 @@
+package core
+
+// (C) Copyright IBM Corp. 2024.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "crypto/tls"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net/http"
+ "net/http/httputil"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+)
+
+// IamAssumeAuthenticator obtains an IAM access token using the IAM "get-token" operation's
+// "assume" grant type. The authenticator obtains an initial IAM access token from a
+// user-supplied apikey, then exchanges this initial IAM access token for another IAM access token
+// that has "assumed the identity" of the specified trusted profile.
+//
+// The resulting IAM access token is added to each outbound request
+// in an Authorization header of the form:
+//
+// Authorization: Bearer <access-token>
+type IamAssumeAuthenticator struct {
+
+ // Specify exactly one of [iamProfileID, iamProfileCRN, or iamProfileName] to
+ // identify the trusted profile whose identity should be used.
+ // If iamProfileID or iamProfileCRN is used, the trusted profile must exist
+ // in the same account.
+ // If and only if iamProfileName is used, then iamAccountID must also be
+ // specified to indicate the account that contains the trusted profile.
+ iamProfileID string
+ iamProfileCRN string
+ iamProfileName string
+
+ // If and only if iamProfileName is used to specify the trusted profile,
+ // then iamAccountID must also be specified to indicate the account that
+ // contains the trusted profile.
+ iamAccountID string
+
+ // The URL representing the IAM token server's endpoint; If not specified,
+ // a suitable default value will be used [optional].
+ url string
+ urlInit sync.Once
+
+ // A flag that indicates whether verification of the server's SSL certificate
+ // should be disabled; defaults to false [optional].
+ disableSSLVerification bool
+
+ // A set of key/value pairs that will be sent as HTTP headers in requests
+ // made to the token server [optional].
+ headers map[string]string
+
+ // The http.Client object used to invoke token server requests.
+ // If not specified by the user, a suitable default Client will be constructed [optional].
+ client *http.Client
+ clientInit sync.Once
+
+ // The User-Agent header value to be included with each token request.
+ userAgent string
+ userAgentInit sync.Once
+
+ // The cached token and expiration time.
+ tokenData *iamTokenData
+
+ // Mutex to make the tokenData field thread safe.
+ tokenDataMutex sync.Mutex
+
+ // An IamAuthenticator instance used to obtain the user's IAM access token from the apikey.
+ iamDelegate *IamAuthenticator
+}
+
+const (
+ iamGrantTypeAssume = "urn:ibm:params:oauth:grant-type:assume"
+)
+
+var (
+ iamAssumeRequestTokenMutex sync.Mutex
+)
+
+// IamAssumeAuthenticatorBuilder is used to construct an IamAssumeAuthenticator instance.
+type IamAssumeAuthenticatorBuilder struct {
+
+ // Properties needed to construct an IamAuthenticator instance.
+ IamAuthenticator
+
+ // Properties needed to construct an IamAssumeAuthenticator instance.
+ IamAssumeAuthenticator
+}
+
+// NewIamAssumeAuthenticatorBuilder returns a new builder struct that
+// can be used to construct an IamAssumeAuthenticator instance.
+func NewIamAssumeAuthenticatorBuilder() *IamAssumeAuthenticatorBuilder {
+ return &IamAssumeAuthenticatorBuilder{}
+}
+
+// SetIAMProfileID sets the iamProfileID field in the builder.
+func (builder *IamAssumeAuthenticatorBuilder) SetIAMProfileID(s string) *IamAssumeAuthenticatorBuilder {
+ builder.IamAssumeAuthenticator.iamProfileID = s
+ return builder
+}
+
+// SetIAMProfileCRN sets the iamProfileCRN field in the builder.
+func (builder *IamAssumeAuthenticatorBuilder) SetIAMProfileCRN(s string) *IamAssumeAuthenticatorBuilder {
+ builder.IamAssumeAuthenticator.iamProfileCRN = s
+ return builder
+}
+
+// SetIAMProfileName sets the iamProfileName field in the builder.
+func (builder *IamAssumeAuthenticatorBuilder) SetIAMProfileName(s string) *IamAssumeAuthenticatorBuilder {
+ builder.IamAssumeAuthenticator.iamProfileName = s
+ return builder
+}
+
+// SetIAMAccountID sets the iamAccountID field in the builder.
+func (builder *IamAssumeAuthenticatorBuilder) SetIAMAccountID(s string) *IamAssumeAuthenticatorBuilder {
+ builder.IamAssumeAuthenticator.iamAccountID = s
+ return builder
+}
+
+// SetApiKey sets the ApiKey field in the builder.
+func (builder *IamAssumeAuthenticatorBuilder) SetApiKey(s string) *IamAssumeAuthenticatorBuilder {
+ builder.IamAuthenticator.ApiKey = s
+ return builder
+}
+
+// SetURL sets the url field in the builder.
+func (builder *IamAssumeAuthenticatorBuilder) SetURL(s string) *IamAssumeAuthenticatorBuilder {
+ builder.IamAuthenticator.URL = s
+ builder.IamAssumeAuthenticator.url = s
+ return builder
+}
+
+// SetClientIDSecret sets the ClientId and ClientSecret fields in the builder.
+func (builder *IamAssumeAuthenticatorBuilder) SetClientIDSecret(clientID, clientSecret string) *IamAssumeAuthenticatorBuilder {
+ builder.IamAuthenticator.ClientId = clientID
+ builder.IamAuthenticator.ClientSecret = clientSecret
+ return builder
+}
+
+// SetDisableSSLVerification sets the DisableSSLVerification field in the builder.
+func (builder *IamAssumeAuthenticatorBuilder) SetDisableSSLVerification(b bool) *IamAssumeAuthenticatorBuilder {
+ builder.IamAuthenticator.DisableSSLVerification = b
+ builder.IamAssumeAuthenticator.disableSSLVerification = b
+ return builder
+}
+
+// SetScope sets the Scope field in the builder.
+func (builder *IamAssumeAuthenticatorBuilder) SetScope(s string) *IamAssumeAuthenticatorBuilder {
+ builder.IamAuthenticator.Scope = s
+ return builder
+}
+
+// SetHeaders sets the Headers field in the builder.
+func (builder *IamAssumeAuthenticatorBuilder) SetHeaders(headers map[string]string) *IamAssumeAuthenticatorBuilder {
+ builder.IamAuthenticator.Headers = headers
+ builder.IamAssumeAuthenticator.headers = headers
+ return builder
+}
+
+// SetClient sets the Client field in the builder.
+func (builder *IamAssumeAuthenticatorBuilder) SetClient(client *http.Client) *IamAssumeAuthenticatorBuilder {
+ builder.IamAuthenticator.Client = client
+ builder.IamAssumeAuthenticator.client = client
+ return builder
+}
+
+// Build() returns a validated instance of the IamAssumeAuthenticator with the config that was set in the builder.
+func (builder *IamAssumeAuthenticatorBuilder) Build() (*IamAssumeAuthenticator, error) {
+ err := builder.IamAuthenticator.Validate()
+ if err != nil {
+ return nil, RepurposeSDKProblem(err, "validation-failed")
+ }
+
+ err = builder.IamAssumeAuthenticator.Validate()
+ if err != nil {
+ return nil, RepurposeSDKProblem(err, "validation-failed")
+ }
+
+ // If we passed validation, then save our IamAuthenticator instance.
+ builder.IamAssumeAuthenticator.iamDelegate = &builder.IamAuthenticator
+
+ return &builder.IamAssumeAuthenticator, nil
+}
+
+// NewBuilder returns an IamAssumeAuthenticatorBuilder instance configured with the contents of "authenticator".
+func (authenticator *IamAssumeAuthenticator) NewBuilder() *IamAssumeAuthenticatorBuilder {
+ builder := &IamAssumeAuthenticatorBuilder{}
+
+ builder.IamAssumeAuthenticator.iamProfileCRN = authenticator.iamProfileCRN
+ builder.IamAssumeAuthenticator.iamProfileID = authenticator.iamProfileID
+ builder.IamAssumeAuthenticator.iamProfileName = authenticator.iamProfileName
+ builder.IamAssumeAuthenticator.iamAccountID = authenticator.iamAccountID
+ builder.IamAssumeAuthenticator.url = authenticator.url
+ builder.IamAssumeAuthenticator.headers = authenticator.headers
+ builder.IamAssumeAuthenticator.disableSSLVerification = authenticator.disableSSLVerification
+ builder.IamAssumeAuthenticator.client = authenticator.client
+
+ builder.IamAuthenticator.URL = authenticator.url
+ builder.IamAuthenticator.Client = authenticator.client
+ builder.IamAuthenticator.Headers = authenticator.headers
+ builder.IamAuthenticator.DisableSSLVerification = authenticator.disableSSLVerification
+ if authenticator.iamDelegate != nil {
+ builder.IamAuthenticator.ApiKey = authenticator.iamDelegate.ApiKey
+ builder.IamAuthenticator.ClientId = authenticator.iamDelegate.ClientId
+ builder.IamAuthenticator.ClientSecret = authenticator.iamDelegate.ClientSecret
+ builder.IamAuthenticator.Scope = authenticator.iamDelegate.Scope
+ }
+
+ return builder
+}
+
+// Validate will verify the authenticator's configuration.
+func (authenticator *IamAssumeAuthenticator) Validate() error {
+ var numParams int
+ if authenticator.iamProfileCRN != "" {
+ numParams++
+ }
+ if authenticator.iamProfileID != "" {
+ numParams++
+ }
+ if authenticator.iamProfileName != "" {
+ numParams++
+ }
+
+ // 1. The user should specify exactly one of iamProfileID, iamProfileCRN, or iamProfileName
+ if numParams != 1 {
+ err := fmt.Errorf(ERRORMSG_EXCLUSIVE_PROPS_ERROR, "iamProfileCRN, iamProfileID", "iamProfileName")
+ return SDKErrorf(err, "", "exc-props", getComponentInfo())
+ }
+
+ // 2. The user should specify iamAccountID if and only if iamProfileName is also specified.
+ if (authenticator.iamProfileName == "") != (authenticator.iamAccountID == "") {
+ err := errors.New(ERRORMSG_ACCOUNTID_PROP_ERROR)
+ return SDKErrorf(err, "", "both-props", getComponentInfo())
+ }
+
+ return nil
+}
+
+// client returns the authenticator's http client after potentially initializing it.
+func (authenticator *IamAssumeAuthenticator) getClient() *http.Client {
+ authenticator.clientInit.Do(func() {
+ if authenticator.client == nil {
+ authenticator.client = DefaultHTTPClient()
+ authenticator.client.Timeout = time.Second * 30
+
+ // If the user told us to disable SSL verification, then do it now.
+ if authenticator.disableSSLVerification {
+ transport := &http.Transport{
+ // #nosec G402
+ TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
+ }
+ authenticator.client.Transport = transport
+ }
+ }
+ })
+ return authenticator.client
+}
+
+// getUserAgent returns the User-Agent header value to be included in each token request invoked by the authenticator.
+func (authenticator *IamAssumeAuthenticator) getUserAgent() string {
+ authenticator.userAgentInit.Do(func() {
+ authenticator.userAgent = fmt.Sprintf("%s/%s-%s %s", sdkName, "iam-assume-authenticator", __VERSION__, SystemInfo())
+ })
+ return authenticator.userAgent
+}
+
+// newIamAssumeAuthenticatorFromMap constructs a new IamAssumeAuthenticator instance from a map.
+func newIamAssumeAuthenticatorFromMap(properties map[string]string) (authenticator *IamAssumeAuthenticator, err error) {
+ if properties == nil {
+ err := errors.New(ERRORMSG_PROPS_MAP_NIL)
+ return nil, SDKErrorf(err, "", "missing-props", getComponentInfo())
+ }
+
+ disableSSL, err := strconv.ParseBool(properties[PROPNAME_AUTH_DISABLE_SSL])
+ if err != nil {
+ disableSSL = false
+ }
+
+ authenticator, err = NewIamAssumeAuthenticatorBuilder().
+ SetIAMProfileID(properties[PROPNAME_IAM_PROFILE_ID]).
+ SetIAMProfileCRN(properties[PROPNAME_IAM_PROFILE_CRN]).
+ SetIAMProfileName(properties[PROPNAME_IAM_PROFILE_NAME]).
+ SetIAMAccountID(properties[PROPNAME_IAM_ACCOUNT_ID]).
+ SetApiKey(properties[PROPNAME_APIKEY]).
+ SetURL(properties[PROPNAME_AUTH_URL]).
+ SetClientIDSecret(properties[PROPNAME_CLIENT_ID], properties[PROPNAME_CLIENT_SECRET]).
+ SetDisableSSLVerification(disableSSL).
+ SetScope(properties[PROPNAME_SCOPE]).
+ Build()
+
+ return
+}
+
+// AuthenticationType returns the authentication type for this authenticator.
+func (*IamAssumeAuthenticator) AuthenticationType() string {
+ return AUTHTYPE_IAM_ASSUME
+}
+
+// Authenticate adds IAM authentication information to the request.
+//
+// The IAM access token will be added to the request's headers in the form:
+//
+// Authorization: Bearer <access-token>
+func (authenticator *IamAssumeAuthenticator) Authenticate(request *http.Request) error {
+ token, err := authenticator.GetToken()
+ if err != nil {
+ return RepurposeSDKProblem(err, "get-token-fail")
+ }
+
+ request.Header.Set("Authorization", "Bearer "+token)
+ GetLogger().Debug("Authenticated outbound request (type=%s)\n", authenticator.AuthenticationType())
+ return nil
+}
+
+// getURL returns the authenticator's URL property after potentially initializing it.
+func (authenticator *IamAssumeAuthenticator) getURL() string {
+ authenticator.urlInit.Do(func() {
+ if authenticator.url == "" {
+ // If URL was not specified, then use the default IAM endpoint.
+ authenticator.url = defaultIamTokenServerEndpoint
+ } else {
+ // Canonicalize the URL by removing the operation path if it was specified by the user.
+ authenticator.url = strings.TrimSuffix(authenticator.url, iamAuthOperationPathGetToken)
+ }
+ })
+ return authenticator.url
+}
+
+// getTokenData returns the tokenData field from the authenticator.
+func (authenticator *IamAssumeAuthenticator) getTokenData() *iamTokenData {
+ authenticator.tokenDataMutex.Lock()
+ defer authenticator.tokenDataMutex.Unlock()
+
+ return authenticator.tokenData
+}
+
+// setTokenData sets the given iamTokenData to the tokenData field of the authenticator.
+func (authenticator *IamAssumeAuthenticator) setTokenData(tokenData *iamTokenData) {
+ authenticator.tokenDataMutex.Lock()
+ defer authenticator.tokenDataMutex.Unlock()
+
+ authenticator.tokenData = tokenData
+}
+
+// GetToken returns an access token to be used in an Authorization header.
+// Whenever a new token is needed (when a token doesn't yet exist, needs to be refreshed,
+// or the existing token has expired), a new access token is fetched from the token server.
+func (authenticator *IamAssumeAuthenticator) GetToken() (string, error) {
+ if authenticator.getTokenData() == nil || !authenticator.getTokenData().isTokenValid() {
+ GetLogger().Debug("Performing synchronous token fetch...")
+ // synchronously request the token
+ err := authenticator.synchronizedRequestToken()
+ if err != nil {
+ return "", RepurposeSDKProblem(err, "request-token-fail")
+ }
+ } else if authenticator.getTokenData().needsRefresh() {
+ GetLogger().Debug("Performing background asynchronous token fetch...")
+ // If refresh needed, kick off a go routine in the background to get a new token
+ //nolint: errcheck
+ go authenticator.invokeRequestTokenData()
+ } else {
+ GetLogger().Debug("Using cached access token...")
+ }
+
+ // return an error if the access token is not valid or was not fetched
+ if authenticator.getTokenData() == nil || authenticator.getTokenData().AccessToken == "" {
+ err := fmt.Errorf("Error while trying to get access token")
+ return "", SDKErrorf(err, "", "no-token", getComponentInfo())
+ }
+
+ return authenticator.getTokenData().AccessToken, nil
+}
+
+// synchronizedRequestToken will synchronously fetch a new access token.
+func (authenticator *IamAssumeAuthenticator) synchronizedRequestToken() error {
+ iamAssumeRequestTokenMutex.Lock()
+ defer iamAssumeRequestTokenMutex.Unlock()
+ // if cached token is still valid, then just continue to use it
+ if authenticator.getTokenData() != nil && authenticator.getTokenData().isTokenValid() {
+ return nil
+ }
+
+ return authenticator.invokeRequestTokenData()
+}
+
+// invokeRequestTokenData requests a new token from the token server and
+// unmarshals the token information to the tokenData cache. Returns
+// an error if the token was unable to be fetched, otherwise returns nil
+func (authenticator *IamAssumeAuthenticator) invokeRequestTokenData() error {
+ tokenResponse, err := authenticator.RequestToken()
+ if err != nil {
+ return err
+ }
+
+ if tokenData, err := newIamTokenData(tokenResponse); err != nil {
+ return err
+ } else {
+ authenticator.setTokenData(tokenData)
+ }
+
+ return nil
+}
+
+// RequestToken fetches a new access token from the token server and
+// returns the response structure.
+func (authenticator *IamAssumeAuthenticator) RequestToken() (*IamTokenServerResponse, error) {
+ // Step 1: Obtain the user's IAM access token.
+ userAccessToken, err := authenticator.iamDelegate.GetToken()
+ if err != nil {
+ return nil, RepurposeSDKProblem(err, "iam-error")
+ }
+
+ // Step 2: Exchange the user's access token for one that reflects the trusted profile
+ // by invoking the getToken-assume operation.
+ builder := NewRequestBuilder(POST)
+ _, err = builder.ResolveRequestURL(authenticator.getURL(), iamAuthOperationPathGetToken, nil)
+ if err != nil {
+ return nil, RepurposeSDKProblem(err, "url-resolve-error")
+ }
+
+ builder.AddHeader(CONTENT_TYPE, "application/x-www-form-urlencoded")
+ builder.AddHeader(Accept, APPLICATION_JSON)
+ builder.AddHeader(headerNameUserAgent, authenticator.getUserAgent())
+
+ builder.AddFormData("grant_type", "", "", iamGrantTypeAssume)
+ builder.AddFormData("access_token", "", "", userAccessToken)
+ if authenticator.iamProfileCRN != "" {
+ builder.AddFormData("profile_crn", "", "", authenticator.iamProfileCRN)
+ } else if authenticator.iamProfileID != "" {
+ builder.AddFormData("profile_id", "", "", authenticator.iamProfileID)
+ } else {
+ builder.AddFormData("profile_name", "", "", authenticator.iamProfileName)
+ builder.AddFormData("account", "", "", authenticator.iamAccountID)
+ }
+
+ // Add user-defined headers to request.
+ for headerName, headerValue := range authenticator.headers {
+ builder.AddHeader(headerName, headerValue)
+ }
+
+ req, err := builder.Build()
+ if err != nil {
+ return nil, RepurposeSDKProblem(err, "request-build-error")
+ }
+
+ // If debug is enabled, then dump the request.
+ if GetLogger().IsLogLevelEnabled(LevelDebug) {
+ buf, dumpErr := httputil.DumpRequestOut(req, req.Body != nil)
+ if dumpErr == nil {
+ GetLogger().Debug("Request:\n%s\n", RedactSecrets(string(buf)))
+ } else {
+ GetLogger().Debug(fmt.Sprintf("error while attempting to log outbound request: %s", dumpErr.Error()))
+ }
+ }
+
+ GetLogger().Debug("Invoking IAM 'get token (assume)' operation: %s", builder.URL)
+ resp, err := authenticator.getClient().Do(req)
+ if err != nil {
+ err = SDKErrorf(err, "", "request-error", getComponentInfo())
+ return nil, err
+ }
+ GetLogger().Debug("Returned from IAM 'get token (assume)' operation, received status code %d", resp.StatusCode)
+
+ // If debug is enabled, then dump the response.
+ if GetLogger().IsLogLevelEnabled(LevelDebug) {
+ buf, dumpErr := httputil.DumpResponse(resp, req.Body != nil)
+ if dumpErr == nil {
+ GetLogger().Debug("Response:\n%s\n", RedactSecrets(string(buf)))
+ } else {
+ GetLogger().Debug(fmt.Sprintf("error while attempting to log inbound response: %s", dumpErr.Error()))
+ }
+ }
+
+ if resp.StatusCode < 200 || resp.StatusCode >= 300 {
+ detailedResponse, err := processErrorResponse(resp)
+ authError := authenticationErrorf(err, detailedResponse, "get_token", authenticator.getComponentInfo())
+
+ // The err Summary is typically the message computed for the HTTPError instance in
+ // processErrorResponse(). If the response body is non-JSON, the message will be generic
+ // text based on the status code but authenticators have always used the stringified
+ // RawResult, so update that here for compatibility.
+ iamErrorMsg := err.Summary
+ if detailedResponse.RawResult != nil {
+ // RawResult is only populated if the response body is
+ // non-JSON and we couldn't extract a message.
+ iamErrorMsg = string(detailedResponse.RawResult)
+ }
+
+ authError.Summary = iamErrorMsg
+
+ return nil, authError
+ }
+
+ tokenResponse := &IamTokenServerResponse{}
+ _ = json.NewDecoder(resp.Body).Decode(tokenResponse)
+ defer resp.Body.Close() // #nosec G307
+ return tokenResponse, nil
+}
+
+func (authenticator *IamAssumeAuthenticator) getComponentInfo() *ProblemComponent {
+ return NewProblemComponent("iam_identity_services", "")
+}
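
A usage sketch for the new authenticator, using only the builder methods defined in this file; the apikey, profile ID, and target URL are placeholders:

package main

import (
	"log"
	"net/http"

	"github.com/IBM/go-sdk-core/v5/core"
)

func main() {
	// Specify exactly one of profile ID/CRN/name; profile name would also
	// require SetIAMAccountID, per Validate() above.
	authenticator, err := core.NewIamAssumeAuthenticatorBuilder().
		SetApiKey("my-apikey").
		SetIAMProfileID("Profile-123").
		Build()
	if err != nil {
		log.Fatal(err)
	}

	req, _ := http.NewRequest(http.MethodGet, "https://example.cloud.ibm.com/v1/resource", nil)
	// Authenticate fetches (and caches) the assumed-profile access token and
	// adds "Authorization: Bearer <access-token>" to the request.
	if err := authenticator.Authenticate(req); err != nil {
		log.Fatal(err)
	}
}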
diff --git a/vendor/github.com/IBM/go-sdk-core/v5/core/iam_authenticator.go b/vendor/github.com/IBM/go-sdk-core/v5/core/iam_authenticator.go
index ff73a693962a1..ff083083ce5d8 100644
--- a/vendor/github.com/IBM/go-sdk-core/v5/core/iam_authenticator.go
+++ b/vendor/github.com/IBM/go-sdk-core/v5/core/iam_authenticator.go
@@ -17,6 +17,7 @@ package core
import (
"crypto/tls"
"encoding/json"
+ "errors"
"fmt"
"net/http"
"net/http/httputil"
@@ -32,7 +33,6 @@ import (
//
// Authorization: Bearer <access-token>
type IamAuthenticator struct {
-
// The apikey used to fetch the bearer token from the IAM token server.
// You must specify either ApiKey or RefreshToken.
ApiKey string
@@ -87,8 +87,10 @@ type IamAuthenticator struct {
tokenDataMutex sync.Mutex
}
-var iamRequestTokenMutex sync.Mutex
-var iamNeedsRefreshMutex sync.Mutex
+var (
+ iamRequestTokenMutex sync.Mutex
+ iamNeedsRefreshMutex sync.Mutex
+)
const (
// The default (prod) IAM token server base endpoint address.
@@ -164,7 +166,6 @@ func (builder *IamAuthenticatorBuilder) SetClient(client *http.Client) *IamAuthe
// Build() returns a validated instance of the IamAuthenticator with the config that was set in the builder.
func (builder *IamAuthenticatorBuilder) Build() (*IamAuthenticator, error) {
-
// Make sure the config is valid.
err := builder.IamAuthenticator.Validate()
if err != nil {
@@ -206,7 +207,6 @@ func (authenticator *IamAuthenticator) getUserAgent() string {
// Deprecated - use the IamAuthenticatorBuilder instead.
func NewIamAuthenticator(apiKey string, url string, clientId string, clientSecret string,
disableSSLVerification bool, headers map[string]string) (*IamAuthenticator, error) {
-
authenticator, err := NewIamAuthenticatorBuilder().
SetApiKey(apiKey).
SetURL(url).
@@ -221,7 +221,7 @@ func NewIamAuthenticator(apiKey string, url string, clientId string, clientSecre
// newIamAuthenticatorFromMap constructs a new IamAuthenticator instance from a map.
func newIamAuthenticatorFromMap(properties map[string]string) (authenticator *IamAuthenticator, err error) {
if properties == nil {
- err := fmt.Errorf(ERRORMSG_PROPS_MAP_NIL)
+ err := errors.New(ERRORMSG_PROPS_MAP_NIL)
return nil, SDKErrorf(err, "", "missing-props", getComponentInfo())
}
@@ -310,7 +310,6 @@ func (authenticator *IamAuthenticator) setTokenData(tokenData *iamTokenData) {
// Ensures that the ApiKey and RefreshToken properties are mutually exclusive,
// and that the ClientId and ClientSecret properties are mutually inclusive.
func (authenticator *IamAuthenticator) Validate() error {
-
// The user should specify at least one of ApiKey or RefreshToken.
// Note: We'll allow both ApiKey and RefreshToken to be specified,
// in which case we'd use ApiKey in the RequestToken() method.
@@ -421,7 +420,6 @@ func (authenticator *IamAuthenticator) invokeRequestTokenData() error {
// RequestToken fetches a new access token from the token server.
func (authenticator *IamAuthenticator) RequestToken() (*IamTokenServerResponse, error) {
-
builder := NewRequestBuilder(POST)
_, err := builder.ResolveRequestURL(authenticator.url(), iamAuthOperationPathGetToken, nil)
if err != nil {
@@ -547,7 +545,6 @@ type iamTokenData struct {
// newIamTokenData: constructs a new IamTokenData instance from the specified IamTokenServerResponse instance.
func newIamTokenData(tokenResponse *IamTokenServerResponse) (*iamTokenData, error) {
-
if tokenResponse == nil {
err := fmt.Errorf("Error while trying to parse access token!")
return nil, SDKErrorf(err, "", "token-parse", getComponentInfo())
diff --git a/vendor/github.com/IBM/go-sdk-core/v5/core/jwt_utils.go b/vendor/github.com/IBM/go-sdk-core/v5/core/jwt_utils.go
index 93a9e317b803d..1cb04f463c99f 100644
--- a/vendor/github.com/IBM/go-sdk-core/v5/core/jwt_utils.go
+++ b/vendor/github.com/IBM/go-sdk-core/v5/core/jwt_utils.go
@@ -17,6 +17,7 @@ package core
import (
"encoding/base64"
"encoding/json"
+ "errors"
"fmt"
"strings"
)
@@ -32,7 +33,7 @@ func parseJWT(tokenString string) (claims *coreJWTClaims, err error) {
// A JWT consists of three .-separated segments
segments := strings.Split(tokenString, ".")
if len(segments) != 3 {
- err = fmt.Errorf("token contains an invalid number of segments")
+ err = errors.New("token contains an invalid number of segments")
err = SDKErrorf(err, "", "need-3-segs", getComponentInfo())
return
}
diff --git a/vendor/github.com/IBM/go-sdk-core/v5/core/mcsp_authenticator.go b/vendor/github.com/IBM/go-sdk-core/v5/core/mcsp_authenticator.go
index 38845074af0fb..1fae07ed41495 100644
--- a/vendor/github.com/IBM/go-sdk-core/v5/core/mcsp_authenticator.go
+++ b/vendor/github.com/IBM/go-sdk-core/v5/core/mcsp_authenticator.go
@@ -17,6 +17,7 @@ package core
import (
"crypto/tls"
"encoding/json"
+ "errors"
"fmt"
"net/http"
"net/http/httputil"
@@ -29,7 +30,6 @@ import (
// and adds the access token to requests via an Authorization header
// of the form: "Authorization: Bearer <access-token>"
type MCSPAuthenticator struct {
-
// [Required] The apikey used to fetch the bearer token from the token server.
ApiKey string
@@ -60,8 +60,10 @@ type MCSPAuthenticator struct {
tokenDataMutex sync.Mutex
}
-var mcspRequestTokenMutex sync.Mutex
-var mcspNeedsRefreshMutex sync.Mutex
+var (
+ mcspRequestTokenMutex sync.Mutex
+ mcspNeedsRefreshMutex sync.Mutex
+)
const (
mcspAuthOperationPath = "/siusermgr/api/1.0/apikeys/token"
@@ -110,7 +112,6 @@ func (builder *MCSPAuthenticatorBuilder) SetClient(client *http.Client) *MCSPAut
// Build() returns a validated instance of the MCSPAuthenticator with the config that was set in the builder.
func (builder *MCSPAuthenticatorBuilder) Build() (*MCSPAuthenticator, error) {
-
// Make sure the config is valid.
err := builder.MCSPAuthenticator.Validate()
if err != nil {
@@ -151,7 +152,7 @@ func (authenticator *MCSPAuthenticator) getUserAgent() string {
// newMCSPAuthenticatorFromMap constructs a new MCSPAuthenticator instance from a map.
func newMCSPAuthenticatorFromMap(properties map[string]string) (authenticator *MCSPAuthenticator, err error) {
if properties == nil {
- err = fmt.Errorf(ERRORMSG_PROPS_MAP_NIL)
+ err = errors.New(ERRORMSG_PROPS_MAP_NIL)
return nil, SDKErrorf(err, "", "missing-props", getComponentInfo())
}
@@ -209,7 +210,6 @@ func (authenticator *MCSPAuthenticator) setTokenData(tokenData *mcspTokenData) {
//
// Ensures that the ApiKey and URL properties are both specified.
func (authenticator *MCSPAuthenticator) Validate() error {
-
if authenticator.ApiKey == "" {
err := fmt.Errorf(ERRORMSG_PROP_MISSING, "ApiKey")
return SDKErrorf(err, "", "missing-api-key", getComponentInfo())
@@ -245,7 +245,7 @@ func (authenticator *MCSPAuthenticator) GetToken() (string, error) {
// return an error if the access token is not valid or was not fetched
if authenticator.getTokenData() == nil || authenticator.getTokenData().AccessToken == "" {
- err := fmt.Errorf("Error while trying to get access token")
+ err := errors.New("Error while trying to get access token")
return "", SDKErrorf(err, "", "no-token", getComponentInfo())
}
@@ -287,7 +287,6 @@ func (authenticator *MCSPAuthenticator) invokeRequestTokenData() error {
// RequestToken fetches a new access token from the token server.
func (authenticator *MCSPAuthenticator) RequestToken() (*MCSPTokenServerResponse, error) {
-
builder := NewRequestBuilder(POST)
_, err := builder.ResolveRequestURL(authenticator.URL, mcspAuthOperationPath, nil)
if err != nil {
@@ -388,7 +387,7 @@ type mcspTokenData struct {
// MCSPTokenServerResponse instance.
func newMCSPTokenData(tokenResponse *MCSPTokenServerResponse) (*mcspTokenData, error) {
if tokenResponse == nil || tokenResponse.Token == "" {
- err := fmt.Errorf("Error while trying to parse access token!")
+ err := errors.New("Error while trying to parse access token!")
return nil, SDKErrorf(err, "", "token-parse", getComponentInfo())
}
diff --git a/vendor/github.com/IBM/go-sdk-core/v5/core/request_builder.go b/vendor/github.com/IBM/go-sdk-core/v5/core/request_builder.go
index e0c7f283ce690..df9b4c13e4d1c 100644
--- a/vendor/github.com/IBM/go-sdk-core/v5/core/request_builder.go
+++ b/vendor/github.com/IBM/go-sdk-core/v5/core/request_builder.go
@@ -18,6 +18,7 @@ import (
"bytes"
"context"
"encoding/json"
+ "errors"
"fmt"
"io"
"mime/multipart"
@@ -105,7 +106,7 @@ func (requestBuilder *RequestBuilder) WithContext(ctx context.Context) *RequestB
// invalid URL string (e.g. ":<badscheme>").
func (requestBuilder *RequestBuilder) ConstructHTTPURL(serviceURL string, pathSegments []string, pathParameters []string) (*RequestBuilder, error) {
if serviceURL == "" {
- return requestBuilder, SDKErrorf(fmt.Errorf(ERRORMSG_SERVICE_URL_MISSING), "", "no-url", getComponentInfo())
+ return requestBuilder, SDKErrorf(errors.New(ERRORMSG_SERVICE_URL_MISSING), "", "no-url", getComponentInfo())
}
var URL *url.URL
@@ -143,7 +144,7 @@ func (requestBuilder *RequestBuilder) ConstructHTTPURL(serviceURL string, pathSe
// The resulting request URL: "https://myservice.cloud.ibm.com/resource/res-123-456-789-abc/type/type-1"
func (requestBuilder *RequestBuilder) ResolveRequestURL(serviceURL string, path string, pathParams map[string]string) (*RequestBuilder, error) {
if serviceURL == "" {
- err := fmt.Errorf(ERRORMSG_SERVICE_URL_MISSING)
+ err := errors.New(ERRORMSG_SERVICE_URL_MISSING)
return requestBuilder, SDKErrorf(err, "", "service-url-missing", getComponentInfo())
}
@@ -346,7 +347,6 @@ func (requestBuilder *RequestBuilder) SetBodyContentForMultipart(contentType str
// Build builds an HTTP Request object from this RequestBuilder instance.
func (requestBuilder *RequestBuilder) Build() (req *http.Request, err error) {
-
// If the request builder contains a non-empty "Form" map, then we need to create
// a form-based request body, with the specific flavor depending on the content type.
if len(requestBuilder.Form) > 0 {
@@ -474,7 +474,6 @@ func (requestBuilder *RequestBuilder) createMultipartFormRequestBody() (bodyRead
err = SDKErrorf(err, err.Error(), "form-close-error", getComponentInfo())
return
}
-
}()
// Grab the Content-Type from the form writer (it will also contain the boundary string)
diff --git a/vendor/github.com/IBM/go-sdk-core/v5/core/unmarshal_v2.go b/vendor/github.com/IBM/go-sdk-core/v5/core/unmarshal_v2.go
index f25cce88d0036..c9c8e75c080b8 100644
--- a/vendor/github.com/IBM/go-sdk-core/v5/core/unmarshal_v2.go
+++ b/vendor/github.com/IBM/go-sdk-core/v5/core/unmarshal_v2.go
@@ -18,6 +18,7 @@ package core
import (
"encoding/json"
+ "errors"
"fmt"
"reflect"
)
@@ -78,7 +79,7 @@ const (
// err = UnmarshalPrimitive(rawMessageMap, "field2", &myString.Field2)
func UnmarshalPrimitive(rawInput map[string]json.RawMessage, propertyName string, result interface{}) (err error) {
if propertyName == "" {
- err = fmt.Errorf(errorPropertyNameMissing)
+ err = errors.New(errorPropertyNameMissing)
err = SDKErrorf(err, "", "no-prop-name", getComponentInfo())
}
@@ -186,10 +187,9 @@ type ModelUnmarshaller func(rawInput map[string]json.RawMessage, result interfac
//
// -------------------+--------------------------+------------------------------------------------------------------
func UnmarshalModel(rawInput interface{}, propertyName string, result interface{}, unmarshaller ModelUnmarshaller) (err error) {
-
// Make sure some input is provided. Otherwise return an error.
if IsNil(rawInput) {
- err = fmt.Errorf(errorUnmarshallInputIsNil)
+ err = errors.New(errorUnmarshallInputIsNil)
err = SDKErrorf(err, "", "no-input", getComponentInfo())
return
}
@@ -615,7 +615,6 @@ func unmarshalModelSliceMap(rawInput interface{}, propertyName string, result in
if foundInput && rawMap != nil {
for k, v := range rawMap {
-
// Make sure our slice raw message isn't an explicit JSON null value.
if !isJsonNull(v) {
// Each value in 'rawMap' should contain an instance of []<model>.
@@ -722,7 +721,7 @@ func getUnmarshalInputSourceSlice(rawInput interface{}, propertyName string) (fo
return
} else {
// We found the property in the map, so unmarshal the json.RawMessage into a []json.RawMessage
- var rawSlice = make([]json.RawMessage, 0)
+ rawSlice := make([]json.RawMessage, 0)
err = json.Unmarshal(rawMsg, &rawSlice)
if err != nil {
err = fmt.Errorf(errorIncorrectInputType, "map[string][]json.RawMessage", reflect.TypeOf(rawInput).String())
@@ -752,7 +751,7 @@ func getUnmarshalInputSourceSlice(rawInput interface{}, propertyName string) (fo
// isJsonNull returns true iff 'rawMsg' is explicitly nil or contains a JSON "null" value.
func isJsonNull(rawMsg json.RawMessage) bool {
- var nullLiteral = []byte("null")
+ nullLiteral := []byte("null")
if rawMsg == nil || string(rawMsg) == string(nullLiteral) {
return true
}
@@ -773,7 +772,7 @@ func getUnmarshalResultType(result interface{}) (ptrType reflect.Type, err error
rResultType := reflect.TypeOf(result).Elem().Elem()
switch rResultType.Kind() {
case reflect.Struct, reflect.Slice:
- ptrType = reflect.PtrTo(rResultType)
+ ptrType = reflect.PointerTo(rResultType)
case reflect.Interface:
ptrType = rResultType
diff --git a/vendor/github.com/IBM/go-sdk-core/v5/core/utils.go b/vendor/github.com/IBM/go-sdk-core/v5/core/utils.go
index e0426a141465c..72e26c21cd438 100644
--- a/vendor/github.com/IBM/go-sdk-core/v5/core/utils.go
+++ b/vendor/github.com/IBM/go-sdk-core/v5/core/utils.go
@@ -16,6 +16,7 @@ package core
import (
"encoding/json"
+ "errors"
"fmt"
"net/url"
"os"
@@ -59,7 +60,7 @@ func IsNil(object interface{}) bool {
// ValidateNotNil returns the specified error if 'object' is nil, nil otherwise.
func ValidateNotNil(object interface{}, errorMsg string) error {
if IsNil(object) {
- err := fmt.Errorf(errorMsg)
+ err := errors.New(errorMsg)
return SDKErrorf(err, "", "obj-is-nil", getComponentInfo())
}
return nil
@@ -207,7 +208,7 @@ func ConvertSlice(slice interface{}) (s []string, err error) {
inputIsSlice := false
if IsNil(slice) {
- err = fmt.Errorf(ERRORMSG_NIL_SLICE)
+ err = errors.New(ERRORMSG_NIL_SLICE)
err = SDKErrorf(err, "", "nil-slice", getComponentInfo())
return
}
@@ -223,7 +224,7 @@ func ConvertSlice(slice interface{}) (s []string, err error) {
// If it's not a slice, just return an error
if !inputIsSlice {
- err = fmt.Errorf(ERRORMSG_PARAM_NOT_SLICE)
+ err = errors.New(ERRORMSG_PARAM_NOT_SLICE)
err = SDKErrorf(err, "", "param-not-slice", getComponentInfo())
return
} else if reflect.ValueOf(slice).Len() == 0 {
@@ -263,7 +264,7 @@ func ConvertSlice(slice interface{}) (s []string, err error) {
return
}
- err = fmt.Errorf(ERRORMSG_CONVERT_SLICE)
+ err = errors.New(ERRORMSG_CONVERT_SLICE)
return nil, SDKErrorf(err, "", "cant-convert-slice", getComponentInfo())
}
@@ -403,13 +404,15 @@ var redactedKeywords = []string{
var redactedTokens = strings.Join(redactedKeywords, "|")
// Pre-compiled regular expressions used by RedactSecrets().
-var reAuthHeader = regexp.MustCompile(`(?m)^(Authorization|X-Auth\S*): .*`)
-var rePropertySetting = regexp.MustCompile(`(?i)(` + redactedTokens + `)=[^&]*(&|$)`)
-var reJsonField = regexp.MustCompile(`(?i)"([^"]*(` + redactedTokens + `)[^"_]*)":\s*"[^\,]*"`)
+var (
+ reAuthHeader = regexp.MustCompile(`(?m)^(Authorization|X-Auth\S*): .*`)
+ rePropertySetting = regexp.MustCompile(`(?i)(` + redactedTokens + `)=[^&]*(&|$)`)
+ reJsonField = regexp.MustCompile(`(?i)"([^"]*(` + redactedTokens + `)[^"_]*)":\s*"[^\,]*"`)
+)
// RedactSecrets() returns the input string with secrets redacted.
func RedactSecrets(input string) string {
- var redacted = "[redacted]"
+ redacted := "[redacted]"
redactedString := input
redactedString = reAuthHeader.ReplaceAllString(redactedString, "$1: "+redacted)
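
For reference, this first pattern is the one that scrubs auth headers from the request/response dumps logged at debug level. A small sketch of the observable behavior (the token value is a placeholder):

package main

import (
	"fmt"

	"github.com/IBM/go-sdk-core/v5/core"
)

func main() {
	// The (?m)^(Authorization|X-Auth\S*): .* pattern replaces the whole
	// header value, keeping only the header name.
	dump := "Authorization: Bearer my.jwt.token"
	fmt.Println(core.RedactSecrets(dump)) // Authorization: [redacted]
}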
diff --git a/vendor/github.com/IBM/go-sdk-core/v5/core/version.go b/vendor/github.com/IBM/go-sdk-core/v5/core/version.go
index 3921e502e8614..09086ce609a17 100644
--- a/vendor/github.com/IBM/go-sdk-core/v5/core/version.go
+++ b/vendor/github.com/IBM/go-sdk-core/v5/core/version.go
@@ -15,4 +15,4 @@ package core
// limitations under the License.
// Version of the SDK
-const __VERSION__ = "5.17.5"
+const __VERSION__ = "5.18.1"
diff --git a/vendor/github.com/IBM/go-sdk-core/v5/core/vpc_instance_authenticator.go b/vendor/github.com/IBM/go-sdk-core/v5/core/vpc_instance_authenticator.go
index 4c60ee721773c..684e9a67664b4 100644
--- a/vendor/github.com/IBM/go-sdk-core/v5/core/vpc_instance_authenticator.go
+++ b/vendor/github.com/IBM/go-sdk-core/v5/core/vpc_instance_authenticator.go
@@ -16,6 +16,7 @@ package core
import (
"encoding/json"
+ "errors"
"fmt"
"net/http"
"net/http/httputil"
@@ -35,7 +36,6 @@ import (
//
// Authorization: Bearer <access-token>
type VpcInstanceAuthenticator struct {
-
// [optional] The CRN of the linked trusted IAM profile to be used as the identity of the compute resource.
// At most one of IAMProfileCRN or IAMProfileID may be specified. If neither one is specified, then
// the default IAM profile defined for the compute resource will be used.
@@ -116,7 +116,6 @@ func (builder *VpcInstanceAuthenticatorBuilder) SetClient(client *http.Client) *
// Build() returns a validated instance of the VpcInstanceAuthenticator with the config that was set in the builder.
func (builder *VpcInstanceAuthenticatorBuilder) Build() (*VpcInstanceAuthenticator, error) {
-
// Make sure the config is valid.
err := builder.VpcInstanceAuthenticator.Validate()
if err != nil {
@@ -159,7 +158,7 @@ func (authenticator *VpcInstanceAuthenticator) url() string {
// configuration properties.
func newVpcInstanceAuthenticatorFromMap(properties map[string]string) (authenticator *VpcInstanceAuthenticator, err error) {
if properties == nil {
- err = fmt.Errorf(ERRORMSG_PROPS_MAP_NIL)
+ err = errors.New(ERRORMSG_PROPS_MAP_NIL)
return nil, SDKErrorf(err, "", "missing-props", getComponentInfo())
}
@@ -214,7 +213,6 @@ func (authenticator *VpcInstanceAuthenticator) setTokenData(tokenData *iamTokenD
// Ensures that one of IAMProfileName or IAMProfileID are specified, and the ClientId and ClientSecret pair are
// mutually inclusive.
func (authenticator *VpcInstanceAuthenticator) Validate() error {
-
// Check to make sure that at most one of IAMProfileCRN or IAMProfileID are specified.
if authenticator.IAMProfileCRN != "" && authenticator.IAMProfileID != "" {
err := fmt.Errorf(ERRORMSG_ATMOST_ONE_PROP_ERROR, "IAMProfileCRN", "IAMProfileID")
@@ -246,7 +244,7 @@ func (authenticator *VpcInstanceAuthenticator) GetToken() (string, error) {
// return an error if the access token is not valid or was not fetched
if authenticator.getTokenData() == nil || authenticator.getTokenData().AccessToken == "" {
- err := fmt.Errorf("Error while trying to get access token")
+ err := errors.New("Error while trying to get access token")
return "", SDKErrorf(err, "", "no-token", getComponentInfo())
}
@@ -292,7 +290,6 @@ func (authenticator *VpcInstanceAuthenticator) invokeRequestTokenData() error {
// RequestToken will use the VPC Instance Metadata Service to (1) retrieve a fresh instance identity token
// and then (2) exchange that for an IAM access token.
func (authenticator *VpcInstanceAuthenticator) RequestToken() (iamTokenResponse *IamTokenServerResponse, err error) {
-
// Retrieve the instance identity token from the VPC Instance Metadata Service.
instanceIdentityToken, err := authenticator.retrieveInstanceIdentityToken()
if err != nil {
@@ -332,7 +329,6 @@ type vpcTokenResponse struct {
// to authenticate outbound REST requests targeting IAM-secured services.
func (authenticator *VpcInstanceAuthenticator) retrieveIamAccessToken(
instanceIdentityToken string) (iamTokenResponse *IamTokenServerResponse, err error) {
-
// Set up the request for the VPC "create_iam_token" operation.
builder := NewRequestBuilder(POST)
_, err = builder.ResolveRequestURL(authenticator.url(), vpcauthOperationPathCreateIamToken, nil)
@@ -436,7 +432,6 @@ func (authenticator *VpcInstanceAuthenticator) retrieveIamAccessToken(
// retrieveInstanceIdentityToken retrieves the local compute resource's instance identity token using
// the "create_access_token" operation of the local VPC Instance Metadata Service API.
func (authenticator *VpcInstanceAuthenticator) retrieveInstanceIdentityToken() (instanceIdentityToken string, err error) {
-
// Set up the request to invoke the "create_access_token" operation.
builder := NewRequestBuilder(PUT)
_, err = builder.ResolveRequestURL(authenticator.url(), vpcauthOperationPathCreateAccessToken, nil)
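
The recurring substitution in this file — fmt.Errorf with a fixed message becoming errors.New — follows common Go lint guidance: when the message contains no format verbs, errors.New is simpler and cannot misinterpret a literal '%'. A small sketch of the distinction, with an illustrative constant standing in for ERRORMSG_PROPS_MAP_NIL:

package main

import (
	"errors"
	"fmt"
)

// Illustrative stand-in for the SDK's ERRORMSG_PROPS_MAP_NIL constant.
const errMsgPropsNil = "the properties map cannot be nil"

func newFromMap(props map[string]string) error {
	if props == nil {
		// No format verbs, so errors.New is the idiomatic choice; fmt.Errorf
		// would parse the string as a format and can garble a literal '%'.
		return errors.New(errMsgPropsNil)
	}
	return nil
}

func main() {
	fmt.Println(newFromMap(nil))       // the properties map cannot be nil
	fmt.Println(fmt.Errorf("100% ok")) // 100%!o(MISSING)k — why Errorf is avoided here
}
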
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 024d0099312bd..96a7f68138bd4 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -216,8 +216,8 @@ github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric
# github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1
## explicit; go 1.21
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping
-# github.com/IBM/go-sdk-core/v5 v5.17.5
-## explicit; go 1.20
+# github.com/IBM/go-sdk-core/v5 v5.18.1
+## explicit; go 1.21
github.com/IBM/go-sdk-core/v5/core
# github.com/IBM/ibm-cos-sdk-go v1.11.1
## explicit; go 1.19
|
fix
|
update module github.com/ibm/go-sdk-core/v5 to v5.18.1 (#14716)
|
6c49cc07305e823fc0f405f515b39e3ddc649303
|
2024-12-20 14:07:00
|
Semir Ajruli
|
fix(ci): Revert "fixed `Publish Rendered Helm Chart Diff` workflow" (#15506)
| false
|
diff --git a/.github/workflows/helm-loki-ci.yml b/.github/workflows/helm-loki-ci.yml
index 6c951cb689ff4..7ecac70227211 100644
--- a/.github/workflows/helm-loki-ci.yml
+++ b/.github/workflows/helm-loki-ci.yml
@@ -1,14 +1,14 @@
---
name: helm-loki-ci
on:
- # It runs with the configuration from base branch, so the changes of this file from the PR won't be taken into account until they are merged into main. see: https://docs.github.com/en/actions/writing-workflows/choosing-when-your-workflow-runs/events-that-trigger-workflows#pull_request_target .
- # This change is required to allow this CI to be run on Pull Requests opened from a fork repository
- pull_request_target:
+ pull_request:
paths:
- "production/helm/loki/**"
jobs:
publish-diff:
+    # Temporarily disable the workflow for PRs whose head branch is from a fork.
+ if: github.event.pull_request.head.repo.full_name == github.repository
name: Publish Rendered Helm Chart Diff
runs-on: ubuntu-latest
steps:
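
Context for the revert: pull_request_target executes the workflow definition from the base branch with a token that can read repository secrets, which is why the reverted change needed it to serve fork-opened PRs; plain pull_request runs fork PRs with a read-only token, so the restored if: guard on head.repo.full_name simply skips fork PRs until a safer approach lands.
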
|
fix
|
Revert "fixed `Publish Rendered Helm Chart Diff` workflow" (#15506)
|
e6cf42396f7554e46b6c331dd1938922806bcfc5
|
2024-11-01 19:49:15
|
George Robinson
|
fix: move partition_id into label to make PromQL easier (#14714)
| false
|
diff --git a/pkg/kafka/partition/metrics.go b/pkg/kafka/partition/metrics.go
index 6cafb2da40f51..7979ea70a40c0 100644
--- a/pkg/kafka/partition/metrics.go
+++ b/pkg/kafka/partition/metrics.go
@@ -2,6 +2,7 @@ package partition
import (
"math"
+ "strconv"
"time"
"github.com/prometheus/client_golang/prometheus"
@@ -12,7 +13,7 @@ import (
)
type readerMetrics struct {
- partition prometheus.Gauge
+ partition *prometheus.GaugeVec
phase *prometheus.GaugeVec
receiveDelay *prometheus.HistogramVec
recordsPerFetch prometheus.Histogram
@@ -26,10 +27,10 @@ type readerMetrics struct {
// newReaderMetrics initializes and returns a new set of metrics for the PartitionReader.
func newReaderMetrics(r prometheus.Registerer) readerMetrics {
return readerMetrics{
- partition: promauto.With(r).NewGauge(prometheus.GaugeOpts{
- Name: "loki_ingest_storage_reader_partition_id",
+ partition: promauto.With(r).NewGaugeVec(prometheus.GaugeOpts{
+ Name: "loki_ingest_storage_reader_partition",
Help: "The partition ID assigned to this reader.",
- }),
+ }, []string{"id"}),
phase: promauto.With(r).NewGaugeVec(prometheus.GaugeOpts{
Name: "loki_ingest_storage_reader_phase",
Help: "The current phase of the consumer.",
@@ -66,13 +67,13 @@ func newReaderMetrics(r prometheus.Registerer) readerMetrics {
}
func (m *readerMetrics) reportStarting(partitionID int32) {
- m.partition.Set(float64(partitionID))
+ m.partition.WithLabelValues(strconv.Itoa(int(partitionID))).Set(1)
m.phase.WithLabelValues(phaseStarting).Set(1)
m.phase.WithLabelValues(phaseRunning).Set(0)
}
func (m *readerMetrics) reportRunning(partitionID int32) {
- m.partition.Set(float64(partitionID))
+ m.partition.WithLabelValues(strconv.Itoa(int(partitionID))).Set(1)
m.phase.WithLabelValues(phaseStarting).Set(0)
m.phase.WithLabelValues(phaseRunning).Set(1)
}
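
A minimal sketch of the info-style metric pattern this diff adopts — the partition ID carried as a label with a constant sample value of 1 — using the same metric name and label as above; the registry wiring and partition value are illustrative:

package main

import (
	"strconv"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

func main() {
	reg := prometheus.NewRegistry()
	partition := promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
		Name: "loki_ingest_storage_reader_partition",
		Help: "The partition ID assigned to this reader.",
	}, []string{"id"})

	var partitionID int32 = 3 // illustrative partition assignment
	// The ID is a label and the value is a constant 1, so PromQL can
	// select it directly, e.g.
	//   loki_ingest_storage_reader_partition{id="3"}
	// or group on it:
	//   sum by (id) (loki_ingest_storage_reader_partition)
	// The old gauge put the ID in the sample value, forcing value
	// comparisons like `loki_ingest_storage_reader_partition_id == 3`.
	partition.WithLabelValues(strconv.Itoa(int(partitionID))).Set(1)
}
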
|
fix
|
move partition_id into label to make PromQL easier (#14714)
|
fd99dbc926ce0b353fa2768ec29d7db5696ff449
|
2020-05-05 23:21:37
|
Ed Welch
|
dashboard: Loki Operational improvements (#2041)
| false
|
diff --git a/production/loki-mixin/dashboard-loki-operational.json b/production/loki-mixin/dashboard-loki-operational.json
index 1195140c8c3d6..ac9e6bbee0e4b 100644
--- a/production/loki-mixin/dashboard-loki-operational.json
+++ b/production/loki-mixin/dashboard-loki-operational.json
@@ -1,5626 +1,6467 @@
{
- "annotations": {
- "list": [
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": "-- Grafana --",
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "type": "dashboard"
+ },
+ {
+ "datasource": "$logs",
+ "enable": true,
+ "expr": "{cluster=\"$cluster\", diff_namespace=\"$namespace\", container_name=\"kube-diff-logger\"}",
+ "hide": true,
+ "iconColor": "rgba(255, 96, 96, 1)",
+ "name": "deployments",
+ "showIn": 0,
+ "target": {}
+ }
+ ]
+ },
+ "editable": true,
+ "gnetId": null,
+ "graphTooltip": 1,
+ "id": 1599,
+ "iteration": 1588693295990,
+ "links": [
+ {
+ "icon": "dashboard",
+ "includeVars": true,
+ "keepTime": true,
+ "tags": [],
+ "targetBlank": true,
+ "title": "Chunks",
+ "tooltip": "",
+ "type": "link",
+ "url": "/grafana/d/586c2916e59d3f81282a776dbe8333e9/loki-chunks"
+ },
+ {
+ "icon": "dashboard",
+ "includeVars": true,
+ "keepTime": true,
+ "tags": [],
+ "targetBlank": true,
+ "title": "Memcached",
+ "tooltip": "",
+ "type": "link",
+ "url": "/grafana/d/8127d7f60e3a3230d7c633c8b5ddf68c/memcached"
+ },
+ {
+ "icon": "dashboard",
+ "includeVars": false,
+ "keepTime": true,
+ "tags": [],
+ "targetBlank": true,
+ "title": "Promtail",
+ "type": "link",
+ "url": "/grafana/d/739c15a23c04c6ace0d03f6e14ba26e8/loki-promtail?var-datasource=$datasource&var-cluster=$cluster&var-namespace=default"
+ },
+ {
+ "icon": "dashboard",
+ "includeVars": true,
+ "keepTime": true,
+ "tags": [],
+ "targetBlank": true,
+ "title": "BigTable Backup",
+ "type": "link",
+ "url": "/grafana/d/da5ce918801d6d9f775a8c7e9f9f339a/loki-bigtable-backup"
+ },
+ {
+ "icon": "dashboard",
+ "keepTime": true,
+ "tags": [],
+ "targetBlank": true,
+ "title": "Consul",
+ "type": "link",
+ "url": "/grafana/d/836793349c8ce200baa98f045123d19b/consul?var-datasource=$datasource&var-job=$namespace%2Fconsul"
+ }
+ ],
+ "panels": [
+ {
+ "collapsed": false,
+ "datasource": null,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 0
+ },
+ "id": 17,
+ "panels": [],
+ "title": "Main",
+ "type": "row"
+ },
+ {
+ "aliasColors": {
+ "5xx": "red"
+ },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 4,
+ "x": 0,
+ "y": 1
+ },
+ "hiddenSeries": false,
+ "id": 6,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
{
- "builtIn": 1,
- "datasource": "-- Grafana --",
- "enable": true,
- "hide": true,
- "iconColor": "rgba(0, 211, 255, 1)",
- "name": "Annotations & Alerts",
- "type": "dashboard"
+ "expr": "sum by (status) (\nlabel_replace(\n label_replace(\n rate(loki_request_duration_seconds_count{cluster=\"$cluster\", job=\"$namespace/cortex-gw\", route=~\"api_prom_query|api_prom_label|api_prom_label_name_values|loki_api_v1_query|loki_api_v1_query_range|loki_api_v1_label|loki_api_v1_label_name_values\"}[5m]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n\"status\", \"${1}\", \"status_code\", \"([a-z]+)\")\n)",
+ "legendFormat": "{{status}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Queries/Second",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 10,
+ "max": null,
+ "min": null,
+ "show": true
},
{
- "datasource": "$logs",
- "enable": true,
- "expr": "{cluster=\"$cluster\", diff_namespace=\"$namespace\", container_name=\"kube-diff-logger\"}",
- "hide": true,
- "iconColor": "rgba(255, 96, 96, 1)",
- "name": "deployments",
- "showIn": 0,
- "target": {}
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
}
- ]
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
},
- "editable": true,
- "gnetId": null,
- "graphTooltip": 1,
- "id": 37,
- "iteration": 1583182892855,
- "links": [
- {
- "icon": "dashboard",
- "includeVars": true,
- "keepTime": true,
- "tags": [],
- "targetBlank": true,
- "title": "Chunks",
- "tooltip": "",
- "type": "link",
- "url": "/grafana/d/586c2916e59d3f81282a776dbe8333e9/loki-chunks"
+ {
+ "aliasColors": {
+ "5xx": "red"
},
- {
- "icon": "dashboard",
- "includeVars": true,
- "keepTime": true,
- "tags": [],
- "targetBlank": true,
- "title": "Memcached",
- "tooltip": "",
- "type": "link",
- "url": "/grafana/d/8127d7f60e3a3230d7c633c8b5ddf68c/memcached"
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
},
- {
- "icon": "dashboard",
- "includeVars": false,
- "keepTime": true,
- "tags": [],
- "targetBlank": true,
- "title": "Promtail",
- "type": "link",
- "url": "/grafana/d/739c15a23c04c6ace0d03f6e14ba26e8/loki-promtail?var-datasource=$datasource&var-cluster=$cluster&var-namespace=default"
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 4,
+ "x": 4,
+ "y": 1
},
- {
- "icon": "dashboard",
- "includeVars": true,
- "keepTime": true,
- "tags": [],
- "targetBlank": true,
- "title": "BigTable Backup",
- "type": "link",
- "url": "/grafana/d/da5ce918801d6d9f775a8c7e9f9f339a/loki-bigtable-backup"
+ "hiddenSeries": false,
+ "id": 7,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
},
- {
- "icon": "dashboard",
- "keepTime": true,
- "tags": [],
- "targetBlank": true,
- "title": "Consul",
- "type": "link",
- "url": "/grafana/d/836793349c8ce200baa98f045123d19b/consul?var-datasource=$datasource&var-job=$namespace%2Fconsul"
- }
- ],
- "panels": [
- {
- "collapsed": false,
- "datasource": null,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 0
- },
- "id": 17,
- "panels": [],
- "title": "Main",
- "type": "row"
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
},
- {
- "aliasColors": {
- "5xx": "red"
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum by (status) (\nlabel_replace(\n label_replace(\n rate(loki_request_duration_seconds_count{cluster=\"$cluster\", job=\"$namespace/cortex-gw\", route=~\"api_prom_push|loki_api_v1_push\"}[5m]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n\"status\", \"${1}\", \"status_code\", \"([a-z]+)\"))",
+ "legendFormat": "{{status}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Pushes/Second",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 10,
+ "max": null,
+ "min": null,
+ "show": true
},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 5,
- "w": 4,
- "x": 0,
- "y": 1
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
},
- "hiddenSeries": false,
- "id": 6,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 4,
+ "x": 8,
+ "y": 1
+ },
+ "hiddenSeries": false,
+ "id": 11,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "hideEmpty": false,
+ "hideZero": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "topk(5, sum by (name,level) (rate(promtail_custom_bad_words_total{cluster=\"$cluster\", exported_namespace=\"$namespace\"}[$__interval])) - \nsum by (name,level) (rate(promtail_custom_bad_words_total{cluster=\"$cluster\", exported_namespace=\"$namespace\"}[$__interval] offset 1h)))",
+ "legendFormat": "{{name}}-{{level}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Bad Words",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
},
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
},
- "percentage": false,
- "pointradius": 2,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "sum by (status) (\nlabel_replace(\n label_replace(\n rate(loki_request_duration_seconds_count{cluster=\"$cluster\", job=\"$namespace/cortex-gw\", route=~\"api_prom_query|api_prom_label|api_prom_label_name_values|loki_api_v1_query|loki_api_v1_query_range|loki_api_v1_label|loki_api_v1_label_name_values\"}[5m]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n\"status\", \"${1}\", \"status_code\", \"([a-z]+)\")\n)",
- "legendFormat": "{{status}}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Queries/Second",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 4,
+ "x": 12,
+ "y": 1
+ },
+ "hiddenSeries": false,
+ "id": 2,
+ "interval": "",
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "topk(10, sum(rate(loki_distributor_lines_received_total{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (tenant))",
+ "legendFormat": "{{tenant}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Lines Per Tenant (top 10)",
+ "tooltip": {
+ "shared": false,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
},
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 10,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 4,
+ "x": 16,
+ "y": 1
+ },
+ "hiddenSeries": false,
+ "id": 4,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "hideEmpty": true,
+ "hideZero": true,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "topk(10, sum(rate(loki_distributor_bytes_received_total{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (tenant)) / 1024 / 1024",
+ "legendFormat": "{{tenant}}",
+ "refId": "A"
}
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "MBs Per Tenant (Top 10)",
+ "tooltip": {
+ "shared": false,
+ "sort": 0,
+ "value_type": "individual"
},
- {
- "aliasColors": {
- "5xx": "red"
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 5,
- "w": 4,
- "x": 4,
- "y": 1
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
},
- "hiddenSeries": false,
- "id": 7,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 4,
+ "x": 20,
+ "y": 1
+ },
+ "hiddenSeries": false,
+ "id": 24,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "increase(kube_pod_container_status_last_terminated_reason{cluster=\"$cluster\", namespace=\"$namespace\", reason!=\"Completed\"}[30m]) > 0",
+ "legendFormat": "{{container}}-{{pod}}-{{reason}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Container Termination",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
},
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
},
- "percentage": false,
- "pointradius": 2,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "sum by (status) (\nlabel_replace(\n label_replace(\n rate(loki_request_duration_seconds_count{cluster=\"$cluster\", job=\"$namespace/cortex-gw\", route=~\"api_prom_push|loki_api_v1_push\"}[5m]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n\"status\", \"${1}\", \"status_code\", \"([a-z]+)\"))",
- "legendFormat": "{{status}}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Pushes/Second",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 10,
+ "w": 12,
+ "x": 0,
+ "y": 6
+ },
+ "hiddenSeries": false,
+ "id": 9,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "histogram_quantile(0.99, sum by (le) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=~\"($namespace)/cortex-gw\", route=~\"api_prom_push|loki_api_v1_push\", cluster=~\"$cluster\"})) * 1e3",
+ "legendFormat": ".99",
+ "refId": "A"
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
+ {
+ "expr": "histogram_quantile(0.75, sum by (le) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=~\"($namespace)/cortex-gw\", route=~\"api_prom_push|loki_api_v1_push\", cluster=~\"$cluster\"})) * 1e3",
+ "legendFormat": ".9",
+ "refId": "B"
},
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 10,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
+ {
+ "expr": "histogram_quantile(0.5, sum by (le) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=~\"($namespace)/cortex-gw\", route=~\"api_prom_push|loki_api_v1_push\", cluster=~\"$cluster\"})) * 1e3",
+ "legendFormat": ".5",
+ "refId": "C"
}
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Push Latency",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
},
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 5,
- "w": 4,
- "x": 8,
- "y": 1
- },
- "hiddenSeries": false,
- "id": 11,
- "legend": {
- "avg": false,
- "current": false,
- "hideEmpty": false,
- "hideZero": false,
- "max": false,
- "min": false,
- "show": false,
- "total": false,
- "values": false
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
},
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
},
- "percentage": false,
- "pointradius": 2,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "topk(5, sum by (name,level) (rate(promtail_custom_bad_words_total{cluster=\"$cluster\", exported_namespace=\"$namespace\"}[$__interval])) - \nsum by (name,level) (rate(promtail_custom_bad_words_total{cluster=\"$cluster\", exported_namespace=\"$namespace\"}[$__interval] offset 1h)))",
- "legendFormat": "{{name}}-{{level}}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Bad Words",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 6,
+ "x": 12,
+ "y": 6
+ },
+ "hiddenSeries": false,
+ "id": 12,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "histogram_quantile(0.99, sum by (le) (job:loki_request_duration_seconds_bucket:sum_rate{job=~\"($namespace)/distributor\", cluster=~\"$cluster\"})) * 1e3",
+ "legendFormat": ".99",
+ "refId": "A"
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
+ {
+ "expr": "histogram_quantile(0.9, sum by (le) (job:loki_request_duration_seconds_bucket:sum_rate{job=~\"($namespace)/distributor\", cluster=~\"$cluster\"})) * 1e3",
+ "legendFormat": ".9",
+ "refId": "B"
},
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
+ {
+ "expr": "histogram_quantile(0.5, sum by (le) (job:loki_request_duration_seconds_bucket:sum_rate{job=~\"($namespace)/distributor\", cluster=~\"$cluster\"})) * 1e3",
+ "legendFormat": ".5",
+ "refId": "C"
}
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Distributor Latency",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
},
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 5,
- "w": 4,
- "x": 12,
- "y": 1
- },
- "hiddenSeries": false,
- "id": 2,
- "interval": "",
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 2,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "topk(10, sum(rate(loki_distributor_lines_received_total{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (tenant))",
- "legendFormat": "{{tenant}}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Lines Per Tenant (top 10)",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
},
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
+ "overrides": []
+ },
+ "fill": 0,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 6,
+ "x": 18,
+ "y": 6
+ },
+ "hiddenSeries": false,
+ "id": 71,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(loki_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/distributor\", status_code!~\"5[0-9]{2}\"}[5m])) by (route)\n/\nsum(rate(loki_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/distributor\"}[5m])) by (route)",
+ "interval": "",
+ "legendFormat": "{{route}}",
+ "refId": "A"
}
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Distributor Success Rate",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
},
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 5,
- "w": 4,
- "x": 16,
- "y": 1
- },
- "hiddenSeries": false,
- "id": 4,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": null,
+ "format": "percentunit",
+ "label": "",
+ "logBase": 1,
+ "max": "1",
+ "min": null,
+ "show": true
},
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
},
- "percentage": false,
- "pointradius": 2,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "topk(10, sum(rate(loki_distributor_bytes_received_total{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (tenant)) / 1024 / 1024",
- "legendFormat": "{{tenant}}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "MBs Per Tenant (Top 10)",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 6,
+ "x": 12,
+ "y": 11
+ },
+ "hiddenSeries": false,
+ "id": 13,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "histogram_quantile(0.99, sum by (le) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=~\"($namespace)/ingester\", route=\"/logproto.Pusher/Push\", cluster=~\"$cluster\"})) * 1e3",
+ "legendFormat": ".99",
+ "refId": "A"
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
+ {
+ "expr": "histogram_quantile(0.9, sum by (le) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=~\"($namespace)/ingester\", route=\"/logproto.Pusher/Push\", cluster=~\"$cluster\"})) * 1e3",
+ "hide": false,
+ "legendFormat": ".9",
+ "refId": "B"
},
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
+ {
+ "expr": "histogram_quantile(0.5, sum by (le) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=~\"($namespace)/ingester\", route=\"/logproto.Pusher/Push\", cluster=~\"$cluster\"})) * 1e3",
+ "hide": false,
+ "legendFormat": ".5",
+ "refId": "C"
}
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Ingester Latency Write",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
},
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 5,
- "w": 4,
- "x": 20,
- "y": 1
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
},
- "hiddenSeries": false,
- "id": 24,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": false,
- "total": false,
- "values": false
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
},
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
+ "overrides": []
+ },
+ "fill": 0,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 6,
+ "x": 18,
+ "y": 11
+ },
+ "hiddenSeries": false,
+ "id": 72,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(loki_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/ingester\", status_code!~\"5[0-9]{2}\", route=\"/logproto.Pusher/Push\"}[5m])) by (route)\n/\nsum(rate(loki_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/ingester\", route=\"/logproto.Pusher/Push\"}[5m])) by (route)",
+ "interval": "",
+ "legendFormat": "{{route}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Ingester Success Rate Write",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": null,
+ "format": "percentunit",
+ "label": "",
+ "logBase": 1,
+ "max": "1",
+ "min": null,
+ "show": true
},
- "percentage": false,
- "pointradius": 2,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "increase(kube_pod_container_status_last_terminated_reason{cluster=\"$cluster\", namespace=\"$namespace\", reason!=\"Completed\"}[30m]) > 0",
- "legendFormat": "{{container}}-{{pod}}-{{reason}}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Container Termination",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 10,
+ "w": 12,
+ "x": 0,
+ "y": 16
+ },
+ "hiddenSeries": false,
+ "id": 10,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": false,
+ "hideEmpty": true,
+ "hideZero": false,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "histogram_quantile(0.99, sum by (le,route) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=\"$namespace/querier\", route=~\"api_prom_query|api_prom_labels|api_prom_label_name_values|loki_api_v1_query|loki_api_v1_query_range|loki_api_v1_label|loki_api_v1_label_name_values\", cluster=\"$cluster\"}))",
+ "legendFormat": "{{route}}-.99",
+ "refId": "A"
},
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
+ {
+ "expr": "histogram_quantile(0.9, sum by (le,route) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=\"$namespace/querier\", route=~\"api_prom_query|api_prom_labels|api_prom_label_name_values|loki_api_v1_query|loki_api_v1_query_range|loki_api_v1_label|loki_api_v1_label_name_values\", cluster=\"$cluster\"}))",
+ "legendFormat": "{{route}}-.9",
+ "refId": "B"
+ },
+ {
+ "expr": "histogram_quantile(0.5, sum by (le,route) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=\"$namespace/querier\", route=~\"api_prom_query|api_prom_labels|api_prom_label_name_values|loki_api_v1_query|loki_api_v1_query_range|loki_api_v1_label|loki_api_v1_label_name_values\", cluster=\"$cluster\"}))",
+ "legendFormat": "{{route}}-.5",
+ "refId": "C"
}
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Query Latency",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
},
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 10,
- "w": 12,
- "x": 0,
- "y": 6
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
},
- "hiddenSeries": false,
- "id": 9,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": false,
- "values": false
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
},
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 6,
+ "x": 12,
+ "y": 16
+ },
+ "hiddenSeries": false,
+ "id": 14,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "histogram_quantile(0.99, sum by (le,route) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=\"$namespace/querier\", route=~\"api_prom_query|api_prom_labels|api_prom_label_name_values|loki_api_v1_query|loki_api_v1_query_range|loki_api_v1_label|loki_api_v1_label_name_values\", cluster=\"$cluster\"})) * 1e3",
+ "legendFormat": ".99-{{route}}",
+ "refId": "A"
},
- "percentage": false,
- "pointradius": 2,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "histogram_quantile(0.99, sum by (le) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=~\"($namespace)/cortex-gw\", route=~\"api_prom_push|loki_api_v1_push\", cluster=~\"$cluster\"})) * 1e3",
- "legendFormat": ".99",
- "refId": "A"
- },
- {
- "expr": "histogram_quantile(0.75, sum by (le) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=~\"($namespace)/cortex-gw\", route=~\"api_prom_push|loki_api_v1_push\", cluster=~\"$cluster\"})) * 1e3",
- "legendFormat": ".9",
- "refId": "B"
- },
- {
- "expr": "histogram_quantile(0.5, sum by (le) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=~\"($namespace)/cortex-gw\", route=~\"api_prom_push|loki_api_v1_push\", cluster=~\"$cluster\"})) * 1e3",
- "legendFormat": ".5",
- "refId": "C"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Push Latency",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
+ {
+ "expr": "histogram_quantile(0.9, sum by (le,route) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=\"$namespace/querier\", route=~\"api_prom_query|api_prom_labels|api_prom_label_name_values|loki_api_v1_query|loki_api_v1_query_range|loki_api_v1_label|loki_api_v1_label_name_values\", cluster=\"$cluster\"})) * 1e3",
+ "legendFormat": ".9-{{route}}",
+ "refId": "B"
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
+ {
+ "expr": "histogram_quantile(0.5, sum by (le,route) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=\"$namespace/querier\", route=~\"api_prom_query|api_prom_labels|api_prom_label_name_values|loki_api_v1_query|loki_api_v1_query_range|loki_api_v1_label|loki_api_v1_label_name_values\", cluster=\"$cluster\"})) * 1e3",
+ "legendFormat": ".5-{{route}}",
+ "refId": "C"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Querier Latency",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
},
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
}
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
},
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 5,
- "w": 6,
- "x": 12,
- "y": 6
+ "fill": 0,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 6,
+ "x": 18,
+ "y": 16
+ },
+ "hiddenSeries": false,
+ "id": 73,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(loki_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/querier\", status_code!~\"5[0-9]{2}\"}[5m])) by (route)\n/\nsum(rate(loki_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/querier\"}[5m])) by (route)",
+ "interval": "",
+ "legendFormat": "{{route}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Querier Success Rate",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": null,
+ "format": "percentunit",
+ "label": "",
+ "logBase": 1,
+ "max": "1",
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
},
- "hiddenSeries": false,
- "id": 12,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": false,
- "total": false,
- "values": false
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 6,
+ "x": 12,
+ "y": 21
+ },
+ "hiddenSeries": false,
+ "id": 15,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "histogram_quantile(0.99, sum by (le,route) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=\"$namespace/ingester\", route=~\"/logproto.Querier/Query|/logproto.Querier/Label\", cluster=\"$cluster\"})) * 1e3",
+ "legendFormat": ".99-{{route}}",
+ "refId": "A"
},
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
+ {
+ "expr": "histogram_quantile(0.9, sum by (le,route) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=\"$namespace/ingester\", route=~\"/logproto.Querier/Query|/logproto.Querier/Label\", cluster=\"$cluster\"})) * 1e3",
+ "legendFormat": ".9-{{route}}",
+ "refId": "B"
},
- "percentage": false,
- "pointradius": 2,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "histogram_quantile(0.99, sum by (le) (job:loki_request_duration_seconds_bucket:sum_rate{job=~\"($namespace)/distributor\", cluster=~\"$cluster\"})) * 1e3",
- "legendFormat": ".99",
- "refId": "A"
- },
- {
- "expr": "histogram_quantile(0.9, sum by (le) (job:loki_request_duration_seconds_bucket:sum_rate{job=~\"($namespace)/distributor\", cluster=~\"$cluster\"})) * 1e3",
- "legendFormat": ".9",
- "refId": "B"
- },
- {
- "expr": "histogram_quantile(0.5, sum by (le) (job:loki_request_duration_seconds_bucket:sum_rate{job=~\"($namespace)/distributor\", cluster=~\"$cluster\"})) * 1e3",
- "legendFormat": ".5",
- "refId": "C"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Distributor Latency",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
+ {
+ "expr": "histogram_quantile(0.5, sum by (le,route) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=\"$namespace/ingester\", route=~\"/logproto.Querier/Query|/logproto.Querier/Label\", cluster=\"$cluster\"})) * 1e3",
+ "legendFormat": ".5-{{route}}",
+ "refId": "C"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Ingester Latency Read",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
},
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
+ "overrides": []
+ },
+ "fill": 0,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 6,
+ "x": 18,
+ "y": 21
+ },
+ "hiddenSeries": false,
+ "id": 74,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(loki_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/ingester\", status_code!~\"5[0-9]{2}\", route=~\"/logproto.Querier/Query|/logproto.Querier/Label\"}[5m])) by (route)\n/\nsum(rate(loki_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/ingester\", route=~\"/logproto.Querier/Query|/logproto.Querier/Label\"}[5m])) by (route)",
+ "interval": "",
+ "legendFormat": "{{route}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Ingester Success Rate Read",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": null,
+ "format": "percentunit",
+ "label": "",
+ "logBase": 1,
+ "max": "1",
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "collapsed": true,
+ "datasource": null,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 26
+ },
+ "id": 110,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 27
},
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
+ "hiddenSeries": false,
+ "id": 112,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "topk(10,sum by (tenant, reason) (rate(loki_discarded_samples_total{cluster=\"$cluster\",namespace=\"$namespace\"}[1m])))",
+ "interval": "",
+ "legendFormat": "{{ tenant }} - {{ reason }}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Discarded Lines",
+ "tooltip": {
+ "shared": false,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
}
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
+ },
+ {
+ "columns": [],
+ "datasource": "$datasource",
+ "fontSize": "100%",
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 27
+ },
+ "id": 113,
+ "pageSize": null,
+ "showHeader": true,
+ "sort": {
+ "col": 3,
+ "desc": true
+ },
+ "styles": [
+ {
+ "alias": "Time",
+ "align": "auto",
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "pattern": "Time",
+ "type": "hidden"
+ },
+ {
+ "alias": "",
+ "align": "auto",
+ "colorMode": null,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "mappingType": 1,
+ "pattern": "tenant",
+ "thresholds": [],
+ "type": "string",
+ "unit": "short"
+ },
+ {
+ "alias": "",
+ "align": "auto",
+ "colorMode": null,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "dateFormat": "YYYY-MM-DD HH:mm:ss",
+ "decimals": 2,
+ "mappingType": 1,
+ "pattern": "reason",
+ "thresholds": [],
+ "type": "number",
+ "unit": "short"
+ },
+ {
+ "alias": "",
+ "align": "right",
+ "colorMode": null,
+ "colors": [
+ "rgba(245, 54, 54, 0.9)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(50, 172, 45, 0.97)"
+ ],
+ "decimals": 2,
+ "pattern": "/.*/",
+ "thresholds": [],
+ "type": "number",
+ "unit": "short"
+ }
+ ],
+ "targets": [
+ {
+ "expr": "topk(10, sum by (tenant, reason) (sum_over_time(increase(loki_discarded_samples_total{cluster=\"$cluster\",namespace=\"$namespace\"}[1m])[$__range:1m])))",
+ "format": "table",
+ "instant": true,
+ "interval": "",
+ "legendFormat": "{{ tenant }} - {{ reason }}",
+ "refId": "A"
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Discarded Lines Per Interval",
+ "transform": "table",
+ "type": "table"
}
+ ],
+ "title": "Limits",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "datasource": null,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 27
},
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 0,
- "fillGradient": 0,
- "gridPos": {
- "h": 5,
- "w": 6,
- "x": 18,
- "y": 6
+ "id": 23,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 6,
+ "x": 0,
+ "y": 28
+ },
+ "hiddenSeries": false,
+ "id": 26,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": false,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 1,
+ "points": true,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\", pod_name=~\"distributor.*\"}",
+ "intervalFactor": 3,
+ "legendFormat": "{{pod_name}}-{{container_name}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "CPU Usage",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
},
- "hiddenSeries": false,
- "id": 71,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": false,
- "total": false,
- "values": false
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 6,
+ "x": 6,
+ "y": 28
+ },
+ "hiddenSeries": false,
+ "id": 27,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "hideEmpty": false,
+ "hideZero": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": false,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 1,
+ "points": true,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "container_memory_usage_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod_name=~\"distributor.*\"}",
+ "instant": false,
+ "intervalFactor": 3,
+ "legendFormat": "{{pod_name}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Memory Usage",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
},
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
+ {
+ "aliasColors": {},
+ "bars": true,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$logs",
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 4,
+ "w": 12,
+ "x": 12,
+ "y": 28
+ },
+ "hiddenSeries": false,
+ "id": 31,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": false,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "{}",
+ "color": "#C4162A"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate({cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/distributor\"} |~ \"(?i)level=error\"[1m]))",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": false,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
},
- "percentage": false,
- "pointradius": 2,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "sum(rate(loki_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/distributor\", status_code=~\"success|200\"}[5m])) by (route)\n/\nsum(rate(loki_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/distributor\"}[5m])) by (route)",
- "legendFormat": "{{route}}",
- "refId": "A"
+ {
+ "datasource": "$logs",
+ "gridPos": {
+ "h": 18,
+ "w": 12,
+ "x": 12,
+ "y": 32
+ },
+ "id": 29,
+ "options": {
+ "showLabels": false,
+ "showTime": false,
+ "sortOrder": "Descending",
+ "wrapLogMessage": true
+ },
+ "targets": [
+ {
+ "expr": "{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/distributor\"} |~ \"(?i)level=error\"",
+ "refId": "A"
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Logs",
+ "type": "logs"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 0,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 6,
+ "x": 0,
+ "y": 35
+ },
+ "hiddenSeries": false,
+ "id": 33,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 1,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(loki_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/distributor\", status_code!~\"5[0-9]{2}\"}[5m])) by (route)\n/\nsum(rate(loki_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/distributor\"}[5m])) by (route)",
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "{{route}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Success Rate",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
}
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Distributor Success Rate",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 6,
+ "x": 6,
+ "y": 35
+ },
+ "hiddenSeries": false,
+ "id": 32,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 1,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(loki_distributor_ingester_append_failures_total{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (ingester)",
+ "intervalFactor": 1,
+ "legendFormat": "{{ingester}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Append Failures By Ingester",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
},
- "yaxes": [
- {
- "decimals": null,
- "format": "percentunit",
- "label": "",
- "logBase": 1,
- "max": "1",
- "min": null,
- "show": true
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 6,
+ "x": 0,
+ "y": 42
},
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
+ "hiddenSeries": false,
+ "id": 34,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 1,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(loki_distributor_bytes_received_total{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (instance)",
+ "intervalFactor": 1,
+ "legendFormat": "{{instance}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Bytes Received/Second",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 6,
+ "x": 6,
+ "y": 42
+ },
+ "hiddenSeries": false,
+ "id": 35,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 1,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(loki_distributor_lines_received_total{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (instance)",
+ "intervalFactor": 1,
+ "legendFormat": "{{instance}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Lines Received/Second",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
}
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
}
+ ],
+ "title": "Distributor",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "datasource": null,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 28
},
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 5,
- "w": 6,
- "x": 12,
- "y": 11
- },
- "hiddenSeries": false,
- "id": 13,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": false,
- "total": false,
- "values": false
+ "id": 19,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 6,
+ "x": 0,
+ "y": 29
+ },
+ "hiddenSeries": false,
+ "id": 36,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": false,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 1,
+ "points": true,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\", pod_name=~\"ingester.*\"}",
+ "intervalFactor": 3,
+ "legendFormat": "{{pod_name}}-{{container_name}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "CPU Usage",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
},
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 6,
+ "x": 6,
+ "y": 29
+ },
+ "hiddenSeries": false,
+ "id": 37,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "hideEmpty": false,
+ "hideZero": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": false,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 1,
+ "points": true,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "container_memory_usage_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod_name=~\"ingester.*\"}",
+ "instant": false,
+ "intervalFactor": 3,
+ "legendFormat": "{{pod_name}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Memory Usage",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
},
- "percentage": false,
- "pointradius": 2,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "histogram_quantile(0.99, sum by (le) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=~\"($namespace)/ingester\", route=\"/logproto.Pusher/Push\", cluster=~\"$cluster\"})) * 1e3",
- "legendFormat": ".99",
- "refId": "A"
- },
- {
- "expr": "histogram_quantile(0.9, sum by (le) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=~\"($namespace)/ingester\", route=\"/logproto.Pusher/Push\", cluster=~\"$cluster\"})) * 1e3",
- "hide": false,
- "legendFormat": ".9",
- "refId": "B"
- },
- {
- "expr": "histogram_quantile(0.5, sum by (le) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=~\"($namespace)/ingester\", route=\"/logproto.Pusher/Push\", cluster=~\"$cluster\"})) * 1e3",
- "hide": false,
- "legendFormat": ".5",
- "refId": "C"
+ {
+ "aliasColors": {},
+ "bars": true,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$logs",
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 3,
+ "w": 12,
+ "x": 12,
+ "y": 29
+ },
+ "hiddenSeries": false,
+ "id": 38,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": false,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "{}",
+ "color": "#F2495C"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate({cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/ingester\"} |~ \"(?i)level=error\"[1m]))",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": false,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
}
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Ingester Latency",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
+ {
+ "datasource": "$logs",
+ "gridPos": {
+ "h": 18,
+ "w": 12,
+ "x": 12,
+ "y": 32
+ },
+ "id": 39,
+ "options": {
+ "showLabels": false,
+ "showTime": false,
+ "sortOrder": "Descending",
+ "wrapLogMessage": true
+ },
+ "targets": [
+ {
+ "expr": "{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/ingester\"} |~ \"(?i)level=error\"",
+ "refId": "A"
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Logs",
+ "type": "logs"
},
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 0,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 6,
+ "x": 0,
+ "y": 36
},
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
+ "hiddenSeries": false,
+ "id": 67,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 1,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(loki_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/ingester\", status_code!~\"5[0-9]{2}\"}[1m])) by (route)\n/\nsum(rate(loki_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/ingester\"}[1m])) by (route)",
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "{{route}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Success Rate",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ }
+ ],
+ "title": "Ingester",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "datasource": null,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 29
+ },
+ "id": 104,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 30
+ },
+ "hiddenSeries": false,
+ "id": 106,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "hideEmpty": true,
+ "hideZero": true,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "topk(10,sum by (tenant) (loki_ingester_memory_streams{cluster=\"$cluster\",job=\"$namespace/ingester\"}))",
+ "interval": "",
+ "legendFormat": "{{ tenant }}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Active Streams",
+ "tooltip": {
+ "shared": false,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 30
+ },
+ "hiddenSeries": false,
+ "id": 108,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "hideEmpty": true,
+ "hideZero": true,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "topk(10, sum by (tenant) (rate(loki_ingester_streams_created_total{cluster=\"$cluster\",job=\"$namespace/ingester\"}[1m]) > 0))",
+ "interval": "",
+ "legendFormat": "{{ tenant }}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Streams Created/Sec",
+ "tooltip": {
+ "shared": false,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
}
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
}
+ ],
+ "title": "Streams",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "datasource": null,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 30
},
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 0,
- "fillGradient": 0,
- "gridPos": {
- "h": 5,
- "w": 6,
- "x": 18,
- "y": 11
- },
- "hiddenSeries": false,
- "id": 72,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": false,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 2,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "sum(rate(loki_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/ingester\", status_code=~\"success|200\", route=\"/logproto.Pusher/Push\"}[5m])) by (route)\n/\nsum(rate(loki_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/ingester\", route=\"/logproto.Pusher/Push\"}[5m])) by (route)",
- "legendFormat": "{{route}}",
- "refId": "A"
+ "id": 94,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "ops-cortex",
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 31
+ },
+ "hiddenSeries": false,
+ "id": 102,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "De-Dupe Ratio",
+ "yaxis": 2
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(loki_ingester_chunks_flushed_total{cluster=\"$cluster\",job=\"$namespace/ingester\"}[1m]))",
+ "interval": "",
+ "legendFormat": "Chunks",
+ "refId": "A"
+ },
+ {
+ "expr": "sum(increase(cortex_chunk_store_deduped_chunks_total{cluster=\"$cluster\", job=\"$namespace/ingester\"}[1m]))/sum(increase(loki_ingester_chunks_flushed_total{cluster=\"$cluster\", job=\"$namespace/ingester\"}[1m])) < 1",
+ "interval": "",
+ "legendFormat": "De-Dupe Ratio",
+ "refId": "B"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Chunks Flushed/Sec",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
}
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Ingester Success Rate",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
},
- "yaxes": [
- {
- "decimals": null,
- "format": "percentunit",
- "label": "",
- "logBase": 1,
- "max": "1",
- "min": null,
+ {
+ "cards": {
+ "cardPadding": null,
+ "cardRound": null
+ },
+ "color": {
+ "cardColor": "#b4ff00",
+ "colorScale": "sqrt",
+ "colorScheme": "interpolateSpectral",
+ "exponent": 0.5,
+ "mode": "spectrum"
+ },
+ "dataFormat": "tsbuckets",
+ "datasource": "ops-cortex",
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 31
+ },
+ "heatmap": {},
+ "hideZeroBuckets": false,
+ "highlightCards": true,
+ "id": 100,
+ "legend": {
+ "show": true
+ },
+ "reverseYBuckets": false,
+ "targets": [
+ {
+ "expr": "sum(rate(loki_ingester_chunk_size_bytes_bucket{cluster=\"$cluster\",job=\"$namespace/ingester\"}[1m])) by (le)",
+ "format": "heatmap",
+ "instant": false,
+ "interval": "",
+ "legendFormat": "{{ le }}",
+ "refId": "A"
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Chunk Size Bytes",
+ "tooltip": {
+ "show": true,
+ "showHistogram": false
+ },
+ "type": "heatmap",
+ "xAxis": {
"show": true
},
- {
- "format": "short",
- "label": null,
+ "xBucketNumber": null,
+ "xBucketSize": null,
+ "yAxis": {
+ "decimals": 0,
+ "format": "bytes",
"logBase": 1,
"max": null,
"min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 10,
- "w": 12,
- "x": 0,
- "y": 16
- },
- "hiddenSeries": false,
- "id": 10,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "hideEmpty": true,
- "hideZero": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
+ "show": true,
+ "splitFactor": null
+ },
+ "yBucketBound": "auto",
+ "yBucketNumber": null,
+ "yBucketSize": null
},
- "percentage": false,
- "pointradius": 2,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "histogram_quantile(0.99, sum by (le,route) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=\"$namespace/querier\", route=~\"api_prom_query|api_prom_labels|api_prom_label_name_values|loki_api_v1_query|loki_api_v1_query_range|loki_api_v1_label|loki_api_v1_label_name_values\", cluster=\"$cluster\"}))",
- "legendFormat": "{{route}}-.99",
- "refId": "A"
- },
- {
- "expr": "histogram_quantile(0.9, sum by (le,route) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=\"$namespace/querier\", route=~\"api_prom_query|api_prom_labels|api_prom_label_name_values|loki_api_v1_query|loki_api_v1_query_range|loki_api_v1_label|loki_api_v1_label_name_values\", cluster=\"$cluster\"}))",
- "legendFormat": "{{route}}-.9",
- "refId": "B"
- },
- {
- "expr": "histogram_quantile(0.5, sum by (le,route) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=\"$namespace/querier\", route=~\"api_prom_query|api_prom_labels|api_prom_label_name_values|loki_api_v1_query|loki_api_v1_query_range|loki_api_v1_label|loki_api_v1_label_name_values\", cluster=\"$cluster\"}))",
- "legendFormat": "{{route}}-.5",
- "refId": "C"
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "ops-cortex",
+ "fill": 7,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 9,
+ "w": 12,
+ "x": 0,
+ "y": 39
+ },
+ "hiddenSeries": false,
+ "id": 96,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(loki_ingester_chunks_flushed_total{cluster=\"$cluster\",job=\"$namespace/ingester\",reason=\"full\"}[1m]))/sum(rate(loki_ingester_chunks_flushed_total{cluster=\"$cluster\",job=\"$namespace/ingester\"}[1m]))",
+ "interval": "",
+ "legendFormat": "full",
+ "refId": "B"
+ },
+ {
+ "expr": "sum(rate(loki_ingester_chunks_flushed_total{cluster=\"$cluster\",job=\"$namespace/ingester\",reason=\"synced\"}[1m]))/sum(rate(loki_ingester_chunks_flushed_total{cluster=\"$cluster\",job=\"$namespace/ingester\"}[1m]))",
+ "interval": "",
+ "legendFormat": "synced",
+ "refId": "D"
+ },
+ {
+ "expr": "sum(rate(loki_ingester_chunks_flushed_total{cluster=\"$cluster\",job=\"$namespace/ingester\",reason=\"idle\"}[1m]))/sum(rate(loki_ingester_chunks_flushed_total{cluster=\"$cluster\",job=\"$namespace/ingester\"}[1m]))",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "idle",
+ "refId": "A"
+ },
+ {
+ "expr": "sum(rate(loki_ingester_chunks_flushed_total{cluster=\"$cluster\",job=\"$namespace/ingester\",reason=\"max_age\"}[1m]))/sum(rate(loki_ingester_chunks_flushed_total{cluster=\"$cluster\",job=\"$namespace/ingester\"}[1m]))",
+ "interval": "",
+ "legendFormat": "max_age",
+ "refId": "C"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Chunk Flush Reason",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "percentunit",
+ "label": null,
+ "logBase": 1,
+ "max": "1",
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
}
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Query Latency",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
+ {
+ "cards": {
+ "cardPadding": null,
+ "cardRound": null
+ },
+ "color": {
+ "cardColor": "#b4ff00",
+ "colorScale": "sqrt",
+ "colorScheme": "interpolateSpectral",
+ "exponent": 0.5,
"max": null,
"min": null,
+ "mode": "spectrum"
+ },
+ "dataFormat": "tsbuckets",
+ "datasource": "ops-cortex",
+ "gridPos": {
+ "h": 9,
+ "w": 12,
+ "x": 12,
+ "y": 39
+ },
+ "heatmap": {},
+ "hideZeroBuckets": true,
+ "highlightCards": true,
+ "id": 98,
+ "legend": {
+ "show": true
+ },
+ "reverseYBuckets": false,
+ "targets": [
+ {
+ "expr": "sum by (le) (rate(loki_ingester_chunk_utilization_bucket{cluster=\"$cluster\", job=\"$namespace/ingester\"}[1m]))",
+ "format": "heatmap",
+ "instant": false,
+ "interval": "",
+ "legendFormat": "{{ le }}",
+ "refId": "A"
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Chunk Utilization",
+ "tooltip": {
+ "show": true,
+ "showHistogram": false
+ },
+ "type": "heatmap",
+ "xAxis": {
"show": true
},
- {
- "format": "short",
- "label": null,
+ "xBucketNumber": null,
+ "xBucketSize": null,
+ "yAxis": {
+ "decimals": 0,
+ "format": "percentunit",
"logBase": 1,
"max": null,
"min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
+ "show": true,
+ "splitFactor": null
+ },
+ "yBucketBound": "auto",
+ "yBucketNumber": null,
+ "yBucketSize": null
}
+ ],
+ "title": "Chunks",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "datasource": null,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 31
},
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 5,
- "w": 6,
- "x": 12,
- "y": 16
- },
- "hiddenSeries": false,
- "id": 14,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": false,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 2,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "histogram_quantile(0.99, sum by (le,route) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=\"$namespace/querier\", route=~\"api_prom_query|api_prom_labels|api_prom_label_name_values|loki_api_v1_query|loki_api_v1_query_range|loki_api_v1_label|loki_api_v1_label_name_values\", cluster=\"$cluster\"})) * 1e3",
- "legendFormat": ".99-{{route}}",
- "refId": "A"
- },
- {
- "expr": "histogram_quantile(0.9, sum by (le,route) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=\"$namespace/querier\", route=~\"api_prom_query|api_prom_labels|api_prom_label_name_values|loki_api_v1_query|loki_api_v1_query_range|loki_api_v1_label|loki_api_v1_label_name_values\", cluster=\"$cluster\"})) * 1e3",
- "legendFormat": ".9-{{route}}",
- "refId": "B"
- },
- {
- "expr": "histogram_quantile(0.5, sum by (le,route) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=\"$namespace/querier\", route=~\"api_prom_query|api_prom_labels|api_prom_label_name_values|loki_api_v1_query|loki_api_v1_query_range|loki_api_v1_label|loki_api_v1_label_name_values\", cluster=\"$cluster\"})) * 1e3",
- "legendFormat": ".5-{{route}}",
- "refId": "C"
+ "id": 64,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 6,
+ "x": 0,
+ "y": 32
+ },
+ "hiddenSeries": false,
+ "id": 68,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": false,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 1,
+ "points": true,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\", pod_name=~\"querier.*\"}",
+ "intervalFactor": 3,
+ "legendFormat": "{{pod_name}}-{{container_name}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "CPU Usage",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
}
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Querier Latency",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
},
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 6,
+ "x": 6,
+ "y": 32
},
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
+ "hiddenSeries": false,
+ "id": 69,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "hideEmpty": false,
+ "hideZero": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": false,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 1,
+ "points": true,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "container_memory_usage_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod_name=~\"querier.*\"}",
+ "instant": false,
+ "intervalFactor": 3,
+ "legendFormat": "{{pod_name}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Memory Usage",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
}
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 0,
- "fillGradient": 0,
- "gridPos": {
- "h": 5,
- "w": 6,
- "x": 18,
- "y": 16
- },
- "hiddenSeries": false,
- "id": 73,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": false,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
},
- "percentage": false,
- "pointradius": 2,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "sum(rate(loki_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/querier\", status_code=~\"success|200\"}[5m])) by (route)\n/\nsum(rate(loki_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/querier\"}[5m])) by (route)",
- "legendFormat": "{{route}}",
- "refId": "A"
+ {
+ "aliasColors": {},
+ "bars": true,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$logs",
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 3,
+ "w": 12,
+ "x": 12,
+ "y": 32
+ },
+ "hiddenSeries": false,
+ "id": 65,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": false,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "{}",
+ "color": "#F2495C"
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate({cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/querier\"} |~ \"(?i)level=error\"[1m]))",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": false,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
}
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Querier Success Rate",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
+ {
+ "datasource": "$logs",
+ "gridPos": {
+ "h": 18,
+ "w": 12,
+ "x": 12,
+ "y": 35
+ },
+ "id": 66,
+ "options": {
+ "showLabels": false,
+ "showTime": false,
+ "sortOrder": "Descending",
+ "wrapLogMessage": true
+ },
+ "targets": [
+ {
+ "expr": "{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/querier\"} |~ \"(?i)level=error\"",
+ "refId": "A"
+ }
+ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Logs",
+ "type": "logs"
},
- "yaxes": [
- {
- "decimals": null,
- "format": "percentunit",
- "label": "",
- "logBase": 1,
- "max": "1",
- "min": null,
- "show": true
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 0,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 6,
+ "x": 0,
+ "y": 39
},
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
+ "hiddenSeries": false,
+ "id": 70,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 1,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(loki_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/querier\", status_code!~\"5[0-9]{2}\"}[1m])) by (route)\n/\nsum(rate(loki_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/querier\"}[1m])) by (route)",
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "{{route}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Success Rate",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
}
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
}
+ ],
+ "title": "Querier",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "datasource": null,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 32
},
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "description": "",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 5,
- "w": 6,
- "x": 12,
- "y": 21
- },
- "hiddenSeries": false,
- "id": 15,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": false,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 2,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "histogram_quantile(0.99, sum by (le,route) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=\"$namespace/ingester\", route=~\"/logproto.Querier/Query|/logproto.Querier/Label\", cluster=\"$cluster\"})) * 1e3",
- "legendFormat": ".99-{{route}}",
- "refId": "A"
- },
- {
- "expr": "histogram_quantile(0.9, sum by (le,route) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=\"$namespace/ingester\", route=~\"/logproto.Querier/Query|/logproto.Querier/Label\", cluster=\"$cluster\"})) * 1e3",
- "legendFormat": ".9-{{route}}",
- "refId": "B"
- },
- {
- "expr": "histogram_quantile(0.5, sum by (le,route) (job_route:loki_request_duration_seconds_bucket:sum_rate{job=\"$namespace/ingester\", route=~\"/logproto.Querier/Query|/logproto.Querier/Label\", cluster=\"$cluster\"})) * 1e3",
- "legendFormat": ".5-{{route}}",
- "refId": "C"
+ "id": 52,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 8,
+ "w": 24,
+ "x": 0,
+ "y": 30
+ },
+ "hiddenSeries": false,
+ "id": 53,
+ "interval": "",
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 1,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "histogram_quantile(.99, sum(rate(cortex_memcache_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (method, name, le))",
+ "intervalFactor": 1,
+ "legendFormat": ".99-{{method}}-{{name}}",
+ "refId": "A"
+ },
+ {
+ "expr": "histogram_quantile(.9, sum(rate(cortex_memcache_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (method, name, le))",
+ "hide": false,
+ "legendFormat": ".9-{{method}}-{{name}}",
+ "refId": "B"
+ },
+ {
+ "expr": "histogram_quantile(.5, sum(rate(cortex_memcache_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (method, name, le))",
+ "hide": false,
+ "legendFormat": ".5-{{method}}-{{name}}",
+ "refId": "C"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Latency By Method",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
}
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Ingester Latency",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 8,
+ "w": 24,
+ "x": 0,
+ "y": 38
},
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
+ "hiddenSeries": false,
+ "id": 54,
+ "interval": "",
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 1,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(cortex_memcache_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (status_code, method, name)",
+ "intervalFactor": 1,
+ "legendFormat": "{{status_code}}-{{method}}-{{name}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Status By Method",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
}
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
}
+ ],
+ "title": "Memcached",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "datasource": null,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 33
},
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 0,
- "fillGradient": 0,
- "gridPos": {
- "h": 5,
- "w": 6,
- "x": 18,
- "y": 21
- },
- "hiddenSeries": false,
- "id": 74,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": false,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 2,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "sum(rate(loki_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/ingester\", status_code=~\"success|200\", route=~\"/logproto.Querier/Query|/logproto.Querier/Label\"}[5m])) by (route)\n/\nsum(rate(loki_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/ingester\", route=~\"/logproto.Querier/Query|/logproto.Querier/Label\"}[5m])) by (route)",
- "legendFormat": "{{route}}",
- "refId": "A"
+ "id": 57,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 8,
+ "w": 24,
+ "x": 0,
+ "y": 31
+ },
+ "hiddenSeries": false,
+ "id": 55,
+ "interval": "",
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 1,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "histogram_quantile(.99, sum(rate(cortex_consul_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))",
+ "intervalFactor": 1,
+ "legendFormat": ".99-{{operation}}",
+ "refId": "A"
+ },
+ {
+ "expr": "histogram_quantile(.9, sum(rate(cortex_consul_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))",
+ "hide": false,
+ "legendFormat": ".9-{{operation}}",
+ "refId": "B"
+ },
+ {
+ "expr": "histogram_quantile(.5, sum(rate(cortex_consul_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))",
+ "hide": false,
+ "legendFormat": ".5-{{operation}}",
+ "refId": "C"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Latency By Operation",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
}
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Ingester Success Rate",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "decimals": null,
- "format": "percentunit",
- "label": "",
- "logBase": 1,
- "max": "1",
- "min": null,
- "show": true
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 8,
+ "w": 24,
+ "x": 0,
+ "y": 39
},
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
+ "hiddenSeries": false,
+ "id": 58,
+ "interval": "",
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 1,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(cortex_consul_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, status_code, method)",
+ "intervalFactor": 1,
+ "legendFormat": "{{status_code}}-{{operation}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Status By Operation",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
}
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
}
+ ],
+ "title": "Consul",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "datasource": null,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 34
},
- {
- "collapsed": true,
- "datasource": null,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 26
- },
- "id": 23,
- "panels": [
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 7,
- "w": 6,
- "x": 0,
- "y": 2
- },
- "id": 26,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": false,
- "total": false,
- "values": false
- },
- "lines": false,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 1,
- "points": true,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\", pod_name=~\"distributor.*\"}",
- "intervalFactor": 3,
- "legendFormat": "{{pod_name}}-{{container_name}}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "CPU Usage",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
+ "id": 43,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 6,
+ "x": 0,
+ "y": 9
+ },
+ "hiddenSeries": false,
+ "id": 41,
+ "interval": "",
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 1,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "histogram_quantile(.99, sum(rate(cortex_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/MutateRows\"}[5m])) by (operation, le))",
+ "intervalFactor": 1,
+ "legendFormat": ".9",
+ "refId": "A"
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
+ {
+ "expr": "histogram_quantile(.9, sum(rate(cortex_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/MutateRows\"}[5m])) by (operation, le))",
+ "refId": "B"
},
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
+ {
+ "expr": "histogram_quantile(.5, sum(rate(cortex_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/MutateRows\"}[5m])) by (operation, le))",
+ "refId": "C"
}
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "MutateRows Latency",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
},
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 7,
- "w": 6,
- "x": 6,
- "y": 2
- },
- "id": 27,
- "legend": {
- "avg": false,
- "current": false,
- "hideEmpty": false,
- "hideZero": false,
- "max": false,
- "min": false,
- "show": false,
- "total": false,
- "values": false
- },
- "lines": false,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 1,
- "points": true,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "container_memory_usage_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod_name=~\"distributor.*\"}",
- "instant": false,
- "intervalFactor": 3,
- "legendFormat": "{{pod_name}}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Memory Usage",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
},
- "yaxes": [
- {
- "format": "bytes",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
}
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 6,
+ "x": 6,
+ "y": 9
},
- {
- "aliasColors": {},
- "bars": true,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$logmetrics",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 3,
- "w": 12,
- "x": 12,
- "y": 2
- },
- "id": 31,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": false,
- "total": false,
- "values": false
- },
- "lines": false,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 2,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [
- {
- "alias": "warn",
- "color": "#FF9830"
- },
- {
- "alias": "error",
- "color": "#F2495C"
- },
- {
- "alias": "info",
- "color": "#73BF69"
- },
- {
- "alias": "debug",
- "color": "#5794F2"
- }
- ],
- "spaceLength": 10,
- "stack": true,
- "steppedLine": false,
- "targets": [
- {
- "expr": "sum(rate({cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/distributor\", level!~\"debug|info\"}[5m])) by (level)",
- "intervalFactor": 3,
- "legendFormat": "{{level}}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
+ "hiddenSeries": false,
+ "id": 46,
+ "interval": "",
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 1,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "histogram_quantile(.99, sum(rate(cortex_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/ReadRows\"}[5m])) by (operation, le))",
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "99%",
+ "refId": "A"
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": false,
- "values": []
+ {
+ "expr": "histogram_quantile(.9, sum(rate(cortex_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/ReadRows\"}[5m])) by (operation, le))",
+ "interval": "",
+ "legendFormat": "90%",
+ "refId": "B"
},
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
+ {
+ "expr": "histogram_quantile(.5, sum(rate(cortex_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/ReadRows\"}[5m])) by (operation, le))",
+ "interval": "",
+ "legendFormat": "50%",
+ "refId": "C"
}
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "ReadRows Latency",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
},
- {
- "datasource": "$logs",
- "gridPos": {
- "h": 18,
- "w": 12,
- "x": 12,
- "y": 5
- },
- "id": 29,
- "options": {
- "showTime": false,
- "sortOrder": "Descending"
- },
- "targets": [
- {
- "expr": "{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/distributor\", level!~\"info|debug\"}",
- "refId": "A"
- }
- ],
- "timeFrom": null,
- "timeShift": null,
- "title": "Logs",
- "type": "logs"
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 0,
- "fillGradient": 0,
- "gridPos": {
- "h": 7,
- "w": 6,
- "x": 0,
- "y": 9
- },
- "id": 33,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": false,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 1,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "sum(rate(loki_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/distributor\", status_code=~\"success|200\"}[5m])) by (route)\n/\nsum(rate(loki_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/distributor\"}[5m])) by (route)",
- "intervalFactor": 1,
- "legendFormat": "{{route}}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Success Rate",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
},
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
}
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 6,
+ "x": 12,
+ "y": 9
},
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 7,
- "w": 6,
- "x": 6,
- "y": 9
- },
- "id": 32,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": false,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 1,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "sum(rate(loki_distributor_ingester_append_failures_total{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (ingester)",
- "intervalFactor": 1,
- "legendFormat": "{{ingester}}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Append Failures By Ingester",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
+ "hiddenSeries": false,
+ "id": 44,
+ "interval": "",
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 1,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "histogram_quantile(.99, sum(rate(cortex_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/GetTable\"}[5m])) by (operation, le))",
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "99%",
+ "refId": "A"
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
+ {
+ "expr": "histogram_quantile(.9, sum(rate(cortex_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/GetTable\"}[5m])) by (operation, le))",
+ "interval": "",
+ "legendFormat": "90%",
+ "refId": "B"
},
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
+ {
+ "expr": "histogram_quantile(.5, sum(rate(cortex_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/GetTable\"}[5m])) by (operation, le))",
+ "interval": "",
+ "legendFormat": "50%",
+ "refId": "C"
}
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "GetTable Latency",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
},
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 7,
- "w": 6,
- "x": 0,
- "y": 16
- },
- "id": 34,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": false,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 1,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "sum(rate(loki_distributor_bytes_received_total{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (instance)",
- "intervalFactor": 1,
- "legendFormat": "{{instance}}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Bytes Received/Second",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
},
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
}
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 6,
+ "x": 18,
+ "y": 9
},
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 7,
- "w": 6,
- "x": 6,
- "y": 16
- },
- "id": 35,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": false,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
+ "hiddenSeries": false,
+ "id": 45,
+ "interval": "",
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 1,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "histogram_quantile(.99, sum(rate(cortex_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/ListTables\"}[5m])) by (operation, le))",
+ "intervalFactor": 1,
+ "legendFormat": ".9",
+ "refId": "A"
},
- "percentage": false,
- "pointradius": 1,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "sum(rate(loki_distributor_lines_received_total{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (instance)",
- "intervalFactor": 1,
- "legendFormat": "{{instance}}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Lines Received/Second",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
+ {
+ "expr": "histogram_quantile(.9, sum(rate(cortex_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/ListTables\"}[5m])) by (operation, le))",
+ "refId": "B"
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
+ {
+ "expr": "histogram_quantile(.5, sum(rate(cortex_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/ListTables\"}[5m])) by (operation, le))",
+ "refId": "C"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "ListTables Latency",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
},
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
}
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
}
- ],
- "title": "Distributor",
- "type": "row"
- },
- {
- "collapsed": true,
- "datasource": null,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 27
},
- "id": 19,
- "panels": [
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 7,
- "w": 6,
- "x": 0,
- "y": 3
- },
- "id": 36,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": false,
- "total": false,
- "values": false
- },
- "lines": false,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 1,
- "points": true,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\", pod_name=~\"ingester.*\"}",
- "intervalFactor": 3,
- "legendFormat": "{{pod_name}}-{{container_name}}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "CPU Usage",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 6,
+ "x": 0,
+ "y": 16
+ },
+ "hiddenSeries": false,
+ "id": 47,
+ "interval": "",
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 1,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(cortex_bigtable_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/MutateRows\"}[5m])) by (status_code)",
+ "intervalFactor": 1,
+ "legendFormat": "{{status_code}}",
+ "refId": "A"
}
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "MutateRows Status",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
},
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 7,
- "w": 6,
- "x": 6,
- "y": 3
- },
- "id": 37,
- "legend": {
- "avg": false,
- "current": false,
- "hideEmpty": false,
- "hideZero": false,
- "max": false,
- "min": false,
- "show": false,
- "total": false,
- "values": false
- },
- "lines": false,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 1,
- "points": true,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "container_memory_usage_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod_name=~\"ingester.*\"}",
- "instant": false,
- "intervalFactor": 3,
- "legendFormat": "{{pod_name}}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Memory Usage",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
},
- "yaxes": [
- {
- "format": "bytes",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
}
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 6,
+ "x": 6,
+ "y": 16
},
- {
- "aliasColors": {},
- "bars": true,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$logmetrics",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 3,
- "w": 12,
- "x": 12,
- "y": 3
- },
- "id": 38,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": false,
- "total": false,
- "values": false
- },
- "lines": false,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 2,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [
- {
- "alias": "warn",
- "color": "#FF9830"
- },
- {
- "alias": "error",
- "color": "#F2495C"
- },
- {
- "alias": "info",
- "color": "#73BF69"
- },
- {
- "alias": "debug",
- "color": "#5794F2"
- }
- ],
- "spaceLength": 10,
- "stack": true,
- "steppedLine": false,
- "targets": [
- {
- "expr": "sum(rate({cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/ingester\", level!~\"debug|info\"}[5m])) by (level)",
- "intervalFactor": 3,
- "legendFormat": "{{level}}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": false,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
+ "hiddenSeries": false,
+ "id": 50,
+ "interval": "",
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 1,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(cortex_bigtable_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/ReadRows\"}[5m])) by (status_code)",
+ "intervalFactor": 1,
+ "legendFormat": "{{status_code}}",
+ "refId": "A"
}
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "ReadRows Status",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
},
- {
- "datasource": "$logs",
- "gridPos": {
- "h": 18,
- "w": 12,
- "x": 12,
- "y": 6
- },
- "id": 39,
- "options": {
- "showTime": false,
- "sortOrder": "Descending"
- },
- "targets": [
- {
- "expr": "{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/ingester\", level!~\"info|debug\"}",
- "refId": "A"
- }
- ],
- "timeFrom": null,
- "timeShift": null,
- "title": "Logs",
- "type": "logs"
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 0,
- "fillGradient": 0,
- "gridPos": {
- "h": 7,
- "w": 6,
- "x": 0,
- "y": 10
- },
- "id": 67,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": false,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 1,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "sum(rate(loki_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/ingester\", status_code=~\"success|200\"}[5m])) by (route)\n/\nsum(rate(loki_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/ingester\"}[5m])) by (route)",
- "intervalFactor": 1,
- "legendFormat": "{{route}}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Success Rate",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
},
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
}
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
}
- ],
- "title": "Ingester",
- "type": "row"
- },
- {
- "collapsed": true,
- "datasource": null,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 28
},
- "id": 64,
- "panels": [
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 7,
- "w": 6,
- "x": 0,
- "y": 4
- },
- "id": 68,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": false,
- "total": false,
- "values": false
- },
- "lines": false,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 1,
- "points": true,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\", pod_name=~\"querier.*\"}",
- "intervalFactor": 3,
- "legendFormat": "{{pod_name}}-{{container_name}}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "CPU Usage",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 6,
+ "x": 12,
+ "y": 16
+ },
+ "hiddenSeries": false,
+ "id": 48,
+ "interval": "",
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 1,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(cortex_bigtable_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/GetTable\"}[5m])) by (status_code)",
+ "intervalFactor": 1,
+ "legendFormat": "{{status_code}}",
+ "refId": "A"
}
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "GetTable Status",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
},
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 7,
- "w": 6,
- "x": 6,
- "y": 4
- },
- "id": 69,
- "legend": {
- "avg": false,
- "current": false,
- "hideEmpty": false,
- "hideZero": false,
- "max": false,
- "min": false,
- "show": false,
- "total": false,
- "values": false
- },
- "lines": false,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 1,
- "points": true,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "container_memory_usage_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod_name=~\"querier.*\"}",
- "instant": false,
- "intervalFactor": 3,
- "legendFormat": "{{pod_name}}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Memory Usage",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
},
- "yaxes": [
- {
- "format": "bytes",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
}
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 6,
+ "x": 18,
+ "y": 16
},
- {
- "aliasColors": {},
- "bars": true,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$logmetrics",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 3,
- "w": 12,
- "x": 12,
- "y": 4
- },
- "id": 65,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": false,
- "total": false,
- "values": false
- },
- "lines": false,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 2,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [
- {
- "alias": "warn",
- "color": "#FF9830"
- },
- {
- "alias": "error",
- "color": "#F2495C"
- },
- {
- "alias": "info",
- "color": "#73BF69"
- },
- {
- "alias": "debug",
- "color": "#5794F2"
- }
- ],
- "spaceLength": 10,
- "stack": true,
- "steppedLine": false,
- "targets": [
- {
- "expr": "sum(rate({cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/querier\", level!~\"debug|info\"}[5m])) by (level)",
- "intervalFactor": 3,
- "legendFormat": "{{level}}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": false,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
+ "hiddenSeries": false,
+ "id": 49,
+ "interval": "",
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 1,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(cortex_bigtable_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/ListTables\"}[5m])) by (status_code)",
+ "intervalFactor": 1,
+ "legendFormat": "{{status_code}}",
+ "refId": "A"
}
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "ListTables Status",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
},
- {
- "datasource": "$logs",
- "gridPos": {
- "h": 18,
- "w": 12,
- "x": 12,
- "y": 7
- },
- "id": 66,
- "options": {
- "showTime": false,
- "sortOrder": "Descending"
- },
- "targets": [
- {
- "expr": "{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/querier\", level!~\"info|debug\"}",
- "refId": "A"
- }
- ],
- "timeFrom": null,
- "timeShift": null,
- "title": "Logs",
- "type": "logs"
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 0,
- "fillGradient": 0,
- "gridPos": {
- "h": 7,
- "w": 6,
- "x": 0,
- "y": 11
- },
- "id": 70,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": false,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 1,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "sum(rate(loki_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/querier\", status_code=~\"success|200\"}[5m])) by (route)\n/\nsum(rate(loki_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/querier\"}[5m])) by (route)",
- "intervalFactor": 1,
- "legendFormat": "{{route}}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Success Rate",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
},
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
}
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
}
- ],
- "title": "Querier",
- "type": "row"
+ }
+ ],
+ "title": "Big Table",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "datasource": null,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 35
},
- {
- "collapsed": true,
- "datasource": null,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 29
- },
- "id": 52,
- "panels": [
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 8,
- "w": 24,
- "x": 0,
- "y": 22
- },
- "id": 53,
- "interval": "",
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 1,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "histogram_quantile(.99, sum(rate(cortex_memcache_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (method, name, le))",
- "intervalFactor": 1,
- "legendFormat": ".99-{{method}}-{{name}}",
- "refId": "A"
- },
- {
- "expr": "histogram_quantile(.9, sum(rate(cortex_memcache_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (method, name, le))",
- "hide": false,
- "legendFormat": ".9-{{method}}-{{name}}",
- "refId": "B"
- },
- {
- "expr": "histogram_quantile(.5, sum(rate(cortex_memcache_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (method, name, le))",
- "hide": false,
- "legendFormat": ".5-{{method}}-{{name}}",
- "refId": "C"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Latency By Method",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
+ "id": 60,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 8,
+ "w": 24,
+ "x": 0,
+ "y": 33
+ },
+ "hiddenSeries": false,
+ "id": 61,
+ "interval": "",
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 1,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "histogram_quantile(.99, sum(rate(cortex_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))",
+ "intervalFactor": 1,
+ "legendFormat": ".99-{{operation}}",
+ "refId": "A"
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
+ {
+ "expr": "histogram_quantile(.9, sum(rate(cortex_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))",
+ "hide": false,
+ "legendFormat": ".9-{{operation}}",
+ "refId": "B"
},
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
+ {
+ "expr": "histogram_quantile(.5, sum(rate(cortex_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))",
+ "hide": false,
+ "legendFormat": ".5-{{operation}}",
+ "refId": "C"
}
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Latency By Operation",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
},
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 8,
- "w": 24,
- "x": 0,
- "y": 30
- },
- "id": 54,
- "interval": "",
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 1,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "sum(rate(cortex_memcache_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (status_code, method, name)",
- "intervalFactor": 1,
- "legendFormat": "{{status_code}}-{{method}}-{{name}}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Status By Method",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
},
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
}
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
}
- ],
- "title": "Memcached",
- "type": "row"
- },
- {
- "collapsed": true,
- "datasource": null,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 30
},
- "id": 57,
- "panels": [
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 8,
- "w": 24,
- "x": 0,
- "y": 31
- },
- "id": 55,
- "interval": "",
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 1,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "histogram_quantile(.99, sum(rate(cortex_consul_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))",
- "intervalFactor": 1,
- "legendFormat": ".99-{{operation}}",
- "refId": "A"
- },
- {
- "expr": "histogram_quantile(.9, sum(rate(cortex_consul_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))",
- "hide": false,
- "legendFormat": ".9-{{operation}}",
- "refId": "B"
- },
- {
- "expr": "histogram_quantile(.5, sum(rate(cortex_consul_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))",
- "hide": false,
- "legendFormat": ".5-{{operation}}",
- "refId": "C"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Latency By Operation",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 8,
+ "w": 24,
+ "x": 0,
+ "y": 41
+ },
+ "hiddenSeries": false,
+ "id": 62,
+ "interval": "",
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 1,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(cortex_gcs_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (status_code, operation)",
+ "intervalFactor": 1,
+ "legendFormat": "{{status_code}}-{{operation}}",
+ "refId": "A"
}
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Status By Method",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
},
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 8,
- "w": 24,
- "x": 0,
- "y": 39
- },
- "id": 58,
- "interval": "",
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 1,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "sum(rate(cortex_consul_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, status_code, method)",
- "intervalFactor": 1,
- "legendFormat": "{{status_code}}-{{operation}}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Status By Operation",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ }
+ ],
+ "title": "GCS",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "datasource": null,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 36
+ },
+ "id": 76,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 6,
+ "w": 6,
+ "x": 0,
+ "y": 9
+ },
+ "id": 82,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(cortex_dynamo_failures_total{cluster=\"$cluster\", namespace=\"$namespace\"}[5m]))",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Failure Rate",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
},
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
}
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
}
- ],
- "title": "Consul",
- "type": "row"
- },
- {
- "collapsed": true,
- "datasource": null,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 31
},
- "id": 43,
- "panels": [
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 7,
- "w": 6,
- "x": 0,
- "y": 5
- },
- "id": 41,
- "interval": "",
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": false,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 1,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "histogram_quantile(.99, sum(rate(cortex_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/MutateRows\"}[5m])) by (operation, le))",
- "intervalFactor": 1,
- "legendFormat": ".9",
- "refId": "A"
- },
- {
- "expr": "histogram_quantile(.9, sum(rate(cortex_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/MutateRows\"}[5m])) by (operation, le))",
- "refId": "B"
- },
- {
- "expr": "histogram_quantile(.5, sum(rate(cortex_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/MutateRows\"}[5m])) by (operation, le))",
- "refId": "C"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "MutateRows Latency",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 6,
+ "w": 6,
+ "x": 6,
+ "y": 9
},
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 7,
- "w": 6,
- "x": 6,
- "y": 5
- },
- "id": 46,
- "interval": "",
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": false,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 1,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "histogram_quantile(.99, sum(rate(cortex_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/ReadRows\"}[5m])) by (operation, le))",
- "intervalFactor": 1,
- "legendFormat": ".9",
- "refId": "A"
- },
- {
- "expr": "histogram_quantile(.9, sum(rate(cortex_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/ReadRows\"}[5m])) by (operation, le))",
- "refId": "B"
- },
- {
- "expr": "histogram_quantile(.5, sum(rate(cortex_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/ReadRows\"}[5m])) by (operation, le))",
- "refId": "C"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "ReadRows Latency",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
+ "id": 83,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
},
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 7,
- "w": 6,
- "x": 12,
- "y": 5
- },
- "id": 44,
- "interval": "",
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": false,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 1,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "histogram_quantile(.99, sum(rate(cortex_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/GetTable\"}[5m])) by (operation, le))",
- "intervalFactor": 1,
- "legendFormat": ".9",
- "refId": "A"
- },
- {
- "expr": "histogram_quantile(.9, sum(rate(cortex_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/GetTable\"}[5m])) by (operation, le))",
- "refId": "B"
- },
- {
- "expr": "histogram_quantile(.5, sum(rate(cortex_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/GetTable\"}[5m])) by (operation, le))",
- "refId": "C"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "GetTable Latency",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(cortex_dynamo_consumed_capacity_total{cluster=\"$cluster\", namespace=\"$namespace\"}[5m]))",
+ "refId": "A"
}
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Consumed Capacity Rate",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
},
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 7,
- "w": 6,
- "x": 18,
- "y": 5
- },
- "id": 45,
- "interval": "",
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": false,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 1,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "histogram_quantile(.99, sum(rate(cortex_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/ListTables\"}[5m])) by (operation, le))",
- "intervalFactor": 1,
- "legendFormat": ".9",
- "refId": "A"
- },
- {
- "expr": "histogram_quantile(.9, sum(rate(cortex_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/ListTables\"}[5m])) by (operation, le))",
- "refId": "B"
- },
- {
- "expr": "histogram_quantile(.5, sum(rate(cortex_bigtable_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/ListTables\"}[5m])) by (operation, le))",
- "refId": "C"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "ListTables Latency",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
},
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
}
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 6,
+ "w": 6,
+ "x": 12,
+ "y": 9
},
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 7,
- "w": 6,
- "x": 0,
- "y": 12
- },
- "id": 47,
- "interval": "",
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": false,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 1,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "sum(rate(cortex_bigtable_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/MutateRows\"}[5m])) by (status_code)",
- "intervalFactor": 1,
- "legendFormat": "{{status_code}}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "MutateRows Status",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
+ "id": 84,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(cortex_dynamo_throttled_total{cluster=\"$cluster\", namespace=\"$namespace\"}[5m]))",
+ "refId": "A"
}
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Throttled Rate",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
},
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 7,
- "w": 6,
- "x": 6,
- "y": 12
- },
- "id": 50,
- "interval": "",
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": false,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 1,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "sum(rate(cortex_bigtable_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.v2.Bigtable/ReadRows\"}[5m])) by (status_code)",
- "intervalFactor": 1,
- "legendFormat": "{{status_code}}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "ReadRows Status",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
},
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
}
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 6,
+ "w": 6,
+ "x": 18,
+ "y": 9
},
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 7,
- "w": 6,
- "x": 12,
- "y": 12
- },
- "id": 48,
- "interval": "",
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": false,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 1,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "sum(rate(cortex_bigtable_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/GetTable\"}[5m])) by (status_code)",
- "intervalFactor": 1,
- "legendFormat": "{{status_code}}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "GetTable Status",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
+ "id": 85,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(cortex_dynamo_dropped_requests_total{cluster=\"$cluster\", namespace=\"$namespace\"}[5m]))",
+ "refId": "A"
}
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Dropped Rate",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
},
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 7,
- "w": 6,
- "x": 18,
- "y": 12
- },
- "id": 49,
- "interval": "",
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": false,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 1,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "sum(rate(cortex_bigtable_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", operation=\"/google.bigtable.admin.v2.BigtableTableAdmin/ListTables\"}[5m])) by (status_code)",
- "intervalFactor": 1,
- "legendFormat": "{{status_code}}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "ListTables Status",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
},
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
}
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
}
- ],
- "title": "Big Table",
- "type": "row"
- },
- {
- "collapsed": true,
- "datasource": null,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 32
},
- "id": 60,
- "panels": [
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 8,
- "w": 24,
- "x": 0,
- "y": 8
- },
- "id": 61,
- "interval": "",
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 1,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "histogram_quantile(.99, sum(rate(cortex_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))",
- "intervalFactor": 1,
- "legendFormat": ".99-{{operation}}",
- "refId": "A"
- },
- {
- "expr": "histogram_quantile(.9, sum(rate(cortex_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))",
- "hide": false,
- "legendFormat": ".9-{{operation}}",
- "refId": "B"
- },
- {
- "expr": "histogram_quantile(.5, sum(rate(cortex_gcs_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))",
- "hide": false,
- "legendFormat": ".5-{{operation}}",
- "refId": "C"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Latency By Operation",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": null,
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 6,
+ "w": 6,
+ "x": 0,
+ "y": 15
+ },
+ "id": 86,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "histogram_quantile(.99, sum(rate(cortex_dynamo_query_pages_count{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])))",
+ "legendFormat": ".99",
+ "refId": "A"
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
+ {
+ "expr": "histogram_quantile(.9, sum(rate(cortex_dynamo_query_pages_count{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])))",
+ "legendFormat": ".9",
+ "refId": "B"
},
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
+ {
+ "expr": "histogram_quantile(.5, sum(rate(cortex_dynamo_query_pages_count{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])))",
+ "legendFormat": ".5",
+ "refId": "C"
}
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Query Pages",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
},
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 8,
- "w": 24,
- "x": 0,
- "y": 16
- },
- "id": 62,
- "interval": "",
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 1,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "sum(rate(cortex_gcs_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (status_code, operation)",
- "intervalFactor": 1,
- "legendFormat": "{{status_code}}-{{operation}}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Status By Method",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
},
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
}
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
}
- ],
- "title": "GCS",
- "type": "row"
- },
- {
- "collapsed": true,
- "datasource": null,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 33
},
- "id": 76,
- "panels": [
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": null,
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 6,
- "w": 6,
- "x": 0,
- "y": 9
- },
- "id": 82,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 2,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "sum(rate(cortex_dynamo_failures_total{cluster=\"$cluster\", namespace=\"$namespace\"}[5m]))",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Failure Rate",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 6,
+ "w": 9,
+ "x": 6,
+ "y": 15
},
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": null,
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 6,
- "w": 6,
- "x": 6,
- "y": 9
- },
- "id": 83,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 2,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "sum(rate(cortex_dynamo_consumed_capacity_total{cluster=\"$cluster\", namespace=\"$namespace\"}[5m]))",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Consumed Capacity Rate",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
+ "id": 87,
+ "interval": "",
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 1,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+          "expr": "histogram_quantile(.99, sum(rate(cortex_dynamo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))",
+ "intervalFactor": 1,
+ "legendFormat": ".99-{{operation}}",
+ "refId": "A"
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
+ {
+ "expr": "histogram_quantile(.9, sum(rate(cortex_dynamo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))",
+ "hide": false,
+ "legendFormat": ".9-{{operation}}",
+ "refId": "B"
},
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
+ {
+ "expr": "histogram_quantile(.5, sum(rate(cortex_dynamo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))",
+ "hide": false,
+ "legendFormat": ".5-{{operation}}",
+ "refId": "C"
}
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Latency By Operation",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
},
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": null,
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 6,
- "w": 6,
- "x": 12,
- "y": 9
- },
- "id": 84,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 2,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "sum(rate(cortex_dynamo_throttled_total{cluster=\"$cluster\", namespace=\"$namespace\"}[5m]))",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Throttled Rate",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
},
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
}
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 6,
+ "w": 9,
+ "x": 15,
+ "y": 15
},
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": null,
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 6,
- "w": 6,
- "x": 18,
- "y": 9
- },
- "id": 85,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 2,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "sum(rate(cortex_dynamo_dropped_requests_total{cluster=\"$cluster\", namespace=\"$namespace\"}[5m]))",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Dropped Rate",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
+ "id": 88,
+ "interval": "",
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 1,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(cortex_dynamo_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (status_code, operation)",
+ "intervalFactor": 1,
+ "legendFormat": "{{status_code}}-{{operation}}",
+ "refId": "A"
}
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Status By Method",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
},
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": null,
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 6,
- "w": 6,
- "x": 0,
- "y": 15
- },
- "id": 86,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 2,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "histogram_quantile(.99, sum(rate(cortex_dynamo_query_pages_count{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])))",
- "legendFormat": ".99",
- "refId": "A"
- },
- {
- "expr": "histogram_quantile(.9, sum(rate(cortex_dynamo_query_pages_count{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])))",
- "legendFormat": ".9",
- "refId": "B"
- },
- {
- "expr": "histogram_quantile(.5, sum(rate(cortex_dynamo_query_pages_count{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])))",
- "legendFormat": ".5",
- "refId": "C"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Query Pages",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
},
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
}
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ }
+ ],
+ "title": "Dynamo",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "datasource": null,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 37
+ },
+ "id": 78,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 8,
+ "w": 24,
+ "x": 0,
+ "y": 10
},
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 6,
- "w": 9,
- "x": 6,
- "y": 15
- },
- "id": 87,
- "interval": "",
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 1,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
-              "expr": "histogram_quantile(.99, sum(rate(cortex_dynamo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))",
- "intervalFactor": 1,
- "legendFormat": ".99-{{operation}}",
- "refId": "A"
- },
- {
- "expr": "histogram_quantile(.9, sum(rate(cortex_dynamo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))",
- "hide": false,
- "legendFormat": ".9-{{operation}}",
- "refId": "B"
- },
- {
- "expr": "histogram_quantile(.5, sum(rate(cortex_dynamo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))",
- "hide": false,
- "legendFormat": ".5-{{operation}}",
- "refId": "C"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Latency By Operation",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
+ "id": 79,
+ "interval": "",
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 1,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+          "expr": "histogram_quantile(.99, sum(rate(cortex_s3_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))",
+ "intervalFactor": 1,
+ "legendFormat": ".99-{{operation}}",
+ "refId": "A"
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
+ {
+ "expr": "histogram_quantile(.9, sum(rate(cortex_s3_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))",
+ "hide": false,
+ "legendFormat": ".9-{{operation}}",
+ "refId": "B"
},
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
+ {
+ "expr": "histogram_quantile(.5, sum(rate(cortex_s3_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))",
+ "hide": false,
+ "legendFormat": ".5-{{operation}}",
+ "refId": "C"
}
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Latency By Operation",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
},
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 6,
- "w": 9,
- "x": 15,
- "y": 15
- },
- "id": 88,
- "interval": "",
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 1,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "sum(rate(cortex_dynamo_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (status_code, operation)",
- "intervalFactor": 1,
- "legendFormat": "{{status_code}}-{{operation}}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Status By Method",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
},
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
}
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
}
- ],
- "title": "Dynamo",
- "type": "row"
- },
- {
- "collapsed": true,
- "datasource": null,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 34
},
- "id": 78,
- "panels": [
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 8,
- "w": 24,
- "x": 0,
- "y": 10
- },
- "id": 79,
- "interval": "",
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 1,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
-              "expr": "histogram_quantile(.99, sum(rate(cortex_s3_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))",
- "intervalFactor": 1,
- "legendFormat": ".99-{{operation}}",
- "refId": "A"
- },
- {
- "expr": "histogram_quantile(.9, sum(rate(cortex_s3_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))",
- "hide": false,
- "legendFormat": ".9-{{operation}}",
- "refId": "B"
- },
- {
- "expr": "histogram_quantile(.5, sum(rate(cortex_s3_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))",
- "hide": false,
- "legendFormat": ".5-{{operation}}",
- "refId": "C"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Latency By Operation",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 8,
+ "w": 24,
+ "x": 0,
+ "y": 18
+ },
+ "id": 80,
+ "interval": "",
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 1,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(cortex_s3_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (status_code, operation)",
+ "intervalFactor": 1,
+ "legendFormat": "{{status_code}}-{{operation}}",
+ "refId": "A"
}
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Status By Method",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
},
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 8,
- "w": 24,
- "x": 0,
- "y": 18
- },
- "id": 80,
- "interval": "",
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 1,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "sum(rate(cortex_s3_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (status_code, operation)",
- "intervalFactor": 1,
- "legendFormat": "{{status_code}}-{{operation}}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Status By Method",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
},
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
}
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
}
- ],
- "title": "S3",
- "type": "row"
+ }
+ ],
+ "title": "S3",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "datasource": null,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 38
},
- {
- "collapsed": true,
- "datasource": null,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 35
- },
- "id": 90,
- "panels": [
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 8,
- "w": 24,
- "x": 0,
- "y": 36
- },
- "id": 91,
- "interval": "",
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 1,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
-              "expr": "histogram_quantile(.99, sum(rate(cortex_cassandra_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))",
- "intervalFactor": 1,
- "legendFormat": ".99-{{operation}}",
- "refId": "A"
- },
- {
- "expr": "histogram_quantile(.9, sum(rate(cortex_cassandra_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))",
- "hide": false,
- "legendFormat": ".9-{{operation}}",
- "refId": "B"
- },
- {
- "expr": "histogram_quantile(.5, sum(rate(cortex_cassandra_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))",
- "hide": false,
- "legendFormat": ".5-{{operation}}",
- "refId": "C"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Latency By Operation",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
+ "id": 90,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 8,
+ "w": 24,
+ "x": 0,
+ "y": 36
+ },
+ "id": 91,
+ "interval": "",
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 1,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+            "expr": "histogram_quantile(.99, sum(rate(cortex_cassandra_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))",
+ "intervalFactor": 1,
+ "legendFormat": ".99-{{operation}}",
+ "refId": "A"
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
+ {
+ "expr": "histogram_quantile(.9, sum(rate(cortex_cassandra_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))",
+ "hide": false,
+ "legendFormat": ".9-{{operation}}",
+ "refId": "B"
},
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
+ {
+ "expr": "histogram_quantile(.5, sum(rate(cortex_cassandra_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (operation, le))",
+ "hide": false,
+ "legendFormat": ".5-{{operation}}",
+ "refId": "C"
}
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Latency By Operation",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
},
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 8,
- "w": 24,
- "x": 0,
- "y": 44
- },
- "id": 92,
- "interval": "",
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 1,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "sum(rate(cortex_cassandra_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (status_code, operation)",
- "intervalFactor": 1,
- "legendFormat": "{{status_code}}-{{operation}}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Status By Method",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
},
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
}
- }
- ],
- "title": "Cassandra",
- "type": "row"
- }
- ],
- "refresh": false,
- "schemaVersion": 22,
- "style": "dark",
- "tags": [],
- "templating": {
- "list": [
- {
- "hide": 0,
- "includeAll": false,
- "label": null,
- "multi": false,
- "name": "datasource",
- "options": [],
- "query": "prometheus",
- "refresh": 1,
- "regex": "",
- "skipUrlSync": false,
- "type": "datasource"
- },
- {
- "hide": 0,
- "includeAll": false,
- "label": null,
- "multi": false,
- "name": "logs",
- "options": [],
- "query": "loki",
- "refresh": 1,
- "regex": "",
- "skipUrlSync": false,
- "type": "datasource"
- },
- {
- "hide": 0,
- "includeAll": false,
- "label": null,
- "multi": false,
- "name": "logmetrics",
- "options": [],
- "query": "prometheus",
- "refresh": 1,
- "regex": "",
- "skipUrlSync": false,
- "type": "datasource"
- },
- {
- "allValue": null,
- "datasource": "$datasource",
- "definition": "label_values(kube_pod_container_info{image=~\".*loki.*\"}, cluster)",
- "hide": 0,
- "includeAll": false,
- "label": null,
- "multi": false,
- "name": "cluster",
- "options": [
],
- "query": "label_values(kube_pod_container_info{image=~\".*loki.*\"}, cluster)",
- "refresh": 1,
- "regex": "",
- "skipUrlSync": false,
- "sort": 0,
- "tagValuesQuery": "",
- "tags": [],
- "tagsQuery": "",
- "type": "query",
- "useTags": false
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
},
{
- "allValue": null,
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
"datasource": "$datasource",
- "definition": "label_values(kube_pod_container_info{image=~\".*loki.*\", cluster=\"$cluster\"}, namespace)",
- "hide": 0,
- "includeAll": false,
- "label": null,
- "multi": false,
- "name": "namespace",
- "options": [
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 8,
+ "w": 24,
+ "x": 0,
+ "y": 44
+ },
+ "id": 92,
+ "interval": "",
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "dataLinks": []
+ },
+ "percentage": false,
+ "pointradius": 1,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
{
- "selected": true,
- "text": "tempo-dev",
- "value": "tempo-dev"
- },
+ "expr": "sum(rate(cortex_cassandra_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\"}[5m])) by (status_code, operation)",
+ "intervalFactor": 1,
+ "legendFormat": "{{status_code}}-{{operation}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Status By Method",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
{
- "selected": false,
- "text": "cortex-ops",
- "value": "cortex-ops"
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
},
{
- "selected": false,
- "text": "default",
- "value": "default"
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
}
],
- "query": "label_values(kube_pod_container_info{image=~\".*loki.*\", cluster=\"$cluster\"}, namespace)",
- "refresh": 1,
- "regex": "",
- "skipUrlSync": false,
- "sort": 0,
- "tagValuesQuery": "",
- "tags": [],
- "tagsQuery": "",
- "type": "query",
- "useTags": false
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
}
- ]
- },
- "time": {
- "from": "now-3h",
- "to": "now"
- },
- "timepicker": {
- "refresh_intervals": [
- "5s",
- "10s",
- "30s",
- "1m",
- "5m",
- "15m",
- "30m",
- "1h",
- "2h",
- "1d"
- ]
- },
- "timezone": "",
- "title": "Loki Operational",
- "uid": "f6fe30815b172c9da7e810c15ddfe607",
- "version": 1
- }
\ No newline at end of file
+ ],
+ "title": "Cassandra",
+ "type": "row"
+ }
+ ],
+ "refresh": "30s",
+ "schemaVersion": 22,
+ "style": "dark",
+ "tags": [],
+ "templating": {
+ "list": [
+ {
+ "current": {
+ "selected": false,
+ "text": "ops-cortex",
+ "value": "ops-cortex"
+ },
+ "hide": 0,
+ "includeAll": false,
+ "label": null,
+ "multi": false,
+ "name": "datasource",
+ "options": [],
+ "query": "prometheus",
+ "refresh": 1,
+ "regex": "",
+ "skipUrlSync": false,
+ "type": "datasource"
+ },
+ {
+ "current": {
+ "selected": false,
+ "text": "Grafana Logging",
+ "value": "Grafana Logging"
+ },
+ "hide": 0,
+ "includeAll": false,
+ "label": null,
+ "multi": false,
+ "name": "logs",
+ "options": [],
+ "query": "loki",
+ "queryValue": "",
+ "refresh": 1,
+ "regex": "",
+ "skipUrlSync": false,
+ "type": "datasource"
+ },
+ {
+ "allValue": null,
+ "current": {
+ "text": "us-central1",
+ "value": "us-central1"
+ },
+ "datasource": "$datasource",
+ "definition": "label_values(kube_pod_container_info{image=~\".*loki.*\"}, cluster)",
+ "hide": 0,
+ "includeAll": false,
+ "index": -1,
+ "label": null,
+ "multi": false,
+ "name": "cluster",
+ "options": [],
+ "query": "label_values(kube_pod_container_info{image=~\".*loki.*\"}, cluster)",
+ "refresh": 1,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 0,
+ "tagValuesQuery": "",
+ "tags": [],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ },
+ {
+ "allValue": null,
+ "current": {
+ "text": "loki-prod",
+ "value": "loki-prod"
+ },
+ "datasource": "$datasource",
+ "definition": "label_values(kube_pod_container_info{image=~\".*loki.*\", cluster=\"$cluster\"}, namespace)",
+ "hide": 0,
+ "includeAll": false,
+ "index": -1,
+ "label": null,
+ "multi": false,
+ "name": "namespace",
+ "options": [],
+ "query": "label_values(kube_pod_container_info{image=~\".*loki.*\", cluster=\"$cluster\"}, namespace)",
+ "refresh": 1,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 0,
+ "tagValuesQuery": "",
+ "tags": [],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ }
+ ]
+ },
+ "time": {
+ "from": "now-3h",
+ "to": "now"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "10s",
+ "30s",
+ "1m",
+ "5m",
+ "15m",
+ "30m",
+ "1h",
+ "2h",
+ "1d"
+ ]
+ },
+ "timezone": "",
+ "title": "Loki Operational",
+ "uid": "f6fe30815b172c9da7e813c15ddfe607",
+ "variables": {
+ "list": []
+ },
+ "version": 21
+}
|
dashboard
|
Loki Operational improvements (#2041)
|
72e6fcc9d223cd8951b0b0937647b109bf234950
|
2022-11-23 15:44:43
|
Robert Jacob
|
operator: Move Loki TLS configuration into ConfigMap (#7738)
| false
|
diff --git a/operator/internal/manifests/build_test.go b/operator/internal/manifests/build_test.go
index 58aea0fbb4a79..373ebce8504cd 100644
--- a/operator/internal/manifests/build_test.go
+++ b/operator/internal/manifests/build_test.go
@@ -2,7 +2,6 @@ package manifests
import (
"fmt"
- "strings"
"testing"
"github.com/ViaQ/logerr/v2/kverrors"
@@ -348,14 +347,6 @@ func TestBuildAll_WithFeatureGates_HTTPEncryption(t *testing.T) {
HTTPEncryption: true,
},
}
- ciphers := strings.Join([]string{
- "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
- "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
- "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
- "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
- "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256",
- "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256",
- }, ",")
err := ApplyDefaultSettings(&opts)
require.NoError(t, err)
@@ -369,7 +360,6 @@ func TestBuildAll_WithFeatureGates_HTTPEncryption(t *testing.T) {
name string
vs []corev1.Volume
vms []corev1.VolumeMount
- args []string
rps corev1.URIScheme
lps corev1.URIScheme
)
@@ -379,14 +369,12 @@ func TestBuildAll_WithFeatureGates_HTTPEncryption(t *testing.T) {
name = o.Name
vs = o.Spec.Template.Spec.Volumes
vms = o.Spec.Template.Spec.Containers[0].VolumeMounts
- args = o.Spec.Template.Spec.Containers[0].Args
rps = o.Spec.Template.Spec.Containers[0].ReadinessProbe.ProbeHandler.HTTPGet.Scheme
lps = o.Spec.Template.Spec.Containers[0].LivenessProbe.ProbeHandler.HTTPGet.Scheme
case *appsv1.StatefulSet:
name = o.Name
vs = o.Spec.Template.Spec.Volumes
vms = o.Spec.Template.Spec.Containers[0].VolumeMounts
- args = o.Spec.Template.Spec.Containers[0].Args
rps = o.Spec.Template.Spec.Containers[0].ReadinessProbe.ProbeHandler.HTTPGet.Scheme
lps = o.Spec.Template.Spec.Containers[0].LivenessProbe.ProbeHandler.HTTPGet.Scheme
default:
@@ -411,10 +399,6 @@ func TestBuildAll_WithFeatureGates_HTTPEncryption(t *testing.T) {
}
require.Contains(t, vms, expVolumeMount)
- require.Contains(t, args, "-server.tls-min-version=VersionTLS12")
- require.Contains(t, args, fmt.Sprintf("-server.tls-cipher-suites=%s", ciphers))
- require.Contains(t, args, "-server.http-tls-cert-path=/var/run/tls/http/server/tls.crt")
- require.Contains(t, args, "-server.http-tls-key-path=/var/run/tls/http/server/tls.key")
require.Equal(t, corev1.URISchemeHTTPS, rps)
require.Equal(t, corev1.URISchemeHTTPS, lps)
}
@@ -448,7 +432,6 @@ func TestBuildAll_WithFeatureGates_ServiceMonitorTLSEndpoints(t *testing.T) {
name string
vs []corev1.Volume
vms []corev1.VolumeMount
- args []string
rps corev1.URIScheme
lps corev1.URIScheme
)
@@ -458,14 +441,12 @@ func TestBuildAll_WithFeatureGates_ServiceMonitorTLSEndpoints(t *testing.T) {
name = o.Name
vs = o.Spec.Template.Spec.Volumes
vms = o.Spec.Template.Spec.Containers[0].VolumeMounts
- args = o.Spec.Template.Spec.Containers[0].Args
rps = o.Spec.Template.Spec.Containers[0].ReadinessProbe.ProbeHandler.HTTPGet.Scheme
lps = o.Spec.Template.Spec.Containers[0].LivenessProbe.ProbeHandler.HTTPGet.Scheme
case *appsv1.StatefulSet:
name = o.Name
vs = o.Spec.Template.Spec.Volumes
vms = o.Spec.Template.Spec.Containers[0].VolumeMounts
- args = o.Spec.Template.Spec.Containers[0].Args
rps = o.Spec.Template.Spec.Containers[0].ReadinessProbe.ProbeHandler.HTTPGet.Scheme
lps = o.Spec.Template.Spec.Containers[0].LivenessProbe.ProbeHandler.HTTPGet.Scheme
default:
@@ -490,8 +471,6 @@ func TestBuildAll_WithFeatureGates_ServiceMonitorTLSEndpoints(t *testing.T) {
}
require.Contains(t, vms, expVolumeMount)
- require.Contains(t, args, "-server.http-tls-cert-path=/var/run/tls/http/server/tls.crt")
- require.Contains(t, args, "-server.http-tls-key-path=/var/run/tls/http/server/tls.key")
require.Equal(t, corev1.URISchemeHTTPS, rps)
require.Equal(t, corev1.URISchemeHTTPS, lps)
}
@@ -602,15 +581,6 @@ func TestBuildAll_WithFeatureGates_GRPCEncryption(t *testing.T) {
"test-ruler": "test-ruler-grpc",
}
- ciphers := strings.Join([]string{
- "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
- "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
- "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
- "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
- "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256",
- "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256",
- }, ",")
-
for _, tst := range table {
tst := tst
t.Run(tst.desc, func(t *testing.T) {
@@ -643,12 +613,6 @@ func TestBuildAll_WithFeatureGates_GRPCEncryption(t *testing.T) {
t.Run(name, func(t *testing.T) {
secretName := secretsMap[name]
- args := []string{
- "-server.grpc-tls-cert-path=/var/run/tls/grpc/server/tls.crt",
- "-server.grpc-tls-key-path=/var/run/tls/grpc/server/tls.key",
- "-server.tls-min-version=VersionTLS12",
- fmt.Sprintf("-server.tls-cipher-suites=%s", ciphers),
- }
vm := corev1.VolumeMount{
Name: secretName,
@@ -666,11 +630,9 @@ func TestBuildAll_WithFeatureGates_GRPCEncryption(t *testing.T) {
}
if tst.BuildOptions.Gates.GRPCEncryption {
- require.Subset(t, spec.Containers[0].Args, args)
require.Contains(t, spec.Containers[0].VolumeMounts, vm)
require.Contains(t, spec.Volumes, v)
} else {
- require.NotSubset(t, spec.Containers[0].Args, args)
require.NotContains(t, spec.Containers[0].VolumeMounts, vm)
require.NotContains(t, spec.Volumes, v)
}
diff --git a/operator/internal/manifests/compactor.go b/operator/internal/manifests/compactor.go
index f9950514b07a5..ee8a17b57d286 100644
--- a/operator/internal/manifests/compactor.go
+++ b/operator/internal/manifests/compactor.go
@@ -119,13 +119,6 @@ func NewCompactorStatefulSet(opts Options) *appsv1.StatefulSet {
SecurityContext: podSecurityContext(opts.Gates.RuntimeSeccompProfile),
}
- if opts.Gates.HTTPEncryption || opts.Gates.GRPCEncryption {
- podSpec.Containers[0].Args = append(podSpec.Containers[0].Args,
- fmt.Sprintf("-server.tls-cipher-suites=%s", opts.TLSCipherSuites()),
- fmt.Sprintf("-server.tls-min-version=%s", opts.TLSProfile.MinTLSVersion),
- )
- }
-
if opts.Stack.Template != nil && opts.Stack.Template.Compactor != nil {
podSpec.Tolerations = opts.Stack.Template.Compactor.Tolerations
podSpec.NodeSelector = opts.Stack.Template.Compactor.NodeSelector
@@ -241,7 +234,7 @@ func NewCompactorHTTPService(opts Options) *corev1.Service {
func configureCompactorHTTPServicePKI(statefulSet *appsv1.StatefulSet, opts Options) error {
serviceName := serviceNameCompactorHTTP(opts.Name)
- return configureHTTPServicePKI(&statefulSet.Spec.Template.Spec, serviceName, opts.TLSProfile.MinTLSVersion, opts.TLSCipherSuites())
+ return configureHTTPServicePKI(&statefulSet.Spec.Template.Spec, serviceName)
}
func configureCompactorGRPCServicePKI(sts *appsv1.StatefulSet, opts Options) error {
diff --git a/operator/internal/manifests/config.go b/operator/internal/manifests/config.go
index da9906d0f3cba..5587d43f16432 100644
--- a/operator/internal/manifests/config.go
+++ b/operator/internal/manifests/config.go
@@ -81,7 +81,35 @@ func ConfigOptions(opt Options) config.Options {
}
return config.Options{
- Stack: opt.Stack,
+ Stack: opt.Stack,
+ Gates: opt.Gates,
+ TLS: config.TLSOptions{
+ Ciphers: opt.TLSProfile.Ciphers,
+ MinTLSVersion: opt.TLSProfile.MinTLSVersion,
+ Paths: config.TLSFilePaths{
+ CA: signingCAPath(),
+ GRPC: config.TLSCertPath{
+ Certificate: lokiServerGRPCTLSCert(),
+ Key: lokiServerGRPCTLSKey(),
+ },
+ HTTP: config.TLSCertPath{
+ Certificate: lokiServerHTTPTLSCert(),
+ Key: lokiServerHTTPTLSKey(),
+ },
+ },
+ ServerNames: config.TLSServerNames{
+ GRPC: config.GRPCServerNames{
+ IndexGateway: fqdn(serviceNameIndexGatewayGRPC(opt.Name), opt.Namespace),
+ Ingester: fqdn(serviceNameIngesterGRPC(opt.Name), opt.Namespace),
+ QueryFrontend: fqdn(serviceNameQueryFrontendGRPC(opt.Name), opt.Namespace),
+ Ruler: fqdn(serviceNameRulerGRPC(opt.Name), opt.Namespace),
+ },
+ HTTP: config.HTTPServerNames{
+ Compactor: fqdn(serviceNameCompactorHTTP(opt.Name), opt.Namespace),
+ Querier: fqdn(serviceNameQuerierHTTP(opt.Name), opt.Namespace),
+ },
+ },
+ },
Namespace: opt.Namespace,
Name: opt.Name,
Compactor: config.Address{
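The config.go change above routes every TLS server name through an fqdn helper. A minimal sketch of that helper, assuming it simply joins the service name and namespace with the in-cluster DNS suffix — consistent with values such as test-querier-http.ns.svc.cluster.local expected by the tests further below; only the call sites appear in the diff, so the body is an assumption:

package main

import "fmt"

// Hypothetical reconstruction of the operator's fqdn helper; the diff above
// only shows it being called, not defined.
func fqdn(serviceName, namespace string) string {
	return fmt.Sprintf("%s.%s.svc.cluster.local", serviceName, namespace)
}

func main() {
	// e.g. the querier HTTP service for stack "test" in namespace "ns"
	fmt.Println(fqdn("test-querier-http", "ns")) // test-querier-http.ns.svc.cluster.local
}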
diff --git a/operator/internal/manifests/distributor.go b/operator/internal/manifests/distributor.go
index 93d4e102e5a10..28bded8b36018 100644
--- a/operator/internal/manifests/distributor.go
+++ b/operator/internal/manifests/distributor.go
@@ -6,8 +6,6 @@ import (
"github.com/grafana/loki/operator/internal/manifests/internal/config"
- "github.com/ViaQ/logerr/v2/kverrors"
- "github.com/imdario/mergo"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -115,13 +113,6 @@ func NewDistributorDeployment(opts Options) *appsv1.Deployment {
SecurityContext: podSecurityContext(opts.Gates.RuntimeSeccompProfile),
}
- if opts.Gates.HTTPEncryption || opts.Gates.GRPCEncryption {
- podSpec.Containers[0].Args = append(podSpec.Containers[0].Args,
- fmt.Sprintf("-server.tls-cipher-suites=%s", opts.TLSCipherSuites()),
- fmt.Sprintf("-server.tls-min-version=%s", opts.TLSProfile.MinTLSVersion),
- )
- }
-
if opts.Stack.Template != nil && opts.Stack.Template.Distributor != nil {
podSpec.Tolerations = opts.Stack.Template.Distributor.Tolerations
podSpec.NodeSelector = opts.Stack.Template.Distributor.NodeSelector
@@ -218,27 +209,10 @@ func NewDistributorHTTPService(opts Options) *corev1.Service {
func configureDistributorHTTPServicePKI(deployment *appsv1.Deployment, opts Options) error {
serviceName := serviceNameDistributorHTTP(opts.Name)
- return configureHTTPServicePKI(&deployment.Spec.Template.Spec, serviceName, opts.TLSProfile.MinTLSVersion, opts.TLSCipherSuites())
+ return configureHTTPServicePKI(&deployment.Spec.Template.Spec, serviceName)
}
func configureDistributorGRPCServicePKI(deployment *appsv1.Deployment, opts Options) error {
- secretContainerSpec := corev1.Container{
- Args: []string{
- // Enable GRPC over TLS for ingester client
- "-ingester.client.tls-enabled=true",
- fmt.Sprintf("-ingester.client.tls-cipher-suites=%s", opts.TLSCipherSuites()),
- fmt.Sprintf("-ingester.client.tls-min-version=%s", opts.TLSProfile.MinTLSVersion),
- fmt.Sprintf("-ingester.client.tls-ca-path=%s", signingCAPath()),
- fmt.Sprintf("-ingester.client.tls-cert-path=%s", lokiServerGRPCTLSCert()),
- fmt.Sprintf("-ingester.client.tls-key-path=%s", lokiServerGRPCTLSKey()),
- fmt.Sprintf("-ingester.client.tls-server-name=%s", fqdn(serviceNameIngesterGRPC(opts.Name), opts.Namespace)),
- },
- }
-
- if err := mergo.Merge(&deployment.Spec.Template.Spec.Containers[0], secretContainerSpec, mergo.WithAppendSlice); err != nil {
- return kverrors.Wrap(err, "failed to merge container")
- }
-
serviceName := serviceNameDistributorGRPC(opts.Name)
return configureGRPCServicePKI(&deployment.Spec.Template.Spec, serviceName)
}
diff --git a/operator/internal/manifests/indexgateway.go b/operator/internal/manifests/indexgateway.go
index 97101691c9ab7..473af989448b0 100644
--- a/operator/internal/manifests/indexgateway.go
+++ b/operator/internal/manifests/indexgateway.go
@@ -119,13 +119,6 @@ func NewIndexGatewayStatefulSet(opts Options) *appsv1.StatefulSet {
SecurityContext: podSecurityContext(opts.Gates.RuntimeSeccompProfile),
}
- if opts.Gates.HTTPEncryption || opts.Gates.GRPCEncryption {
- podSpec.Containers[0].Args = append(podSpec.Containers[0].Args,
- fmt.Sprintf("-server.tls-cipher-suites=%s", opts.TLSCipherSuites()),
- fmt.Sprintf("-server.tls-min-version=%s", opts.TLSProfile.MinTLSVersion),
- )
- }
-
if opts.Stack.Template != nil && opts.Stack.Template.IndexGateway != nil {
podSpec.Tolerations = opts.Stack.Template.IndexGateway.Tolerations
podSpec.NodeSelector = opts.Stack.Template.IndexGateway.NodeSelector
@@ -242,7 +235,7 @@ func NewIndexGatewayHTTPService(opts Options) *corev1.Service {
func configureIndexGatewayHTTPServicePKI(statefulSet *appsv1.StatefulSet, opts Options) error {
serviceName := serviceNameIndexGatewayHTTP(opts.Name)
- return configureHTTPServicePKI(&statefulSet.Spec.Template.Spec, serviceName, opts.TLSProfile.MinTLSVersion, opts.TLSCipherSuites())
+ return configureHTTPServicePKI(&statefulSet.Spec.Template.Spec, serviceName)
}
func configureIndexGatewayGRPCServicePKI(sts *appsv1.StatefulSet, opts Options) error {
diff --git a/operator/internal/manifests/ingester.go b/operator/internal/manifests/ingester.go
index 5e49ec4f68dd4..623df0d61be2b 100644
--- a/operator/internal/manifests/ingester.go
+++ b/operator/internal/manifests/ingester.go
@@ -7,8 +7,6 @@ import (
"github.com/grafana/loki/operator/internal/manifests/internal/config"
"github.com/grafana/loki/operator/internal/manifests/storage"
- "github.com/ViaQ/logerr/v2/kverrors"
- "github.com/imdario/mergo"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
@@ -131,13 +129,6 @@ func NewIngesterStatefulSet(opts Options) *appsv1.StatefulSet {
SecurityContext: podSecurityContext(opts.Gates.RuntimeSeccompProfile),
}
- if opts.Gates.HTTPEncryption || opts.Gates.GRPCEncryption {
- podSpec.Containers[0].Args = append(podSpec.Containers[0].Args,
- fmt.Sprintf("-server.tls-cipher-suites=%s", opts.TLSCipherSuites()),
- fmt.Sprintf("-server.tls-min-version=%s", opts.TLSProfile.MinTLSVersion),
- )
- }
-
if opts.Stack.Template != nil && opts.Stack.Template.Ingester != nil {
podSpec.Tolerations = opts.Stack.Template.Ingester.Tolerations
podSpec.NodeSelector = opts.Stack.Template.Ingester.NodeSelector
@@ -271,35 +262,10 @@ func NewIngesterHTTPService(opts Options) *corev1.Service {
func configureIngesterHTTPServicePKI(statefulSet *appsv1.StatefulSet, opts Options) error {
serviceName := serviceNameIngesterHTTP(opts.Name)
- return configureHTTPServicePKI(&statefulSet.Spec.Template.Spec, serviceName, opts.TLSProfile.MinTLSVersion, opts.TLSCipherSuites())
+ return configureHTTPServicePKI(&statefulSet.Spec.Template.Spec, serviceName)
}
func configureIngesterGRPCServicePKI(sts *appsv1.StatefulSet, opts Options) error {
- secretContainerSpec := corev1.Container{
- Args: []string{
- // Enable GRPC over TLS for ingester client
- "-ingester.client.tls-enabled=true",
- fmt.Sprintf("-ingester.client.tls-cipher-suites=%s", opts.TLSCipherSuites()),
- fmt.Sprintf("-ingester.client.tls-min-version=%s", opts.TLSProfile.MinTLSVersion),
- fmt.Sprintf("-ingester.client.tls-ca-path=%s", signingCAPath()),
- fmt.Sprintf("-ingester.client.tls-cert-path=%s", lokiServerGRPCTLSCert()),
- fmt.Sprintf("-ingester.client.tls-key-path=%s", lokiServerGRPCTLSKey()),
- fmt.Sprintf("-ingester.client.tls-server-name=%s", fqdn(serviceNameIngesterGRPC(opts.Name), opts.Namespace)),
- // Enable GRPC over TLS for boltb-shipper index-gateway client
- "-boltdb.shipper.index-gateway-client.grpc.tls-enabled=true",
- fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-cipher-suites=%s", opts.TLSCipherSuites()),
- fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-min-version=%s", opts.TLSProfile.MinTLSVersion),
- fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-ca-path=%s", signingCAPath()),
- fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-cert-path=%s", lokiServerGRPCTLSCert()),
- fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-key-path=%s", lokiServerGRPCTLSKey()),
- fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-server-name=%s", fqdn(serviceNameIndexGatewayGRPC(opts.Name), opts.Namespace)),
- },
- }
-
- if err := mergo.Merge(&sts.Spec.Template.Spec.Containers[0], secretContainerSpec, mergo.WithAppendSlice); err != nil {
- return kverrors.Wrap(err, "failed to merge container")
- }
-
serviceName := serviceNameIngesterGRPC(opts.Name)
return configureGRPCServicePKI(&sts.Spec.Template.Spec, serviceName)
}
diff --git a/operator/internal/manifests/internal/config/build_test.go b/operator/internal/manifests/internal/config/build_test.go
index 752c243ed9140..e2f28829d341f 100644
--- a/operator/internal/manifests/internal/config/build_test.go
+++ b/operator/internal/manifests/internal/config/build_test.go
@@ -3,6 +3,7 @@ package config
import (
"testing"
+ configv1 "github.com/grafana/loki/operator/apis/config/v1"
lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/manifests/storage"
"github.com/stretchr/testify/require"
@@ -2307,3 +2308,330 @@ overrides:
require.YAMLEq(t, expCfg, string(cfg))
require.YAMLEq(t, expRCfg, string(rCfg))
}
+
+func TestBuild_ConfigAndRuntimeConfig_WithTLS(t *testing.T) {
+ expCfg := `
+---
+auth_enabled: true
+chunk_store_config:
+ chunk_cache_config:
+ enable_fifocache: true
+ fifocache:
+ max_size_bytes: 500MB
+common:
+ storage:
+ s3:
+ s3: http://test.default.svc.cluster.local.:9000
+ bucketnames: loki
+ region: us-east
+ access_key_id: test
+ secret_access_key: test123
+ s3forcepathstyle: true
+ compactor_address: http://loki-compactor-http-lokistack-dev.default.svc.cluster.local:3100
+compactor:
+ compaction_interval: 2h
+ working_directory: /tmp/loki/compactor
+frontend:
+ tail_proxy_url: http://loki-querier-http-lokistack-dev.default.svc.cluster.local:3100
+ tail_tls_config:
+ tls_cert_path: /var/run/tls/http/tls.crt
+ tls_key_path: /var/run/tls/http/tls.key
+ tls_ca_path: /var/run/tls/ca.pem
+ tls_server_name: querier-http.svc
+ tls_cipher_suites: cipher1,cipher2
+ tls_min_version: VersionTLS12
+ compress_responses: true
+ max_outstanding_per_tenant: 256
+ log_queries_longer_than: 5s
+frontend_worker:
+ frontend_address: loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local:9095
+ grpc_client_config:
+ max_send_msg_size: 104857600
+ tls_enabled: true
+ tls_cert_path: /var/run/tls/grpc/tls.crt
+ tls_key_path: /var/run/tls/grpc/tls.key
+ tls_ca_path: /var/run/tls/ca.pem
+ tls_server_name: query-frontend-grpc.svc
+ tls_cipher_suites: cipher1,cipher2
+ tls_min_version: VersionTLS12
+ match_max_concurrent: true
+ingester:
+ chunk_block_size: 262144
+ chunk_encoding: snappy
+ chunk_idle_period: 1h
+ chunk_retain_period: 5m
+ chunk_target_size: 2097152
+ flush_op_timeout: 10m
+ lifecycler:
+ final_sleep: 0s
+ heartbeat_period: 5s
+ join_after: 30s
+ num_tokens: 512
+ ring:
+ replication_factor: 1
+ heartbeat_timeout: 1m
+ max_chunk_age: 2h
+ max_transfer_retries: 0
+ wal:
+ enabled: true
+ dir: /tmp/wal
+ replay_memory_ceiling: 2500
+ingester_client:
+ grpc_client_config:
+ max_recv_msg_size: 67108864
+ tls_enabled: true
+ tls_cert_path: /var/run/tls/grpc/tls.crt
+ tls_key_path: /var/run/tls/grpc/tls.key
+ tls_ca_path: /var/run/tls/ca.pem
+ tls_server_name: ingester-grpc.svc
+ tls_cipher_suites: cipher1,cipher2
+ tls_min_version: VersionTLS12
+ remote_timeout: 1s
+# NOTE: Keep the order of keys as in Loki docs
+# to enable easy diffs when vendoring newer
+# Loki releases.
+# (See https://grafana.com/docs/loki/latest/configuration/#limits_config)
+#
+# Values for not exposed fields are taken from the grafana/loki production
+# configuration manifests.
+# (See https://github.com/grafana/loki/blob/main/production/ksonnet/loki/config.libsonnet)
+limits_config:
+ ingestion_rate_strategy: global
+ ingestion_rate_mb: 4
+ ingestion_burst_size_mb: 6
+ max_label_name_length: 1024
+ max_label_value_length: 2048
+ max_label_names_per_series: 30
+ reject_old_samples: true
+ reject_old_samples_max_age: 168h
+ creation_grace_period: 10m
+ enforce_metric_name: false
+ # Keep max_streams_per_user always to 0 to default
+ # using max_global_streams_per_user always.
+ # (See https://github.com/grafana/loki/blob/main/pkg/ingester/limiter.go#L73)
+ max_streams_per_user: 0
+ max_line_size: 256000
+ max_entries_limit_per_query: 5000
+ max_global_streams_per_user: 0
+ max_chunks_per_query: 2000000
+ max_query_length: 721h
+ max_query_parallelism: 32
+ max_query_series: 500
+ cardinality_limit: 100000
+ max_streams_matchers_per_query: 1000
+ max_cache_freshness_per_query: 10m
+ per_stream_rate_limit: 3MB
+ per_stream_rate_limit_burst: 15MB
+ split_queries_by_interval: 30m
+ query_timeout: 1m
+memberlist:
+ abort_if_cluster_join_fails: true
+ bind_port: 7946
+ join_members:
+ - loki-gossip-ring-lokistack-dev.default.svc.cluster.local:7946
+ max_join_backoff: 1m
+ max_join_retries: 10
+ min_join_backoff: 1s
+querier:
+ engine:
+ max_look_back_period: 30s
+ timeout: 3m
+ extra_query_delay: 0s
+ max_concurrent: 2
+ query_ingesters_within: 3h
+ tail_max_duration: 1h
+compactor_client:
+ tls_enabled: true
+ tls_cert_path: /var/run/tls/http/tls.crt
+ tls_key_path: /var/run/tls/http/tls.key
+ tls_ca_path: /var/run/tls/ca.pem
+ tls_server_name: compactor-http.svc
+ tls_cipher_suites: cipher1,cipher2
+ tls_min_version: VersionTLS12
+query_range:
+ align_queries_with_step: true
+ cache_results: true
+ max_retries: 5
+ results_cache:
+ cache:
+ enable_fifocache: true
+ fifocache:
+ max_size_bytes: 500MB
+ parallelise_shardable_queries: true
+schema_config:
+ configs:
+ - from: "2020-10-01"
+ index:
+ period: 24h
+ prefix: index_
+ object_store: s3
+ schema: v11
+ store: boltdb-shipper
+
+internal_server:
+ enable: true
+ http_listen_address: ""
+ tls_min_version: VersionTLS12
+ tls_cipher_suites: cipher1,cipher2
+ http_tls_config:
+ cert_file: /var/run/tls/http/tls.crt
+ key_file: /var/run/tls/http/tls.key
+server:
+ graceful_shutdown_timeout: 5s
+ grpc_server_min_time_between_pings: '10s'
+ grpc_server_ping_without_stream_allowed: true
+ grpc_server_max_concurrent_streams: 1000
+ grpc_server_max_recv_msg_size: 104857600
+ grpc_server_max_send_msg_size: 104857600
+ http_listen_port: 3100
+ http_server_idle_timeout: 120s
+ http_server_write_timeout: 1m
+ tls_min_version: VersionTLS12
+ tls_cipher_suites: cipher1,cipher2
+ http_tls_config:
+ cert_file: /var/run/tls/http/tls.crt
+ key_file: /var/run/tls/http/tls.key
+ client_auth_type: RequireAndVerifyClientCert
+ client_ca_file: /var/run/tls/ca.pem
+ grpc_tls_config:
+ cert_file: /var/run/tls/grpc/tls.crt
+ key_file: /var/run/tls/grpc/tls.key
+ client_auth_type: RequireAndVerifyClientCert
+ client_ca_file: /var/run/tls/ca.pem
+ log_level: info
+storage_config:
+ boltdb_shipper:
+ active_index_directory: /tmp/loki/index
+ cache_location: /tmp/loki/index_cache
+ cache_ttl: 24h
+ resync_interval: 5m
+ shared_store: s3
+ index_gateway_client:
+ server_address: dns:///loki-index-gateway-grpc-lokistack-dev.default.svc.cluster.local:9095
+ grpc_client_config:
+ tls_enabled: true
+ tls_cert_path: /var/run/tls/grpc/tls.crt
+ tls_key_path: /var/run/tls/grpc/tls.key
+ tls_ca_path: /var/run/tls/ca.pem
+ tls_server_name: index-gateway-grpc.svc
+ tls_cipher_suites: cipher1,cipher2
+ tls_min_version: VersionTLS12
+tracing:
+ enabled: false
+analytics:
+ reporting_enabled: true
+`
+ expRCfg := `
+---
+overrides:
+`
+ opts := Options{
+ Stack: lokiv1.LokiStackSpec{
+ ReplicationFactor: 1,
+ Limits: &lokiv1.LimitsSpec{
+ Global: &lokiv1.LimitsTemplateSpec{
+ IngestionLimits: &lokiv1.IngestionLimitSpec{
+ IngestionRate: 4,
+ IngestionBurstSize: 6,
+ MaxLabelNameLength: 1024,
+ MaxLabelValueLength: 2048,
+ MaxLabelNamesPerSeries: 30,
+ MaxGlobalStreamsPerTenant: 0,
+ MaxLineSize: 256000,
+ },
+ QueryLimits: &lokiv1.QueryLimitSpec{
+ MaxEntriesLimitPerQuery: 5000,
+ MaxChunksPerQuery: 2000000,
+ MaxQuerySeries: 500,
+ QueryTimeout: "1m",
+ },
+ },
+ },
+ },
+ Gates: configv1.FeatureGates{
+ HTTPEncryption: true,
+ GRPCEncryption: true,
+ },
+ TLS: TLSOptions{
+ Ciphers: []string{"cipher1", "cipher2"},
+ MinTLSVersion: "VersionTLS12",
+ Paths: TLSFilePaths{
+ CA: "/var/run/tls/ca.pem",
+ GRPC: TLSCertPath{
+ Certificate: "/var/run/tls/grpc/tls.crt",
+ Key: "/var/run/tls/grpc/tls.key",
+ },
+ HTTP: TLSCertPath{
+ Certificate: "/var/run/tls/http/tls.crt",
+ Key: "/var/run/tls/http/tls.key",
+ },
+ },
+ ServerNames: TLSServerNames{
+ GRPC: GRPCServerNames{
+ IndexGateway: "index-gateway-grpc.svc",
+ Ingester: "ingester-grpc.svc",
+ QueryFrontend: "query-frontend-grpc.svc",
+ Ruler: "ruler-grpc.svc",
+ },
+ HTTP: HTTPServerNames{
+ Compactor: "compactor-http.svc",
+ Querier: "querier-http.svc",
+ },
+ },
+ },
+ Namespace: "test-ns",
+ Name: "test",
+ Compactor: Address{
+ FQDN: "loki-compactor-http-lokistack-dev.default.svc.cluster.local",
+ Port: 3100,
+ Protocol: "http",
+ },
+ FrontendWorker: Address{
+ FQDN: "loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local",
+ Port: 9095,
+ },
+ GossipRing: Address{
+ FQDN: "loki-gossip-ring-lokistack-dev.default.svc.cluster.local",
+ Port: 7946,
+ },
+ Querier: Address{
+ Protocol: "http",
+ FQDN: "loki-querier-http-lokistack-dev.default.svc.cluster.local",
+ Port: 3100,
+ },
+ IndexGateway: Address{
+ FQDN: "loki-index-gateway-grpc-lokistack-dev.default.svc.cluster.local",
+ Port: 9095,
+ },
+ StorageDirectory: "/tmp/loki",
+ MaxConcurrent: MaxConcurrent{
+ AvailableQuerierCPUCores: 2,
+ },
+ WriteAheadLog: WriteAheadLog{
+ Directory: "/tmp/wal",
+ IngesterMemoryRequest: 5000,
+ },
+ ObjectStorage: storage.Options{
+ SharedStore: lokiv1.ObjectStorageSecretS3,
+ S3: &storage.S3StorageConfig{
+ Endpoint: "http://test.default.svc.cluster.local.:9000",
+ Region: "us-east",
+ Buckets: "loki",
+ AccessKeyID: "test",
+ AccessKeySecret: "test123",
+ },
+ Schemas: []lokiv1.ObjectStorageSchema{
+ {
+ Version: lokiv1.ObjectStorageSchemaV11,
+ EffectiveDate: "2020-10-01",
+ },
+ },
+ },
+ EnableRemoteReporting: true,
+ }
+ cfg, rCfg, err := Build(opts)
+ require.NoError(t, err)
+ t.Log(string(cfg))
+ require.YAMLEq(t, expCfg, string(cfg))
+ require.YAMLEq(t, expRCfg, string(rCfg))
+}
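The new test compares the rendered config with require.YAMLEq rather than string equality. A small sketch of why that matters, relying only on standard testify behavior: both documents are unmarshalled and the resulting values compared, so key order and indentation differences in the large expected config cannot cause spurious failures.

package config

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestYAMLEqIgnoresKeyOrder(t *testing.T) {
	// Semantically identical documents with different key order still pass.
	require.YAMLEq(t, "a: 1\nb: 2\n", "b: 2\na: 1\n")
}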
diff --git a/operator/internal/manifests/internal/config/loki-config.yaml b/operator/internal/manifests/internal/config/loki-config.yaml
index 85f904883d368..8a9f7bdfeccda 100644
--- a/operator/internal/manifests/internal/config/loki-config.yaml
+++ b/operator/internal/manifests/internal/config/loki-config.yaml
@@ -1,3 +1,4 @@
+{{- /*gotype: github.com/grafana/loki/operator/internal/manifests/internal/config.Options*/ -}}
---
auth_enabled: true
chunk_store_config:
@@ -55,6 +56,15 @@ compactor:
{{- end }}{{- end }}
frontend:
tail_proxy_url: {{ .Querier.Protocol }}://{{ .Querier.FQDN }}:{{ .Querier.Port }}
+{{- if .Gates.HTTPEncryption }}
+ tail_tls_config:
+ tls_cert_path: {{ .TLS.Paths.HTTP.Certificate }}
+ tls_key_path: {{ .TLS.Paths.HTTP.Key }}
+ tls_ca_path: {{ .TLS.Paths.CA }}
+ tls_server_name: {{ .TLS.ServerNames.HTTP.Querier }}
+ tls_cipher_suites: {{ .TLS.CipherSuitesString }}
+ tls_min_version: {{ .TLS.MinTLSVersion }}
+{{- end }}
compress_responses: true
max_outstanding_per_tenant: 256
log_queries_longer_than: 5s
@@ -62,6 +72,15 @@ frontend_worker:
frontend_address: {{ .FrontendWorker.FQDN }}:{{ .FrontendWorker.Port }}
grpc_client_config:
max_send_msg_size: 104857600
+{{- if .Gates.GRPCEncryption }}
+ tls_enabled: true
+ tls_cert_path: {{ .TLS.Paths.GRPC.Certificate }}
+ tls_key_path: {{ .TLS.Paths.GRPC.Key }}
+ tls_ca_path: {{ .TLS.Paths.CA }}
+ tls_server_name: {{ .TLS.ServerNames.GRPC.QueryFrontend }}
+ tls_cipher_suites: {{ .TLS.CipherSuitesString }}
+ tls_min_version: {{ .TLS.MinTLSVersion }}
+{{- end }}
match_max_concurrent: true
ingester:
chunk_block_size: 262144
@@ -87,6 +106,15 @@ ingester:
ingester_client:
grpc_client_config:
max_recv_msg_size: 67108864
+{{- if .Gates.GRPCEncryption }}
+ tls_enabled: true
+ tls_cert_path: {{ .TLS.Paths.GRPC.Certificate }}
+ tls_key_path: {{ .TLS.Paths.GRPC.Key }}
+ tls_ca_path: {{ .TLS.Paths.CA }}
+ tls_server_name: {{ .TLS.ServerNames.GRPC.Ingester }}
+ tls_cipher_suites: {{ .TLS.CipherSuitesString }}
+ tls_min_version: {{ .TLS.MinTLSVersion }}
+{{- end }}
remote_timeout: 1s
# NOTE: Keep the order of keys as in Loki docs
# to enable easy diffs when vendoring newer
@@ -152,6 +180,16 @@ querier:
query_ingesters_within: 3h
tail_max_duration: 1h
max_concurrent: {{ .MaxConcurrent.AvailableQuerierCPUCores }}
+{{- if .Gates.HTTPEncryption }}
+compactor_client:
+ tls_enabled: true
+ tls_cert_path: {{ .TLS.Paths.HTTP.Certificate }}
+ tls_key_path: {{ .TLS.Paths.HTTP.Key }}
+ tls_ca_path: {{ .TLS.Paths.CA }}
+ tls_server_name: {{ .TLS.ServerNames.HTTP.Compactor }}
+ tls_cipher_suites: {{ .TLS.CipherSuitesString }}
+ tls_min_version: {{ .TLS.MinTLSVersion }}
+{{- end }}
query_range:
align_queries_with_step: true
cache_results: true
@@ -334,7 +372,27 @@ ruler:
ring:
kvstore:
store: memberlist
+{{- if .Gates.GRPCEncryption }}
+ ruler_client:
+ tls_enabled: true
+ tls_cert_path: {{ .TLS.Paths.GRPC.Certificate }}
+ tls_key_path: {{ .TLS.Paths.GRPC.Key }}
+ tls_ca_path: {{ .TLS.Paths.CA }}
+ tls_server_name: {{ .TLS.ServerNames.GRPC.Ruler }}
+ tls_cipher_suites: {{ .TLS.CipherSuitesString }}
+ tls_min_version: {{ .TLS.MinTLSVersion }}
+{{- end }}
{{ end }}
+{{- if .Gates.HTTPEncryption }}
+internal_server:
+ enable: true
+ http_listen_address: ""
+ tls_min_version: {{ .TLS.MinTLSVersion }}
+ tls_cipher_suites: {{ .TLS.CipherSuitesString }}
+ http_tls_config:
+ cert_file: {{ .TLS.Paths.HTTP.Certificate }}
+ key_file: {{ .TLS.Paths.HTTP.Key }}
+{{- end }}
server:
graceful_shutdown_timeout: 5s
grpc_server_min_time_between_pings: '10s'
@@ -345,6 +403,24 @@ server:
http_listen_port: 3100
http_server_idle_timeout: 120s
http_server_write_timeout: 1m
+{{- if or .Gates.HTTPEncryption .Gates.GRPCEncryption }}
+ tls_min_version: {{ .TLS.MinTLSVersion }}
+ tls_cipher_suites: {{ .TLS.CipherSuitesString }}
+{{- if .Gates.HTTPEncryption }}
+ http_tls_config:
+ cert_file: {{ .TLS.Paths.HTTP.Certificate }}
+ key_file: {{ .TLS.Paths.HTTP.Key }}
+ client_auth_type: RequireAndVerifyClientCert
+ client_ca_file: {{ .TLS.Paths.CA }}
+{{- end }}
+{{- if .Gates.GRPCEncryption }}
+ grpc_tls_config:
+ cert_file: {{ .TLS.Paths.GRPC.Certificate }}
+ key_file: {{ .TLS.Paths.GRPC.Key }}
+ client_auth_type: RequireAndVerifyClientCert
+ client_ca_file: {{ .TLS.Paths.CA }}
+{{- end }}
+{{- end }}
log_level: info
storage_config:
boltdb_shipper:
@@ -355,6 +431,16 @@ storage_config:
shared_store: {{ .ObjectStorage.SharedStore }}
index_gateway_client:
server_address: dns:///{{ .IndexGateway.FQDN }}:{{ .IndexGateway.Port }}
+{{- if .Gates.GRPCEncryption }}
+ grpc_client_config:
+ tls_enabled: true
+ tls_cert_path: {{ .TLS.Paths.GRPC.Certificate }}
+ tls_key_path: {{ .TLS.Paths.GRPC.Key }}
+ tls_ca_path: {{ .TLS.Paths.CA }}
+ tls_server_name: {{ .TLS.ServerNames.GRPC.IndexGateway }}
+ tls_cipher_suites: {{ .TLS.CipherSuitesString }}
+ tls_min_version: {{ .TLS.MinTLSVersion }}
+{{- end }}
tracing:
enabled: false
analytics:
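The template additions above all follow the same text/template pattern: a feature-gate conditional wrapping an indented YAML block, with {{- trimming the preceding newline so nothing leaks into the config when the gate is off. A minimal, self-contained sketch of that rendering — the option structs here are simplified stand-ins for config.Options, not the operator's real types:

package main

import (
	"os"
	"strings"
	"text/template"
)

type tlsOptions struct {
	MinTLSVersion string
	Ciphers       []string
}

// Mirrors the CipherSuitesString method the template calls above.
func (o tlsOptions) CipherSuitesString() string { return strings.Join(o.Ciphers, ",") }

type options struct {
	Gates struct{ HTTPEncryption bool }
	TLS   tlsOptions
}

const snippet = `server:
{{- if .Gates.HTTPEncryption }}
  tls_min_version: {{ .TLS.MinTLSVersion }}
  tls_cipher_suites: {{ .TLS.CipherSuitesString }}
{{- end }}
`

func main() {
	opts := options{TLS: tlsOptions{MinTLSVersion: "VersionTLS12", Ciphers: []string{"cipher1", "cipher2"}}}
	opts.Gates.HTTPEncryption = true
	// Prints the server block with both TLS keys; with the gate false it
	// prints just "server:" because {{- swallows the conditional's newline.
	if err := template.Must(template.New("loki").Parse(snippet)).Execute(os.Stdout, opts); err != nil {
		panic(err)
	}
}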
diff --git a/operator/internal/manifests/internal/config/options.go b/operator/internal/manifests/internal/config/options.go
index a4a75f2854bd9..25bac96432127 100644
--- a/operator/internal/manifests/internal/config/options.go
+++ b/operator/internal/manifests/internal/config/options.go
@@ -5,6 +5,7 @@ import (
"math"
"strings"
+ configv1 "github.com/grafana/loki/operator/apis/config/v1"
lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/manifests/storage"
)
@@ -12,6 +13,8 @@ import (
// Options is used to render the loki-config.yaml file template
type Options struct {
Stack lokiv1.LokiStackSpec
+ Gates configv1.FeatureGates
+ TLS TLSOptions
Namespace string
Name string
@@ -166,3 +169,42 @@ type RetentionOptions struct {
Enabled bool
DeleteWorkerCount uint
}
+
+type TLSOptions struct {
+ Ciphers []string
+ MinTLSVersion string
+ Paths TLSFilePaths
+ ServerNames TLSServerNames
+}
+
+func (o TLSOptions) CipherSuitesString() string {
+ return strings.Join(o.Ciphers, ",")
+}
+
+type TLSFilePaths struct {
+ CA string
+ GRPC TLSCertPath
+ HTTP TLSCertPath
+}
+
+type TLSCertPath struct {
+ Certificate string
+ Key string
+}
+
+type TLSServerNames struct {
+ GRPC GRPCServerNames
+ HTTP HTTPServerNames
+}
+
+type GRPCServerNames struct {
+ IndexGateway string
+ Ingester string
+ QueryFrontend string
+ Ruler string
+}
+
+type HTTPServerNames struct {
+ Compactor string
+ Querier string
+}
diff --git a/operator/internal/manifests/querier.go b/operator/internal/manifests/querier.go
index c9929934c5a35..7e04986c435ae 100644
--- a/operator/internal/manifests/querier.go
+++ b/operator/internal/manifests/querier.go
@@ -7,8 +7,6 @@ import (
"github.com/grafana/loki/operator/internal/manifests/internal/config"
"github.com/grafana/loki/operator/internal/manifests/storage"
- "github.com/ViaQ/logerr/v2/kverrors"
- "github.com/imdario/mergo"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -120,13 +118,6 @@ func NewQuerierDeployment(opts Options) *appsv1.Deployment {
SecurityContext: podSecurityContext(opts.Gates.RuntimeSeccompProfile),
}
- if opts.Gates.HTTPEncryption || opts.Gates.GRPCEncryption {
- podSpec.Containers[0].Args = append(podSpec.Containers[0].Args,
- fmt.Sprintf("-server.tls-cipher-suites=%s", opts.TLSCipherSuites()),
- fmt.Sprintf("-server.tls-min-version=%s", opts.TLSProfile.MinTLSVersion),
- )
- }
-
if opts.Stack.Template != nil && opts.Stack.Template.Querier != nil {
podSpec.Tolerations = opts.Stack.Template.Querier.Tolerations
podSpec.NodeSelector = opts.Stack.Template.Querier.NodeSelector
@@ -223,51 +214,10 @@ func NewQuerierHTTPService(opts Options) *corev1.Service {
func configureQuerierHTTPServicePKI(deployment *appsv1.Deployment, opts Options) error {
serviceName := serviceNameQuerierHTTP(opts.Name)
- return configureHTTPServicePKI(&deployment.Spec.Template.Spec, serviceName, opts.TLSProfile.MinTLSVersion, opts.TLSCipherSuites())
+ return configureHTTPServicePKI(&deployment.Spec.Template.Spec, serviceName)
}
func configureQuerierGRPCServicePKI(deployment *appsv1.Deployment, opts Options) error {
- secretContainerSpec := corev1.Container{
- Args: []string{
- // Enable HTTP over TLS for compactor delete client
- "-boltdb.shipper.compactor.client.tls-enabled=true",
- fmt.Sprintf("-boltdb.shipper.compactor.client.tls-cipher-suites=%s", opts.TLSCipherSuites()),
- fmt.Sprintf("-boltdb.shipper.compactor.client.tls-min-version=%s", opts.TLSProfile.MinTLSVersion),
- fmt.Sprintf("-boltdb.shipper.compactor.client.tls-ca-path=%s", signingCAPath()),
- fmt.Sprintf("-boltdb.shipper.compactor.client.tls-cert-path=%s", lokiServerGRPCTLSCert()),
- fmt.Sprintf("-boltdb.shipper.compactor.client.tls-key-path=%s", lokiServerGRPCTLSKey()),
- fmt.Sprintf("-boltdb.shipper.compactor.client.tls-server-name=%s", fqdn(serviceNameCompactorHTTP(opts.Name), opts.Namespace)),
- // Enable GRPC over TLS for ingester client
- "-ingester.client.tls-enabled=true",
- fmt.Sprintf("-ingester.client.tls-cipher-suites=%s", opts.TLSCipherSuites()),
- fmt.Sprintf("-ingester.client.tls-min-version=%s", opts.TLSProfile.MinTLSVersion),
- fmt.Sprintf("-ingester.client.tls-ca-path=%s", signingCAPath()),
- fmt.Sprintf("-ingester.client.tls-cert-path=%s", lokiServerGRPCTLSCert()),
- fmt.Sprintf("-ingester.client.tls-key-path=%s", lokiServerGRPCTLSKey()),
- fmt.Sprintf("-ingester.client.tls-server-name=%s", fqdn(serviceNameIngesterGRPC(opts.Name), opts.Namespace)),
- // Enable GRPC over TLS for query frontend client
- "-querier.frontend-client.tls-enabled=true",
- fmt.Sprintf("-querier.frontend-client.tls-cipher-suites=%s", opts.TLSCipherSuites()),
- fmt.Sprintf("-querier.frontend-client.tls-min-version=%s", opts.TLSProfile.MinTLSVersion),
- fmt.Sprintf("-querier.frontend-client.tls-ca-path=%s", signingCAPath()),
- fmt.Sprintf("-querier.frontend-client.tls-cert-path=%s", lokiServerGRPCTLSCert()),
- fmt.Sprintf("-querier.frontend-client.tls-key-path=%s", lokiServerGRPCTLSKey()),
- fmt.Sprintf("-querier.frontend-client.tls-server-name=%s", fqdn(serviceNameQueryFrontendGRPC(opts.Name), opts.Namespace)),
-		// Enable GRPC over TLS for boltdb-shipper index-gateway client
- "-boltdb.shipper.index-gateway-client.grpc.tls-enabled=true",
- fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-cipher-suites=%s", opts.TLSCipherSuites()),
- fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-min-version=%s", opts.TLSProfile.MinTLSVersion),
- fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-ca-path=%s", signingCAPath()),
- fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-cert-path=%s", lokiServerGRPCTLSCert()),
- fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-key-path=%s", lokiServerGRPCTLSKey()),
- fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-server-name=%s", fqdn(serviceNameIndexGatewayGRPC(opts.Name), opts.Namespace)),
- },
- }
-
- if err := mergo.Merge(&deployment.Spec.Template.Spec.Containers[0], secretContainerSpec, mergo.WithAppendSlice); err != nil {
- return kverrors.Wrap(err, "failed to merge container")
- }
-
serviceName := serviceNameQuerierGRPC(opts.Name)
return configureGRPCServicePKI(&deployment.Spec.Template.Spec, serviceName)
}
diff --git a/operator/internal/manifests/query-frontend.go b/operator/internal/manifests/query-frontend.go
index cabf5a5d1dcb3..13c9271fa6ede 100644
--- a/operator/internal/manifests/query-frontend.go
+++ b/operator/internal/manifests/query-frontend.go
@@ -6,8 +6,6 @@ import (
"github.com/grafana/loki/operator/internal/manifests/internal/config"
- "github.com/ViaQ/logerr/v2/kverrors"
- "github.com/imdario/mergo"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -127,13 +125,6 @@ func NewQueryFrontendDeployment(opts Options) *appsv1.Deployment {
SecurityContext: podSecurityContext(opts.Gates.RuntimeSeccompProfile),
}
- if opts.Gates.HTTPEncryption || opts.Gates.GRPCEncryption {
- podSpec.Containers[0].Args = append(podSpec.Containers[0].Args,
- fmt.Sprintf("-server.tls-cipher-suites=%s", opts.TLSCipherSuites()),
- fmt.Sprintf("-server.tls-min-version=%s", opts.TLSProfile.MinTLSVersion),
- )
- }
-
if opts.Stack.Template != nil && opts.Stack.Template.QueryFrontend != nil {
podSpec.Tolerations = opts.Stack.Template.QueryFrontend.Tolerations
podSpec.NodeSelector = opts.Stack.Template.QueryFrontend.NodeSelector
@@ -229,33 +220,8 @@ func NewQueryFrontendHTTPService(opts Options) *corev1.Service {
}
func configureQueryFrontendHTTPServicePKI(deployment *appsv1.Deployment, opts Options) error {
- var qfIdx int
- for i, c := range deployment.Spec.Template.Spec.Containers {
- if c.Name == lokiFrontendContainerName {
- qfIdx = i
- break
- }
- }
-
- url := fmt.Sprintf("https://%s:%d", fqdn(serviceNameQuerierHTTP(opts.Name), opts.Namespace), httpPort)
-
- containerSpec := corev1.Container{
- Args: []string{
- fmt.Sprintf("-frontend.tail-proxy-url=%s", url),
- fmt.Sprintf("-frontend.tail-tls-config.tls-min-version=%s", opts.TLSProfile.MinTLSVersion),
- fmt.Sprintf("-frontend.tail-tls-config.tls-cipher-suites=%s", opts.TLSCipherSuites()),
- fmt.Sprintf("-frontend.tail-tls-config.tls-ca-path=%s", signingCAPath()),
- fmt.Sprintf("-frontend.tail-tls-config.tls-cert-path=%s", lokiServerHTTPTLSCert()),
- fmt.Sprintf("-frontend.tail-tls-config.tls-key-path=%s", lokiServerHTTPTLSKey()),
- },
- }
-
- if err := mergo.Merge(&deployment.Spec.Template.Spec.Containers[qfIdx], containerSpec, mergo.WithAppendSlice); err != nil {
- return kverrors.Wrap(err, "failed to add tls config args")
- }
-
serviceName := serviceNameQueryFrontendHTTP(opts.Name)
- return configureHTTPServicePKI(&deployment.Spec.Template.Spec, serviceName, opts.TLSProfile.MinTLSVersion, opts.TLSCipherSuites())
+ return configureHTTPServicePKI(&deployment.Spec.Template.Spec, serviceName)
}
func configureQueryFrontendGRPCServicePKI(deployment *appsv1.Deployment, opts Options) error {
diff --git a/operator/internal/manifests/ruler.go b/operator/internal/manifests/ruler.go
index 09aaa126d9f48..2228f02356001 100644
--- a/operator/internal/manifests/ruler.go
+++ b/operator/internal/manifests/ruler.go
@@ -8,8 +8,6 @@ import (
"github.com/grafana/loki/operator/internal/manifests/internal/config"
"github.com/grafana/loki/operator/internal/manifests/openshift"
- "github.com/ViaQ/logerr/v2/kverrors"
- "github.com/imdario/mergo"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
@@ -44,7 +42,7 @@ func BuildRuler(opts Options) ([]client.Object, error) {
objs := []client.Object{}
if opts.Stack.Tenants != nil {
- if err := configureRulerStatefulSetForMode(statefulSet, opts.Stack.Tenants.Mode, opts.Name); err != nil {
+ if err := configureRulerStatefulSetForMode(statefulSet, opts.Stack.Tenants.Mode); err != nil {
return nil, err
}
@@ -154,13 +152,6 @@ func NewRulerStatefulSet(opts Options) *appsv1.StatefulSet {
SecurityContext: podSecurityContext(opts.Gates.RuntimeSeccompProfile),
}
- if opts.Gates.HTTPEncryption || opts.Gates.GRPCEncryption {
- podSpec.Containers[0].Args = append(podSpec.Containers[0].Args,
- fmt.Sprintf("-server.tls-cipher-suites=%s", opts.TLSCipherSuites()),
- fmt.Sprintf("-server.tls-min-version=%s", opts.TLSProfile.MinTLSVersion),
- )
- }
-
if opts.Stack.Template != nil && opts.Stack.Template.Ruler != nil {
podSpec.Tolerations = opts.Stack.Template.Ruler.Tolerations
podSpec.NodeSelector = opts.Stack.Template.Ruler.NodeSelector
@@ -299,59 +290,15 @@ func NewRulerHTTPService(opts Options) *corev1.Service {
func configureRulerHTTPServicePKI(statefulSet *appsv1.StatefulSet, opts Options) error {
serviceName := serviceNameRulerHTTP(opts.Name)
- return configureHTTPServicePKI(&statefulSet.Spec.Template.Spec, serviceName, opts.TLSProfile.MinTLSVersion, opts.TLSCipherSuites())
+ return configureHTTPServicePKI(&statefulSet.Spec.Template.Spec, serviceName)
}
func configureRulerGRPCServicePKI(sts *appsv1.StatefulSet, opts Options) error {
- secretContainerSpec := corev1.Container{
- Args: []string{
- // Enable HTTP over TLS for compactor delete client
- "-boltdb.shipper.compactor.client.tls-enabled=true",
- fmt.Sprintf("-boltdb.shipper.compactor.client.tls-cipher-suites=%s", opts.TLSCipherSuites()),
- fmt.Sprintf("-boltdb.shipper.compactor.client.tls-min-version=%s", opts.TLSProfile.MinTLSVersion),
- fmt.Sprintf("-boltdb.shipper.compactor.client.tls-ca-path=%s", signingCAPath()),
- fmt.Sprintf("-boltdb.shipper.compactor.client.tls-cert-path=%s", lokiServerGRPCTLSCert()),
- fmt.Sprintf("-boltdb.shipper.compactor.client.tls-key-path=%s", lokiServerGRPCTLSKey()),
- fmt.Sprintf("-boltdb.shipper.compactor.client.tls-server-name=%s", fqdn(serviceNameCompactorHTTP(opts.Name), opts.Namespace)),
-		// Enable GRPC over TLS for boltdb-shipper index-gateway client
- "-boltdb.shipper.index-gateway-client.grpc.tls-enabled=true",
- fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-cipher-suites=%s", opts.TLSCipherSuites()),
- fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-min-version=%s", opts.TLSProfile.MinTLSVersion),
- fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-ca-path=%s", signingCAPath()),
- fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-cert-path=%s", lokiServerGRPCTLSCert()),
- fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-key-path=%s", lokiServerGRPCTLSKey()),
- fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-server-name=%s", fqdn(serviceNameIndexGatewayGRPC(opts.Name), opts.Namespace)),
- // Enable GRPC over TLS for ingester client
- "-ingester.client.tls-enabled=true",
- fmt.Sprintf("-ingester.client.tls-cipher-suites=%s", opts.TLSCipherSuites()),
- fmt.Sprintf("-ingester.client.tls-min-version=%s", opts.TLSProfile.MinTLSVersion),
- fmt.Sprintf("-ingester.client.tls-ca-path=%s", signingCAPath()),
- fmt.Sprintf("-ingester.client.tls-cert-path=%s", lokiServerGRPCTLSCert()),
- fmt.Sprintf("-ingester.client.tls-key-path=%s", lokiServerGRPCTLSKey()),
- fmt.Sprintf("-ingester.client.tls-server-name=%s", fqdn(serviceNameIngesterGRPC(opts.Name), opts.Namespace)),
- // Enable GRPC over TLS for ruler client
- "-ruler.client.tls-enabled=true",
- fmt.Sprintf("-ruler.client.tls-cipher-suites=%s", opts.TLSCipherSuites()),
- fmt.Sprintf("-ruler.client.tls-min-version=%s", opts.TLSProfile.MinTLSVersion),
- fmt.Sprintf("-ruler.client.tls-ca-path=%s", signingCAPath()),
- fmt.Sprintf("-ruler.client.tls-cert-path=%s", lokiServerGRPCTLSCert()),
- fmt.Sprintf("-ruler.client.tls-key-path=%s", lokiServerGRPCTLSKey()),
- fmt.Sprintf("-ruler.client.tls-server-name=%s", fqdn(serviceNameRulerGRPC(opts.Name), opts.Namespace)),
- },
- }
-
- if err := mergo.Merge(&sts.Spec.Template.Spec.Containers[0], secretContainerSpec, mergo.WithAppendSlice); err != nil {
- return kverrors.Wrap(err, "failed to merge container")
- }
-
serviceName := serviceNameRulerGRPC(opts.Name)
return configureGRPCServicePKI(&sts.Spec.Template.Spec, serviceName)
}
-func configureRulerStatefulSetForMode(
- ss *appsv1.StatefulSet, mode lokiv1.ModeType,
- stackName string,
-) error {
+func configureRulerStatefulSetForMode(ss *appsv1.StatefulSet, mode lokiv1.ModeType) error {
switch mode {
case lokiv1.Static, lokiv1.Dynamic:
return nil // nothing to configure
diff --git a/operator/internal/manifests/service.go b/operator/internal/manifests/service.go
index 8ed6623452718..4aa4fcc16085d 100644
--- a/operator/internal/manifests/service.go
+++ b/operator/internal/manifests/service.go
@@ -1,8 +1,6 @@
package manifests
import (
- "fmt"
-
"github.com/ViaQ/logerr/v2/kverrors"
"github.com/imdario/mergo"
corev1 "k8s.io/api/core/v1"
@@ -67,12 +65,6 @@ func configureGRPCServicePKI(podSpec *corev1.PodSpec, serviceName string) error
MountPath: lokiServerGRPCTLSDir(),
},
},
- Args: []string{
- fmt.Sprintf("-server.grpc-tls-ca-path=%s", signingCAPath()),
- fmt.Sprintf("-server.grpc-tls-cert-path=%s", lokiServerGRPCTLSCert()),
- fmt.Sprintf("-server.grpc-tls-key-path=%s", lokiServerGRPCTLSKey()),
- "-server.grpc-tls-client-auth=RequireAndVerifyClientCert",
- },
}
if err := mergo.Merge(podSpec, secretVolumeSpec, mergo.WithAppendSlice); err != nil {
@@ -86,7 +78,7 @@ func configureGRPCServicePKI(podSpec *corev1.PodSpec, serviceName string) error
return nil
}
-func configureHTTPServicePKI(podSpec *corev1.PodSpec, serviceName, minTLSVersion, tlsCipherSuites string) error {
+func configureHTTPServicePKI(podSpec *corev1.PodSpec, serviceName string) error {
secretVolumeSpec := corev1.PodSpec{
Volumes: []corev1.Volume{
{
@@ -108,20 +100,6 @@ func configureHTTPServicePKI(podSpec *corev1.PodSpec, serviceName, minTLSVersion
MountPath: lokiServerHTTPTLSDir(),
},
},
- Args: []string{
- // Expose ready handler through internal server without requiring mTLS
- "-internal-server.enable=true",
- "-internal-server.http-listen-address=",
- fmt.Sprintf("-internal-server.http-tls-min-version=%s", minTLSVersion),
- fmt.Sprintf("-internal-server.http-tls-cipher-suites=%s", tlsCipherSuites),
- fmt.Sprintf("-internal-server.http-tls-cert-path=%s", lokiServerHTTPTLSCert()),
- fmt.Sprintf("-internal-server.http-tls-key-path=%s", lokiServerHTTPTLSKey()),
- // Require mTLS for any other handler
- fmt.Sprintf("-server.http-tls-ca-path=%s", signingCAPath()),
- fmt.Sprintf("-server.http-tls-cert-path=%s", lokiServerHTTPTLSCert()),
- fmt.Sprintf("-server.http-tls-key-path=%s", lokiServerHTTPTLSKey()),
- "-server.http-tls-client-auth=RequireAndVerifyClientCert",
- },
Ports: []corev1.ContainerPort{
{
Name: lokiInternalHTTPPortName,
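With the CLI args gone, configureHTTPServicePKI and configureGRPCServicePKI in service.go now only graft volumes, mounts, and ports onto the pod spec via mergo.Merge with WithAppendSlice. A minimal sketch of that merge semantic, using simplified structs rather than the real corev1.PodSpec:

package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

type podSpec struct {
	Volumes []string
	Args    []string
}

func main() {
	dst := podSpec{Volumes: []string{"config"}, Args: []string{"-target=querier"}}
	src := podSpec{Volumes: []string{"test-querier-http-tls"}}

	// WithAppendSlice appends src's slices onto dst's instead of replacing
	// them, so existing volumes and args survive the PKI configuration.
	if err := mergo.Merge(&dst, src, mergo.WithAppendSlice); err != nil {
		panic(err)
	}
	fmt.Println(dst.Volumes) // [config test-querier-http-tls]
	fmt.Println(dst.Args)    // [-target=querier]
}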
diff --git a/operator/internal/manifests/service_test.go b/operator/internal/manifests/service_test.go
index c0600301407e6..f66a42906d2fe 100644
--- a/operator/internal/manifests/service_test.go
+++ b/operator/internal/manifests/service_test.go
@@ -5,13 +5,14 @@ import (
"strings"
"testing"
- configv1 "github.com/grafana/loki/operator/apis/config/v1"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
+
+ configv1 "github.com/grafana/loki/operator/apis/config/v1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
)
// Test that the service ports have matching deployment/statefulset/daemonset ports on the podspec.
@@ -313,7 +314,6 @@ func TestServices_WithEncryption(t *testing.T) {
tt := []struct {
desc string
buildFunc func(Options) ([]client.Object, error)
- wantArgs []string
wantPorts []corev1.ContainerPort
wantVolumeMounts []corev1.VolumeMount
wantVolumes []corev1.Volume
@@ -321,24 +321,6 @@ func TestServices_WithEncryption(t *testing.T) {
{
desc: "compactor",
buildFunc: BuildCompactor,
- wantArgs: []string{
- "-internal-server.enable=true",
- "-internal-server.http-listen-address=",
- fmt.Sprintf("-internal-server.http-tls-cert-path=%s", lokiServerHTTPTLSCert()),
- fmt.Sprintf("-internal-server.http-tls-key-path=%s", lokiServerHTTPTLSKey()),
- "-internal-server.http-tls-cipher-suites=cipher1,cipher2",
- "-internal-server.http-tls-min-version=VersionTLS12",
- "-server.tls-cipher-suites=cipher1,cipher2",
- "-server.tls-min-version=VersionTLS12",
- fmt.Sprintf("-server.http-tls-ca-path=%s", signingCAPath()),
- fmt.Sprintf("-server.http-tls-cert-path=%s", lokiServerHTTPTLSCert()),
- fmt.Sprintf("-server.http-tls-key-path=%s", lokiServerHTTPTLSKey()),
- "-server.http-tls-client-auth=RequireAndVerifyClientCert",
- fmt.Sprintf("-server.grpc-tls-ca-path=%s", signingCAPath()),
- fmt.Sprintf("-server.grpc-tls-cert-path=%s", lokiServerGRPCTLSCert()),
- fmt.Sprintf("-server.grpc-tls-key-path=%s", lokiServerGRPCTLSKey()),
- "-server.grpc-tls-client-auth=RequireAndVerifyClientCert",
- },
wantPorts: []corev1.ContainerPort{
{
Name: lokiInternalHTTPPortName,
@@ -395,31 +377,6 @@ func TestServices_WithEncryption(t *testing.T) {
{
desc: "distributor",
buildFunc: BuildDistributor,
- wantArgs: []string{
- "-ingester.client.tls-enabled=true",
- fmt.Sprintf("-ingester.client.tls-ca-path=%s", signingCAPath()),
- fmt.Sprintf("-ingester.client.tls-cert-path=%s", lokiServerGRPCTLSCert()),
- fmt.Sprintf("-ingester.client.tls-key-path=%s", lokiServerGRPCTLSKey()),
- fmt.Sprintf("-ingester.client.tls-server-name=%s", fqdn(serviceNameIngesterGRPC(stackName), stackNs)),
- "-ingester.client.tls-min-version=VersionTLS12",
- "-ingester.client.tls-cipher-suites=cipher1,cipher2",
- "-internal-server.enable=true",
- "-internal-server.http-listen-address=",
- fmt.Sprintf("-internal-server.http-tls-cert-path=%s", lokiServerHTTPTLSCert()),
- fmt.Sprintf("-internal-server.http-tls-key-path=%s", lokiServerHTTPTLSKey()),
- "-internal-server.http-tls-cipher-suites=cipher1,cipher2",
- "-internal-server.http-tls-min-version=VersionTLS12",
- "-server.tls-cipher-suites=cipher1,cipher2",
- "-server.tls-min-version=VersionTLS12",
- fmt.Sprintf("-server.http-tls-ca-path=%s", signingCAPath()),
- fmt.Sprintf("-server.http-tls-cert-path=%s", lokiServerHTTPTLSCert()),
- fmt.Sprintf("-server.http-tls-key-path=%s", lokiServerHTTPTLSKey()),
- "-server.http-tls-client-auth=RequireAndVerifyClientCert",
- fmt.Sprintf("-server.grpc-tls-ca-path=%s", signingCAPath()),
- fmt.Sprintf("-server.grpc-tls-cert-path=%s", lokiServerGRPCTLSCert()),
- fmt.Sprintf("-server.grpc-tls-key-path=%s", lokiServerGRPCTLSKey()),
- "-server.grpc-tls-client-auth=RequireAndVerifyClientCert",
- },
wantPorts: []corev1.ContainerPort{
{
Name: lokiInternalHTTPPortName,
@@ -476,24 +433,6 @@ func TestServices_WithEncryption(t *testing.T) {
{
desc: "index-gateway",
buildFunc: BuildIndexGateway,
- wantArgs: []string{
- "-internal-server.enable=true",
- "-internal-server.http-listen-address=",
- fmt.Sprintf("-internal-server.http-tls-cert-path=%s", lokiServerHTTPTLSCert()),
- fmt.Sprintf("-internal-server.http-tls-key-path=%s", lokiServerHTTPTLSKey()),
- "-internal-server.http-tls-cipher-suites=cipher1,cipher2",
- "-internal-server.http-tls-min-version=VersionTLS12",
- "-server.tls-cipher-suites=cipher1,cipher2",
- "-server.tls-min-version=VersionTLS12",
- fmt.Sprintf("-server.http-tls-ca-path=%s", signingCAPath()),
- fmt.Sprintf("-server.http-tls-cert-path=%s", lokiServerHTTPTLSCert()),
- fmt.Sprintf("-server.http-tls-key-path=%s", lokiServerHTTPTLSKey()),
- "-server.http-tls-client-auth=RequireAndVerifyClientCert",
- fmt.Sprintf("-server.grpc-tls-ca-path=%s", signingCAPath()),
- fmt.Sprintf("-server.grpc-tls-cert-path=%s", lokiServerGRPCTLSCert()),
- fmt.Sprintf("-server.grpc-tls-key-path=%s", lokiServerGRPCTLSKey()),
- "-server.grpc-tls-client-auth=RequireAndVerifyClientCert",
- },
wantPorts: []corev1.ContainerPort{
{
Name: lokiInternalHTTPPortName,
@@ -550,38 +489,6 @@ func TestServices_WithEncryption(t *testing.T) {
{
desc: "ingester",
buildFunc: BuildIngester,
- wantArgs: []string{
- "-ingester.client.tls-enabled=true",
- fmt.Sprintf("-ingester.client.tls-ca-path=%s", signingCAPath()),
- fmt.Sprintf("-ingester.client.tls-cert-path=%s", lokiServerGRPCTLSCert()),
- fmt.Sprintf("-ingester.client.tls-key-path=%s", lokiServerGRPCTLSKey()),
- fmt.Sprintf("-ingester.client.tls-server-name=%s", fqdn(serviceNameIngesterGRPC(stackName), stackNs)),
- "-ingester.client.tls-min-version=VersionTLS12",
- "-ingester.client.tls-cipher-suites=cipher1,cipher2",
- "-boltdb.shipper.index-gateway-client.grpc.tls-enabled=true",
- fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-ca-path=%s", signingCAPath()),
- fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-cert-path=%s", lokiServerGRPCTLSCert()),
- fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-key-path=%s", lokiServerGRPCTLSKey()),
- fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-server-name=%s", fqdn(serviceNameIndexGatewayGRPC(stackName), stackNs)),
- "-boltdb.shipper.index-gateway-client.grpc.tls-min-version=VersionTLS12",
- "-boltdb.shipper.index-gateway-client.grpc.tls-cipher-suites=cipher1,cipher2",
- "-internal-server.enable=true",
- "-internal-server.http-listen-address=",
- fmt.Sprintf("-internal-server.http-tls-cert-path=%s", lokiServerHTTPTLSCert()),
- fmt.Sprintf("-internal-server.http-tls-key-path=%s", lokiServerHTTPTLSKey()),
- "-internal-server.http-tls-cipher-suites=cipher1,cipher2",
- "-internal-server.http-tls-min-version=VersionTLS12",
- "-server.tls-cipher-suites=cipher1,cipher2",
- "-server.tls-min-version=VersionTLS12",
- fmt.Sprintf("-server.http-tls-ca-path=%s", signingCAPath()),
- fmt.Sprintf("-server.http-tls-cert-path=%s", lokiServerHTTPTLSCert()),
- fmt.Sprintf("-server.http-tls-key-path=%s", lokiServerHTTPTLSKey()),
- "-server.http-tls-client-auth=RequireAndVerifyClientCert",
- fmt.Sprintf("-server.grpc-tls-ca-path=%s", signingCAPath()),
- fmt.Sprintf("-server.grpc-tls-cert-path=%s", lokiServerGRPCTLSCert()),
- fmt.Sprintf("-server.grpc-tls-key-path=%s", lokiServerGRPCTLSKey()),
- "-server.grpc-tls-client-auth=RequireAndVerifyClientCert",
- },
wantPorts: []corev1.ContainerPort{
{
Name: lokiInternalHTTPPortName,
@@ -638,52 +545,6 @@ func TestServices_WithEncryption(t *testing.T) {
{
desc: "querier",
buildFunc: BuildQuerier,
- wantArgs: []string{
- "-ingester.client.tls-enabled=true",
- fmt.Sprintf("-ingester.client.tls-ca-path=%s", signingCAPath()),
- fmt.Sprintf("-ingester.client.tls-cert-path=%s", lokiServerGRPCTLSCert()),
- fmt.Sprintf("-ingester.client.tls-key-path=%s", lokiServerGRPCTLSKey()),
- fmt.Sprintf("-ingester.client.tls-server-name=%s", fqdn(serviceNameIngesterGRPC(stackName), stackNs)),
- "-ingester.client.tls-min-version=VersionTLS12",
- "-ingester.client.tls-cipher-suites=cipher1,cipher2",
- "-querier.frontend-client.tls-enabled=true",
- fmt.Sprintf("-querier.frontend-client.tls-ca-path=%s", signingCAPath()),
- fmt.Sprintf("-querier.frontend-client.tls-cert-path=%s", lokiServerGRPCTLSCert()),
- fmt.Sprintf("-querier.frontend-client.tls-key-path=%s", lokiServerGRPCTLSKey()),
- fmt.Sprintf("-querier.frontend-client.tls-server-name=%s", fqdn(serviceNameQueryFrontendGRPC(stackName), stackNs)),
- "-querier.frontend-client.tls-min-version=VersionTLS12",
- "-querier.frontend-client.tls-cipher-suites=cipher1,cipher2",
- "-boltdb.shipper.compactor.client.tls-enabled=true",
- fmt.Sprintf("-boltdb.shipper.compactor.client.tls-ca-path=%s", signingCAPath()),
- fmt.Sprintf("-boltdb.shipper.compactor.client.tls-cert-path=%s", lokiServerGRPCTLSCert()),
- fmt.Sprintf("-boltdb.shipper.compactor.client.tls-key-path=%s", lokiServerGRPCTLSKey()),
- fmt.Sprintf("-boltdb.shipper.compactor.client.tls-server-name=%s", fqdn(serviceNameCompactorHTTP(stackName), stackNs)),
- "-boltdb.shipper.compactor.client.tls-min-version=VersionTLS12",
- "-boltdb.shipper.compactor.client.tls-cipher-suites=cipher1,cipher2",
- "-boltdb.shipper.index-gateway-client.grpc.tls-enabled=true",
- fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-ca-path=%s", signingCAPath()),
- fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-cert-path=%s", lokiServerGRPCTLSCert()),
- fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-key-path=%s", lokiServerGRPCTLSKey()),
- fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-server-name=%s", fqdn(serviceNameIndexGatewayGRPC(stackName), stackNs)),
- "-boltdb.shipper.index-gateway-client.grpc.tls-min-version=VersionTLS12",
- "-boltdb.shipper.index-gateway-client.grpc.tls-cipher-suites=cipher1,cipher2",
- "-internal-server.enable=true",
- "-internal-server.http-listen-address=",
- fmt.Sprintf("-internal-server.http-tls-cert-path=%s", lokiServerHTTPTLSCert()),
- fmt.Sprintf("-internal-server.http-tls-key-path=%s", lokiServerHTTPTLSKey()),
- "-internal-server.http-tls-cipher-suites=cipher1,cipher2",
- "-internal-server.http-tls-min-version=VersionTLS12",
- "-server.tls-cipher-suites=cipher1,cipher2",
- "-server.tls-min-version=VersionTLS12",
- fmt.Sprintf("-server.http-tls-ca-path=%s", signingCAPath()),
- fmt.Sprintf("-server.http-tls-cert-path=%s", lokiServerHTTPTLSCert()),
- fmt.Sprintf("-server.http-tls-key-path=%s", lokiServerHTTPTLSKey()),
- "-server.http-tls-client-auth=RequireAndVerifyClientCert",
- fmt.Sprintf("-server.grpc-tls-ca-path=%s", signingCAPath()),
- fmt.Sprintf("-server.grpc-tls-cert-path=%s", lokiServerGRPCTLSCert()),
- fmt.Sprintf("-server.grpc-tls-key-path=%s", lokiServerGRPCTLSKey()),
- "-server.grpc-tls-client-auth=RequireAndVerifyClientCert",
- },
wantPorts: []corev1.ContainerPort{
{
Name: lokiInternalHTTPPortName,
@@ -740,30 +601,6 @@ func TestServices_WithEncryption(t *testing.T) {
{
desc: "query-frontend",
buildFunc: BuildQueryFrontend,
- wantArgs: []string{
- "-frontend.tail-tls-config.tls-min-version=VersionTLS12",
- "-frontend.tail-tls-config.tls-cipher-suites=cipher1,cipher2",
- fmt.Sprintf("-frontend.tail-tls-config.tls-cert-path=%s", lokiServerHTTPTLSCert()),
- fmt.Sprintf("-frontend.tail-tls-config.tls-key-path=%s", lokiServerHTTPTLSKey()),
- "-frontend.tail-proxy-url=https://test-querier-http.ns.svc.cluster.local:3100",
- fmt.Sprintf("-frontend.tail-tls-config.tls-ca-path=%s", signingCAPath()),
- "-internal-server.enable=true",
- "-internal-server.http-listen-address=",
- fmt.Sprintf("-internal-server.http-tls-cert-path=%s", lokiServerHTTPTLSCert()),
- fmt.Sprintf("-internal-server.http-tls-key-path=%s", lokiServerHTTPTLSKey()),
- "-internal-server.http-tls-cipher-suites=cipher1,cipher2",
- "-internal-server.http-tls-min-version=VersionTLS12",
- "-server.tls-cipher-suites=cipher1,cipher2",
- "-server.tls-min-version=VersionTLS12",
- fmt.Sprintf("-server.http-tls-ca-path=%s", signingCAPath()),
- fmt.Sprintf("-server.http-tls-cert-path=%s", lokiServerHTTPTLSCert()),
- fmt.Sprintf("-server.http-tls-key-path=%s", lokiServerHTTPTLSKey()),
- "-server.http-tls-client-auth=RequireAndVerifyClientCert",
- fmt.Sprintf("-server.grpc-tls-ca-path=%s", signingCAPath()),
- fmt.Sprintf("-server.grpc-tls-cert-path=%s", lokiServerGRPCTLSCert()),
- fmt.Sprintf("-server.grpc-tls-key-path=%s", lokiServerGRPCTLSKey()),
- "-server.grpc-tls-client-auth=RequireAndVerifyClientCert",
- },
wantPorts: []corev1.ContainerPort{
{
Name: lokiInternalHTTPPortName,
@@ -820,52 +657,6 @@ func TestServices_WithEncryption(t *testing.T) {
{
desc: "ruler",
buildFunc: BuildRuler,
- wantArgs: []string{
- "-boltdb.shipper.compactor.client.tls-enabled=true",
- fmt.Sprintf("-boltdb.shipper.compactor.client.tls-ca-path=%s", signingCAPath()),
- fmt.Sprintf("-boltdb.shipper.compactor.client.tls-cert-path=%s", lokiServerGRPCTLSCert()),
- fmt.Sprintf("-boltdb.shipper.compactor.client.tls-key-path=%s", lokiServerGRPCTLSKey()),
- fmt.Sprintf("-boltdb.shipper.compactor.client.tls-server-name=%s", fqdn(serviceNameCompactorHTTP(stackName), stackNs)),
- "-boltdb.shipper.compactor.client.tls-min-version=VersionTLS12",
- "-boltdb.shipper.compactor.client.tls-cipher-suites=cipher1,cipher2",
- "-boltdb.shipper.index-gateway-client.grpc.tls-enabled=true",
- fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-ca-path=%s", signingCAPath()),
- fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-cert-path=%s", lokiServerGRPCTLSCert()),
- fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-key-path=%s", lokiServerGRPCTLSKey()),
- fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-server-name=%s", fqdn(serviceNameIndexGatewayGRPC(stackName), stackNs)),
- "-boltdb.shipper.index-gateway-client.grpc.tls-min-version=VersionTLS12",
- "-boltdb.shipper.index-gateway-client.grpc.tls-cipher-suites=cipher1,cipher2",
- "-ingester.client.tls-enabled=true",
- fmt.Sprintf("-ingester.client.tls-ca-path=%s", signingCAPath()),
- fmt.Sprintf("-ingester.client.tls-cert-path=%s", lokiServerGRPCTLSCert()),
- fmt.Sprintf("-ingester.client.tls-key-path=%s", lokiServerGRPCTLSKey()),
- fmt.Sprintf("-ingester.client.tls-server-name=%s", fqdn(serviceNameIngesterGRPC(stackName), stackNs)),
- "-ingester.client.tls-min-version=VersionTLS12",
- "-ingester.client.tls-cipher-suites=cipher1,cipher2",
- "-ruler.client.tls-enabled=true",
- fmt.Sprintf("-ruler.client.tls-ca-path=%s", signingCAPath()),
- fmt.Sprintf("-ruler.client.tls-cert-path=%s", lokiServerGRPCTLSCert()),
- fmt.Sprintf("-ruler.client.tls-key-path=%s", lokiServerGRPCTLSKey()),
- fmt.Sprintf("-ruler.client.tls-server-name=%s", fqdn(serviceNameRulerGRPC(stackName), stackNs)),
- "-ruler.client.tls-min-version=VersionTLS12",
- "-ruler.client.tls-cipher-suites=cipher1,cipher2",
- "-internal-server.enable=true",
- "-internal-server.http-listen-address=",
- fmt.Sprintf("-internal-server.http-tls-cert-path=%s", lokiServerHTTPTLSCert()),
- fmt.Sprintf("-internal-server.http-tls-key-path=%s", lokiServerHTTPTLSKey()),
- "-internal-server.http-tls-cipher-suites=cipher1,cipher2",
- "-internal-server.http-tls-min-version=VersionTLS12",
- "-server.tls-cipher-suites=cipher1,cipher2",
- "-server.tls-min-version=VersionTLS12",
- fmt.Sprintf("-server.http-tls-ca-path=%s", signingCAPath()),
- fmt.Sprintf("-server.http-tls-cert-path=%s", lokiServerHTTPTLSCert()),
- fmt.Sprintf("-server.http-tls-key-path=%s", lokiServerHTTPTLSKey()),
- "-server.http-tls-client-auth=RequireAndVerifyClientCert",
- fmt.Sprintf("-server.grpc-tls-ca-path=%s", signingCAPath()),
- fmt.Sprintf("-server.grpc-tls-cert-path=%s", lokiServerGRPCTLSCert()),
- fmt.Sprintf("-server.grpc-tls-key-path=%s", lokiServerGRPCTLSKey()),
- "-server.grpc-tls-client-auth=RequireAndVerifyClientCert",
- },
wantPorts: []corev1.ContainerPort{
{
Name: lokiInternalHTTPPortName,
@@ -945,16 +736,6 @@ func TestServices_WithEncryption(t *testing.T) {
strings.Contains(s, "ca") // Certificate authorities
}
- // Check args not missing
- for _, arg := range test.wantArgs {
- require.Contains(t, pod.Containers[0].Args, arg)
- }
- for _, arg := range pod.Containers[0].Args {
- if isEncryptionRelated(arg) {
- require.Contains(t, test.wantArgs, arg)
- }
- }
-
// Check ports not missing
for _, port := range test.wantPorts {
require.Contains(t, pod.Containers[0].Ports, port)
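The hunk above removes the argument assertions but keeps the tail of the predicate that classifies encryption-related flags. A plausible reconstruction of that predicate from the surviving context (the name isEncryptionRelated and its final clause appear in the hunk; the "tls" clause and the package name are assumptions, not verbatim operator code):

package manifests

import "strings"

// isEncryptionRelated reports whether a CLI argument carries TLS or CA
// material. Reconstructed sketch; only the final clause survives in the
// diff context above.
func isEncryptionRelated(s string) bool {
	return strings.Contains(s, "tls") || // assumed: TLS certs, keys, ciphers
		strings.Contains(s, "ca") // Certificate authorities
}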
|
operator
|
Move Loki TLS configuration into ConfigMap (#7738)
|
d8b1818b21972ed5c727d74cd5941f12e422a841
|
2025-01-24 20:48:08
|
renovate[bot]
|
chore(deps): update helm release minio to v5.4.0 (main) (#15946)
| false
|
diff --git a/production/helm/loki/Chart.lock b/production/helm/loki/Chart.lock
index beb95db3974ce..60417bd7d191b 100644
--- a/production/helm/loki/Chart.lock
+++ b/production/helm/loki/Chart.lock
@@ -1,12 +1,12 @@
dependencies:
- name: minio
repository: https://charts.min.io/
- version: 5.3.0
+ version: 5.4.0
- name: grafana-agent-operator
repository: https://grafana.github.io/helm-charts
version: 0.5.0
- name: rollout-operator
repository: https://grafana.github.io/helm-charts
version: 0.22.0
-digest: sha256:b52eac8eb486759c2b4b55ac111a00680bb3d872c9ad89434bc58b087fcd1d80
-generated: "2024-12-14T21:29:47.873748325Z"
+digest: sha256:48d111dd19a29685e51e7817f5e0c16366380d384dc5e14ca299755896489eff
+generated: "2025-01-24T15:11:31.126201473Z"
diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml
index 234639a93a95c..1e06ae151c90e 100644
--- a/production/helm/loki/Chart.yaml
+++ b/production/helm/loki/Chart.yaml
@@ -13,7 +13,7 @@ icon: https://grafana.com/docs/loki/latest/logo_and_name.png
dependencies:
- name: minio
alias: minio
- version: 5.3.0
+ version: 5.4.0
repository: https://charts.min.io/
condition: minio.enabled
- name: grafana-agent-operator
|
chore
|
update helm release minio to v5.4.0 (main) (#15946)
|
80ea621ff3c7be677502d404c1bb89d1435b20ed
|
2023-01-25 17:42:28
|
nicoche
|
promtail: Add `max-line-size-truncate` (#8233)
| false
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index ffa76c1fb07ac..38e9d07ed97d1 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -51,6 +51,7 @@
* [7973](https://github.com/grafana/loki/pull/7973) **chodges15**: Add configuration to drop rate limited batches in Loki client and new metric label for drop reason.
* [8153](https://github.com/grafana/loki/pull/8153) **kavirajk**: promtail: Add `max-line-size` limit to drop on client side
* [8096](https://github.com/grafana/loki/pull/8096) **kavirajk**: doc(promtail): Doc about how log rotate works with promtail
+* [8233](https://github.com/grafana/loki/pull/8233) **nicoche**: promtail: Add `max-line-size-truncate` limit to truncate too long lines on client side
##### Enhancements
diff --git a/clients/cmd/docker-driver/loki.go b/clients/cmd/docker-driver/loki.go
index 9221ce5182ef3..cc15e71bda2ee 100644
--- a/clients/cmd/docker-driver/loki.go
+++ b/clients/cmd/docker-driver/loki.go
@@ -39,7 +39,7 @@ func New(logCtx logger.Info, logger log.Logger) (logger.Logger, error) {
return nil, err
}
m := client.NewMetrics(prometheus.DefaultRegisterer)
- c, err := client.New(m, cfg.clientConfig, 0, 0, logger)
+ c, err := client.New(m, cfg.clientConfig, 0, 0, false, logger)
if err != nil {
return nil, err
}
diff --git a/clients/cmd/fluent-bit/client.go b/clients/cmd/fluent-bit/client.go
index 542d29f4ac049..11c2fa1d0386b 100644
--- a/clients/cmd/fluent-bit/client.go
+++ b/clients/cmd/fluent-bit/client.go
@@ -11,5 +11,5 @@ func NewClient(cfg *config, logger log.Logger, metrics *client.Metrics) (client.
if cfg.bufferConfig.buffer {
return NewBuffer(cfg, logger, metrics)
}
- return client.New(metrics, cfg.clientConfig, 0, 0, logger)
+ return client.New(metrics, cfg.clientConfig, 0, 0, false, logger)
}
diff --git a/clients/cmd/fluent-bit/dque.go b/clients/cmd/fluent-bit/dque.go
index b2f85a9608393..ad4ec2318ec54 100644
--- a/clients/cmd/fluent-bit/dque.go
+++ b/clients/cmd/fluent-bit/dque.go
@@ -72,7 +72,7 @@ func newDque(cfg *config, logger log.Logger, metrics *client.Metrics) (client.Cl
_ = q.queue.TurboOn()
}
- q.loki, err = client.New(metrics, cfg.clientConfig, 0, 0, logger)
+ q.loki, err = client.New(metrics, cfg.clientConfig, 0, 0, false, logger)
if err != nil {
return nil, err
}
diff --git a/clients/pkg/promtail/client/client.go b/clients/pkg/promtail/client/client.go
index 6b21eba077f9c..50e03675de15f 100644
--- a/clients/pkg/promtail/client/client.go
+++ b/clients/pkg/promtail/client/client.go
@@ -34,19 +34,19 @@ const (
// pipeline stages
ReservedLabelTenantID = "__tenant_id__"
- LatencyLabel = "filename"
- HostLabel = "host"
- ClientLabel = "client"
- TenantLabel = "tenant"
- DropReasonLabel = "reason"
-
- DropReasonGeneric = "ingester_error"
- DropReasonRateLimited = "rate_limited"
- DropReasonStreamLimited = "stream_limited"
- DropReasongMaxLineSizeLimited = "max_line_size_limited"
+ LatencyLabel = "filename"
+ HostLabel = "host"
+ ClientLabel = "client"
+ TenantLabel = "tenant"
+ ReasonLabel = "reason"
+
+ ReasonGeneric = "ingester_error"
+ ReasonRateLimited = "rate_limited"
+ ReasonStreamLimited = "stream_limited"
+ ReasonLineTooLong = "line_too_long"
)
-var DropReasons = []string{DropReasonGeneric, DropReasonRateLimited, DropReasonStreamLimited}
+var Reasons = []string{ReasonGeneric, ReasonRateLimited, ReasonStreamLimited, ReasonLineTooLong}
var UserAgent = fmt.Sprintf("promtail/%s", build.Version)
@@ -56,6 +56,8 @@ type Metrics struct {
droppedBytes *prometheus.CounterVec
sentEntries *prometheus.CounterVec
droppedEntries *prometheus.CounterVec
+ mutatedEntries *prometheus.CounterVec
+ mutatedBytes *prometheus.CounterVec
requestDuration *prometheus.HistogramVec
batchRetries *prometheus.CounterVec
countersWithHost []*prometheus.CounterVec
@@ -80,7 +82,7 @@ func NewMetrics(reg prometheus.Registerer) *Metrics {
Namespace: "promtail",
Name: "dropped_bytes_total",
Help: "Number of bytes dropped because failed to be sent to the ingester after all retries.",
- }, []string{HostLabel, TenantLabel, DropReasonLabel})
+ }, []string{HostLabel, TenantLabel, ReasonLabel})
m.sentEntries = prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: "promtail",
Name: "sent_entries_total",
@@ -90,7 +92,17 @@ func NewMetrics(reg prometheus.Registerer) *Metrics {
Namespace: "promtail",
Name: "dropped_entries_total",
Help: "Number of log entries dropped because failed to be sent to the ingester after all retries.",
- }, []string{HostLabel, TenantLabel, DropReasonLabel})
+ }, []string{HostLabel, TenantLabel, ReasonLabel})
+ m.mutatedEntries = prometheus.NewCounterVec(prometheus.CounterOpts{
+ Namespace: "promtail",
+ Name: "mutated_entries_total",
+ Help: "The total number of log entries that have been mutated.",
+ }, []string{HostLabel, TenantLabel, ReasonLabel})
+ m.mutatedBytes = prometheus.NewCounterVec(prometheus.CounterOpts{
+ Namespace: "promtail",
+ Name: "mutated_bytes_total",
+ Help: "The total number of bytes that have been mutated.",
+ }, []string{HostLabel, TenantLabel, ReasonLabel})
m.requestDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{
Namespace: "promtail",
Name: "request_duration_seconds",
@@ -111,7 +123,7 @@ func NewMetrics(reg prometheus.Registerer) *Metrics {
}
m.countersWithHostTenantReason = []*prometheus.CounterVec{
- m.droppedBytes, m.droppedEntries,
+ m.droppedBytes, m.droppedEntries, m.mutatedEntries, m.mutatedBytes,
}
if reg != nil {
@@ -120,6 +132,8 @@ func NewMetrics(reg prometheus.Registerer) *Metrics {
m.droppedBytes = mustRegisterOrGet(reg, m.droppedBytes).(*prometheus.CounterVec)
m.sentEntries = mustRegisterOrGet(reg, m.sentEntries).(*prometheus.CounterVec)
m.droppedEntries = mustRegisterOrGet(reg, m.droppedEntries).(*prometheus.CounterVec)
+ m.mutatedEntries = mustRegisterOrGet(reg, m.mutatedEntries).(*prometheus.CounterVec)
+ m.mutatedBytes = mustRegisterOrGet(reg, m.mutatedBytes).(*prometheus.CounterVec)
m.requestDuration = mustRegisterOrGet(reg, m.requestDuration).(*prometheus.HistogramVec)
m.batchRetries = mustRegisterOrGet(reg, m.batchRetries).(*prometheus.CounterVec)
}
@@ -160,24 +174,25 @@ type client struct {
externalLabels model.LabelSet
// ctx is used in any upstream calls from the `client`.
- ctx context.Context
- cancel context.CancelFunc
- maxStreams int
- maxLineSize int
+ ctx context.Context
+ cancel context.CancelFunc
+ maxStreams int
+ maxLineSize int
+ maxLineSizeTruncate bool
}
// Tripperware can wrap a roundtripper.
type Tripperware func(http.RoundTripper) http.RoundTripper
// New makes a new Client.
-func New(metrics *Metrics, cfg Config, maxStreams, maxLineSize int, logger log.Logger) (Client, error) {
+func New(metrics *Metrics, cfg Config, maxStreams, maxLineSize int, maxLineSizeTruncate bool, logger log.Logger) (Client, error) {
if cfg.StreamLagLabels.String() != "" {
return nil, fmt.Errorf("client config stream_lag_labels is deprecated and the associated metric has been removed, stream_lag_labels: %+v", cfg.StreamLagLabels.String())
}
- return newClient(metrics, cfg, maxStreams, maxLineSize, logger)
+ return newClient(metrics, cfg, maxStreams, maxLineSize, maxLineSizeTruncate, logger)
}
-func newClient(metrics *Metrics, cfg Config, maxStreams, maxLineSize int, logger log.Logger) (*client, error) {
+func newClient(metrics *Metrics, cfg Config, maxStreams, maxLineSize int, maxLineSizeTruncate bool, logger log.Logger) (*client, error) {
if cfg.URL.URL == nil {
return nil, errors.New("client needs target URL")
@@ -192,11 +207,12 @@ func newClient(metrics *Metrics, cfg Config, maxStreams, maxLineSize int, logger
metrics: metrics,
name: asSha256(cfg),
- externalLabels: cfg.ExternalLabels.LabelSet,
- ctx: ctx,
- cancel: cancel,
- maxStreams: maxStreams,
- maxLineSize: maxLineSize,
+ externalLabels: cfg.ExternalLabels.LabelSet,
+ ctx: ctx,
+ cancel: cancel,
+ maxStreams: maxStreams,
+ maxLineSize: maxLineSize,
+ maxLineSizeTruncate: maxLineSizeTruncate,
}
if cfg.Name != "" {
c.name = cfg.Name
@@ -226,8 +242,8 @@ func newClient(metrics *Metrics, cfg Config, maxStreams, maxLineSize int, logger
}
// NewWithTripperware creates a new Loki client with a custom tripperware.
-func NewWithTripperware(metrics *Metrics, cfg Config, maxStreams, maxLineSize int, logger log.Logger, tp Tripperware) (Client, error) {
- c, err := newClient(metrics, cfg, maxStreams, maxLineSize, logger)
+func NewWithTripperware(metrics *Metrics, cfg Config, maxStreams, maxLineSize int, maxLineSizeTruncate bool, logger log.Logger, tp Tripperware) (Client, error) {
+ c, err := newClient(metrics, cfg, maxStreams, maxLineSize, maxLineSizeTruncate, logger)
if err != nil {
return nil, err
}
@@ -243,7 +259,7 @@ func (c *client) initBatchMetrics(tenantID string) {
// Initialize counters to 0 so the metrics are exported before the first
// occurrence of incrementing to avoid missing metrics.
for _, counter := range c.metrics.countersWithHostTenantReason {
- for _, reason := range DropReasons {
+ for _, reason := range Reasons {
counter.WithLabelValues(c.cfg.URL.Host, tenantID, reason).Add(0)
}
}
@@ -289,10 +305,17 @@ func (c *client) run() {
e, tenantID := c.processEntry(e)
- // drop the entry because its length is greater than maxLineSize. maxLineSize == 0 means disabled.
+ // Either drop or mutate the log entry because its length is greater than maxLineSize. maxLineSize == 0 means disabled.
if c.maxLineSize != 0 && len(e.Line) > c.maxLineSize {
- c.metrics.droppedEntries.WithLabelValues(c.cfg.URL.Host, tenantID, DropReasongMaxLineSizeLimited).Inc()
- break
+ if !c.maxLineSizeTruncate {
+ c.metrics.droppedEntries.WithLabelValues(c.cfg.URL.Host, tenantID, ReasonLineTooLong).Inc()
+ c.metrics.droppedBytes.WithLabelValues(c.cfg.URL.Host, tenantID, ReasonLineTooLong).Add(float64(len(e.Line)))
+ break
+ }
+
+ c.metrics.mutatedEntries.WithLabelValues(c.cfg.URL.Host, tenantID, ReasonLineTooLong).Inc()
+ c.metrics.mutatedBytes.WithLabelValues(c.cfg.URL.Host, tenantID, ReasonLineTooLong).Add(float64(len(e.Line) - c.maxLineSize))
+ e.Line = e.Line[:c.maxLineSize]
}
batch, ok := batches[tenantID]
@@ -317,9 +340,9 @@ func (c *client) run() {
err := batch.add(e)
if err != nil {
level.Error(c.logger).Log("msg", "batch add err", "tenant", tenantID, "error", err)
- reason := DropReasonGeneric
+ reason := ReasonGeneric
if err.Error() == errMaxStreamsLimitExceeded {
- reason = DropReasonStreamLimited
+ reason = ReasonStreamLimited
}
c.metrics.droppedBytes.WithLabelValues(c.cfg.URL.Host, tenantID, reason).Add(float64(len(e.Line)))
c.metrics.droppedEntries.WithLabelValues(c.cfg.URL.Host, tenantID, reason).Inc()
@@ -376,8 +399,8 @@ func (c *client) sendBatch(tenantID string, batch *batch) {
// Immediately drop rate limited batches to avoid HOL blocking for other tenants not experiencing throttling
if c.cfg.DropRateLimitedBatches && batchIsRateLimited(status) {
level.Warn(c.logger).Log("msg", "dropping batch due to rate limiting applied at ingester")
- c.metrics.droppedBytes.WithLabelValues(c.cfg.URL.Host, tenantID, DropReasonRateLimited).Add(bufBytes)
- c.metrics.droppedEntries.WithLabelValues(c.cfg.URL.Host, tenantID, DropReasonRateLimited).Add(float64(entriesCount))
+ c.metrics.droppedBytes.WithLabelValues(c.cfg.URL.Host, tenantID, ReasonRateLimited).Add(bufBytes)
+ c.metrics.droppedEntries.WithLabelValues(c.cfg.URL.Host, tenantID, ReasonRateLimited).Add(float64(entriesCount))
return
}
@@ -407,9 +430,9 @@ func (c *client) sendBatch(tenantID string, batch *batch) {
level.Error(c.logger).Log("msg", "final error sending batch", "status", status, "tenant", tenantID, "error", err)
// If the reason for the last retry error was rate limiting, count the drops as such, even if the previous errors
// were for a different reason
- dropReason := DropReasonGeneric
+ dropReason := ReasonGeneric
if batchIsRateLimited(status) {
- dropReason = DropReasonRateLimited
+ dropReason = ReasonRateLimited
}
c.metrics.droppedBytes.WithLabelValues(c.cfg.URL.Host, tenantID, dropReason).Add(bufBytes)
c.metrics.droppedEntries.WithLabelValues(c.cfg.URL.Host, tenantID, dropReason).Add(float64(entriesCount))
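The run() hunk above carries the core behavior change: an oversized line is now either dropped or truncated depending on the new flag. A standalone sketch of that policy under the same semantics (capLine is a hypothetical helper name; the real client also updates the per-tenant dropped/mutated metrics shown in the diff):

package client

// capLine applies the max-line-size policy: maxLineSize == 0 disables the
// limit, and the second return value is false when the entry should be
// dropped outright rather than sent.
func capLine(line string, maxLineSize int, truncate bool) (string, bool) {
	if maxLineSize == 0 || len(line) <= maxLineSize {
		return line, true // within the limit, send unchanged
	}
	if !truncate {
		return "", false // drop, counted under reason "line_too_long"
	}
	return line[:maxLineSize], true // keep only the first maxLineSize bytes
}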
diff --git a/clients/pkg/promtail/client/client_test.go b/clients/pkg/promtail/client/client_test.go
index 10c529572b34f..c6475040fcccd 100644
--- a/clients/pkg/promtail/client/client_test.go
+++ b/clients/pkg/promtail/client/client_test.go
@@ -45,17 +45,18 @@ type receivedReq struct {
func TestClient_Handle(t *testing.T) {
tests := map[string]struct {
- clientBatchSize int
- clientBatchWait time.Duration
- clientMaxRetries int
- clientMaxLineSize int
- clientTenantID string
- clientDropRateLimited bool
- serverResponseStatus int
- inputEntries []api.Entry
- inputDelay time.Duration
- expectedReqs []receivedReq
- expectedMetrics string
+ clientBatchSize int
+ clientBatchWait time.Duration
+ clientMaxRetries int
+ clientMaxLineSize int
+ clientMaxLineSizeTruncate bool
+ clientTenantID string
+ clientDropRateLimited bool
+ serverResponseStatus int
+ inputEntries []api.Entry
+ inputDelay time.Duration
+ expectedReqs []receivedReq
+ expectedMetrics string
}{
"batch log entries together until the batch size is reached": {
clientBatchSize: 10,
@@ -80,17 +81,31 @@ func TestClient_Handle(t *testing.T) {
# HELP promtail_dropped_entries_total Number of log entries dropped because failed to be sent to the ingester after all retries.
# TYPE promtail_dropped_entries_total counter
promtail_dropped_entries_total{host="__HOST__",reason="ingester_error",tenant=""} 0
+ promtail_dropped_entries_total{host="__HOST__",reason="line_too_long",tenant=""} 0
promtail_dropped_entries_total{host="__HOST__",reason="rate_limited",tenant=""} 0
promtail_dropped_entries_total{host="__HOST__",reason="stream_limited",tenant=""} 0
+ # HELP promtail_mutated_entries_total The total number of log entries that have been mutated.
+ # TYPE promtail_mutated_entries_total counter
+ promtail_mutated_entries_total{host="__HOST__",reason="ingester_error",tenant=""} 0
+ promtail_mutated_entries_total{host="__HOST__",reason="line_too_long",tenant=""} 0
+ promtail_mutated_entries_total{host="__HOST__",reason="rate_limited",tenant=""} 0
+ promtail_mutated_entries_total{host="__HOST__",reason="stream_limited",tenant=""} 0
+ # HELP promtail_mutated_bytes_total The total number of bytes that have been mutated.
+ # TYPE promtail_mutated_bytes_total counter
+ promtail_mutated_bytes_total{host="__HOST__",reason="ingester_error",tenant=""} 0
+ promtail_mutated_bytes_total{host="__HOST__",reason="line_too_long",tenant=""} 0
+ promtail_mutated_bytes_total{host="__HOST__",reason="rate_limited",tenant=""} 0
+ promtail_mutated_bytes_total{host="__HOST__",reason="stream_limited",tenant=""} 0
`,
},
- "log entries have max_line_size exceeded": {
- clientBatchSize: 10,
- clientBatchWait: 100 * time.Millisecond,
- clientMaxRetries: 3,
- clientMaxLineSize: 10, // any log line more than this length should be discarded
- serverResponseStatus: 200,
- inputEntries: []api.Entry{logEntries[0], logEntries[1], logEntries[6]}, // this logEntries[6] entries has line more than size 10
+ "dropping log entries that have max_line_size exceeded": {
+ clientBatchSize: 10,
+ clientBatchWait: 100 * time.Millisecond,
+ clientMaxRetries: 3,
+ clientMaxLineSize: 10, // any log line more than this length should be discarded
+ clientMaxLineSizeTruncate: false,
+ serverResponseStatus: 200,
+ inputEntries: []api.Entry{logEntries[0], logEntries[1], logEntries[6]}, // this logEntries[6] entries has line more than size 10
expectedReqs: []receivedReq{
{
tenantID: "",
@@ -104,9 +119,66 @@ func TestClient_Handle(t *testing.T) {
# HELP promtail_dropped_entries_total Number of log entries dropped because failed to be sent to the ingester after all retries.
# TYPE promtail_dropped_entries_total counter
promtail_dropped_entries_total{host="__HOST__",reason="ingester_error",tenant=""} 0
+ promtail_dropped_entries_total{host="__HOST__",reason="line_too_long",tenant=""} 1
promtail_dropped_entries_total{host="__HOST__",reason="rate_limited",tenant=""} 0
promtail_dropped_entries_total{host="__HOST__",reason="stream_limited",tenant=""} 0
- promtail_dropped_entries_total{host="__HOST__",reason="max_line_size_limited",tenant=""} 1
+ # HELP promtail_mutated_entries_total The total number of log entries that have been mutated.
+ # TYPE promtail_mutated_entries_total counter
+ promtail_mutated_entries_total{host="__HOST__",reason="ingester_error",tenant=""} 0
+ promtail_mutated_entries_total{host="__HOST__",reason="line_too_long",tenant=""} 0
+ promtail_mutated_entries_total{host="__HOST__",reason="rate_limited",tenant=""} 0
+ promtail_mutated_entries_total{host="__HOST__",reason="stream_limited",tenant=""} 0
+ # HELP promtail_mutated_bytes_total The total number of bytes that have been mutated.
+ # TYPE promtail_mutated_bytes_total counter
+ promtail_mutated_bytes_total{host="__HOST__",reason="ingester_error",tenant=""} 0
+ promtail_mutated_bytes_total{host="__HOST__",reason="line_too_long",tenant=""} 0
+ promtail_mutated_bytes_total{host="__HOST__",reason="rate_limited",tenant=""} 0
+ promtail_mutated_bytes_total{host="__HOST__",reason="stream_limited",tenant=""} 0
+ `,
+ },
+ "truncating log entries that have max_line_size exceeded": {
+ clientBatchSize: 10,
+ clientBatchWait: 100 * time.Millisecond,
+ clientMaxRetries: 3,
+ clientMaxLineSize: 10,
+ clientMaxLineSizeTruncate: true,
+ serverResponseStatus: 200,
+ inputEntries: []api.Entry{logEntries[0], logEntries[1], logEntries[6]}, // logEntries[6]'s line is greater than 10 bytes
+ expectedReqs: []receivedReq{
+ {
+ tenantID: "",
+ pushReq: logproto.PushRequest{Streams: []logproto.Stream{{Labels: "{}", Entries: []logproto.Entry{
+ logEntries[0].Entry,
+ logEntries[1].Entry,
+ {
+ Timestamp: logEntries[6].Entry.Timestamp,
+ Line: logEntries[6].Line[:10],
+ },
+ }}}},
+ },
+ },
+ expectedMetrics: `
+ # HELP promtail_sent_entries_total Number of log entries sent to the ingester.
+ # TYPE promtail_sent_entries_total counter
+ promtail_sent_entries_total{host="__HOST__"} 3.0
+ # HELP promtail_dropped_entries_total Number of log entries dropped because failed to be sent to the ingester after all retries.
+ # TYPE promtail_dropped_entries_total counter
+ promtail_dropped_entries_total{host="__HOST__",reason="ingester_error",tenant=""} 0
+ promtail_dropped_entries_total{host="__HOST__",reason="line_too_long",tenant=""} 0
+ promtail_dropped_entries_total{host="__HOST__",reason="rate_limited",tenant=""} 0
+ promtail_dropped_entries_total{host="__HOST__",reason="stream_limited",tenant=""} 0
+ # HELP promtail_mutated_entries_total The total number of log entries that have been mutated.
+ # TYPE promtail_mutated_entries_total counter
+ promtail_mutated_entries_total{host="__HOST__",reason="ingester_error",tenant=""} 0
+ promtail_mutated_entries_total{host="__HOST__",reason="line_too_long",tenant=""} 1
+ promtail_mutated_entries_total{host="__HOST__",reason="rate_limited",tenant=""} 0
+ promtail_mutated_entries_total{host="__HOST__",reason="stream_limited",tenant=""} 0
+ # HELP promtail_mutated_bytes_total The total number of bytes that have been mutated.
+ # TYPE promtail_mutated_bytes_total counter
+ promtail_mutated_bytes_total{host="__HOST__",reason="ingester_error",tenant=""} 0
+ promtail_mutated_bytes_total{host="__HOST__",reason="line_too_long",tenant=""} 4
+ promtail_mutated_bytes_total{host="__HOST__",reason="rate_limited",tenant=""} 0
+ promtail_mutated_bytes_total{host="__HOST__",reason="stream_limited",tenant=""} 0
`,
},
@@ -134,8 +206,21 @@ func TestClient_Handle(t *testing.T) {
# HELP promtail_dropped_entries_total Number of log entries dropped because failed to be sent to the ingester after all retries.
# TYPE promtail_dropped_entries_total counter
promtail_dropped_entries_total{host="__HOST__",reason="ingester_error",tenant=""} 0
+ promtail_dropped_entries_total{host="__HOST__",reason="line_too_long",tenant=""} 0
promtail_dropped_entries_total{host="__HOST__",reason="rate_limited",tenant=""} 0
promtail_dropped_entries_total{host="__HOST__",reason="stream_limited",tenant=""} 0
+ # HELP promtail_mutated_entries_total The total number of log entries that have been mutated.
+ # TYPE promtail_mutated_entries_total counter
+ promtail_mutated_entries_total{host="__HOST__",reason="ingester_error",tenant=""} 0
+ promtail_mutated_entries_total{host="__HOST__",reason="line_too_long",tenant=""} 0
+ promtail_mutated_entries_total{host="__HOST__",reason="rate_limited",tenant=""} 0
+ promtail_mutated_entries_total{host="__HOST__",reason="stream_limited",tenant=""} 0
+ # HELP promtail_mutated_bytes_total The total number of bytes that have been mutated.
+ # TYPE promtail_mutated_bytes_total counter
+ promtail_mutated_bytes_total{host="__HOST__",reason="ingester_error",tenant=""} 0
+ promtail_mutated_bytes_total{host="__HOST__",reason="line_too_long",tenant=""} 0
+ promtail_mutated_bytes_total{host="__HOST__",reason="rate_limited",tenant=""} 0
+ promtail_mutated_bytes_total{host="__HOST__",reason="stream_limited",tenant=""} 0
`,
},
"retry send a batch up to backoff's max retries in case the server responds with a 5xx": {
@@ -162,8 +247,21 @@ func TestClient_Handle(t *testing.T) {
# HELP promtail_dropped_entries_total Number of log entries dropped because failed to be sent to the ingester after all retries.
# TYPE promtail_dropped_entries_total counter
promtail_dropped_entries_total{host="__HOST__",reason="ingester_error",tenant=""} 1
+ promtail_dropped_entries_total{host="__HOST__",reason="line_too_long",tenant=""} 0
promtail_dropped_entries_total{host="__HOST__",reason="rate_limited",tenant=""} 0
promtail_dropped_entries_total{host="__HOST__",reason="stream_limited",tenant=""} 0
+ # HELP promtail_mutated_entries_total The total number of log entries that have been mutated.
+ # TYPE promtail_mutated_entries_total counter
+ promtail_mutated_entries_total{host="__HOST__",reason="ingester_error",tenant=""} 0
+ promtail_mutated_entries_total{host="__HOST__",reason="line_too_long",tenant=""} 0
+ promtail_mutated_entries_total{host="__HOST__",reason="rate_limited",tenant=""} 0
+ promtail_mutated_entries_total{host="__HOST__",reason="stream_limited",tenant=""} 0
+ # HELP promtail_mutated_bytes_total The total number of bytes that have been mutated.
+ # TYPE promtail_mutated_bytes_total counter
+ promtail_mutated_bytes_total{host="__HOST__",reason="ingester_error",tenant=""} 0
+ promtail_mutated_bytes_total{host="__HOST__",reason="line_too_long",tenant=""} 0
+ promtail_mutated_bytes_total{host="__HOST__",reason="rate_limited",tenant=""} 0
+ promtail_mutated_bytes_total{host="__HOST__",reason="stream_limited",tenant=""} 0
# HELP promtail_sent_entries_total Number of log entries sent to the ingester.
# TYPE promtail_sent_entries_total counter
promtail_sent_entries_total{host="__HOST__"} 0
@@ -185,8 +283,21 @@ func TestClient_Handle(t *testing.T) {
# HELP promtail_dropped_entries_total Number of log entries dropped because failed to be sent to the ingester after all retries.
# TYPE promtail_dropped_entries_total counter
promtail_dropped_entries_total{host="__HOST__",reason="ingester_error",tenant=""} 1
+ promtail_dropped_entries_total{host="__HOST__",reason="line_too_long",tenant=""} 0
promtail_dropped_entries_total{host="__HOST__",reason="rate_limited",tenant=""} 0
promtail_dropped_entries_total{host="__HOST__",reason="stream_limited",tenant=""} 0
+ # HELP promtail_mutated_entries_total The total number of log entries that have been mutated.
+ # TYPE promtail_mutated_entries_total counter
+ promtail_mutated_entries_total{host="__HOST__",reason="ingester_error",tenant=""} 0
+ promtail_mutated_entries_total{host="__HOST__",reason="line_too_long",tenant=""} 0
+ promtail_mutated_entries_total{host="__HOST__",reason="rate_limited",tenant=""} 0
+ promtail_mutated_entries_total{host="__HOST__",reason="stream_limited",tenant=""} 0
+ # HELP promtail_mutated_bytes_total The total number of bytes that have been mutated.
+ # TYPE promtail_mutated_bytes_total counter
+ promtail_mutated_bytes_total{host="__HOST__",reason="ingester_error",tenant=""} 0
+ promtail_mutated_bytes_total{host="__HOST__",reason="line_too_long",tenant=""} 0
+ promtail_mutated_bytes_total{host="__HOST__",reason="rate_limited",tenant=""} 0
+ promtail_mutated_bytes_total{host="__HOST__",reason="stream_limited",tenant=""} 0
# HELP promtail_sent_entries_total Number of log entries sent to the ingester.
# TYPE promtail_sent_entries_total counter
promtail_sent_entries_total{host="__HOST__"} 0
@@ -216,8 +327,21 @@ func TestClient_Handle(t *testing.T) {
# HELP promtail_dropped_entries_total Number of log entries dropped because failed to be sent to the ingester after all retries.
# TYPE promtail_dropped_entries_total counter
promtail_dropped_entries_total{host="__HOST__",reason="ingester_error",tenant=""} 0
+ promtail_dropped_entries_total{host="__HOST__",reason="line_too_long",tenant=""} 0
promtail_dropped_entries_total{host="__HOST__",reason="rate_limited",tenant=""} 1
promtail_dropped_entries_total{host="__HOST__",reason="stream_limited",tenant=""} 0
+ # HELP promtail_mutated_entries_total The total number of log entries that have been mutated.
+ # TYPE promtail_mutated_entries_total counter
+ promtail_mutated_entries_total{host="__HOST__",reason="ingester_error",tenant=""} 0
+ promtail_mutated_entries_total{host="__HOST__",reason="line_too_long",tenant=""} 0
+ promtail_mutated_entries_total{host="__HOST__",reason="rate_limited",tenant=""} 0
+ promtail_mutated_entries_total{host="__HOST__",reason="stream_limited",tenant=""} 0
+ # HELP promtail_mutated_bytes_total The total number of bytes that have been mutated.
+ # TYPE promtail_mutated_bytes_total counter
+ promtail_mutated_bytes_total{host="__HOST__",reason="ingester_error",tenant=""} 0
+ promtail_mutated_bytes_total{host="__HOST__",reason="line_too_long",tenant=""} 0
+ promtail_mutated_bytes_total{host="__HOST__",reason="rate_limited",tenant=""} 0
+ promtail_mutated_bytes_total{host="__HOST__",reason="stream_limited",tenant=""} 0
# HELP promtail_sent_entries_total Number of log entries sent to the ingester.
# TYPE promtail_sent_entries_total counter
promtail_sent_entries_total{host="__HOST__"} 0
@@ -240,8 +364,21 @@ func TestClient_Handle(t *testing.T) {
# HELP promtail_dropped_entries_total Number of log entries dropped because failed to be sent to the ingester after all retries.
# TYPE promtail_dropped_entries_total counter
promtail_dropped_entries_total{host="__HOST__",reason="ingester_error",tenant=""} 0
+ promtail_dropped_entries_total{host="__HOST__",reason="line_too_long",tenant=""} 0
promtail_dropped_entries_total{host="__HOST__",reason="rate_limited",tenant=""} 1
promtail_dropped_entries_total{host="__HOST__",reason="stream_limited",tenant=""} 0
+ # HELP promtail_mutated_entries_total The total number of log entries that have been mutated.
+ # TYPE promtail_mutated_entries_total counter
+ promtail_mutated_entries_total{host="__HOST__",reason="ingester_error",tenant=""} 0
+ promtail_mutated_entries_total{host="__HOST__",reason="line_too_long",tenant=""} 0
+ promtail_mutated_entries_total{host="__HOST__",reason="rate_limited",tenant=""} 0
+ promtail_mutated_entries_total{host="__HOST__",reason="stream_limited",tenant=""} 0
+ # HELP promtail_mutated_bytes_total The total number of bytes that have been mutated.
+ # TYPE promtail_mutated_bytes_total counter
+ promtail_mutated_bytes_total{host="__HOST__",reason="ingester_error",tenant=""} 0
+ promtail_mutated_bytes_total{host="__HOST__",reason="line_too_long",tenant=""} 0
+ promtail_mutated_bytes_total{host="__HOST__",reason="rate_limited",tenant=""} 0
+ promtail_mutated_bytes_total{host="__HOST__",reason="stream_limited",tenant=""} 0
# HELP promtail_sent_entries_total Number of log entries sent to the ingester.
# TYPE promtail_sent_entries_total counter
promtail_sent_entries_total{host="__HOST__"} 0
@@ -267,8 +404,21 @@ func TestClient_Handle(t *testing.T) {
# HELP promtail_dropped_entries_total Number of log entries dropped because failed to be sent to the ingester after all retries.
# TYPE promtail_dropped_entries_total counter
promtail_dropped_entries_total{host="__HOST__", reason="ingester_error", tenant="tenant-default"} 0
+ promtail_dropped_entries_total{host="__HOST__",reason="line_too_long",tenant="tenant-default"} 0
promtail_dropped_entries_total{host="__HOST__", reason="rate_limited", tenant="tenant-default"} 0
promtail_dropped_entries_total{host="__HOST__",reason="stream_limited",tenant="tenant-default"} 0
+ # HELP promtail_mutated_entries_total The total number of log entries that have been mutated.
+ # TYPE promtail_mutated_entries_total counter
+ promtail_mutated_entries_total{host="__HOST__",reason="ingester_error",tenant="tenant-default"} 0
+ promtail_mutated_entries_total{host="__HOST__",reason="line_too_long",tenant="tenant-default"} 0
+ promtail_mutated_entries_total{host="__HOST__",reason="rate_limited",tenant="tenant-default"} 0
+ promtail_mutated_entries_total{host="__HOST__",reason="stream_limited",tenant="tenant-default"} 0
+ # HELP promtail_mutated_bytes_total The total number of bytes that have been mutated.
+ # TYPE promtail_mutated_bytes_total counter
+ promtail_mutated_bytes_total{host="__HOST__",reason="ingester_error",tenant="tenant-default"} 0
+ promtail_mutated_bytes_total{host="__HOST__",reason="line_too_long",tenant="tenant-default"} 0
+ promtail_mutated_bytes_total{host="__HOST__",reason="rate_limited",tenant="tenant-default"} 0
+ promtail_mutated_bytes_total{host="__HOST__",reason="stream_limited",tenant="tenant-default"} 0
`,
},
"batch log entries together honoring the tenant ID overridden while processing the pipeline stages": {
@@ -301,12 +451,43 @@ func TestClient_Handle(t *testing.T) {
promtail_dropped_entries_total{host="__HOST__",reason="ingester_error",tenant="tenant-1"} 0
promtail_dropped_entries_total{host="__HOST__",reason="ingester_error",tenant="tenant-2"} 0
promtail_dropped_entries_total{host="__HOST__",reason="ingester_error",tenant="tenant-default"} 0
+ promtail_dropped_entries_total{host="__HOST__",reason="line_too_long",tenant="tenant-1"} 0
+ promtail_dropped_entries_total{host="__HOST__",reason="line_too_long",tenant="tenant-2"} 0
+ promtail_dropped_entries_total{host="__HOST__",reason="line_too_long",tenant="tenant-default"} 0
promtail_dropped_entries_total{host="__HOST__",reason="rate_limited",tenant="tenant-1"} 0
promtail_dropped_entries_total{host="__HOST__",reason="rate_limited",tenant="tenant-2"} 0
promtail_dropped_entries_total{host="__HOST__",reason="rate_limited",tenant="tenant-default"} 0
promtail_dropped_entries_total{host="__HOST__",reason="stream_limited",tenant="tenant-1"} 0
promtail_dropped_entries_total{host="__HOST__",reason="stream_limited",tenant="tenant-2"} 0
promtail_dropped_entries_total{host="__HOST__",reason="stream_limited",tenant="tenant-default"} 0
+ # HELP promtail_mutated_entries_total The total number of log entries that have been mutated.
+ # TYPE promtail_mutated_entries_total counter
+ promtail_mutated_entries_total{host="__HOST__",reason="ingester_error",tenant="tenant-1"} 0
+ promtail_mutated_entries_total{host="__HOST__",reason="ingester_error",tenant="tenant-2"} 0
+ promtail_mutated_entries_total{host="__HOST__",reason="ingester_error",tenant="tenant-default"} 0
+ promtail_mutated_entries_total{host="__HOST__",reason="line_too_long",tenant="tenant-1"} 0
+ promtail_mutated_entries_total{host="__HOST__",reason="line_too_long",tenant="tenant-2"} 0
+ promtail_mutated_entries_total{host="__HOST__",reason="line_too_long",tenant="tenant-default"} 0
+ promtail_mutated_entries_total{host="__HOST__",reason="rate_limited",tenant="tenant-1"} 0
+ promtail_mutated_entries_total{host="__HOST__",reason="rate_limited",tenant="tenant-2"} 0
+ promtail_mutated_entries_total{host="__HOST__",reason="rate_limited",tenant="tenant-default"} 0
+ promtail_mutated_entries_total{host="__HOST__",reason="stream_limited",tenant="tenant-1"} 0
+ promtail_mutated_entries_total{host="__HOST__",reason="stream_limited",tenant="tenant-2"} 0
+ promtail_mutated_entries_total{host="__HOST__",reason="stream_limited",tenant="tenant-default"} 0
+ # HELP promtail_mutated_bytes_total The total number of bytes that have been mutated.
+ # TYPE promtail_mutated_bytes_total counter
+ promtail_mutated_bytes_total{host="__HOST__",reason="ingester_error",tenant="tenant-1"} 0
+ promtail_mutated_bytes_total{host="__HOST__",reason="ingester_error",tenant="tenant-2"} 0
+ promtail_mutated_bytes_total{host="__HOST__",reason="ingester_error",tenant="tenant-default"} 0
+ promtail_mutated_bytes_total{host="__HOST__",reason="line_too_long",tenant="tenant-1"} 0
+ promtail_mutated_bytes_total{host="__HOST__",reason="line_too_long",tenant="tenant-2"} 0
+ promtail_mutated_bytes_total{host="__HOST__",reason="line_too_long",tenant="tenant-default"} 0
+ promtail_mutated_bytes_total{host="__HOST__",reason="rate_limited",tenant="tenant-1"} 0
+ promtail_mutated_bytes_total{host="__HOST__",reason="rate_limited",tenant="tenant-2"} 0
+ promtail_mutated_bytes_total{host="__HOST__",reason="rate_limited",tenant="tenant-default"} 0
+ promtail_mutated_bytes_total{host="__HOST__",reason="stream_limited",tenant="tenant-1"} 0
+ promtail_mutated_bytes_total{host="__HOST__",reason="stream_limited",tenant="tenant-2"} 0
+ promtail_mutated_bytes_total{host="__HOST__",reason="stream_limited",tenant="tenant-default"} 0
`,
},
}
@@ -342,7 +523,7 @@ func TestClient_Handle(t *testing.T) {
}
m := NewMetrics(reg)
- c, err := New(m, cfg, 0, testData.clientMaxLineSize, log.NewNopLogger())
+ c, err := New(m, cfg, 0, testData.clientMaxLineSize, testData.clientMaxLineSizeTruncate, log.NewNopLogger())
require.NoError(t, err)
// Send all the input log entries
@@ -378,7 +559,7 @@ func TestClient_Handle(t *testing.T) {
fmt.Printf("Expected reqs: %#v\n", testData.expectedReqs)
expectedMetrics := strings.Replace(testData.expectedMetrics, "__HOST__", serverURL.Host, -1)
- err = testutil.GatherAndCompare(reg, strings.NewReader(expectedMetrics), "promtail_sent_entries_total", "promtail_dropped_entries_total")
+ err = testutil.GatherAndCompare(reg, strings.NewReader(expectedMetrics), "promtail_sent_entries_total", "promtail_dropped_entries_total", "promtail_mutated_entries_total", "promtail_mutated_bytes_total")
assert.NoError(t, err)
})
}
@@ -421,6 +602,7 @@ func TestClient_StopNow(t *testing.T) {
# HELP promtail_dropped_entries_total Number of log entries dropped because failed to be sent to the ingester after all retries.
# TYPE promtail_dropped_entries_total counter
promtail_dropped_entries_total{host="__HOST__",reason="ingester_error",tenant=""} 0
+ promtail_dropped_entries_total{host="__HOST__",reason="line_too_long",tenant=""} 0
promtail_dropped_entries_total{host="__HOST__",reason="rate_limited",tenant=""} 0
promtail_dropped_entries_total{host="__HOST__",reason="stream_limited",tenant=""} 0
`,
@@ -442,6 +624,7 @@ func TestClient_StopNow(t *testing.T) {
# HELP promtail_dropped_entries_total Number of log entries dropped because failed to be sent to the ingester after all retries.
# TYPE promtail_dropped_entries_total counter
promtail_dropped_entries_total{host="__HOST__",reason="ingester_error",tenant=""} 0
+ promtail_dropped_entries_total{host="__HOST__",reason="line_too_long",tenant=""} 0
promtail_dropped_entries_total{host="__HOST__",reason="rate_limited",tenant=""} 1
promtail_dropped_entries_total{host="__HOST__",reason="stream_limited",tenant=""} 0
# HELP promtail_sent_entries_total Number of log entries sent to the ingester.
@@ -481,7 +664,7 @@ func TestClient_StopNow(t *testing.T) {
}
m := NewMetrics(reg)
- cl, err := New(m, cfg, 0, 0, log.NewNopLogger())
+ cl, err := New(m, cfg, 0, 0, false, log.NewNopLogger())
require.NoError(t, err)
// Send all the input log entries
@@ -557,7 +740,7 @@ func Test_Tripperware(t *testing.T) {
var called bool
c, err := NewWithTripperware(metrics, Config{
URL: flagext.URLValue{URL: url},
- }, 0, 0, log.NewNopLogger(), func(rt http.RoundTripper) http.RoundTripper {
+ }, 0, 0, false, log.NewNopLogger(), func(rt http.RoundTripper) http.RoundTripper {
return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
require.Equal(t, r.URL.String(), "http://foo.com")
called = true
diff --git a/clients/pkg/promtail/client/logger.go b/clients/pkg/promtail/client/logger.go
index 0ffb244e39661..bee9fadf94c44 100644
--- a/clients/pkg/promtail/client/logger.go
+++ b/clients/pkg/promtail/client/logger.go
@@ -37,7 +37,7 @@ type logger struct {
// NewLogger creates a new client logger that logs entries instead of sending them.
func NewLogger(metrics *Metrics, log log.Logger, cfgs ...Config) (Client, error) {
// make sure the clients config is valid
- c, err := NewMulti(metrics, log, 0, 0, cfgs...)
+ c, err := NewMulti(metrics, log, 0, 0, false, cfgs...)
if err != nil {
return nil, err
}
diff --git a/clients/pkg/promtail/client/multi.go b/clients/pkg/promtail/client/multi.go
index 9243c91ad8f20..35eb05fabbb3e 100644
--- a/clients/pkg/promtail/client/multi.go
+++ b/clients/pkg/promtail/client/multi.go
@@ -21,7 +21,7 @@ type MultiClient struct {
}
// NewMulti creates a new client
-func NewMulti(metrics *Metrics, logger log.Logger, maxStreams, maxLineSize int, cfgs ...Config) (Client, error) {
+func NewMulti(metrics *Metrics, logger log.Logger, maxStreams, maxLineSize int, maxLineSizeTruncate bool, cfgs ...Config) (Client, error) {
var fake struct{}
if len(cfgs) == 0 {
@@ -30,7 +30,7 @@ func NewMulti(metrics *Metrics, logger log.Logger, maxStreams, maxLineSize int,
clientsCheck := make(map[string]struct{})
clients := make([]Client, 0, len(cfgs))
for _, cfg := range cfgs {
- client, err := New(metrics, cfg, maxStreams, maxLineSize, logger)
+ client, err := New(metrics, cfg, maxStreams, maxLineSize, maxLineSizeTruncate, logger)
if err != nil {
return nil, err
}
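With the widened signature above, every call site now states the truncation behavior explicitly. A hypothetical call site with illustrative values (the wrapper name is an assumption; NewMulti and the client types come from this package):

package promtail

import (
	"github.com/go-kit/log"

	"github.com/grafana/loki/clients/pkg/promtail/client"
)

// newTruncatingClient builds a multi-client that truncates oversized lines
// instead of dropping them. Illustrative wrapper, not part of the commit.
func newTruncatingClient(m *client.Metrics, logger log.Logger, cfgs ...client.Config) (client.Client, error) {
	return client.NewMulti(m, logger,
		0,      // maxStreams: disabled
		256000, // maxLineSize: 256 kB
		true,   // maxLineSizeTruncate: truncate rather than drop
		cfgs...)
}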
diff --git a/clients/pkg/promtail/client/multi_test.go b/clients/pkg/promtail/client/multi_test.go
index 1b45a472c95a0..fded80f94bbfe 100644
--- a/clients/pkg/promtail/client/multi_test.go
+++ b/clients/pkg/promtail/client/multi_test.go
@@ -27,7 +27,7 @@ var (
)
func TestNewMulti(t *testing.T) {
- _, err := NewMulti(nilMetrics, util_log.Logger, 0, 0, []Config{}...)
+ _, err := NewMulti(nilMetrics, util_log.Logger, 0, 0, false, []Config{}...)
if err == nil {
t.Fatal("expected err but got nil")
}
@@ -46,7 +46,7 @@ func TestNewMulti(t *testing.T) {
ExternalLabels: lokiflag.LabelSet{LabelSet: model.LabelSet{"hi": "there"}},
}
- clients, err := NewMulti(metrics, util_log.Logger, 0, 0, cc1, cc2)
+ clients, err := NewMulti(metrics, util_log.Logger, 0, 0, false, cc1, cc2)
if err != nil {
t.Fatalf("expected err: nil got:%v", err)
}
@@ -69,7 +69,7 @@ func TestNewMulti(t *testing.T) {
}
func TestNewMulti_BlockDuplicates(t *testing.T) {
- _, err := NewMulti(nilMetrics, util_log.Logger, 0, 0, []Config{}...)
+ _, err := NewMulti(nilMetrics, util_log.Logger, 0, 0, false, []Config{}...)
if err == nil {
t.Fatal("expected err but got nil")
}
@@ -82,11 +82,11 @@ func TestNewMulti_BlockDuplicates(t *testing.T) {
}
cc1Copy := cc1
- _, err = NewMulti(metrics, util_log.Logger, 0, 0, cc1, cc1Copy)
+ _, err = NewMulti(metrics, util_log.Logger, 0, 0, false, cc1, cc1Copy)
require.Error(t, err, "expected NewMulti to reject duplicate client configs")
cc1Copy.Name = "copy"
- clients, err := NewMulti(metrics, util_log.Logger, 0, 0, cc1, cc1Copy)
+ clients, err := NewMulti(metrics, util_log.Logger, 0, 0, false, cc1, cc1Copy)
require.NoError(t, err, "expected NewMulti to reject duplicate client configs")
multi := clients.(*MultiClient)
@@ -148,9 +148,9 @@ func TestMultiClient_Handle(t *testing.T) {
func TestMultiClient_Handle_Race(t *testing.T) {
u := flagext.URLValue{}
require.NoError(t, u.Set("http://localhost"))
- c1, err := New(nilMetrics, Config{URL: u, BackoffConfig: backoff.Config{MaxRetries: 1}, Timeout: time.Microsecond}, 0, 0, log.NewNopLogger())
+ c1, err := New(nilMetrics, Config{URL: u, BackoffConfig: backoff.Config{MaxRetries: 1}, Timeout: time.Microsecond}, 0, 0, false, log.NewNopLogger())
require.NoError(t, err)
- c2, err := New(nilMetrics, Config{URL: u, BackoffConfig: backoff.Config{MaxRetries: 1}, Timeout: time.Microsecond}, 0, 0, log.NewNopLogger())
+ c2, err := New(nilMetrics, Config{URL: u, BackoffConfig: backoff.Config{MaxRetries: 1}, Timeout: time.Microsecond}, 0, 0, false, log.NewNopLogger())
require.NoError(t, err)
clients := []Client{c1, c2}
m := &MultiClient{
diff --git a/clients/pkg/promtail/limit/config.go b/clients/pkg/promtail/limit/config.go
index 1d7a8ec16feae..02589afd86b89 100644
--- a/clients/pkg/promtail/limit/config.go
+++ b/clients/pkg/promtail/limit/config.go
@@ -13,6 +13,7 @@ type Config struct {
ReadlineRateDrop bool `mapstructure:"readline_rate_drop,omitempty" yaml:"readline_rate_drop,omitempty" json:"readline_rate_drop"`
MaxStreams int `mapstructure:"max_streams" yaml:"max_streams" json:"max_streams"`
MaxLineSize flagext.ByteSize `mapstructure:"max_line_size" yaml:"max_line_size" json:"max_line_size"`
+ MaxLineSizeTruncate bool `mapstructure:"max_line_size_truncate" yaml:"max_line_size_truncate" json:"max_line_size_truncate"`
}
func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
@@ -22,4 +23,5 @@ func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
f.BoolVar(&cfg.ReadlineRateDrop, prefix+"limit.readline-rate-drop", true, "When true, exceeding the rate limit causes this instance of Promtail to discard log lines, rather than sending them to Loki.")
f.IntVar(&cfg.MaxStreams, prefix+"max-streams", 0, "Maximum number of active streams. 0 to disable.")
f.Var(&cfg.MaxLineSize, prefix+"max-line-size", "Maximum log line byte size allowed without dropping. Example: 256kb, 2M. 0 to disable.")
+ f.BoolVar(&cfg.MaxLineSizeTruncate, prefix+"max-line-size-truncate", false, "Whether to truncate lines that exceed max_line_size. No effect if max_line_size is disabled")
}
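The new flag registers alongside the existing limits. A small wiring sketch, assuming an empty prefix and illustrative flag values (the flag names follow from the registration above):

package main

import (
	"flag"
	"fmt"

	"github.com/grafana/loki/clients/pkg/promtail/limit"
)

func main() {
	fs := flag.NewFlagSet("promtail", flag.ContinueOnError)
	var cfg limit.Config
	cfg.RegisterFlagsWithPrefix("", fs)
	// Parse the two limits documented above; error handling elided.
	_ = fs.Parse([]string{"-max-line-size=256kb", "-max-line-size-truncate=true"})
	fmt.Println(cfg.MaxLineSize.Val(), cfg.MaxLineSizeTruncate) // expected: 256000 true
}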
diff --git a/clients/pkg/promtail/promtail.go b/clients/pkg/promtail/promtail.go
index b06086803afce..d8e2deb47a0f5 100644
--- a/clients/pkg/promtail/promtail.go
+++ b/clients/pkg/promtail/promtail.go
@@ -140,7 +140,7 @@ func (p *Promtail) reloadConfig(cfg *config.Config) error {
}
cfg.PositionsConfig.ReadOnly = true
} else {
- p.client, err = client.NewMulti(p.metrics, p.logger, cfg.LimitsConfig.MaxStreams, cfg.LimitsConfig.MaxLineSize.Val(), cfg.ClientConfigs...)
+ p.client, err = client.NewMulti(p.metrics, p.logger, cfg.LimitsConfig.MaxStreams, cfg.LimitsConfig.MaxLineSize.Val(), cfg.LimitsConfig.MaxLineSizeTruncate, cfg.ClientConfigs...)
if err != nil {
return err
}
diff --git a/clients/pkg/promtail/targets/file/decompresser_test.go b/clients/pkg/promtail/targets/file/decompresser_test.go
index be2f132ca4a1b..e98b9c666fc8d 100644
--- a/clients/pkg/promtail/targets/file/decompresser_test.go
+++ b/clients/pkg/promtail/targets/file/decompresser_test.go
@@ -7,11 +7,12 @@ import (
"time"
"github.com/go-kit/log"
- "github.com/grafana/loki/clients/pkg/promtail/api"
- "github.com/grafana/loki/clients/pkg/promtail/client/fake"
"github.com/prometheus/client_golang/prometheus"
"github.com/stretchr/testify/require"
"go.uber.org/atomic"
+
+ "github.com/grafana/loki/clients/pkg/promtail/api"
+ "github.com/grafana/loki/clients/pkg/promtail/client/fake"
)
type noopClient struct {
diff --git a/clients/pkg/promtail/targets/lokipush/pushtarget_test.go b/clients/pkg/promtail/targets/lokipush/pushtarget_test.go
index 78c8e99a178f8..815fc10400c6f 100644
--- a/clients/pkg/promtail/targets/lokipush/pushtarget_test.go
+++ b/clients/pkg/promtail/targets/lokipush/pushtarget_test.go
@@ -85,7 +85,7 @@ func TestLokiPushTarget(t *testing.T) {
BatchSize: 100 * 1024,
}
m := client.NewMetrics(prometheus.DefaultRegisterer)
- pc, err := client.New(m, ccfg, 0, 0, logger)
+ pc, err := client.New(m, ccfg, 0, 0, false, logger)
require.NoError(t, err)
defer pc.Stop()
diff --git a/docs/sources/clients/promtail/configuration.md b/docs/sources/clients/promtail/configuration.md
index 7f4734fe7d5d3..0178021938af1 100644
--- a/docs/sources/clients/promtail/configuration.md
+++ b/docs/sources/clients/promtail/configuration.md
@@ -1942,8 +1942,10 @@ The optional `limits_config` block configures global limits for this instance of
# 0 means it is disabled.
[max_streams: <int> | default = 0]
-Maximum log line byte size allowed without dropping. Example: 256kb, 2M. 0 to disable.
+# Maximum log line byte size allowed without dropping. Example: 256kb, 2M. 0 to disable.
[max_line_size: <int> | default = 0]
+# Whether to truncate lines that exceed max_line_size. No effect if max_line_size is disabled
+[max_line_size_truncate: <bool> | default = false]
```
## target_config
|
promtail
|
Add `max-line-size-truncate` (#8233)
|
833bf0def6a07e2f58996f54b4b983858750e3e3
|
2024-10-04 21:14:27
|
Christian Haudum
|
fix(logql): Fix panic in json parsing when using empty array index (#14393)
| false
|
diff --git a/go.mod b/go.mod
index 32cae2cba59dc..c003fd8813f93 100644
--- a/go.mod
+++ b/go.mod
@@ -125,7 +125,7 @@ require (
github.com/efficientgo/core v1.0.0-rc.3
github.com/fsnotify/fsnotify v1.7.0
github.com/gogo/googleapis v1.4.1
- github.com/grafana/jsonparser v0.0.0-20240425183733-ea80629e1a32
+ github.com/grafana/jsonparser v0.0.0-20241004153430-023329977675
github.com/grafana/loki/pkg/push v0.0.0-20240924133635-758364c7775f
github.com/hashicorp/golang-lru/v2 v2.0.7
github.com/hashicorp/raft v1.7.1
diff --git a/go.sum b/go.sum
index 00afdeb6e1231..ce616ba23be67 100644
--- a/go.sum
+++ b/go.sum
@@ -1052,8 +1052,8 @@ github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85 h1:xLuzPoOzdfNb/RF/I
github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85/go.mod h1:crI9WX6p0IhrqB+DqIUHulRW853PaNFf7o4UprV//3I=
github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56 h1:X8IKQ0wu40wpvYcKfBcc5T4QnhdQjUhtUtB/1CY89lE=
github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56/go.mod h1:PGk3RjYHpxMM8HFPhKKo+vve3DdlPUELZLSDEFehPuU=
-github.com/grafana/jsonparser v0.0.0-20240425183733-ea80629e1a32 h1:NznuPwItog+rwdVg8hAuGKP29ndRSzJAwhxKldkP8oQ=
-github.com/grafana/jsonparser v0.0.0-20240425183733-ea80629e1a32/go.mod h1:796sq+UcONnSlzA3RtlBZ+b/hrerkZXiEmO8oMjyRwY=
+github.com/grafana/jsonparser v0.0.0-20241004153430-023329977675 h1:U94jQ2TQr1m3HNyE8efSdyaBbDrdPaWImXyenuKZ/nw=
+github.com/grafana/jsonparser v0.0.0-20241004153430-023329977675/go.mod h1:796sq+UcONnSlzA3RtlBZ+b/hrerkZXiEmO8oMjyRwY=
github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe h1:yIXAAbLswn7VNWBIvM71O2QsgfgW9fRXZNR0DXe6pDU=
github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
github.com/grafana/pyroscope-go/godeltaprof v0.1.8 h1:iwOtYXeeVSAeYefJNaxDytgjKtUuKQbJqgAIjlnicKg=
diff --git a/pkg/logql/log/parser_test.go b/pkg/logql/log/parser_test.go
index 28989b7cb5fe8..fd08467af8586 100644
--- a/pkg/logql/log/parser_test.go
+++ b/pkg/logql/log/parser_test.go
@@ -368,6 +368,26 @@ func TestJSONExpressionParser(t *testing.T) {
labels.FromStrings("param", "1"),
NoParserHints(),
},
+ {
+ "object element not present",
+ testLine,
+ []LabelExtractionExpr{
+ NewLabelExtractionExpr("undefined", `pod[""]`),
+ },
+ labels.EmptyLabels(),
+ labels.FromStrings("undefined", ""),
+ NoParserHints(),
+ },
+ {
+ "accessing invalid array index",
+ testLine,
+ []LabelExtractionExpr{
+ NewLabelExtractionExpr("param", `pod.deployment.params[""]`),
+ },
+ labels.EmptyLabels(),
+ labels.FromStrings("param", ""),
+ NoParserHints(),
+ },
{
"array string element",
testLine,
diff --git a/vendor/github.com/grafana/jsonparser/parser.go b/vendor/github.com/grafana/jsonparser/parser.go
index 5df2a463dcee3..9958ab4ee531d 100644
--- a/vendor/github.com/grafana/jsonparser/parser.go
+++ b/vendor/github.com/grafana/jsonparser/parser.go
@@ -512,10 +512,10 @@ func EachKey(data []byte, cb func(int, []byte, ValueType, error), paths ...[]str
}
for pi, p := range paths {
- if len(p) < level+1 || pathFlags[pi] || p[level][0] != '[' || !sameTree(p, pathsBuf[:level]) {
+ if len(p) < level+1 || pathFlags[pi] || p[level] == "" || p[level][0] != '[' || !sameTree(p, pathsBuf[:level]) {
continue
}
- if len(p[level]) >= 2 {
+ if len(p[level]) > 2 {
aIdx, _ := strconv.Atoi(p[level][1 : len(p[level])-1])
arrIdxFlags[aIdx] = x
pIdxFlags[pi] = true
@@ -712,12 +712,10 @@ func WriteToBuffer(buffer []byte, str string) int {
}
/*
-
Del - Receives existing data structure, path to delete.
Returns:
`data` - return modified data
-
*/
func Delete(data []byte, keys ...string) []byte {
lk := len(keys)
@@ -798,13 +796,11 @@ func Delete(data []byte, keys ...string) []byte {
}
/*
-
Set - Receives existing data structure, path to set, and data to set at that key.
Returns:
`value` - modified byte array
`err` - On any parsing error
-
*/
func Set(data []byte, setValue []byte, keys ...string) (value []byte, err error) {
// ensure keys are set
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 4e50c0c98bded..bc92b7462c8e2 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1037,7 +1037,7 @@ github.com/grafana/go-gelf/v2/gelf
# github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56
## explicit; go 1.18
github.com/grafana/gomemcache/memcache
-# github.com/grafana/jsonparser v0.0.0-20240425183733-ea80629e1a32
+# github.com/grafana/jsonparser v0.0.0-20241004153430-023329977675
## explicit; go 1.13
github.com/grafana/jsonparser
# github.com/grafana/loki/pkg/push v0.0.0-20240924133635-758364c7775f => ./pkg/push
|
fix
|
Fix panic in json parsing when using empty array index (#14393)
|
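For orientation, a small sketch against the patched `github.com/grafana/jsonparser` API — the `EachKey` signature is taken from the hunk above, while the JSON document and path are illustrative (the exact paths LogQL generates for expressions such as `params[""]` live in `pkg/logql/log`):

```go
package main

import (
	"fmt"

	"github.com/grafana/jsonparser"
)

func main() {
	data := []byte(`{"pod": {"deployment": {"params": ["1", "2"]}}}`)

	// An empty path element previously reached p[level][0] inside
	// EachKey's array handling and could panic with an index out of
	// range; the added p[level] == "" guard skips it, so the callback
	// is simply never invoked for this path and nothing is printed.
	paths := [][]string{
		{"pod", "deployment", "params", ""},
	}
	jsonparser.EachKey(data, func(idx int, value []byte, vt jsonparser.ValueType, err error) {
		fmt.Println(idx, string(value), vt, err)
	}, paths...)
}
```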
fa6ef0a2caeeb4d31700287e9096e5f2c3c3a0d4
|
2025-01-17 19:19:37
|
Zero King
|
docs: Fix JSON parser example (#10527)
| false
|
diff --git a/docs/sources/query/log_queries/_index.md b/docs/sources/query/log_queries/_index.md
index 52650aa753298..dd28255bde0f1 100644
--- a/docs/sources/query/log_queries/_index.md
+++ b/docs/sources/query/log_queries/_index.md
@@ -340,6 +340,8 @@ The **json** parser operates in two modes:
"request_method" => "GET"
"request_host" => "foo.grafana.net"
"request_size" => "55"
+ "request_headers_Accept" => "*/*"
+ "request_headers_User_Agent" => "curl/7.68.0"
"response_status" => "401"
"response_size" => "228"
"response_latency_seconds" => "6.031"
|
docs
|
Fix JSON parser example (#10527)
|
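The two added lines illustrate how the parser flattens nested objects: path segments are joined with `_`, and characters that are invalid in label names (the `-` in `User-Agent`) become `_`. A toy Go sketch of that flattening rule — an illustration of the documented output, not Loki's actual sanitizer:

```go
package main

import (
	"fmt"
	"strings"
	"unicode"
)

// flattenKey joins nested JSON key segments with "_" and replaces any
// character that is not a letter, digit, or underscore with "_".
func flattenKey(parts ...string) string {
	joined := strings.Join(parts, "_")
	return strings.Map(func(r rune) rune {
		if r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r) {
			return r
		}
		return '_'
	}, joined)
}

func main() {
	fmt.Println(flattenKey("request", "headers", "User-Agent")) // request_headers_User_Agent
}
```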
9a1fe95c11ef731a7377a2622a3e0ffc54792345
|
2024-11-07 19:20:02
|
George Robinson
|
chore: remove initialization of RF-1 client (#14810)
| false
|
diff --git a/docs/sources/shared/configuration.md b/docs/sources/shared/configuration.md
index 612946add4aeb..8714ea6314e92 100644
--- a/docs/sources/shared/configuration.md
+++ b/docs/sources/shared/configuration.md
@@ -133,11 +133,6 @@ Pass the `-config.expand-env` flag at the command line to enable this way of set
# the querier.
[ingester_client: <ingester_client>]
-# The ingester_client block configures how the distributor will connect to
-# ingesters. Only appropriate when running all components, the distributor, or
-# the querier.
-[ingester_rf1_client: <ingester_client>]
-
# The ingester block configures the ingester and how the ingester will register
# itself to a key value store.
[ingester: <ingester>]
@@ -308,7 +303,7 @@ pattern_ingester:
# Configures how the gRPC connection to ingesters work as a client.
# The CLI flags prefix for this block configuration is:
- # bloom-build.builder.grpc
+ # pattern-ingester.client
[grpc_client_config: <grpc_client>]
# How many flushes can happen concurrently from each stream.
@@ -740,6 +735,8 @@ metastore_client:
[address: <string> | default = "localhost:9095"]
# Configures the gRPC client used to communicate with the metastore.
+ # The CLI flags prefix for this block configuration is:
+ # metastore.grpc-client-config
[grpc_client_config: <grpc_client>]
kafka_config:
@@ -1260,7 +1257,7 @@ builder:
# The grpc_client block configures the gRPC client used to communicate between
# a client and server component in Loki.
# The CLI flags prefix for this block configuration is:
- # bloom-gateway-client.grpc
+ # bloom-build.builder.grpc
[grpc_config: <grpc_client>]
# Hostname (and port) of the bloom planner
@@ -1299,7 +1296,8 @@ client:
# The grpc_client block configures the gRPC client used to communicate between
# a client and server component in Loki.
- # The CLI flags prefix for this block configuration is: bigtable
+ # The CLI flags prefix for this block configuration is:
+ # bloom-gateway-client.grpc
[grpc_client_config: <grpc_client>]
results_cache:
@@ -2508,20 +2506,20 @@ The `frontend_worker` configures the worker - running within the Loki querier -
# Configures the querier gRPC client used to communicate with the
# query-frontend. This can't be used in conjunction with 'grpc_client_config'.
-# The CLI flags prefix for this block configuration is: querier.frontend-client
+# The CLI flags prefix for this block configuration is:
+# querier.frontend-grpc-client
[query_frontend_grpc_client: <grpc_client>]
# Configures the querier gRPC client used to communicate with the query-frontend
# and with the query-scheduler. This can't be used in conjunction with
# 'query_frontend_grpc_client' or 'query_scheduler_grpc_client'.
-# The CLI flags prefix for this block configuration is:
-# querier.scheduler-grpc-client
+# The CLI flags prefix for this block configuration is: querier.frontend-client
[grpc_client_config: <grpc_client>]
# Configures the querier gRPC client used to communicate with the
# query-scheduler. This can't be used in conjunction with 'grpc_client_config'.
# The CLI flags prefix for this block configuration is:
-# metastore.grpc-client-config
+# querier.scheduler-grpc-client
[query_scheduler_grpc_client: <grpc_client>]
```
@@ -2592,82 +2590,82 @@ The `grpc_client` block configures the gRPC client used to communicate between a
```yaml
# gRPC client max receive message size (bytes).
-# CLI flag: -boltdb.shipper.index-gateway-client.grpc.grpc-max-recv-msg-size
+# CLI flag: -<prefix>.grpc-max-recv-msg-size
[max_recv_msg_size: <int> | default = 104857600]
# gRPC client max send message size (bytes).
-# CLI flag: -boltdb.shipper.index-gateway-client.grpc.grpc-max-send-msg-size
+# CLI flag: -<prefix>.grpc-max-send-msg-size
[max_send_msg_size: <int> | default = 104857600]
# Use compression when sending messages. Supported values are: 'gzip', 'snappy'
# and '' (disable compression)
-# CLI flag: -boltdb.shipper.index-gateway-client.grpc.grpc-compression
+# CLI flag: -<prefix>.grpc-compression
[grpc_compression: <string> | default = ""]
# Rate limit for gRPC client; 0 means disabled.
-# CLI flag: -boltdb.shipper.index-gateway-client.grpc.grpc-client-rate-limit
+# CLI flag: -<prefix>.grpc-client-rate-limit
[rate_limit: <float> | default = 0]
# Rate limit burst for gRPC client.
-# CLI flag: -boltdb.shipper.index-gateway-client.grpc.grpc-client-rate-limit-burst
+# CLI flag: -<prefix>.grpc-client-rate-limit-burst
[rate_limit_burst: <int> | default = 0]
# Enable backoff and retry when we hit rate limits.
-# CLI flag: -boltdb.shipper.index-gateway-client.grpc.backoff-on-ratelimits
+# CLI flag: -<prefix>.backoff-on-ratelimits
[backoff_on_ratelimits: <boolean> | default = false]
backoff_config:
# Minimum delay when backing off.
- # CLI flag: -boltdb.shipper.index-gateway-client.grpc.backoff-min-period
+ # CLI flag: -<prefix>.backoff-min-period
[min_period: <duration> | default = 100ms]
# Maximum delay when backing off.
- # CLI flag: -boltdb.shipper.index-gateway-client.grpc.backoff-max-period
+ # CLI flag: -<prefix>.backoff-max-period
[max_period: <duration> | default = 10s]
# Number of times to backoff and retry before failing.
- # CLI flag: -boltdb.shipper.index-gateway-client.grpc.backoff-retries
+ # CLI flag: -<prefix>.backoff-retries
[max_retries: <int> | default = 10]
# Initial stream window size. Values less than the default are not supported and
# are ignored. Setting this to a value other than the default disables the BDP
# estimator.
-# CLI flag: -boltdb.shipper.index-gateway-client.grpc.initial-stream-window-size
+# CLI flag: -<prefix>.initial-stream-window-size
[initial_stream_window_size: <int> | default = 63KiB1023B]
# Initial connection window size. Values less than the default are not supported
# and are ignored. Setting this to a value other than the default disables the
# BDP estimator.
-# CLI flag: -boltdb.shipper.index-gateway-client.grpc.initial-connection-window-size
+# CLI flag: -<prefix>.initial-connection-window-size
[initial_connection_window_size: <int> | default = 63KiB1023B]
# Enable TLS in the gRPC client. This flag needs to be enabled when any other
# TLS flag is set. If set to false, insecure connection to gRPC server will be
# used.
-# CLI flag: -boltdb.shipper.index-gateway-client.grpc.tls-enabled
+# CLI flag: -<prefix>.tls-enabled
[tls_enabled: <boolean> | default = false]
# Path to the client certificate, which will be used for authenticating with the
# server. Also requires the key path to be configured.
-# CLI flag: -boltdb.shipper.index-gateway-client.grpc.tls-cert-path
+# CLI flag: -<prefix>.tls-cert-path
[tls_cert_path: <string> | default = ""]
# Path to the key for the client certificate. Also requires the client
# certificate to be configured.
-# CLI flag: -boltdb.shipper.index-gateway-client.grpc.tls-key-path
+# CLI flag: -<prefix>.tls-key-path
[tls_key_path: <string> | default = ""]
# Path to the CA certificates to validate server certificate against. If not
# set, the host's root CA certificates are used.
-# CLI flag: -boltdb.shipper.index-gateway-client.grpc.tls-ca-path
+# CLI flag: -<prefix>.tls-ca-path
[tls_ca_path: <string> | default = ""]
# Override the expected name on the server certificate.
-# CLI flag: -boltdb.shipper.index-gateway-client.grpc.tls-server-name
+# CLI flag: -<prefix>.tls-server-name
[tls_server_name: <string> | default = ""]
# Skip validating server certificate.
-# CLI flag: -boltdb.shipper.index-gateway-client.grpc.tls-insecure-skip-verify
+# CLI flag: -<prefix>.tls-insecure-skip-verify
[tls_insecure_skip_verify: <boolean> | default = false]
# Override the default cipher suite list (separated by commas). Allowed values:
@@ -2700,27 +2698,27 @@ backoff_config:
# - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA
# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256
# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256
-# CLI flag: -boltdb.shipper.index-gateway-client.grpc.tls-cipher-suites
+# CLI flag: -<prefix>.tls-cipher-suites
[tls_cipher_suites: <string> | default = ""]
# Override the default minimum TLS version. Allowed values: VersionTLS10,
# VersionTLS11, VersionTLS12, VersionTLS13
-# CLI flag: -boltdb.shipper.index-gateway-client.grpc.tls-min-version
+# CLI flag: -<prefix>.tls-min-version
[tls_min_version: <string> | default = ""]
# The maximum amount of time to establish a connection. A value of 0 means
# default gRPC client connect timeout and backoff.
-# CLI flag: -boltdb.shipper.index-gateway-client.grpc.connect-timeout
+# CLI flag: -<prefix>.connect-timeout
[connect_timeout: <duration> | default = 5s]
# Initial backoff delay after first connection failure. Only relevant if
# ConnectTimeout > 0.
-# CLI flag: -boltdb.shipper.index-gateway-client.grpc.connect-backoff-base-delay
+# CLI flag: -<prefix>.connect-backoff-base-delay
[connect_backoff_base_delay: <duration> | default = 1s]
# Maximum backoff delay when establishing a connection. Only relevant if
# ConnectTimeout > 0.
-# CLI flag: -boltdb.shipper.index-gateway-client.grpc.connect-backoff-max-delay
+# CLI flag: -<prefix>.connect-backoff-max-delay
[connect_backoff_max_delay: <duration> | default = 5s]
```
@@ -5409,8 +5407,7 @@ bigtable:
# The grpc_client block configures the gRPC client used to communicate between
# a client and server component in Loki.
- # The CLI flags prefix for this block configuration is:
- # boltdb.shipper.index-gateway-client.grpc
+ # The CLI flags prefix for this block configuration is: bigtable
[grpc_client_config: <grpc_client>]
# If enabled, once a tables info is fetched, it is cached.
@@ -5704,7 +5701,7 @@ boltdb_shipper:
# The grpc_client block configures the gRPC client used to communicate
# between a client and server component in Loki.
# The CLI flags prefix for this block configuration is:
- # tsdb.shipper.index-gateway-client.grpc
+ # boltdb.shipper.index-gateway-client.grpc
[grpc_client_config: <grpc_client>]
# Hostname or IP of the Index Gateway gRPC server running in simple mode.
@@ -5759,7 +5756,7 @@ tsdb_shipper:
# The grpc_client block configures the gRPC client used to communicate
# between a client and server component in Loki.
# The CLI flags prefix for this block configuration is:
- # querier.frontend-grpc-client
+ # tsdb.shipper.index-gateway-client.grpc
[grpc_client_config: <grpc_client>]
# Hostname or IP of the Index Gateway gRPC server running in simple mode.
diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go
index 7a3d20df2139e..f9c92ea010c26 100644
--- a/pkg/loki/loki.go
+++ b/pkg/loki/loki.go
@@ -92,7 +92,6 @@ type Config struct {
Ruler ruler.Config `yaml:"ruler,omitempty"`
RulerStorage rulestore.Config `yaml:"ruler_storage,omitempty" doc:"hidden"`
IngesterClient ingester_client.Config `yaml:"ingester_client,omitempty"`
- IngesterRF1Client ingester_client.Config `yaml:"ingester_rf1_client,omitempty"`
Ingester ingester.Config `yaml:"ingester,omitempty"`
Pattern pattern.Config `yaml:"pattern_ingester,omitempty"`
IndexGateway indexgateway.Config `yaml:"index_gateway"`
@@ -357,7 +356,6 @@ type Loki struct {
distributor *distributor.Distributor
Ingester ingester.Interface
IngesterRF1 ingester_rf1.Interface
- IngesterRF1RingClient *ingester_rf1.RingClient
PatternIngester *pattern.Ingester
PatternRingClient pattern.RingClient
Querier querier.Querier
|
chore
|
remove initialization of RF-1 client (#14810)
|
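Much of this diff re-keys the generated docs because one `grpc_client` block is registered under a different CLI prefix by each component, now documented as `-<prefix>.…`. A hypothetical Go sketch of that registration pattern (`GRPCClientConfig` is a stand-in type; Loki's real block follows the same `RegisterFlagsWithPrefix` convention):

```go
package main

import "flag"

// GRPCClientConfig stands in for the shared grpc_client block: each
// component registers the same struct under its own flag prefix.
type GRPCClientConfig struct {
	MaxRecvMsgSize int
}

func (c *GRPCClientConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
	f.IntVar(&c.MaxRecvMsgSize, prefix+".grpc-max-recv-msg-size", 100<<20,
		"gRPC client max receive message size (bytes).")
}

func main() {
	var bigtable, bloomGateway GRPCClientConfig
	// The same block yields -bigtable.grpc-max-recv-msg-size and
	// -bloom-gateway-client.grpc.grpc-max-recv-msg-size respectively.
	bigtable.RegisterFlagsWithPrefix("bigtable", flag.CommandLine)
	bloomGateway.RegisterFlagsWithPrefix("bloom-gateway-client.grpc", flag.CommandLine)
	flag.Parse()
}
```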
6354deda90a9430856447e27123b3a33fd1b77a0
|
2024-09-27 16:43:18
|
George Robinson
|
fix: missing dep PartitionRing for Ingester (#14292)
| false
|
diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go
index 20ea802f12ebb..84af0a73504f8 100644
--- a/pkg/loki/loki.go
+++ b/pkg/loki/loki.go
@@ -719,7 +719,7 @@ func (t *Loki) setupModuleManager() error {
TenantConfigs: {RuntimeConfig},
Distributor: {Ring, Server, Overrides, TenantConfigs, PatternRingClient, PatternIngesterTee, Analytics, PartitionRing},
Store: {Overrides, IndexGatewayRing},
- Ingester: {Store, Server, MemberlistKV, TenantConfigs, Analytics},
+ Ingester: {Store, Server, MemberlistKV, TenantConfigs, Analytics, PartitionRing},
Querier: {Store, Ring, Server, IngesterQuerier, PatternRingClient, Overrides, Analytics, CacheGenerationLoader, QuerySchedulerRing},
QueryFrontendTripperware: {Server, Overrides, TenantConfigs},
QueryFrontend: {QueryFrontendTripperware, Analytics, CacheGenerationLoader, QuerySchedulerRing},
|
fix
|
missing dep PartitionRing for Ingester (#14292)
|
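The edited map drives the module manager, which initializes a module only after its dependencies; without `PartitionRing` listed, `Ingester` could be started before the partition ring existed. A toy sketch of the ordering logic (simplified; dskit's real manager also constructs and starts services):

```go
package main

import "fmt"

// initOrder walks the dependency map depth-first so that every module
// appears after all of its dependencies. No cycle detection: the seen
// map only prevents repeat visits.
func initOrder(deps map[string][]string, target string, seen map[string]bool, out *[]string) {
	if seen[target] {
		return
	}
	seen[target] = true
	for _, dep := range deps[target] {
		initOrder(deps, dep, seen, out)
	}
	*out = append(*out, target)
}

func main() {
	deps := map[string][]string{ // abridged from the hunk above
		"Ingester":      {"Store", "Server", "PartitionRing"},
		"PartitionRing": {"MemberlistKV"},
	}
	var order []string
	initOrder(deps, "Ingester", map[string]bool{}, &order)
	fmt.Println(order) // dependencies first, Ingester last
}
```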
79e0d96978fcfa218a37b3731d5135599962ff6b
|
2022-12-13 22:31:40
|
Tobias Wolf
|
helm: Decouple the Canary from self-monitoring (#7757)
| false
|
diff --git a/docs/sources/installation/helm/concepts.md b/docs/sources/installation/helm/concepts.md
index c4d2adf7e7ad0..9a2191640ddae 100644
--- a/docs/sources/installation/helm/concepts.md
+++ b/docs/sources/installation/helm/concepts.md
@@ -25,7 +25,7 @@ This chart includes dashboards for monitoring Loki. These require the scrape con
## Canary
-This chart installs the [canary](../../../operations/loki-canary) and its alerts by default. This is another tool to verify the Loki deployment is in a healthy state. It can be disabled with `monitoring.selfMonitoring.lokiCanary.enabled=false`.
+This chart installs the [canary](../../../operations/loki-canary) and its alerts by default. This is another tool to verify the Loki deployment is in a healthy state. It can be disabled with `monitoring.lokiCanary.enabled=false`.
## Gateway
diff --git a/docs/sources/installation/helm/monitor-and-alert/index.md b/docs/sources/installation/helm/monitor-and-alert/index.md
index cde667a0e4e3a..c33082133acbc 100644
--- a/docs/sources/installation/helm/monitor-and-alert/index.md
+++ b/docs/sources/installation/helm/monitor-and-alert/index.md
@@ -119,8 +119,6 @@ Rules and alerts are automatically deployed.
...
selfMonitoring:
enabled: true
- lokiCanary:
- enabled: false
logsInstance:
clients:
- url: <logs remote write endpoint>
@@ -130,7 +128,9 @@ Rules and alerts are automatically deployed.
key: username
password:
name: primary-credentials-logs
- key: password
+ key: password
+ lokiCanary:
+ enabled: false
```
5. Install the `Loki meta-monitoring` connection on Grafana Cloud.
diff --git a/docs/sources/installation/helm/reference.md b/docs/sources/installation/helm/reference.md
index 9c97481811dd0..c84a51484604d 100644
--- a/docs/sources/installation/helm/reference.md
+++ b/docs/sources/installation/helm/reference.md
@@ -1730,268 +1730,268 @@ null
</td>
</tr>
<tr>
- <td>monitoring.rules.additionalGroups</td>
- <td>list</td>
- <td>Additional groups to add to the rules file</td>
+ <td>monitoring.lokiCanary.annotations</td>
+ <td>object</td>
+ <td>Additional annotations for the `loki-canary` Daemonset</td>
<td><pre lang="json">
-[]
+{}
</pre>
</td>
</tr>
<tr>
- <td>monitoring.rules.alerting</td>
+ <td>monitoring.lokiCanary.enabled</td>
<td>bool</td>
- <td>Include alerting rules</td>
+ <td></td>
<td><pre lang="json">
true
</pre>
</td>
</tr>
<tr>
- <td>monitoring.rules.annotations</td>
- <td>object</td>
- <td>Additional annotations for the rules PrometheusRule resource</td>
+ <td>monitoring.lokiCanary.extraArgs</td>
+ <td>list</td>
+ <td>Additional CLI arguments for the `loki-canary` command</td>
<td><pre lang="json">
-{}
+[]
</pre>
</td>
</tr>
<tr>
- <td>monitoring.rules.enabled</td>
- <td>bool</td>
- <td>If enabled, create PrometheusRule resource with Loki recording rules</td>
+ <td>monitoring.lokiCanary.extraEnv</td>
+ <td>list</td>
+ <td>Environment variables to add to the canary pods</td>
<td><pre lang="json">
-true
+[]
</pre>
</td>
</tr>
<tr>
- <td>monitoring.rules.labels</td>
- <td>object</td>
- <td>Additional labels for the rules PrometheusRule resource</td>
+ <td>monitoring.lokiCanary.extraEnvFrom</td>
+ <td>list</td>
+ <td>Environment variables from secrets or configmaps to add to the canary pods</td>
<td><pre lang="json">
-{}
+[]
</pre>
</td>
</tr>
<tr>
- <td>monitoring.rules.namespace</td>
- <td>string</td>
- <td>Alternative namespace to create recording rules PrometheusRule resource in</td>
+ <td>monitoring.lokiCanary.image</td>
+ <td>object</td>
+ <td>Image to use for loki canary</td>
<td><pre lang="json">
-null
+{
+ "pullPolicy": "IfNotPresent",
+ "registry": "docker.io",
+ "repository": "grafana/loki-canary",
+ "tag": null
+}
</pre>
</td>
</tr>
<tr>
- <td>monitoring.selfMonitoring.enabled</td>
- <td>bool</td>
- <td></td>
+ <td>monitoring.lokiCanary.image.pullPolicy</td>
+ <td>string</td>
+ <td>Docker image pull policy</td>
<td><pre lang="json">
-true
+"IfNotPresent"
</pre>
</td>
</tr>
<tr>
- <td>monitoring.selfMonitoring.grafanaAgent.annotations</td>
- <td>object</td>
- <td>Grafana Agent annotations</td>
+ <td>monitoring.lokiCanary.image.registry</td>
+ <td>string</td>
+ <td>The Docker registry</td>
<td><pre lang="json">
-{}
+"docker.io"
</pre>
</td>
</tr>
<tr>
- <td>monitoring.selfMonitoring.grafanaAgent.enableConfigReadAPI</td>
- <td>bool</td>
- <td>Enable the config read api on port 8080 of the agent</td>
+ <td>monitoring.lokiCanary.image.repository</td>
+ <td>string</td>
+ <td>Docker image repository</td>
<td><pre lang="json">
-false
+"grafana/loki-canary"
</pre>
</td>
</tr>
<tr>
- <td>monitoring.selfMonitoring.grafanaAgent.installOperator</td>
- <td>bool</td>
- <td>Controls whether to install the Grafana Agent Operator and its CRDs. Note that helm will not install CRDs if this flag is enabled during an upgrade. In that case install the CRDs manually from https://github.com/grafana/agent/tree/main/production/operator/crds</td>
+ <td>monitoring.lokiCanary.image.tag</td>
+ <td>string</td>
+ <td>Overrides the image tag whose default is the chart's appVersion</td>
<td><pre lang="json">
-true
+null
</pre>
</td>
</tr>
<tr>
- <td>monitoring.selfMonitoring.grafanaAgent.labels</td>
+ <td>monitoring.lokiCanary.nodeSelector</td>
<td>object</td>
- <td>Additional Grafana Agent labels</td>
+ <td>Node selector for canary pods</td>
<td><pre lang="json">
{}
</pre>
</td>
</tr>
<tr>
- <td>monitoring.selfMonitoring.grafanaAgent.namespace</td>
- <td>string</td>
- <td>Alternative namespace for Grafana Agent resources</td>
- <td><pre lang="json">
-null
-</pre>
-</td>
- </tr>
- <tr>
- <td>monitoring.selfMonitoring.logsInstance.annotations</td>
+ <td>monitoring.lokiCanary.resources</td>
<td>object</td>
- <td>LogsInstance annotations</td>
+ <td>Resource requests and limits for the canary</td>
<td><pre lang="json">
{}
</pre>
</td>
</tr>
<tr>
- <td>monitoring.selfMonitoring.logsInstance.clients</td>
- <td>string</td>
- <td>Additional clients for remote write</td>
+ <td>monitoring.lokiCanary.tolerations</td>
+ <td>list</td>
+ <td>Tolerations for canary pods</td>
<td><pre lang="json">
-null
+[]
</pre>
</td>
</tr>
<tr>
- <td>monitoring.selfMonitoring.logsInstance.labels</td>
- <td>object</td>
- <td>Additional LogsInstance labels</td>
+ <td>monitoring.rules.additionalGroups</td>
+ <td>list</td>
+ <td>Additional groups to add to the rules file</td>
<td><pre lang="json">
-{}
+[]
</pre>
</td>
</tr>
<tr>
- <td>monitoring.selfMonitoring.logsInstance.namespace</td>
- <td>string</td>
- <td>Alternative namespace for LogsInstance resources</td>
+ <td>monitoring.rules.alerting</td>
+ <td>bool</td>
+ <td>Include alerting rules</td>
<td><pre lang="json">
-null
+true
</pre>
</td>
</tr>
<tr>
- <td>monitoring.selfMonitoring.lokiCanary.annotations</td>
+ <td>monitoring.rules.annotations</td>
<td>object</td>
- <td>Additional annotations for the `loki-canary` Daemonset</td>
+ <td>Additional annotations for the rules PrometheusRule resource</td>
<td><pre lang="json">
{}
</pre>
</td>
</tr>
<tr>
- <td>monitoring.selfMonitoring.lokiCanary.enabled</td>
+ <td>monitoring.rules.enabled</td>
<td>bool</td>
- <td></td>
+ <td>If enabled, create PrometheusRule resource with Loki recording rules</td>
<td><pre lang="json">
true
</pre>
</td>
</tr>
<tr>
- <td>monitoring.selfMonitoring.lokiCanary.extraArgs</td>
- <td>list</td>
- <td>Additional CLI arguments for the `loki-canary' command</td>
+ <td>monitoring.rules.labels</td>
+ <td>object</td>
+ <td>Additional labels for the rules PrometheusRule resource</td>
<td><pre lang="json">
-[]
+{}
</pre>
</td>
</tr>
<tr>
- <td>monitoring.selfMonitoring.lokiCanary.extraEnv</td>
- <td>list</td>
- <td>Environment variables to add to the canary pods</td>
+ <td>monitoring.rules.namespace</td>
+ <td>string</td>
+ <td>Alternative namespace to create recording rules PrometheusRule resource in</td>
<td><pre lang="json">
-[]
+null
</pre>
</td>
</tr>
<tr>
- <td>monitoring.selfMonitoring.lokiCanary.extraEnvFrom</td>
- <td>list</td>
- <td>Environment variables from secrets or configmaps to add to the canary pods</td>
+ <td>monitoring.selfMonitoring.enabled</td>
+ <td>bool</td>
+ <td></td>
<td><pre lang="json">
-[]
+true
</pre>
</td>
</tr>
<tr>
- <td>monitoring.selfMonitoring.lokiCanary.image</td>
+ <td>monitoring.selfMonitoring.grafanaAgent.annotations</td>
<td>object</td>
- <td>Image to use for loki canary</td>
+ <td>Grafana Agent annotations</td>
<td><pre lang="json">
-{
- "pullPolicy": "IfNotPresent",
- "registry": "docker.io",
- "repository": "grafana/loki-canary",
- "tag": null
-}
+{}
</pre>
</td>
</tr>
<tr>
- <td>monitoring.selfMonitoring.lokiCanary.image.pullPolicy</td>
- <td>string</td>
- <td>Docker image pull policy</td>
+ <td>monitoring.selfMonitoring.grafanaAgent.enableConfigReadAPI</td>
+ <td>bool</td>
+ <td>Enable the config read api on port 8080 of the agent</td>
<td><pre lang="json">
-"IfNotPresent"
+false
</pre>
</td>
</tr>
<tr>
- <td>monitoring.selfMonitoring.lokiCanary.image.registry</td>
- <td>string</td>
- <td>The Docker registry</td>
+ <td>monitoring.selfMonitoring.grafanaAgent.installOperator</td>
+ <td>bool</td>
+ <td>Controls whether to install the Grafana Agent Operator and its CRDs. Note that helm will not install CRDs if this flag is enabled during an upgrade. In that case install the CRDs manually from https://github.com/grafana/agent/tree/main/production/operator/crds</td>
<td><pre lang="json">
-"docker.io"
+true
</pre>
</td>
</tr>
<tr>
- <td>monitoring.selfMonitoring.lokiCanary.image.repository</td>
- <td>string</td>
- <td>Docker image repository</td>
+ <td>monitoring.selfMonitoring.grafanaAgent.labels</td>
+ <td>object</td>
+ <td>Additional Grafana Agent labels</td>
<td><pre lang="json">
-"grafana/loki-canary"
+{}
</pre>
</td>
</tr>
<tr>
- <td>monitoring.selfMonitoring.lokiCanary.image.tag</td>
+ <td>monitoring.selfMonitoring.grafanaAgent.namespace</td>
<td>string</td>
- <td>Overrides the image tag whose default is the chart's appVersion</td>
+ <td>Alternative namespace for Grafana Agent resources</td>
<td><pre lang="json">
null
</pre>
</td>
</tr>
<tr>
- <td>monitoring.selfMonitoring.lokiCanary.nodeSelector</td>
+ <td>monitoring.selfMonitoring.logsInstance.annotations</td>
<td>object</td>
- <td>Node selector for canary pods</td>
+ <td>LogsInstance annotations</td>
<td><pre lang="json">
{}
</pre>
</td>
</tr>
<tr>
- <td>monitoring.selfMonitoring.lokiCanary.resources</td>
+ <td>monitoring.selfMonitoring.logsInstance.clients</td>
+ <td>string</td>
+ <td>Additional clients for remote write</td>
+ <td><pre lang="json">
+null
+</pre>
+</td>
+ </tr>
+ <tr>
+ <td>monitoring.selfMonitoring.logsInstance.labels</td>
<td>object</td>
- <td>Resource requests and limits for the canary</td>
+ <td>Additional LogsInstance labels</td>
<td><pre lang="json">
{}
</pre>
</td>
</tr>
<tr>
- <td>monitoring.selfMonitoring.lokiCanary.tolerations</td>
- <td>list</td>
- <td>Tolerations for canary pods</td>
+ <td>monitoring.selfMonitoring.logsInstance.namespace</td>
+ <td>string</td>
+ <td>Alternative namespace for LogsInstance resources</td>
<td><pre lang="json">
-[]
+null
</pre>
</td>
</tr>
diff --git a/production/helm/loki/CHANGELOG.md b/production/helm/loki/CHANGELOG.md
index 650f2825b469a..4288f806388b6 100644
--- a/production/helm/loki/CHANGELOG.md
+++ b/production/helm/loki/CHANGELOG.md
@@ -11,6 +11,12 @@ Entries should be ordered as follows:
Entries should include a reference to the pull request that introduced the change.
+## 3.7.0
+
+**BREAKING**: Configuration values for Loki Canary moved from `monitoring.selfMonitoring.lokiCanary` to `monitoring.lokiCanary`
+
+- [ENHANCEMENT] Decouple the Loki Canary from the self-monitoring setup, which adds an unnecessary dependency on the Grafana Agent Operator.
+
## 3.6.1
- [BUGFIX] Fix regression that produced empty PrometheusRule alerts resource
diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml
index 4e54ac4587946..a96aa37680d05 100644
--- a/production/helm/loki/Chart.yaml
+++ b/production/helm/loki/Chart.yaml
@@ -4,7 +4,7 @@ name: loki
description: Helm chart for Grafana Loki in simple, scalable mode
type: application
appVersion: 2.7.0
-version: 3.6.1
+version: 3.7.0
home: https://grafana.github.io/helm-charts
sources:
- https://github.com/grafana/loki
diff --git a/production/helm/loki/README.md b/production/helm/loki/README.md
index 7ed7d29c08c7c..e2117d047ea13 100644
--- a/production/helm/loki/README.md
+++ b/production/helm/loki/README.md
@@ -1,6 +1,6 @@
# loki
-  
+  
Helm chart for Grafana Loki in simple, scalable mode
diff --git a/production/helm/loki/ci/ingress-values.yaml b/production/helm/loki/ci/ingress-values.yaml
index b78242f0c1add..23233b487cb9e 100644
--- a/production/helm/loki/ci/ingress-values.yaml
+++ b/production/helm/loki/ci/ingress-values.yaml
@@ -16,8 +16,7 @@ read:
write:
replicas: 1
monitoring:
- selfMonitoring:
- lokiCanary:
- enabled: false
+ lokiCanary:
+ enabled: false
test:
enabled: false
diff --git a/production/helm/loki/templates/loki-canary/_helpers.tpl b/production/helm/loki/templates/loki-canary/_helpers.tpl
index 6ef5064c3cf8a..28ce60d10cc50 100644
--- a/production/helm/loki/templates/loki-canary/_helpers.tpl
+++ b/production/helm/loki/templates/loki-canary/_helpers.tpl
@@ -25,7 +25,7 @@ app.kubernetes.io/component: canary
Docker image name for loki-canary
*/}}
{{- define "loki-canary.image" -}}
-{{- $dict := dict "service" .Values.monitoring.selfMonitoring.lokiCanary.image "global" .Values.global.image "defaultVersion" .Chart.AppVersion -}}
+{{- $dict := dict "service" .Values.monitoring.lokiCanary.image "global" .Values.global.image "defaultVersion" .Chart.AppVersion -}}
{{- include "loki.baseImage" $dict -}}
{{- end -}}
diff --git a/production/helm/loki/templates/loki-canary/daemonset.yaml b/production/helm/loki/templates/loki-canary/daemonset.yaml
index 8540d813c222e..0c7b5a34b42d7 100644
--- a/production/helm/loki/templates/loki-canary/daemonset.yaml
+++ b/production/helm/loki/templates/loki-canary/daemonset.yaml
@@ -1,4 +1,4 @@
-{{- with .Values.monitoring.selfMonitoring.lokiCanary -}}
+{{- with .Values.monitoring.lokiCanary -}}
{{- if .enabled -}}
---
apiVersion: apps/v1
diff --git a/production/helm/loki/templates/loki-canary/service.yaml b/production/helm/loki/templates/loki-canary/service.yaml
index ef12be38e9a4b..5bc2538927ba8 100644
--- a/production/helm/loki/templates/loki-canary/service.yaml
+++ b/production/helm/loki/templates/loki-canary/service.yaml
@@ -1,4 +1,4 @@
-{{- if .Values.monitoring.selfMonitoring.lokiCanary.enabled -}}
+{{- if .Values.monitoring.lokiCanary.enabled -}}
---
apiVersion: v1
kind: Service
diff --git a/production/helm/loki/templates/loki-canary/serviceaccount.yaml b/production/helm/loki/templates/loki-canary/serviceaccount.yaml
index 4c19925161275..5c2973bedf106 100644
--- a/production/helm/loki/templates/loki-canary/serviceaccount.yaml
+++ b/production/helm/loki/templates/loki-canary/serviceaccount.yaml
@@ -1,4 +1,4 @@
-{{- if .Values.monitoring.selfMonitoring.lokiCanary.enabled -}}
+{{- if .Values.monitoring.lokiCanary.enabled -}}
---
apiVersion: v1
kind: ServiceAccount
@@ -7,7 +7,7 @@ metadata:
labels:
{{- include "loki-canary.labels" . | nindent 4 }}
annotations:
- {{- with .Values.monitoring.selfMonitoring.lokiCanary.annotations }}
+ {{- with .Values.monitoring.lokiCanary.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }}
diff --git a/production/helm/loki/templates/tests/test-canary.yaml b/production/helm/loki/templates/tests/test-canary.yaml
index ace291cb0e69e..246fb4dfdc4c3 100644
--- a/production/helm/loki/templates/tests/test-canary.yaml
+++ b/production/helm/loki/templates/tests/test-canary.yaml
@@ -1,5 +1,5 @@
{{- with .Values.test }}
-{{- if and .enabled $.Values.monitoring.selfMonitoring.enabled $.Values.monitoring.selfMonitoring.lokiCanary.enabled }}
+{{- if and .enabled $.Values.monitoring.selfMonitoring.enabled $.Values.monitoring.lokiCanary.enabled }}
---
apiVersion: v1
kind: Pod
diff --git a/production/helm/loki/templates/validate.yaml b/production/helm/loki/templates/validate.yaml
index cd1e85b34edeb..2ea355d0c2563 100644
--- a/production/helm/loki/templates/validate.yaml
+++ b/production/helm/loki/templates/validate.yaml
@@ -2,22 +2,14 @@
{{- fail "Top level 'config' is not allowed. Most common configuration sections are exposed under the `loki` section. If you need to override the whole config, provide the configuration as a string that can contain template expressions under `loki.config`. Alternatively, you can provide the configuration as an external secret." }}
{{- end }}
-{{ with .Values.monitoring.selfMonitoring}}
-
-{{- if and (not .enabled) .lokiCanary.enabled }}
-{{- fail "Loki Canary requires self monitoring to also be enabled"}}
-{{- end }}
-
-{{- if and (not .enabled) $.Values.test.enabled }}
+{{- if and (not .Values.monitoring.selfMonitoring.enabled) .Values.test.enabled }}
{{- fail "Helm test requires self monitoring to be enabled"}}
{{- end }}
-{{- if and (not .lokiCanary.enabled) $.Values.test.enabled }}
+{{- if and (not .Values.monitoring.lokiCanary.enabled) .Values.test.enabled }}
{{- fail "Helm test requires the Loki Canary to be enabled"}}
{{- end }}
-{{- end}}
-
{{- if and .Values.test.enabled (not .Values.test.prometheusAddress) }}
{{- fail "Helm test requires a prometheusAddress for an instance scraping the Loki canary's metrics"}}
{{- end }}
diff --git a/production/helm/loki/values.yaml b/production/helm/loki/values.yaml
index 631905342bf2d..caac7c937c544 100644
--- a/production/helm/loki/values.yaml
+++ b/production/helm/loki/values.yaml
@@ -697,34 +697,34 @@ monitoring:
# -- Additional clients for remote write
clients: null
- # The Loki canary pushes logs to and queries from this loki installation to test
- # that it's working correctly
- lokiCanary:
- enabled: true
- # -- Additional annotations for the `loki-canary` Daemonset
- annotations: {}
- # -- Additional CLI arguments for the `loki-canary' command
- extraArgs: []
- # -- Environment variables to add to the canary pods
- extraEnv: []
- # -- Environment variables from secrets or configmaps to add to the canary pods
- extraEnvFrom: []
- # -- Resource requests and limits for the canary
- resources: {}
- # -- Node selector for canary pods
- nodeSelector: {}
- # -- Tolerations for canary pods
- tolerations: []
- # -- Image to use for loki canary
- image:
- # -- The Docker registry
- registry: docker.io
- # -- Docker image repository
- repository: grafana/loki-canary
- # -- Overrides the image tag whose default is the chart's appVersion
- tag: null
- # -- Docker image pull policy
- pullPolicy: IfNotPresent
+ # The Loki canary pushes logs to and queries from this loki installation to test
+ # that it's working correctly
+ lokiCanary:
+ enabled: true
+ # -- Additional annotations for the `loki-canary` Daemonset
+ annotations: {}
+ # -- Additional CLI arguments for the `loki-canary` command
+ extraArgs: []
+ # -- Environment variables to add to the canary pods
+ extraEnv: []
+ # -- Environment variables from secrets or configmaps to add to the canary pods
+ extraEnvFrom: []
+ # -- Resource requests and limits for the canary
+ resources: {}
+ # -- Node selector for canary pods
+ nodeSelector: {}
+ # -- Tolerations for canary pods
+ tolerations: []
+ # -- Image to use for loki canary
+ image:
+ # -- The Docker registry
+ registry: docker.io
+ # -- Docker image repository
+ repository: grafana/loki-canary
+ # -- Overrides the image tag whose default is the chart's appVersion
+ tag: null
+ # -- Docker image pull policy
+ pullPolicy: IfNotPresent
# Configuration for the write
write:
|
helm
|
Decouple the Canary from self-monitoring (#7757)
|
541e93c157721a0634254b1846ad0f214f8f36df
|
2025-03-11 18:32:04
|
Jennifer Villa
|
docs: Update `approx_topk` documentation (#16223)
| false
|
diff --git a/docs/sources/query/metric_queries.md b/docs/sources/query/metric_queries.md
index 9e00aa8e86b78..309be3913ccd1 100644
--- a/docs/sources/query/metric_queries.md
+++ b/docs/sources/query/metric_queries.md
@@ -156,11 +156,27 @@ Examples:
## Probabilistic aggregation
-The `topk` keyword lets you find the largest 1,000 elements in a data stream by sample size. When `topk` hits the maximum series limit, LogQL also supports using a probable approximation; `approx_topk` is a drop-in replacement when `topk` hits the maximum series limit.
+LogQL's `approx_topk` function provides a probabilistic approximation of `topk`. It is a drop-in replacement for `topk`, useful when `topk` queries time out or hit the maximum series limit; this tends to happen when the list of values being sorted to find the most frequent ones is very large. `approx_topk` also suits cases where a faster, approximate answer is preferred to a slower, more accurate one.
+
+The function is of the form:
```logql
approx_topk(k, <vector expression>)
```
-It is only supported for instant queries and does not support grouping. It is useful when the cardinality of the inner
-vector is too high, for example, when it uses an aggregation by a structured metadata label.
+`approx_topk` is only supported for instant queries. Grouping is also not supported; handle it with an inner `sum by` or `sum without`, though this may not match the behavior of `topk by` exactly.
+
+Under the hood, `approx_topk` is implemented using sharding. The [count-min sketch](https://en.wikipedia.org/wiki/Count%E2%80%93min_sketch) algorithm and a heap are used to approximate the counts for each shard. The accuracy of the approximation depends on the size of the heap, which is defined by Loki's `max_count_min_sketch_heap_size` parameter. Accuracy decreases as `k` approaches the size of the heap (which has a default size of `10,000`).
+
+The expression `approx_topk(k,inner)` becomes
+
+```
+topk(
+ k,
+ eval_cms(
+ __count_min_sketch__(inner, shard=1) ++ __count_min_sketch__(inner, shard=2)...
+ )
+)
+```
+
+`__count_min_sketch__` is calculated for each shard and merged on the frontend. `eval_cms` then iterates through the list of labels and determines the count for each, and `topk` selects the top items.
|
docs
|
Update `approx_topk` documentation (#16223)
|
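For readers unfamiliar with the data structure these docs lean on, here is a generic count-min sketch in Go — a toy illustration of the algorithm, not Loki's implementation:

```go
package main

import (
	"fmt"
	"hash/fnv"
)

// countMinSketch approximates frequencies in sub-linear space. Add
// increments one counter per row; Count takes the minimum across rows,
// so estimates can be too high but never too low.
type countMinSketch struct {
	rows  [][]uint64
	width uint64
}

func newCountMinSketch(depth, width int) *countMinSketch {
	rows := make([][]uint64, depth)
	for i := range rows {
		rows[i] = make([]uint64, width)
	}
	return &countMinSketch{rows: rows, width: uint64(width)}
}

func (c *countMinSketch) hash(s string, row int) uint64 {
	h := fnv.New64a()
	fmt.Fprintf(h, "%d:%s", row, s) // derive a distinct hash per row
	return h.Sum64() % c.width
}

func (c *countMinSketch) Add(s string) {
	for i := range c.rows {
		c.rows[i][c.hash(s, i)]++
	}
}

func (c *countMinSketch) Count(s string) uint64 {
	var min uint64
	for i := range c.rows {
		if v := c.rows[i][c.hash(s, i)]; i == 0 || v < min {
			min = v
		}
	}
	return min
}

func main() {
	cms := newCountMinSketch(4, 1024)
	for _, label := range []string{"a", "a", "b", "a"} {
		cms.Add(label)
	}
	fmt.Println(cms.Count("a"), cms.Count("b")) // 3 1 (approximate)
}
```

Same-shape sketches merge by adding counters element-wise, which is what lets the `__count_min_sketch__ ... ++ ...` expansion above be combined cheaply on the frontend.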
c6e18a2efb60884209b3485240ef8790ff3839e6
|
2023-06-20 19:04:07
|
Dylan Guedes
|
loki: Humanize downloaded file size (bytes format) (#9746)
| false
|
diff --git a/pkg/storage/stores/indexshipper/storage/util.go b/pkg/storage/stores/indexshipper/storage/util.go
index c842b74d4e4f7..ff8563a1619e3 100644
--- a/pkg/storage/stores/indexshipper/storage/util.go
+++ b/pkg/storage/stores/indexshipper/storage/util.go
@@ -9,6 +9,7 @@ import (
"sync"
"time"
+ "github.com/dustin/go-humanize"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
gzip "github.com/klauspost/pgzip"
@@ -88,7 +89,7 @@ func DownloadFileFromStorage(destination string, decompressFile bool, sync bool,
}
if err == nil {
- logger = log.With(logger, "size", fStat.Size())
+ logger = log.With(logger, "size", humanize.Bytes(uint64(fStat.Size())))
}
level.Info(logger).Log("msg", "downloaded file", "total_time", time.Since(start))
|
loki
|
Humanize downloaded file size (bytes format) (#9746)
|
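The effect of the change, shown with `go-humanize` directly (the byte counts are illustrative):

```go
package main

import (
	"fmt"

	"github.com/dustin/go-humanize"
)

func main() {
	// The log line now reads "size=17 MB" instead of "size=16777216".
	fmt.Println(humanize.Bytes(16777216))  // 17 MB (SI units)
	fmt.Println(humanize.IBytes(16777216)) // 16 MiB (binary units)
}
```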
c02068ed17e2eb5037c1817eb2b393dec993db9d
|
2024-11-07 15:36:19
|
George Robinson
|
chore: remove initialization of Querier RF-1 (#14791)
| false
|
diff --git a/docs/sources/shared/configuration.md b/docs/sources/shared/configuration.md
index 234e7a06969ea..612946add4aeb 100644
--- a/docs/sources/shared/configuration.md
+++ b/docs/sources/shared/configuration.md
@@ -114,36 +114,6 @@ Pass the `-config.expand-env` flag at the command line to enable this way of set
# querier.
[querier: <querier>]
-querier_rf1:
- # Enable the RF1 querier. If set, replaces the usual querier with an RF-1
- # querier.
- # CLI flag: -querier-rf1.enabled
- [enabled: <boolean> | default = false]
-
- # Time to wait before sending more than the minimum successful query requests.
- # CLI flag: -querier-rf1.extra-query-delay
- [extra_query_delay: <duration> | default = 0s]
-
- engine:
- # The maximum amount of time to look back for log lines. Used only for
- # instant log queries.
- # CLI flag: -querier-rf1.engine.max-lookback-period
- [max_look_back_period: <duration> | default = 30s]
-
- # The maximum number of labels the heap of a topk query using a count min
- # sketch can track.
- # CLI flag: -querier-rf1.engine.max-count-min-sketch-heap-size
- [max_count_min_sketch_heap_size: <int> | default = 10000]
-
- # The maximum number of queries that can be simultaneously processed by the
- # querier.
- # CLI flag: -querier-rf1.max-concurrent
- [max_concurrent: <int> | default = 4]
-
- # When true, querier limits sent via a header are enforced.
- # CLI flag: -querier-rf1.per-request-limits-enabled
- [per_request_limits_enabled: <boolean> | default = false]
-
# The query_scheduler block configures the Loki query scheduler. When configured
# it separates the tenant query queues from the query-frontend.
[query_scheduler: <query_scheduler>]
diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go
index 52af53b1b0558..7a3d20df2139e 100644
--- a/pkg/loki/loki.go
+++ b/pkg/loki/loki.go
@@ -51,7 +51,6 @@ import (
"github.com/grafana/loki/v3/pkg/lokifrontend/frontend/transport"
"github.com/grafana/loki/v3/pkg/pattern"
"github.com/grafana/loki/v3/pkg/querier"
- querierrf1 "github.com/grafana/loki/v3/pkg/querier-rf1"
"github.com/grafana/loki/v3/pkg/querier/queryrange"
"github.com/grafana/loki/v3/pkg/querier/queryrange/queryrangebase"
"github.com/grafana/loki/v3/pkg/querier/worker"
@@ -87,7 +86,6 @@ type Config struct {
InternalServer internalserver.Config `yaml:"internal_server,omitempty" doc:"hidden"`
Distributor distributor.Config `yaml:"distributor,omitempty"`
Querier querier.Config `yaml:"querier,omitempty"`
- QuerierRF1 querierrf1.Config `yaml:"querier_rf1,omitempty"`
QueryScheduler scheduler.Config `yaml:"query_scheduler"`
Frontend lokifrontend.Config `yaml:"frontend,omitempty"`
QueryRange queryrange.Config `yaml:"query_range,omitempty"`
@@ -166,7 +164,6 @@ func (c *Config) RegisterFlags(f *flag.FlagSet) {
c.Common.RegisterFlags(f)
c.Distributor.RegisterFlags(f)
c.Querier.RegisterFlags(f)
- c.QuerierRF1.RegisterFlags(f)
c.CompactorHTTPClient.RegisterFlags(f)
c.CompactorGRPCClient.RegisterFlags(f)
c.IngesterClient.RegisterFlags(f)
diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go
index e2366d374ebf9..b8ebc89b64752 100644
--- a/pkg/loki/modules.go
+++ b/pkg/loki/modules.go
@@ -47,7 +47,6 @@ import (
"github.com/grafana/loki/v3/pkg/distributor"
"github.com/grafana/loki/v3/pkg/indexgateway"
"github.com/grafana/loki/v3/pkg/ingester"
- "github.com/grafana/loki/v3/pkg/ingester-rf1/objstore"
"github.com/grafana/loki/v3/pkg/logproto"
"github.com/grafana/loki/v3/pkg/logql"
"github.com/grafana/loki/v3/pkg/logqlmodel/stats"
@@ -57,7 +56,6 @@ import (
"github.com/grafana/loki/v3/pkg/lokifrontend/frontend/v2/frontendv2pb"
"github.com/grafana/loki/v3/pkg/pattern"
"github.com/grafana/loki/v3/pkg/querier"
- querierrf1 "github.com/grafana/loki/v3/pkg/querier-rf1"
"github.com/grafana/loki/v3/pkg/querier/queryrange"
"github.com/grafana/loki/v3/pkg/querier/queryrange/queryrangebase"
"github.com/grafana/loki/v3/pkg/ruler"
@@ -394,21 +392,9 @@ func (t *Loki) initQuerier() (services.Service, error) {
return nil, err
}
- if t.Cfg.QuerierRF1.Enabled {
- logger.Log("Using RF-1 querier implementation")
- store, err := objstore.New(t.Cfg.SchemaConfig.Configs, t.Cfg.StorageConfig, t.ClientMetrics)
- if err != nil {
- return nil, err
- }
- t.Querier, err = querierrf1.New(t.Cfg.QuerierRF1, t.Store, t.Overrides, deleteStore, t.MetastoreClient, store, logger)
- if err != nil {
- return nil, err
- }
- } else {
- t.Querier, err = querier.New(t.Cfg.Querier, t.Store, t.ingesterQuerier, t.Overrides, deleteStore, prometheus.DefaultRegisterer, logger)
- if err != nil {
- return nil, err
- }
+ t.Querier, err = querier.New(t.Cfg.Querier, t.Store, t.ingesterQuerier, t.Overrides, deleteStore, prometheus.DefaultRegisterer, logger)
+ if err != nil {
+ return nil, err
}
if t.Cfg.Pattern.Enabled {
|
chore
|
remove initialization of Querier RF-1 (#14791)
|
1adb1e39d3d8701d1469f1803ca4e8c7a5951469
|
2025-01-27 21:59:32
|
Robert Fratto
|
chore(dataobj): add initial high-level APIs for reading streams and log records (#15974)
| false
|
diff --git a/pkg/dataobj/builder.go b/pkg/dataobj/builder.go
new file mode 100644
index 0000000000000..ba0391b9298f2
--- /dev/null
+++ b/pkg/dataobj/builder.go
@@ -0,0 +1,363 @@
+package dataobj
+
+import (
+ "bytes"
+ "context"
+ "crypto/sha256"
+ "encoding/hex"
+ "errors"
+ "flag"
+ "fmt"
+
+ "github.com/grafana/dskit/flagext"
+ lru "github.com/hashicorp/golang-lru/v2"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/thanos-io/objstore"
+
+ "github.com/grafana/loki/v3/pkg/dataobj/internal/encoding"
+ "github.com/grafana/loki/v3/pkg/dataobj/internal/sections/logs"
+ "github.com/grafana/loki/v3/pkg/dataobj/internal/sections/streams"
+ "github.com/grafana/loki/v3/pkg/logproto"
+ "github.com/grafana/loki/v3/pkg/logql/syntax"
+)
+
+// ErrBufferFull is returned by [Builder.Append] when the buffer is full and
+// needs to flush; call [Builder.Flush] to flush it.
+var ErrBufferFull = errors.New("buffer full")
+
+// BuilderConfig configures a data object [Builder].
+type BuilderConfig struct {
+ // SHAPrefixSize sets the number of bytes of the SHA filename to use as a
+ // folder path.
+ SHAPrefixSize int `yaml:"sha_prefix_size"`
+
+ // TargetPageSize configures a target size for encoded pages within the data
+ // object. TargetPageSize accounts for encoding, but not for compression.
+ TargetPageSize flagext.Bytes `yaml:"target_page_size"`
+
+ // TODO(rfratto): We need an additional parameter for TargetMetadataSize, as
+ // metadata payloads can't be split and must be downloaded in a single
+ // request.
+ //
+ // At the moment, we don't have a good mechanism for implementing a metadata
+ // size limit (we need to support some form of section splitting or column
+ // combinations), so the option is omitted for now.
+
+ // TargetObjectSize configures a target size for data objects.
+ TargetObjectSize flagext.Bytes `yaml:"target_object_size"`
+
+ // TargetSectionSize configures the maximum size of data in a section. Sections
+ // which support this parameter will place overflow data into new sections of
+ // the same type.
+ TargetSectionSize flagext.Bytes `yaml:"target_section_size"`
+
+ // BufferSize configures the size of the buffer used to accumulate
+ // uncompressed logs in memory prior to sorting.
+ BufferSize flagext.Bytes `yaml:"buffer_size"`
+}
+
+// RegisterFlagsWithPrefix registers flags with the given prefix.
+func (cfg *BuilderConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
+ _ = cfg.TargetPageSize.Set("2MB")
+ _ = cfg.TargetObjectSize.Set("1GB")
+ _ = cfg.BufferSize.Set("16MB") // Page Size * 8
+ _ = cfg.TargetSectionSize.Set("128MB") // Target Object Size / 8
+
+ f.IntVar(&cfg.SHAPrefixSize, prefix+"sha-prefix-size", 2, "The size of the SHA prefix to use for the data object builder.")
+ f.Var(&cfg.TargetPageSize, prefix+"target-page-size", "The size of the target page to use for the data object builder.")
+ f.Var(&cfg.TargetObjectSize, prefix+"target-object-size", "The size of the target object to use for the data object builder.")
+ f.Var(&cfg.TargetSectionSize, prefix+"target-section-size", "Configures a maximum size for sections, for sections that support it.")
+ f.Var(&cfg.BufferSize, prefix+"buffer-size", "The size of the buffer to use for sorting logs.")
+}
+
+// Validate validates the BuilderConfig.
+func (cfg *BuilderConfig) Validate() error {
+ var errs []error
+
+ if cfg.SHAPrefixSize <= 0 {
+ errs = append(errs, errors.New("SHAPrefixSize must be greater than 0"))
+ }
+
+ if cfg.TargetPageSize <= 0 {
+ errs = append(errs, errors.New("TargetPageSize must be greater than 0"))
+ } else if cfg.TargetPageSize >= cfg.TargetObjectSize {
+ errs = append(errs, errors.New("TargetPageSize must be less than TargetObjectSize"))
+ }
+
+ if cfg.TargetObjectSize <= 0 {
+ errs = append(errs, errors.New("TargetObjectSize must be greater than 0"))
+ }
+
+ if cfg.BufferSize <= 0 {
+ errs = append(errs, errors.New("BufferSize must be greater than 0"))
+ }
+
+ if cfg.TargetSectionSize <= 0 || cfg.TargetSectionSize > cfg.TargetObjectSize {
+ errs = append(errs, errors.New("SectionSize must be greater than 0 and less than or equal to TargetObjectSize"))
+ }
+
+ return errors.Join(errs...)
+}
+
+// A Builder builds data objects from a set of incoming log data. Log data is
+// appended to a builder by calling [Builder.Append]. Buffered log data is
+// flushed manually by calling [Builder.Flush].
+//
+// Methods on Builder are not goroutine-safe; callers are responsible for
+// synchronizing calls.
+type Builder struct {
+ cfg BuilderConfig
+ metrics *metrics
+ bucket objstore.Bucket
+ tenantID string
+
+ labelCache *lru.Cache[string, labels.Labels]
+
+ currentSizeEstimate int
+ state builderState
+
+ streams *streams.Streams
+ logs *logs.Logs
+
+ flushBuffer *bytes.Buffer
+ encoder *encoding.Encoder
+}
+
+type builderState int
+
+const (
+ // builderStateEmpty indicates the builder is empty and ready to accept new data.
+ builderStateEmpty builderState = iota
+
+ // builderStateDirty indicates the builder has been modified since the last flush.
+ builderStateDirty
+
+ // builderStateFlush indicates the builder has data to flush.
+ builderStateFlush
+)
+
+// NewBuilder creates a new Builder which stores data objects for the specified
+// tenant in a bucket.
+//
+// NewBuilder returns an error if BuilderConfig is invalid.
+func NewBuilder(cfg BuilderConfig, bucket objstore.Bucket, tenantID string) (*Builder, error) {
+ if err := cfg.Validate(); err != nil {
+ return nil, err
+ }
+
+ labelCache, err := lru.New[string, labels.Labels](5000)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create LRU cache: %w", err)
+ }
+
+ var (
+ metrics = newMetrics()
+
+ flushBuffer = bytes.NewBuffer(make([]byte, 0, int(cfg.TargetObjectSize)))
+ encoder = encoding.NewEncoder(flushBuffer)
+ )
+ metrics.ObserveConfig(cfg)
+
+ return &Builder{
+ cfg: cfg,
+ metrics: metrics,
+ bucket: bucket,
+ tenantID: tenantID,
+
+ labelCache: labelCache,
+
+ streams: streams.New(metrics.streams, int(cfg.TargetPageSize)),
+ logs: logs.New(metrics.logs, logs.Options{
+ PageSizeHint: int(cfg.TargetPageSize),
+ BufferSize: int(cfg.BufferSize),
+ SectionSize: int(cfg.TargetSectionSize),
+ }),
+
+ flushBuffer: flushBuffer,
+ encoder: encoder,
+ }, nil
+}
+
+// Append buffers a stream to be written to a data object. Append returns an
+// error if the stream labels cannot be parsed or [ErrBufferFull] if the
+// builder is full.
+//
+// Once a Builder is full, call [Builder.Flush] to flush the buffered data,
+// then call Append again with the same entry.
+func (b *Builder) Append(stream logproto.Stream) error {
+ // Don't allow appending to a builder that has data to be flushed.
+ if b.state == builderStateFlush {
+ return ErrBufferFull
+ }
+
+ ls, err := b.parseLabels(stream.Labels)
+ if err != nil {
+ return err
+ }
+
+ // Check whether the buffer is full before a stream can be appended; this
+ // check tends to overestimate, but we may still go over our target size.
+ //
+ // Since this check only happens after the first call to Append,
+ // b.currentSizeEstimate will always be updated to reflect the size following
+ // the previous append.
+ if b.state != builderStateEmpty && b.currentSizeEstimate+labelsEstimate(ls)+streamSizeEstimate(stream) > int(b.cfg.TargetObjectSize) {
+ return ErrBufferFull
+ }
+
+ timer := prometheus.NewTimer(b.metrics.appendTime)
+ defer timer.ObserveDuration()
+
+ for _, entry := range stream.Entries {
+ streamID := b.streams.Record(ls, entry.Timestamp)
+
+ b.logs.Append(logs.Record{
+ StreamID: streamID,
+ Timestamp: entry.Timestamp,
+ Metadata: entry.StructuredMetadata,
+ Line: entry.Line,
+ })
+ }
+
+ b.currentSizeEstimate = b.estimatedSize()
+ b.state = builderStateDirty
+ return nil
+}
+
+func (b *Builder) parseLabels(labelString string) (labels.Labels, error) {
+ labels, ok := b.labelCache.Get(labelString)
+ if ok {
+ return labels, nil
+ }
+
+ labels, err := syntax.ParseLabels(labelString)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse labels: %w", err)
+ }
+ b.labelCache.Add(labelString, labels)
+ return labels, nil
+}
+
+func (b *Builder) estimatedSize() int {
+ var size int
+ size += b.streams.EstimatedSize()
+ size += b.logs.EstimatedSize()
+ b.metrics.sizeEstimate.Set(float64(size))
+ return size
+}
+
+// labelsEstimate estimates the size of a set of labels in bytes.
+func labelsEstimate(ls labels.Labels) int {
+ var (
+ keysSize int
+ valuesSize int
+ )
+
+ for _, l := range ls {
+ keysSize += len(l.Name)
+ valuesSize += len(l.Value)
+ }
+
+ // Keys are stored as columns directly, while values get compressed. We
+ // assume a conservative 2x compression ratio for the values.
+ return keysSize + valuesSize/2
+}
+
+// streamSizeEstimate estimates the size of a stream in bytes.
+func streamSizeEstimate(stream logproto.Stream) int {
+ var size int
+ for _, entry := range stream.Entries {
+ // We only check the size of the line and metadata. Timestamps and IDs
+ // encode so well that they're unlikely to make a significant impact on our
+ // size estimate.
+ size += len(entry.Line) / 2 // Line with 2x compression ratio
+ for _, md := range entry.StructuredMetadata {
+ size += len(md.Name) + len(md.Value)/2
+ }
+ }
+ return size
+}
+
+// Flush flushes all buffered data to object storage. Calling Flush can result
+// in a no-op if there is no buffered data to flush.
+//
+// If Flush builds an object but fails to upload it to object storage, the
+// built object is cached and can be retried. [Builder.Reset] can be called to
+// discard any pending data and allow new data to be appended.
+func (b *Builder) Flush(ctx context.Context) error {
+ switch b.state {
+ case builderStateEmpty:
+ return nil // Nothing to flush
+ case builderStateDirty:
+ if err := b.buildObject(); err != nil {
+ return fmt.Errorf("building object: %w", err)
+ }
+ b.state = builderStateFlush
+ }
+
+ timer := prometheus.NewTimer(b.metrics.flushTime)
+ defer timer.ObserveDuration()
+
+ sum := sha256.Sum224(b.flushBuffer.Bytes())
+ sumStr := hex.EncodeToString(sum[:])
+
+ objectPath := fmt.Sprintf("tenant-%s/objects/%s/%s", b.tenantID, sumStr[:b.cfg.SHAPrefixSize], sumStr[b.cfg.SHAPrefixSize:])
+ if err := b.bucket.Upload(ctx, objectPath, bytes.NewReader(b.flushBuffer.Bytes())); err != nil {
+ return err
+ }
+
+ b.Reset()
+ return nil
+}
+
+func (b *Builder) buildObject() error {
+ timer := prometheus.NewTimer(b.metrics.buildTime)
+ defer timer.ObserveDuration()
+
+ // We reset after a successful flush, but we also reset the buffer before
+ // building for safety.
+ b.flushBuffer.Reset()
+
+ if err := b.streams.EncodeTo(b.encoder); err != nil {
+ return fmt.Errorf("encoding streams: %w", err)
+ } else if err := b.logs.EncodeTo(b.encoder); err != nil {
+ return fmt.Errorf("encoding logs: %w", err)
+ } else if err := b.encoder.Flush(); err != nil {
+ return fmt.Errorf("encoding object: %w", err)
+ }
+
+ b.metrics.builtSize.Observe(float64(b.flushBuffer.Len()))
+
+ // We pass context.Background() below to avoid allowing building an object to
+ // time out; timing out on build would discard anything we built and would
+ // cause data loss.
+ dec := encoding.ReaderAtDecoder(bytes.NewReader(b.flushBuffer.Bytes()), int64(b.flushBuffer.Len()))
+ return b.metrics.encoding.Observe(context.Background(), dec)
+}
+
+// Reset discards pending data and resets the builder to an empty state.
+func (b *Builder) Reset() {
+ b.logs.Reset()
+ b.streams.Reset()
+
+ b.state = builderStateEmpty
+ b.flushBuffer.Reset()
+ b.metrics.sizeEstimate.Set(0)
+}
+
+// RegisterMetrics registers metrics about builder to report to reg. All
+// metrics will have a tenant label set to the tenant ID of the Builder.
+//
+// If multiple Builders for the same tenant are running in the same process,
+// reg must contain additional labels to differentiate between them.
+func (b *Builder) RegisterMetrics(reg prometheus.Registerer) error {
+ reg = prometheus.WrapRegistererWith(prometheus.Labels{"tenant": b.tenantID}, reg)
+ return b.metrics.Register(reg)
+}
+
+// UnregisterMetrics unregisters metrics about builder from reg.
+func (b *Builder) UnregisterMetrics(reg prometheus.Registerer) {
+ reg = prometheus.WrapRegistererWith(prometheus.Labels{"tenant": b.tenantID}, reg)
+ b.metrics.Unregister(reg)
+}
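Pieced together from the doc comments in this new file, the intended call pattern looks roughly like this — a sketch, not code from the commit; the config values are illustrative:

```go
package main

import (
	"context"
	"errors"
	"log"

	"github.com/thanos-io/objstore"

	"github.com/grafana/loki/v3/pkg/dataobj"
	"github.com/grafana/loki/v3/pkg/logproto"
)

func main() {
	cfg := dataobj.BuilderConfig{
		SHAPrefixSize:     2,
		TargetPageSize:    2 << 20,   // 2MB
		TargetObjectSize:  1 << 30,   // 1GB
		TargetSectionSize: 128 << 20, // 128MB
		BufferSize:        16 << 20,  // 16MB
	}
	builder, err := dataobj.NewBuilder(cfg, objstore.NewInMemBucket(), "fake")
	if err != nil {
		log.Fatal(err)
	}

	var streams []logproto.Stream // filled elsewhere
	for _, stream := range streams {
		err := builder.Append(stream)
		if errors.Is(err, dataobj.ErrBufferFull) {
			// Flush the buffered data, then retry the same entry.
			if err := builder.Flush(context.Background()); err != nil {
				log.Fatal(err)
			}
			err = builder.Append(stream)
		}
		if err != nil {
			log.Fatal(err)
		}
	}
	if err := builder.Flush(context.Background()); err != nil {
		log.Fatal(err)
	}
}
```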
diff --git a/pkg/dataobj/dataobj_test.go b/pkg/dataobj/builder_test.go
similarity index 62%
rename from pkg/dataobj/dataobj_test.go
rename to pkg/dataobj/builder_test.go
index 6c75d722ae5c8..365f6a5d6196f 100644
--- a/pkg/dataobj/dataobj_test.go
+++ b/pkg/dataobj/builder_test.go
@@ -1,10 +1,9 @@
package dataobj
import (
- "cmp"
"context"
"errors"
- "slices"
+ "fmt"
"strings"
"testing"
"time"
@@ -16,7 +15,6 @@ import (
"github.com/grafana/loki/v3/pkg/dataobj/internal/result"
"github.com/grafana/loki/v3/pkg/logproto"
- "github.com/grafana/loki/v3/pkg/logql/syntax"
)
var testBuilderConfig = BuilderConfig{
@@ -29,7 +27,7 @@ var testBuilderConfig = BuilderConfig{
BufferSize: 2048 * 8,
}
-func Test(t *testing.T) {
+func TestBuilder(t *testing.T) {
bucket := objstore.NewInMemBucket()
streams := []logproto.Stream{
@@ -87,21 +85,21 @@ func Test(t *testing.T) {
})
t.Run("Read", func(t *testing.T) {
- reader := newReader(bucket)
-
- objects, err := result.Collect(reader.Objects(context.Background(), "fake"))
+ objects, err := result.Collect(listObjects(context.Background(), bucket, "fake"))
require.NoError(t, err)
require.Len(t, objects, 1)
- actual, err := result.Collect(reader.Streams(context.Background(), objects[0]))
+ obj := FromBucket(bucket, objects[0])
+ md, err := obj.Metadata(context.Background())
require.NoError(t, err)
- require.Equal(t, sortStreams(t, streams), actual)
+ require.Equal(t, 1, md.StreamsSections)
+ require.Equal(t, 1, md.LogsSections)
})
}
-// Test_Builder_Append ensures that appending to the buffer eventually reports
+// TestBuilder_Append ensures that appending to the buffer eventually reports
// that the buffer is full.
-func Test_Builder_Append(t *testing.T) {
+func TestBuilder_Append(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()
@@ -127,43 +125,24 @@ func Test_Builder_Append(t *testing.T) {
}
}
-// sortStreams returns a new slice of streams where entries in individual
-// streams are sorted by timestamp and structured metadata are sorted by key.
-// The order of streams is preserved.
-func sortStreams(t *testing.T, streams []logproto.Stream) []logproto.Stream {
- t.Helper()
-
- res := make([]logproto.Stream, len(streams))
- for i, in := range streams {
- labels, err := syntax.ParseLabels(in.Labels)
- require.NoError(t, err)
-
- res[i] = logproto.Stream{
- Labels: labels.String(),
- Entries: slices.Clone(in.Entries),
- Hash: labels.Hash(),
- }
-
- for j, ent := range res[i].Entries {
- res[i].Entries[j].StructuredMetadata = slices.Clone(ent.StructuredMetadata)
- slices.SortFunc(res[i].Entries[j].StructuredMetadata, func(i, j push.LabelAdapter) int {
- return cmp.Compare(i.Name, j.Name)
- })
- }
+func listObjects(ctx context.Context, bucket objstore.Bucket, tenant string) result.Seq[string] {
+ tenantPath := fmt.Sprintf("tenant-%s/objects/", tenant)
- slices.SortFunc(res[i].Entries, func(i, j push.Entry) int {
- switch {
- case i.Timestamp.Before(j.Timestamp):
- return -1
+ return result.Iter(func(yield func(string) bool) error {
+ errIterationStopped := errors.New("iteration stopped")
- case i.Timestamp.After(j.Timestamp):
- return 1
-
- default:
- return 0
+ err := bucket.Iter(ctx, tenantPath, func(name string) error {
+ if !yield(name) {
+ return errIterationStopped
}
- })
- }
-
- return res
+ return nil
+ }, objstore.WithRecursiveIter())
+
+ switch {
+ case errors.Is(err, errIterationStopped):
+ return nil
+ default:
+ return err
+ }
+ })
}
diff --git a/pkg/dataobj/dataobj.go b/pkg/dataobj/dataobj.go
index 62ce8c0a16c8f..23cb094f0db5a 100644
--- a/pkg/dataobj/dataobj.go
+++ b/pkg/dataobj/dataobj.go
@@ -2,363 +2,54 @@
package dataobj
import (
- "bytes"
"context"
- "crypto/sha256"
- "encoding/hex"
- "errors"
- "flag"
"fmt"
+ "io"
- "github.com/grafana/dskit/flagext"
- lru "github.com/hashicorp/golang-lru/v2"
- "github.com/prometheus/client_golang/prometheus"
- "github.com/prometheus/prometheus/model/labels"
"github.com/thanos-io/objstore"
"github.com/grafana/loki/v3/pkg/dataobj/internal/encoding"
- "github.com/grafana/loki/v3/pkg/dataobj/internal/sections/logs"
- "github.com/grafana/loki/v3/pkg/dataobj/internal/sections/streams"
- "github.com/grafana/loki/v3/pkg/logproto"
- "github.com/grafana/loki/v3/pkg/logql/syntax"
+ "github.com/grafana/loki/v3/pkg/dataobj/internal/metadata/filemd"
)
-// ErrBufferFull is returned by [Builder.Append] when the buffer is full and
-// needs to flush; call [Builder.Flush] to flush it.
-var ErrBufferFull = errors.New("buffer full")
-
-// BuilderConfig configures a data object [Builder].
-type BuilderConfig struct {
- // SHAPrefixSize sets the number of bytes of the SHA filename to use as a
- // folder path.
- SHAPrefixSize int `yaml:"sha_prefix_size"`
-
- // TargetPageSize configures a target size for encoded pages within the data
- // object. TargetPageSize accounts for encoding, but not for compression.
- TargetPageSize flagext.Bytes `yaml:"target_page_size"`
-
- // TODO(rfratto): We need an additional parameter for TargetMetadataSize, as
- // metadata payloads can't be split and must be downloaded in a single
- // request.
- //
- // At the moment, we don't have a good mechanism for implementing a metadata
- // size limit (we need to support some form of section splitting or column
- // combinations), so the option is omitted for now.
-
- // TargetObjectSize configures a target size for data objects.
- TargetObjectSize flagext.Bytes `yaml:"target_object_size"`
-
- // TargetSectionSize configures the maximum size of data in a section. Sections
- // which support this parameter will place overflow data into new sections of
- // the same type.
- TargetSectionSize flagext.Bytes `yaml:"target_section_size"`
-
- // BufferSize configures the size of the buffer used to accumulate
- // uncompressed logs in memory prior to sorting.
- BufferSize flagext.Bytes `yaml:"buffer_size"`
+// An Object is a representation of a data object.
+type Object struct {
+ dec encoding.Decoder
}
-// RegisterFlagsWithPrefix registers flags with the given prefix.
-func (cfg *BuilderConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
- _ = cfg.TargetPageSize.Set("2MB")
- _ = cfg.TargetObjectSize.Set("1GB")
- _ = cfg.BufferSize.Set("16MB") // Page Size * 8
- _ = cfg.TargetSectionSize.Set("128MB") // Target Object Size / 8
-
- f.IntVar(&cfg.SHAPrefixSize, prefix+"sha-prefix-size", 2, "The size of the SHA prefix to use for the data object builder.")
- f.Var(&cfg.TargetPageSize, prefix+"target-page-size", "The size of the target page to use for the data object builder.")
- f.Var(&cfg.TargetObjectSize, prefix+"target-object-size", "The size of the target object to use for the data object builder.")
- f.Var(&cfg.TargetSectionSize, prefix+"target-section-size", "Configures a maximum size for sections, for sections that support it.")
- f.Var(&cfg.BufferSize, prefix+"buffer-size", "The size of the buffer to use for sorting logs.")
+// FromBucket opens an Object from the given storage bucket and path.
+func FromBucket(bucket objstore.Bucket, path string) *Object {
+ return &Object{dec: encoding.BucketDecoder(bucket, path)}
}
-// Validate validates the BuilderConfig.
-func (cfg *BuilderConfig) Validate() error {
- var errs []error
-
- if cfg.SHAPrefixSize <= 0 {
- errs = append(errs, errors.New("SHAPrefixSize must be greater than 0"))
- }
-
- if cfg.TargetPageSize <= 0 {
- errs = append(errs, errors.New("TargetPageSize must be greater than 0"))
- } else if cfg.TargetPageSize >= cfg.TargetObjectSize {
- errs = append(errs, errors.New("TargetPageSize must be less than TargetObjectSize"))
- }
-
- if cfg.TargetObjectSize <= 0 {
- errs = append(errs, errors.New("TargetObjectSize must be greater than 0"))
- }
-
- if cfg.BufferSize <= 0 {
- errs = append(errs, errors.New("BufferSize must be greater than 0"))
- }
-
- if cfg.TargetSectionSize <= 0 || cfg.TargetSectionSize > cfg.TargetObjectSize {
- errs = append(errs, errors.New("SectionSize must be greater than 0 and less than or equal to TargetObjectSize"))
- }
-
- return errors.Join(errs...)
+// FromReaderAt opens an Object from the given io.ReaderAt. The size argument
+// specifies the size of the data object in bytes.
+func FromReaderAt(r io.ReaderAt, size int64) *Object {
+ return &Object{dec: encoding.ReaderAtDecoder(r, size)}
}
-// A Builder builds data objects from a set of incoming log data. Log data is
-// appended to a builder by calling [Builder.Append]. Buffered log data is
-// flushed manually by calling [Builder.Flush].
-//
-// Methods on Builder are not goroutine-safe; callers are responsible for
-// synchronizing calls.
-type Builder struct {
- cfg BuilderConfig
- metrics *metrics
- bucket objstore.Bucket
- tenantID string
-
- labelCache *lru.Cache[string, labels.Labels]
-
- currentSizeEstimate int
- state builderState
-
- streams *streams.Streams
- logs *logs.Logs
-
- flushBuffer *bytes.Buffer
- encoder *encoding.Encoder
+// Metadata holds high-level metadata about an [Object].
+type Metadata struct {
+ StreamsSections int // Number of streams sections in the Object.
+ LogsSections int // Number of logs sections in the Object.
}
-type builderState int
-
-const (
- // builderStateReady indicates the builder is empty and ready to accept new data.
- builderStateEmpty builderState = iota
-
- // builderStateDirty indicates the builder has been modified since the last flush.
- builderStateDirty
-
- // builderStateFlushing indicates the builder has data to flush.
- builderStateFlush
-)
-
-// NewBuilder creates a new Builder which stores data objects for the specified
-// tenant in a bucket.
-//
-// NewBuilder returns an error if BuilderConfig is invalid.
-func NewBuilder(cfg BuilderConfig, bucket objstore.Bucket, tenantID string) (*Builder, error) {
- if err := cfg.Validate(); err != nil {
- return nil, err
- }
-
- labelCache, err := lru.New[string, labels.Labels](5000)
+// Metadata returns the metadata of the Object. Metadata returns an error if
+// the object cannot be read.
+func (o *Object) Metadata(ctx context.Context) (Metadata, error) {
+ si, err := o.dec.Sections(ctx)
if err != nil {
- return nil, fmt.Errorf("failed to create LRU cache: %w", err)
- }
-
- var (
- metrics = newMetrics()
-
- flushBuffer = bytes.NewBuffer(make([]byte, 0, int(cfg.TargetObjectSize)))
- encoder = encoding.NewEncoder(flushBuffer)
- )
- metrics.ObserveConfig(cfg)
-
- return &Builder{
- cfg: cfg,
- metrics: metrics,
- bucket: bucket,
- tenantID: tenantID,
-
- labelCache: labelCache,
-
- streams: streams.New(metrics.streams, int(cfg.TargetPageSize)),
- logs: logs.New(metrics.logs, logs.Options{
- PageSizeHint: int(cfg.TargetPageSize),
- BufferSize: int(cfg.BufferSize),
- SectionSize: int(cfg.TargetSectionSize),
- }),
-
- flushBuffer: flushBuffer,
- encoder: encoder,
- }, nil
-}
-
-// Append buffers a stream to be written to a data object. Append returns an
-// error if the stream labels cannot be parsed or [ErrBufferFull] if the
-// builder is full.
-//
-// Once a Builder is full, call [Builder.Flush] to flush the buffered data,
-// then call Append again with the same entry.
-func (b *Builder) Append(stream logproto.Stream) error {
- // Don't allow appending to a builder that has data to be flushed.
- if b.state == builderStateFlush {
- return ErrBufferFull
- }
-
- ls, err := b.parseLabels(stream.Labels)
- if err != nil {
- return err
- }
-
- // Check whether the buffer is full before a stream can be appended; this is
- // tends to overestimate, but we may still go over our target size.
- //
- // Since this check only happens after the first call to Append,
- // b.currentSizeEstimate will always be updated to reflect the size following
- // the previous append.
- if b.state != builderStateEmpty && b.currentSizeEstimate+labelsEstimate(ls)+streamSizeEstimate(stream) > int(b.cfg.TargetObjectSize) {
- return ErrBufferFull
+ return Metadata{}, fmt.Errorf("reading sections: %w", err)
}
- timer := prometheus.NewTimer(b.metrics.appendTime)
- defer timer.ObserveDuration()
-
- for _, entry := range stream.Entries {
- streamID := b.streams.Record(ls, entry.Timestamp)
-
- b.logs.Append(logs.Record{
- StreamID: streamID,
- Timestamp: entry.Timestamp,
- Metadata: entry.StructuredMetadata,
- Line: entry.Line,
- })
- }
-
- b.currentSizeEstimate = b.estimatedSize()
- b.state = builderStateDirty
- return nil
-}
-
-func (b *Builder) parseLabels(labelString string) (labels.Labels, error) {
- labels, ok := b.labelCache.Get(labelString)
- if ok {
- return labels, nil
- }
-
- labels, err := syntax.ParseLabels(labelString)
- if err != nil {
- return nil, fmt.Errorf("failed to parse labels: %w", err)
- }
- b.labelCache.Add(labelString, labels)
- return labels, nil
-}
-
-func (b *Builder) estimatedSize() int {
- var size int
- size += b.streams.EstimatedSize()
- size += b.logs.EstimatedSize()
- b.metrics.sizeEstimate.Set(float64(size))
- return size
-}
-
-// labelsEstimate estimates the size of a set of labels in bytes.
-func labelsEstimate(ls labels.Labels) int {
- var (
- keysSize int
- valuesSize int
- )
-
- for _, l := range ls {
- keysSize += len(l.Name)
- valuesSize += len(l.Value)
- }
-
- // Keys are stored as columns directly, while values get compressed. We'll
- // underestimate a 2x compression ratio.
- return keysSize + valuesSize/2
-}
-
-// streamSizeEstimate estimates the size of a stream in bytes.
-func streamSizeEstimate(stream logproto.Stream) int {
- var size int
- for _, entry := range stream.Entries {
- // We only check the size of the line and metadata. Timestamps and IDs
- // encode so well that they're unlikely to make a singificant impact on our
- // size estimate.
- size += len(entry.Line) / 2 // Line with 2x compression ratio
- for _, md := range entry.StructuredMetadata {
- size += len(md.Name) + len(md.Value)/2
- }
- }
- return size
-}
-
-// Flush flushes all buffered data to object storage. Calling Flush can result
-// in a no-op if there is no buffered data to flush.
-//
-// If Flush builds an object but fails to upload it to object storage, the
-// built object is cached and can be retried. [Builder.Reset] can be called to
-// discard any pending data and allow new data to be appended.
-func (b *Builder) Flush(ctx context.Context) error {
- switch b.state {
- case builderStateEmpty:
- return nil // Nothing to flush
- case builderStateDirty:
- if err := b.buildObject(); err != nil {
- return fmt.Errorf("building object: %w", err)
+ var md Metadata
+ for _, s := range si {
+ switch s.Type {
+ case filemd.SECTION_TYPE_STREAMS:
+ md.StreamsSections++
+ case filemd.SECTION_TYPE_LOGS:
+ md.LogsSections++
}
- b.state = builderStateFlush
- }
-
- timer := prometheus.NewTimer(b.metrics.flushTime)
- defer timer.ObserveDuration()
-
- sum := sha256.Sum224(b.flushBuffer.Bytes())
- sumStr := hex.EncodeToString(sum[:])
-
- objectPath := fmt.Sprintf("tenant-%s/objects/%s/%s", b.tenantID, sumStr[:b.cfg.SHAPrefixSize], sumStr[b.cfg.SHAPrefixSize:])
- if err := b.bucket.Upload(ctx, objectPath, bytes.NewReader(b.flushBuffer.Bytes())); err != nil {
- return err
}
-
- b.Reset()
- return nil
-}
-
-func (b *Builder) buildObject() error {
- timer := prometheus.NewTimer(b.metrics.buildTime)
- defer timer.ObserveDuration()
-
- // We reset after a successful flush, but we also reset the buffer before
- // building for safety.
- b.flushBuffer.Reset()
-
- if err := b.streams.EncodeTo(b.encoder); err != nil {
- return fmt.Errorf("encoding streams: %w", err)
- } else if err := b.logs.EncodeTo(b.encoder); err != nil {
- return fmt.Errorf("encoding logs: %w", err)
- } else if err := b.encoder.Flush(); err != nil {
- return fmt.Errorf("encoding object: %w", err)
- }
-
- b.metrics.builtSize.Observe(float64(b.flushBuffer.Len()))
-
- // We pass context.Background() below to avoid allowing building an object to
- // time out; timing out on build would discard anything we built and would
- // cause data loss.
- dec := encoding.ReaderAtDecoder(bytes.NewReader(b.flushBuffer.Bytes()), int64(b.flushBuffer.Len()))
- return b.metrics.encoding.Observe(context.Background(), dec)
-}
-
-// Reset discards pending data and resets the builder to an empty state.
-func (b *Builder) Reset() {
- b.logs.Reset()
- b.streams.Reset()
-
- b.state = builderStateEmpty
- b.flushBuffer.Reset()
- b.metrics.sizeEstimate.Set(0)
-}
-
-// RegisterMetrics registers metrics about builder to report to reg. All
-// metrics will have a tenant label set to the tenant ID of the Builder.
-//
-// If multiple Builders for the same tenant are running in the same process,
-// reg must contain additional labels to differentiate between them.
-func (b *Builder) RegisterMetrics(reg prometheus.Registerer) error {
- reg = prometheus.WrapRegistererWith(prometheus.Labels{"tenant": b.tenantID}, reg)
- return b.metrics.Register(reg)
-}
-
-// UnregisterMetrics unregisters metrics about builder from reg.
-func (b *Builder) UnregisterMetrics(reg prometheus.Registerer) {
- reg = prometheus.WrapRegistererWith(prometheus.Labels{"tenant": b.tenantID}, reg)
- b.metrics.Unregister(reg)
+ return md, nil
}
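
Together, FromBucket and Metadata let callers inspect an object cheaply before deciding how to read it. A short illustrative sketch, not part of the diff (the bucket and path come from the caller):

import (
	"context"
	"fmt"

	"github.com/thanos-io/objstore"

	"github.com/grafana/loki/v3/pkg/dataobj"
)

// sectionCounts opens a stored data object and reports how many streams and
// logs sections it contains, which callers can use to fan out readers.
func sectionCounts(ctx context.Context, bucket objstore.Bucket, path string) error {
	obj := dataobj.FromBucket(bucket, path)
	md, err := obj.Metadata(ctx)
	if err != nil {
		return fmt.Errorf("reading object metadata: %w", err)
	}
	fmt.Printf("streams sections: %d, logs sections: %d\n", md.StreamsSections, md.LogsSections)
	return nil
}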
diff --git a/pkg/dataobj/internal/sections/logs/iter.go b/pkg/dataobj/internal/sections/logs/iter.go
index 9d2db95e3df5b..361e0bc526df3 100644
--- a/pkg/dataobj/internal/sections/logs/iter.go
+++ b/pkg/dataobj/internal/sections/logs/iter.go
@@ -33,7 +33,7 @@ func Iter(ctx context.Context, dec encoding.Decoder) result.Seq[Record] {
continue
}
- for result := range iterSection(ctx, logsDec, section) {
+ for result := range IterSection(ctx, logsDec, section) {
if result.Err() != nil || !yield(result.MustValue()) {
return result.Err()
}
@@ -44,7 +44,7 @@ func Iter(ctx context.Context, dec encoding.Decoder) result.Seq[Record] {
})
}
-func iterSection(ctx context.Context, dec encoding.LogsDecoder, section *filemd.SectionInfo) result.Seq[Record] {
+func IterSection(ctx context.Context, dec encoding.LogsDecoder, section *filemd.SectionInfo) result.Seq[Record] {
return result.Iter(func(yield func(Record) bool) error {
// We need to pull the columns twice: once from the dataset implementation
// and once for the metadata to retrieve column type.
diff --git a/pkg/dataobj/internal/sections/streams/iter.go b/pkg/dataobj/internal/sections/streams/iter.go
index 4443aa10eb646..b75da46262c80 100644
--- a/pkg/dataobj/internal/sections/streams/iter.go
+++ b/pkg/dataobj/internal/sections/streams/iter.go
@@ -31,7 +31,7 @@ func Iter(ctx context.Context, dec encoding.Decoder) result.Seq[Stream] {
continue
}
- for result := range iterSection(ctx, streamsDec, section) {
+ for result := range IterSection(ctx, streamsDec, section) {
if result.Err() != nil || !yield(result.MustValue()) {
return result.Err()
}
@@ -42,7 +42,7 @@ func Iter(ctx context.Context, dec encoding.Decoder) result.Seq[Stream] {
})
}
-func iterSection(ctx context.Context, dec encoding.StreamsDecoder, section *filemd.SectionInfo) result.Seq[Stream] {
+func IterSection(ctx context.Context, dec encoding.StreamsDecoder, section *filemd.SectionInfo) result.Seq[Stream] {
return result.Iter(func(yield func(Stream) bool) error {
// We need to pull the columns twice: once from the dataset implementation
// and once for the metadata to retrieve column type.
diff --git a/pkg/dataobj/logs_reader.go b/pkg/dataobj/logs_reader.go
new file mode 100644
index 0000000000000..1261d90acfd58
--- /dev/null
+++ b/pkg/dataobj/logs_reader.go
@@ -0,0 +1,294 @@
+package dataobj
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "iter"
+ "sort"
+ "time"
+
+ "github.com/prometheus/prometheus/model/labels"
+
+ "github.com/grafana/loki/pkg/push"
+
+ "github.com/grafana/loki/v3/pkg/dataobj/internal/metadata/filemd"
+ "github.com/grafana/loki/v3/pkg/dataobj/internal/result"
+ "github.com/grafana/loki/v3/pkg/dataobj/internal/sections/logs"
+)
+
+// Predicates for reading logs.
+type (
+ // MetadataMatcher is a predicate for matching metadata in a logs section.
+ // MetadataMatcher predicates assert that a metadata entry named Name exists
+ // and its value is set to Value.
+ //
+ // For equality matches, MetadataMatcher should always be used;
+ // MetadataMatchers can translate into more efficient filter operations than
+ // a [MetadataFilter] can.
+ MetadataMatcher struct{ Name, Value string }
+
+ // MetadataFilter is a predicate for matching metadata in a logs section.
+ // MetadataFilter predicates return a true value when the combination of the
+ // provided metadata entry name and value should be included in the result.
+ //
+ // MetadataFilter predicates should only be used for more complex filtering;
+ // for equality matches, [MetadataMatcher]s are more efficient.
+ MetadataFilter func(name, value string) bool
+)
+
+// A Record is an individual log record in a data object.
+type Record struct {
+ StreamID int64 // StreamID associated with the log record.
+ Timestamp time.Time // Timestamp of the log record.
+ Metadata labels.Labels // Set of metadata associated with the log record.
+ Line string // Line of the log record.
+}
+
+// LogsReader reads the set of logs from an [Object].
+type LogsReader struct {
+ obj *Object
+ idx int
+
+ matchers map[string]string
+ filters map[string]MetadataFilter
+ matchIDs map[int64]struct{}
+
+ next func() (result.Result[logs.Record], bool)
+ stop func()
+}
+
+// NewLogsReader creates a new LogsReader that reads from the logs section of
+// the given object.
+func NewLogsReader(obj *Object, sectionIndex int) *LogsReader {
+ var lr LogsReader
+ lr.Reset(obj, sectionIndex)
+ return &lr
+}
+
+// MatchStreams provides a sequence of stream IDs for the logs reader to match.
+// [LogsReader.Read] will only return logs for the provided stream IDs.
+//
+// MatchStreams may be called multiple times to match multiple sets of streams.
+//
+// MatchStreams may only be called before reading begins or after a call to
+// [LogsReader.Reset].
+func (r *LogsReader) MatchStreams(ids iter.Seq[int64]) error {
+ if r.next != nil {
+ return fmt.Errorf("cannot change matched streams after reading has started")
+ }
+
+ if r.matchIDs == nil {
+ r.matchIDs = make(map[int64]struct{})
+ }
+ for id := range ids {
+ r.matchIDs[id] = struct{}{}
+ }
+ return nil
+}
+
+// AddMetadataMatcher adds a metadata matcher to the LogsReader.
+// [LogsReader.Read] will only return logs for which the metadata matcher
+// predicate passes.
+//
+// AddMetadataMatcher may only be called before reading begins or after a call
+// to [LogsReader.Reset].
+func (r *LogsReader) AddMetadataMatcher(m MetadataMatcher) error {
+ if r.next != nil {
+ return fmt.Errorf("cannot add metadata matcher after reading has started")
+ }
+
+ if r.matchers == nil {
+ r.matchers = make(map[string]string)
+ }
+ r.matchers[m.Name] = m.Value
+ return nil
+}
+
+// AddMetadataFilter adds a metadata filter to the LogsReader.
+// [LogsReader.Read] will only return records for which the metadata filter
+// predicate passes. The filter f will be called with the provided key to allow
+// the same function to be reused for multiple keys.
+//
+// AddMetadataFilter may only be called before reading begins or after a call
+// to [LogsReader.Reset].
+func (r *LogsReader) AddMetadataFilter(key string, f MetadataFilter) error {
+ if r.next != nil {
+ return fmt.Errorf("cannot add metadata filter after reading has started")
+ }
+
+ if r.filters == nil {
+ r.filters = make(map[string]MetadataFilter)
+ }
+ r.filters[key] = f
+ return nil
+}
+
+// Read reads up to the next len(s) records from the reader and stores them
+// into s. It returns the number of records read and any error encountered. At
+// the end of the logs section, Read returns 0, io.EOF.
+func (r *LogsReader) Read(ctx context.Context, s []Record) (int, error) {
+ // TODO(rfratto): The implementation below is the initial, naive approach. It
+ // lacks a few features that will be needed at scale:
+ //
+ // * Read columns/pages in batches of len(s), rather than one row at a time.
+ //
+ // * Add page-level filtering based on min/max page values to quickly filter
+ // out batches of rows without needing to download or decode them.
+ //
+ // * Download pages in batches, rather than one at a time.
+ //
+ // * Only download/decode non-predicate columns following finding rows that
+ // match all predicate columns.
+ //
+ // * Reuse as much memory as possible from a combination of s and the state
+ // of LogsReader.
+ //
+ // These details can change internally without changing the API exposed by
+ // LogsReader, which is designed to permit efficient use in the future.
+
+ if r.obj == nil {
+ return 0, io.EOF
+ } else if r.idx < 0 {
+ return 0, fmt.Errorf("invalid section index %d", r.idx)
+ }
+
+ if r.next == nil {
+ err := r.initIter(ctx)
+ if err != nil {
+ return 0, err
+ }
+ }
+
+ for i := range s {
+ res, ok := r.nextMatching()
+ if !ok {
+ return i, io.EOF
+ }
+
+ record, err := res.Value()
+ if err != nil {
+ return i, fmt.Errorf("reading record: %w", err)
+ }
+
+ s[i] = Record{
+ StreamID: record.StreamID,
+ Timestamp: record.Timestamp,
+ Metadata: convertMetadata(record.Metadata),
+ Line: record.Line,
+ }
+ }
+
+ return len(s), nil
+}
+
+func (r *LogsReader) initIter(ctx context.Context) error {
+ sec, err := r.findSection(ctx)
+ if err != nil {
+ return fmt.Errorf("finding section: %w", err)
+ }
+
+ if r.stop != nil {
+ r.stop()
+ }
+
+ seq := logs.IterSection(ctx, r.obj.dec.LogsDecoder(), sec)
+ r.next, r.stop = result.Pull(seq)
+ return nil
+}
+
+func (r *LogsReader) findSection(ctx context.Context) (*filemd.SectionInfo, error) {
+ si, err := r.obj.dec.Sections(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("reading sections: %w", err)
+ }
+
+ var n int
+
+ for _, s := range si {
+ if s.Type == filemd.SECTION_TYPE_LOGS {
+ if n == r.idx {
+ return s, nil
+ }
+ n++
+ }
+ }
+
+ return nil, fmt.Errorf("section index %d not found", r.idx)
+}
+
+func (r *LogsReader) nextMatching() (result.Result[logs.Record], bool) {
+ if r.next == nil {
+ return result.Result[logs.Record]{}, false
+ }
+
+NextRow:
+ res, ok := r.next()
+ if !ok {
+ return res, ok
+ }
+
+ record, err := res.Value()
+ if err != nil {
+ return res, true
+ }
+
+ if r.matchIDs != nil {
+ if _, ok := r.matchIDs[record.StreamID]; !ok {
+ goto NextRow
+ }
+ }
+
+ for key, value := range r.matchers {
+ if getMetadata(record.Metadata, key) != value {
+ goto NextRow
+ }
+ }
+
+ for key, filter := range r.filters {
+ if !filter(key, getMetadata(record.Metadata, key)) {
+ goto NextRow
+ }
+ }
+
+ return res, true
+}
+
+func getMetadata(md push.LabelsAdapter, key string) string {
+ for _, l := range md {
+ if l.Name == key {
+ return l.Value
+ }
+ }
+
+ return ""
+}
+
+func convertMetadata(md push.LabelsAdapter) labels.Labels {
+ l := make(labels.Labels, 0, len(md))
+ for _, label := range md {
+ l = append(l, labels.Label{Name: label.Name, Value: label.Value})
+ }
+ sort.Sort(l)
+ return l
+}
+
+// Reset resets the LogsReader with a new object and section index to read
+// from. Reset allows reusing a LogsReader without allocating a new one.
+//
+// Reset may be called with a nil object and a negative section index to clear
+// the LogsReader without needing a new object.
+func (r *LogsReader) Reset(obj *Object, sectionIndex int) {
+ if r.stop != nil {
+ r.stop()
+ }
+
+ r.obj = obj
+ r.idx = sectionIndex
+ r.next = nil
+ r.stop = nil
+
+ clear(r.matchers)
+ clear(r.filters)
+ clear(r.matchIDs)
+}
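
Taken together, the LogsReader API above implies a two-phase use: register predicates first, then drain the section in fixed-size batches until io.EOF. A hedged sketch, not part of the diff; the stream IDs and the trace_id matcher are illustrative values:

import (
	"context"
	"errors"
	"fmt"
	"io"
	"slices"

	"github.com/grafana/loki/v3/pkg/dataobj"
)

// readMatchingLogs drains the first logs section of obj, keeping only
// records from streams 1 and 3 that carry trace_id="123". Predicates must
// be set before the first call to Read.
func readMatchingLogs(ctx context.Context, obj *dataobj.Object) ([]dataobj.Record, error) {
	r := dataobj.NewLogsReader(obj, 0)
	if err := r.MatchStreams(slices.Values([]int64{1, 3})); err != nil {
		return nil, err
	}
	if err := r.AddMetadataMatcher(dataobj.MetadataMatcher{Name: "trace_id", Value: "123"}); err != nil {
		return nil, err
	}

	var (
		out []dataobj.Record
		buf = make([]dataobj.Record, 128)
	)
	for {
		n, err := r.Read(ctx, buf)
		out = append(out, buf[:n]...)
		switch {
		case errors.Is(err, io.EOF):
			return out, nil
		case err != nil:
			return out, fmt.Errorf("reading records: %w", err)
		}
	}
}

Note that Read may return n > 0 alongside io.EOF when the final batch is short, which is why the loop appends buf[:n] before checking the error.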
diff --git a/pkg/dataobj/logs_reader_test.go b/pkg/dataobj/logs_reader_test.go
new file mode 100644
index 0000000000000..140049df24d61
--- /dev/null
+++ b/pkg/dataobj/logs_reader_test.go
@@ -0,0 +1,183 @@
+package dataobj_test
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "io"
+ "slices"
+ "strings"
+ "testing"
+
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/stretchr/testify/require"
+
+ "github.com/grafana/loki/pkg/push"
+
+ "github.com/grafana/loki/v3/pkg/dataobj"
+ "github.com/grafana/loki/v3/pkg/dataobj/internal/encoding"
+ "github.com/grafana/loki/v3/pkg/dataobj/internal/sections/logs"
+)
+
+var recordsTestdata = []logs.Record{
+ {StreamID: 1, Timestamp: unixTime(10), Metadata: nil, Line: "hello"},
+ {StreamID: 1, Timestamp: unixTime(15), Metadata: metadata("trace_id", "123"), Line: "world"},
+ {StreamID: 2, Timestamp: unixTime(5), Metadata: nil, Line: "hello again"},
+ {StreamID: 2, Timestamp: unixTime(20), Metadata: metadata("user", "12"), Line: "world again"},
+ {StreamID: 3, Timestamp: unixTime(25), Metadata: metadata("user", "14"), Line: "hello one more time"},
+ {StreamID: 3, Timestamp: unixTime(30), Metadata: metadata("trace_id", "123"), Line: "world one more time"},
+}
+
+func metadata(kvps ...string) push.LabelsAdapter {
+ if len(kvps)%2 != 0 {
+ panic("metadata: odd number of key-value pairs")
+ }
+
+ m := make(push.LabelsAdapter, len(kvps)/2)
+ for i := 0; i < len(kvps); i += 2 {
+ m = append(m, push.LabelAdapter{Name: kvps[i], Value: kvps[i+1]})
+ }
+ return m
+}
+
+func TestLogsReader(t *testing.T) {
+ expect := []dataobj.Record{
+ {1, unixTime(10), labels.FromStrings(), "hello"},
+ {1, unixTime(15), labels.FromStrings("trace_id", "123"), "world"},
+ {2, unixTime(5), labels.FromStrings(), "hello again"},
+ {2, unixTime(20), labels.FromStrings("user", "12"), "world again"},
+ {3, unixTime(25), labels.FromStrings("user", "14"), "hello one more time"},
+ {3, unixTime(30), labels.FromStrings("trace_id", "123"), "world one more time"},
+ }
+
+ // Build with many pages but one section.
+ obj := buildLogsObject(t, logs.Options{
+ PageSizeHint: 1,
+ BufferSize: 1,
+ SectionSize: 1024,
+ })
+ md, err := obj.Metadata(context.Background())
+ require.NoError(t, err)
+ require.Equal(t, 1, md.LogsSections)
+
+ r := dataobj.NewLogsReader(obj, 0)
+ actual, err := readAllRecords(context.Background(), r)
+ require.NoError(t, err)
+ require.Equal(t, expect, actual)
+}
+
+func TestLogsReader_MatchStreams(t *testing.T) {
+ expect := []dataobj.Record{
+ {1, unixTime(10), labels.FromStrings(), "hello"},
+ {1, unixTime(15), labels.FromStrings("trace_id", "123"), "world"},
+ {3, unixTime(25), labels.FromStrings("user", "14"), "hello one more time"},
+ {3, unixTime(30), labels.FromStrings("trace_id", "123"), "world one more time"},
+ }
+
+ // Build with many pages but one section.
+ obj := buildLogsObject(t, logs.Options{
+ PageSizeHint: 1,
+ BufferSize: 1,
+ SectionSize: 1024,
+ })
+ md, err := obj.Metadata(context.Background())
+ require.NoError(t, err)
+ require.Equal(t, 1, md.LogsSections)
+
+ r := dataobj.NewLogsReader(obj, 0)
+ require.NoError(t, r.MatchStreams(slices.Values([]int64{1, 3})))
+
+ actual, err := readAllRecords(context.Background(), r)
+ require.NoError(t, err)
+ require.Equal(t, expect, actual)
+}
+
+func TestLogsReader_AddMetadataMatcher(t *testing.T) {
+ expect := []dataobj.Record{
+ {1, unixTime(15), labels.FromStrings("trace_id", "123"), "world"},
+ {3, unixTime(30), labels.FromStrings("trace_id", "123"), "world one more time"},
+ }
+
+ // Build with many pages but one section.
+ obj := buildLogsObject(t, logs.Options{
+ PageSizeHint: 1,
+ BufferSize: 1,
+ SectionSize: 1024,
+ })
+ md, err := obj.Metadata(context.Background())
+ require.NoError(t, err)
+ require.Equal(t, 1, md.LogsSections)
+
+ r := dataobj.NewLogsReader(obj, 0)
+ require.NoError(t, r.AddMetadataMatcher(dataobj.MetadataMatcher{"trace_id", "123"}))
+
+ actual, err := readAllRecords(context.Background(), r)
+ require.NoError(t, err)
+ require.Equal(t, expect, actual)
+}
+
+func TestLogsReader_AddMetadataFilter(t *testing.T) {
+ expect := []dataobj.Record{
+ {2, unixTime(20), labels.FromStrings("user", "12"), "world again"},
+ {3, unixTime(25), labels.FromStrings("user", "14"), "hello one more time"},
+ }
+
+ // Build with many pages but one section.
+ obj := buildLogsObject(t, logs.Options{
+ PageSizeHint: 1,
+ BufferSize: 1,
+ SectionSize: 1024,
+ })
+ md, err := obj.Metadata(context.Background())
+ require.NoError(t, err)
+ require.Equal(t, 1, md.LogsSections)
+
+ r := dataobj.NewLogsReader(obj, 0)
+ err = r.AddMetadataFilter("user", func(name, value string) bool {
+ require.Equal(t, "user", name)
+ return strings.HasPrefix(value, "1")
+ })
+ require.NoError(t, err)
+
+ actual, err := readAllRecords(context.Background(), r)
+ require.NoError(t, err)
+ require.Equal(t, expect, actual)
+}
+
+func buildLogsObject(t *testing.T, opts logs.Options) *dataobj.Object {
+ t.Helper()
+
+ s := logs.New(nil, opts)
+ for _, rec := range recordsTestdata {
+ s.Append(rec)
+ }
+
+ var buf bytes.Buffer
+
+ enc := encoding.NewEncoder(&buf)
+ require.NoError(t, s.EncodeTo(enc))
+ require.NoError(t, enc.Flush())
+
+ return dataobj.FromReaderAt(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
+}
+
+func readAllRecords(ctx context.Context, r *dataobj.LogsReader) ([]dataobj.Record, error) {
+ var (
+ res []dataobj.Record
+ buf = make([]dataobj.Record, 128)
+ )
+
+ for {
+ n, err := r.Read(ctx, buf)
+ if n > 0 {
+ res = append(res, buf[:n]...)
+ }
+ if errors.Is(err, io.EOF) {
+ return res, nil
+ } else if err != nil {
+ return res, err
+ }
+
+ buf = buf[:0]
+ }
+}
diff --git a/pkg/dataobj/reader.go b/pkg/dataobj/reader.go
deleted file mode 100644
index aa5b02c2bc03e..0000000000000
--- a/pkg/dataobj/reader.go
+++ /dev/null
@@ -1,111 +0,0 @@
-package dataobj
-
-import (
- "context"
- "errors"
- "fmt"
-
- "github.com/thanos-io/objstore"
-
- "github.com/grafana/loki/v3/pkg/dataobj/internal/encoding"
- "github.com/grafana/loki/v3/pkg/dataobj/internal/result"
- "github.com/grafana/loki/v3/pkg/dataobj/internal/sections/logs"
- "github.com/grafana/loki/v3/pkg/dataobj/internal/sections/streams"
- "github.com/grafana/loki/v3/pkg/logproto"
-)
-
-// reader connects to an object storage bucket and supports basic reading from
-// data objects.
-//
-// reader isn't exposed as a public API because it's insufficient for reading
-// at scale; more work is needed to support efficient reads and filtering data.
-// At the moment, reader is only used for tests.
-type reader struct {
- bucket objstore.Bucket
-}
-
-func newReader(bucket objstore.Bucket) *reader {
- return &reader{bucket: bucket}
-}
-
-// Objects returns an iterator over all data objects for the provided tenant.
-func (r *reader) Objects(ctx context.Context, tenant string) result.Seq[string] {
- tenantPath := fmt.Sprintf("tenant-%s/objects/", tenant)
-
- return result.Iter(func(yield func(string) bool) error {
- errIterationStopped := errors.New("iteration stopped")
-
- err := r.bucket.Iter(ctx, tenantPath, func(name string) error {
- if !yield(name) {
- return errIterationStopped
- }
- return nil
- }, objstore.WithRecursiveIter())
-
- switch {
- case errors.Is(err, errIterationStopped):
- return nil
- default:
- return err
- }
- })
-}
-
-// Streams returns an iterator over all [logproto.Stream] entries for the
-// provided object. Each emitted stream contains all logs for that stream in
-// ascending timestamp order. Streams are emitted in in the order they were
-// first appended to the data object.
-func (r *reader) Streams(ctx context.Context, object string) result.Seq[logproto.Stream] {
- return result.Iter(func(yield func(logproto.Stream) bool) error {
- dec := encoding.BucketDecoder(r.bucket, object)
-
- streamRecords, err := result.Collect(streams.Iter(ctx, dec))
- if err != nil {
- return fmt.Errorf("reading streams dataset: %w", err)
- }
- streamRecordLookup := make(map[int64]streams.Stream, len(streamRecords))
- for _, stream := range streamRecords {
- streamRecordLookup[stream.ID] = stream
- }
-
- var (
- lastID int64
- batch logproto.Stream
- )
-
- for result := range logs.Iter(ctx, dec) {
- record, err := result.Value()
- if err != nil {
- return fmt.Errorf("iterating over logs: %w", err)
- }
-
- if lastID != record.StreamID {
- if lastID != 0 && !yield(batch) {
- return nil
- }
-
- streamRecord := streamRecordLookup[record.StreamID]
-
- batch = logproto.Stream{
- Labels: streamRecord.Labels.String(),
- Hash: streamRecord.Labels.Hash(),
- }
-
- lastID = record.StreamID
- }
-
- batch.Entries = append(batch.Entries, logproto.Entry{
- Timestamp: record.Timestamp,
- Line: record.Line,
- StructuredMetadata: record.Metadata,
- })
- }
- if len(batch.Entries) > 0 {
- if !yield(batch) {
- return nil
- }
- }
-
- return nil
- })
-}
diff --git a/pkg/dataobj/streams_reader.go b/pkg/dataobj/streams_reader.go
new file mode 100644
index 0000000000000..12bcd2244bf07
--- /dev/null
+++ b/pkg/dataobj/streams_reader.go
@@ -0,0 +1,248 @@
+package dataobj
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "time"
+
+ "github.com/prometheus/prometheus/model/labels"
+
+ "github.com/grafana/loki/v3/pkg/dataobj/internal/metadata/filemd"
+ "github.com/grafana/loki/v3/pkg/dataobj/internal/result"
+ "github.com/grafana/loki/v3/pkg/dataobj/internal/sections/streams"
+)
+
+// Predicates for reading streams.
+type (
+ // LabelMatcher is a predicate for matching labels in a streams section.
+ // LabelMatcher predicates assert that a label named Name exists and its
+ // value is set to Value.
+ //
+ // For equality matches, LabelMatcher should always be used; LabelMatchers
+ // can translate into more efficient filter operations than a [LabelFilter]
+ // can.
+ LabelMatcher struct{ Name, Value string }
+
+ // LabelFilter is a predicate for matching labels in a streams section.
+ // LabelFilter predicates return a true value when the combination of the
+ // provided label name and value should be included in the result.
+ //
+ // LabelFilter predicates should only be used for more complex filtering; for
+ // equality matches, [LabelMatcher]s are more efficient.
+ LabelFilter func(name, value string) bool
+)
+
+// A Stream is an individual stream in a data object.
+type Stream struct {
+ // ID of the stream. Stream IDs are unique across all sections in an object,
+ // but not across multiple objects.
+ ID int64
+
+ // MinTime and MaxTime denote the range of timestamps across all entries in
+ // the stream.
+ MinTime, MaxTime time.Time
+
+ // Labels of the stream.
+ Labels labels.Labels
+}
+
+// StreamsReader reads the set of streams from an [Object].
+type StreamsReader struct {
+ obj *Object
+ idx int
+
+ matchers map[string]string
+ filters map[string]LabelFilter
+
+ next func() (result.Result[streams.Stream], bool)
+ stop func()
+}
+
+// NewStreamsReader creates a new StreamsReader that reads from the streams
+// section of the given object.
+func NewStreamsReader(obj *Object, sectionIndex int) *StreamsReader {
+ var sr StreamsReader
+ sr.Reset(obj, sectionIndex)
+ return &sr
+}
+
+// AddLabelMatcher adds a label matcher to the StreamsReader.
+// [StreamsReader.Read] will only return streams for which the label matcher
+// predicate passes.
+//
+// AddLabelMatcher may only be called before reading begins or after a call to
+// [StreamsReader.Reset].
+func (r *StreamsReader) AddLabelMatcher(m LabelMatcher) error {
+ if r.next != nil {
+ return fmt.Errorf("cannot add label matcher after reading has started")
+ }
+
+ if r.matchers == nil {
+ r.matchers = make(map[string]string)
+ }
+ r.matchers[m.Name] = m.Value
+ return nil
+}
+
+// AddLabelFilter adds a label filter to the StreamsReader.
+// [StreamsReader.Read] will only return streams for which the label filter
+// predicate passes. The filter f will be called with the provided key to allow
+// the same function to be reused for multiple keys.
+//
+// AddLabelFilter may only be called before reading begins or after a call to
+// [StreamsReader.Reset].
+func (r *StreamsReader) AddLabelFilter(key string, f LabelFilter) error {
+ if r.next != nil {
+ return fmt.Errorf("cannot add label filter after reading has started")
+ }
+
+ if r.filters == nil {
+ r.filters = make(map[string]LabelFilter)
+ }
+ r.filters[key] = f
+ return nil
+}
+
+// Read reads up to the next len(s) streams from the reader and stores them
+// into s. It returns the number of streams read and any error encountered. At
+// the end of the stream section, Read returns 0, io.EOF.
+func (r *StreamsReader) Read(ctx context.Context, s []Stream) (int, error) {
+ // TODO(rfratto): The implementation below is the initial, naive approach. It
+ // lacks a few features that will be needed at scale:
+ //
+ // * Read columns/pages in batches of len(s), rather than one row at a time.
+ //
+ // * Add page-level filtering based on min/max page values to quickly filter
+ // out batches of rows without needing to download or decode them.
+ //
+ // * Download pages in batches, rather than one at a time.
+ //
+ // * Only download/decode non-predicate columns following finding rows that
+ // match all predicate columns.
+ //
+ // * Reuse as much memory as possible from a combination of s and the state
+ // of StreamsReader.
+ //
+ // These details can change internally without changing the API exposed by
+ // StreamsReader, which is designed to permit efficient use in the future.
+
+ if r.obj == nil {
+ return 0, io.EOF
+ } else if r.idx < 0 {
+ return 0, fmt.Errorf("invalid section index %d", r.idx)
+ }
+
+ if r.next == nil {
+ err := r.initIter(ctx)
+ if err != nil {
+ return 0, err
+ }
+ }
+
+ for i := range s {
+ res, ok := r.nextMatching()
+ if !ok {
+ return i, io.EOF
+ }
+
+ stream, err := res.Value()
+ if err != nil {
+ return i, fmt.Errorf("reading stream: %w", err)
+ }
+
+ s[i] = Stream{
+ ID: stream.ID,
+ MinTime: stream.MinTimestamp,
+ MaxTime: stream.MaxTimestamp,
+ Labels: stream.Labels,
+ }
+ }
+
+ return len(s), nil
+}
+
+func (r *StreamsReader) initIter(ctx context.Context) error {
+ sec, err := r.findSection(ctx)
+ if err != nil {
+ return fmt.Errorf("finding section: %w", err)
+ }
+
+ if r.stop != nil {
+ r.stop()
+ }
+
+ seq := streams.IterSection(ctx, r.obj.dec.StreamsDecoder(), sec)
+ r.next, r.stop = result.Pull(seq)
+ return nil
+}
+
+func (r *StreamsReader) findSection(ctx context.Context) (*filemd.SectionInfo, error) {
+ si, err := r.obj.dec.Sections(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("reading sections: %w", err)
+ }
+
+ var n int
+
+ for _, s := range si {
+ if s.Type == filemd.SECTION_TYPE_STREAMS {
+ if n == r.idx {
+ return s, nil
+ }
+ n++
+ }
+ }
+
+ return nil, fmt.Errorf("section index %d not found", r.idx)
+}
+
+func (r *StreamsReader) nextMatching() (result.Result[streams.Stream], bool) {
+ if r.next == nil {
+ return result.Result[streams.Stream]{}, false
+ }
+
+NextRow:
+ res, ok := r.next()
+ if !ok {
+ return res, ok
+ }
+
+ stream, err := res.Value()
+ if err != nil {
+ return res, true
+ }
+
+ for key, value := range r.matchers {
+ if stream.Labels.Get(key) != value {
+ goto NextRow
+ }
+ }
+
+ for key, filter := range r.filters {
+ if !filter(key, stream.Labels.Get(key)) {
+ goto NextRow
+ }
+ }
+
+ return res, true
+}
+
+// Reset resets the StreamsReader with a new object and section index to read
+// from. Reset allows reusing a StreamsReader without allocating a new one.
+//
+// Reset may be called with a nil object and a negative section index to clear
+// the StreamsReader without needing a new object.
+func (r *StreamsReader) Reset(obj *Object, sectionIndex int) {
+ if r.stop != nil {
+ r.stop()
+ }
+
+ r.obj = obj
+ r.idx = sectionIndex
+ r.next = nil
+ r.stop = nil
+
+ clear(r.matchers)
+ clear(r.filters)
+}
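
StreamsReader composes with LogsReader: resolve stream IDs by label here, then pass them to [LogsReader.MatchStreams]. A sketch under the same caveats as above, with an illustrative "app" label matcher:

import (
	"context"
	"errors"
	"io"

	"github.com/grafana/loki/v3/pkg/dataobj"
)

// streamIDsForApp reads the first streams section and collects the IDs of
// streams whose "app" label equals the given value.
func streamIDsForApp(ctx context.Context, obj *dataobj.Object, app string) ([]int64, error) {
	r := dataobj.NewStreamsReader(obj, 0)
	if err := r.AddLabelMatcher(dataobj.LabelMatcher{Name: "app", Value: app}); err != nil {
		return nil, err
	}

	var (
		ids []int64
		buf = make([]dataobj.Stream, 64)
	)
	for {
		n, err := r.Read(ctx, buf)
		for _, s := range buf[:n] {
			ids = append(ids, s.ID)
		}
		if errors.Is(err, io.EOF) {
			return ids, nil
		} else if err != nil {
			return ids, err
		}
	}
}

The returned IDs can then be handed to a LogsReader via slices.Values(ids) to narrow a subsequent log read to the matched streams.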
diff --git a/pkg/dataobj/streams_reader_test.go b/pkg/dataobj/streams_reader_test.go
new file mode 100644
index 0000000000000..59ce1a3b45556
--- /dev/null
+++ b/pkg/dataobj/streams_reader_test.go
@@ -0,0 +1,131 @@
+package dataobj_test
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "io"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/stretchr/testify/require"
+
+ "github.com/grafana/loki/v3/pkg/dataobj"
+ "github.com/grafana/loki/v3/pkg/dataobj/internal/encoding"
+ "github.com/grafana/loki/v3/pkg/dataobj/internal/sections/streams"
+)
+
+var streamsTestdata = []struct {
+ Labels labels.Labels
+ Timestamp time.Time
+}{
+ {labels.FromStrings("cluster", "test", "app", "foo"), unixTime(10)},
+ {labels.FromStrings("cluster", "test", "app", "foo"), unixTime(15)},
+ {labels.FromStrings("cluster", "test", "app", "bar"), unixTime(5)},
+ {labels.FromStrings("cluster", "test", "app", "bar"), unixTime(20)},
+ {labels.FromStrings("cluster", "test", "app", "baz"), unixTime(25)},
+ {labels.FromStrings("cluster", "test", "app", "baz"), unixTime(30)},
+}
+
+func TestStreamsReader(t *testing.T) {
+ expect := []dataobj.Stream{
+ {1, unixTime(10), unixTime(15), labels.FromStrings("cluster", "test", "app", "foo")},
+ {2, unixTime(5), unixTime(20), labels.FromStrings("cluster", "test", "app", "bar")},
+ {3, unixTime(25), unixTime(30), labels.FromStrings("cluster", "test", "app", "baz")},
+ }
+
+ obj := buildStreamsObject(t, 1) // Many pages
+ md, err := obj.Metadata(context.Background())
+ require.NoError(t, err)
+ require.Equal(t, 1, md.StreamsSections)
+
+ r := dataobj.NewStreamsReader(obj, 0)
+ actual, err := readAllStreams(context.Background(), r)
+ require.NoError(t, err)
+ require.Equal(t, expect, actual)
+}
+
+func TestStreamsReader_AddLabelMatcher(t *testing.T) {
+ expect := []dataobj.Stream{
+ {2, unixTime(5), unixTime(20), labels.FromStrings("cluster", "test", "app", "bar")},
+ }
+
+ obj := buildStreamsObject(t, 1) // Many pages
+ md, err := obj.Metadata(context.Background())
+ require.NoError(t, err)
+ require.Equal(t, 1, md.StreamsSections)
+
+ r := dataobj.NewStreamsReader(obj, 0)
+ require.NoError(t, r.AddLabelMatcher(dataobj.LabelMatcher{Name: "app", Value: "bar"}))
+
+ actual, err := readAllStreams(context.Background(), r)
+ require.NoError(t, err)
+ require.Equal(t, expect, actual)
+}
+
+func TestStreamsReader_AddLabelFilter(t *testing.T) {
+ expect := []dataobj.Stream{
+ {2, unixTime(5), unixTime(20), labels.FromStrings("cluster", "test", "app", "bar")},
+ {3, unixTime(25), unixTime(30), labels.FromStrings("cluster", "test", "app", "baz")},
+ }
+
+ obj := buildStreamsObject(t, 1) // Many pages
+ md, err := obj.Metadata(context.Background())
+ require.NoError(t, err)
+ require.Equal(t, 1, md.StreamsSections)
+
+ r := dataobj.NewStreamsReader(obj, 0)
+ err = r.AddLabelFilter("app", func(key string, value string) bool {
+ require.Equal(t, "app", key)
+ return strings.HasPrefix(value, "b")
+ })
+ require.NoError(t, err)
+
+ actual, err := readAllStreams(context.Background(), r)
+ require.NoError(t, err)
+ require.Equal(t, expect, actual)
+}
+
+func unixTime(sec int64) time.Time {
+ return time.Unix(sec, 0).UTC()
+}
+
+func buildStreamsObject(t *testing.T, pageSize int) *dataobj.Object {
+ t.Helper()
+
+ s := streams.New(nil, pageSize)
+ for _, d := range streamsTestdata {
+ s.Record(d.Labels, d.Timestamp)
+ }
+
+ var buf bytes.Buffer
+
+ enc := encoding.NewEncoder(&buf)
+ require.NoError(t, s.EncodeTo(enc))
+ require.NoError(t, enc.Flush())
+
+ return dataobj.FromReaderAt(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
+}
+
+func readAllStreams(ctx context.Context, r *dataobj.StreamsReader) ([]dataobj.Stream, error) {
+ var (
+ res []dataobj.Stream
+ buf = make([]dataobj.Stream, 128)
+ )
+
+ for {
+ n, err := r.Read(ctx, buf)
+ if n > 0 {
+ res = append(res, buf[:n]...)
+ }
+ if errors.Is(err, io.EOF) {
+ return res, nil
+ } else if err != nil {
+ return res, err
+ }
+
+ buf = buf[:0]
+ }
+}
|
chore
|
add initial high-level APIs for reading streams and log records (#15974)
|
e9a9c60c22e78b52c0c046d379b4b2b986d91dca
|
2024-07-31 02:43:13
|
Joel Verezhak
|
fix: protect ruler remote-write overrides map with a mutex when creating new appenders (#13676)
| false
|
diff --git a/pkg/ruler/registry.go b/pkg/ruler/registry.go
index 868b7f29a6f94..29297fcab5a74 100644
--- a/pkg/ruler/registry.go
+++ b/pkg/ruler/registry.go
@@ -128,8 +128,12 @@ func (r *walRegistry) get(tenant string) storage.Storage {
}
func (r *walRegistry) Appender(ctx context.Context) storage.Appender {
+ // concurrency-safe retrieval of remote-write config for this tenant, using the global remote-write for defaults
+ r.overridesMu.Lock()
tenant, _ := user.ExtractOrgID(ctx)
rwCfg, err := r.getTenantRemoteWriteConfig(tenant, r.config.RemoteWrite)
+ r.overridesMu.Unlock()
+
if err != nil {
level.Error(r.logger).Log("msg", "error retrieving remote-write config; discarding samples", "user", tenant, "err", err)
return discardingAppender{}
diff --git a/pkg/ruler/registry_test.go b/pkg/ruler/registry_test.go
index 261b6d3836763..7ab12d8962ae6 100644
--- a/pkg/ruler/registry_test.go
+++ b/pkg/ruler/registry_test.go
@@ -405,6 +405,30 @@ func TestTenantRemoteWriteConfigWithOverrideConcurrentAccess(t *testing.T) {
})
}
+func TestAppenderConcurrentAccess(t *testing.T) {
+ require.NotPanics(t, func() {
+ reg := setupRegistry(t, cfg, newFakeLimits())
+ var wg sync.WaitGroup
+ for i := 0; i < 1000; i++ {
+ wg.Add(1)
+ go func(reg *walRegistry) {
+ defer wg.Done()
+
+ _ = reg.Appender(user.InjectOrgID(context.Background(), enabledRWTenant))
+ }(reg)
+
+ wg.Add(1)
+ go func(reg *walRegistry) {
+ defer wg.Done()
+
+ _ = reg.Appender(user.InjectOrgID(context.Background(), additionalHeadersRWTenant))
+ }(reg)
+ }
+
+ wg.Wait()
+ })
+}
+
func TestTenantRemoteWriteConfigWithoutOverride(t *testing.T) {
reg := setupRegistry(t, backCompatCfg, newFakeLimitsBackwardCompat())
|
fix
|
protect ruler remote-write overrides map with a mutex when creating new appenders (#13676)
|
1f9beae0cce4d3ca939bd54d7da0c27d79e0864a
|
2024-11-01 18:56:01
|
renovate[bot]
|
chore(deps): update terraform google to v6.9.0 (#14718)
| false
|
diff --git a/tools/gcplog/main.tf b/tools/gcplog/main.tf
index 73217fd10ca68..69425eaa47d77 100644
--- a/tools/gcplog/main.tf
+++ b/tools/gcplog/main.tf
@@ -2,7 +2,7 @@ terraform {
required_providers {
google = {
source = "hashicorp/google"
- version = "6.5.0"
+ version = "6.9.0"
}
}
}
|
chore
|
update terraform google to v6.9.0 (#14718)
|
a08ee68deaa3def2018e4ec865b3197e17df3c85
|
2024-06-12 01:34:27
|
Adam Matthews
|
docs: Update Get-Started Labels (#13188)
| false
|
diff --git a/docs/sources/get-started/labels/_index.md b/docs/sources/get-started/labels/_index.md
index e33f36d91f419..db918450bd9e1 100644
--- a/docs/sources/get-started/labels/_index.md
+++ b/docs/sources/get-started/labels/_index.md
@@ -37,7 +37,7 @@ Loki places the same restrictions on label naming as [Prometheus](https://promet
This series of examples will illustrate basic use cases and concepts for labeling in Loki.
-Let's take an example:
+Let's take an example Promtail/Alloy config file:
```yaml
scrape_configs:
|
docs
|
Update Get-Started Labels (#13188)
|
5cf193854466d6373befbd3cdff74df90ee22890
|
2024-11-07 05:49:30
|
Trevor Whitney
|
ci: fix the helm ci (#14804)
| false
|
diff --git a/.github/workflows/helm-ci.yml b/.github/workflows/helm-ci.yml
index 5aae04db31067..f056c14bcb83e 100644
--- a/.github/workflows/helm-ci.yml
+++ b/.github/workflows/helm-ci.yml
@@ -85,10 +85,12 @@ jobs:
run: |
kubectl create namespace prometheus
- helm install prometheus prometheus-community/kube-prometheus-stack \
+ helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
+ helm install --debug prometheus prometheus-community/kube-prometheus-stack \
--namespace prometheus \
--set grafana.enabled=false \
- --set prometheus.prometheusSpec.serviceMonitorSelector.matchLabels.release=prometheus
+ --set prometheus.prometheusSpec.serviceMonitorSelector.matchLabels.release=prometheus \
+ --set prometheus.prometheusSpec.scrapeConfigNamespaceSelector.matchLabels.release=prometheus
kubectl --namespace prometheus get pods -l "release=prometheus"
kubectl --namespace prometheus get services -l "release=prometheus"
|
ci
|
fix the helm ci (#14804)
|
42dccae68fc5a63526ebda786cbe8cefeedb8256
|
2020-06-22 20:47:26
|
Calle Pettersson
|
docs: Fix missing quotes (#2247)
| false
|
diff --git a/docs/logql.md b/docs/logql.md
index 4c83b142373cb..9cd0afa636736 100644
--- a/docs/logql.md
+++ b/docs/logql.md
@@ -69,7 +69,7 @@ After writing the log stream selector, the resulting set of logs can be further
- `{job="mysql"} |= "error"`
- `{name="kafka"} |~ "tsdb-ops.*io:2003"`
- `` {name="cassandra"} |~ `error=\w+` ``
-- `{instance=~"kafka-[23]",name="kafka"} != kafka.server:type=ReplicaManager`
+- `{instance=~"kafka-[23]",name="kafka"} != "kafka.server:type=ReplicaManager"`
In the previous examples, `|=`, `|~`, and `!=` act as **filter operators** and
the following filter operators are supported:
|
docs
|
Fix missing quotes (#2247)
|
f1fb331c1acc5300a490aec7ae739e3017ada60e
|
2025-03-20 04:45:11
|
renovate[bot]
|
chore(deps): update dependency @types/react to v19.0.12 (main) (#16836)
| false
|
diff --git a/pkg/ui/frontend/package-lock.json b/pkg/ui/frontend/package-lock.json
index 1002ba5a35ead..9dc3ea9930b79 100644
--- a/pkg/ui/frontend/package-lock.json
+++ b/pkg/ui/frontend/package-lock.json
@@ -2768,9 +2768,9 @@
"license": "MIT"
},
"node_modules/@types/react": {
- "version": "19.0.11",
- "resolved": "https://registry.npmjs.org/@types/react/-/react-19.0.11.tgz",
- "integrity": "sha512-vrdxRZfo9ALXth6yPfV16PYTLZwsUWhVjjC+DkfE5t1suNSbBrWC9YqSuuxJZ8Ps6z1o2ycRpIqzZJIgklq4Tw==",
+ "version": "19.0.12",
+ "resolved": "https://registry.npmjs.org/@types/react/-/react-19.0.12.tgz",
+ "integrity": "sha512-V6Ar115dBDrjbtXSrS+/Oruobc+qVbbUxDFC1RSbRqLt5SYvxxyIDrSC85RWml54g+jfNeEMZhEj7wW07ONQhA==",
"devOptional": true,
"license": "MIT",
"dependencies": {
|
chore
|
update dependency @types/react to v19.0.12 (main) (#16836)
|
b87224647dc88901c61cb4bd571dfda9405a7826
|
2024-10-28 10:24:48
|
Joao Marcal
|
feat(storage): AWS backend using thanos.io/objstore (#11221)
| false
|
diff --git a/pkg/storage/bucket/client.go b/pkg/storage/bucket/client.go
index 0bd8ec4e92e53..06f8d128f850d 100644
--- a/pkg/storage/bucket/client.go
+++ b/pkg/storage/bucket/client.go
@@ -84,10 +84,9 @@ func (cfg *StorageBackendConfig) RegisterFlagsWithPrefix(prefix string, f *flag.
}
func (cfg *StorageBackendConfig) Validate() error {
- // TODO: enable validation when s3 flags are registered
- // if err := cfg.S3.Validate(); err != nil {
- // return err
- //}
+ if err := cfg.S3.Validate(); err != nil {
+ return err
+ }
return nil
}
diff --git a/pkg/storage/bucket/s3/bucket_client.go b/pkg/storage/bucket/s3/bucket_client.go
index 107e18b3c7bb0..5d904d8e5fe9b 100644
--- a/pkg/storage/bucket/s3/bucket_client.go
+++ b/pkg/storage/bucket/s3/bucket_client.go
@@ -4,6 +4,7 @@ import (
"github.com/go-kit/log"
"github.com/prometheus/common/model"
"github.com/thanos-io/objstore"
+ "github.com/thanos-io/objstore/exthttp"
"github.com/thanos-io/objstore/providers/s3"
)
@@ -38,17 +39,28 @@ func newS3Config(cfg Config) (s3.Config, error) {
return s3.Config{}, err
}
+ putUserMetadata := map[string]string{}
+
+ if cfg.StorageClass != "" {
+ putUserMetadata[awsStorageClassHeader] = cfg.StorageClass
+ }
+
return s3.Config{
- Bucket: cfg.BucketName,
- Endpoint: cfg.Endpoint,
- Region: cfg.Region,
- AccessKey: cfg.AccessKeyID,
- SecretKey: cfg.SecretAccessKey.String(),
- SessionToken: cfg.SessionToken.String(),
- Insecure: cfg.Insecure,
- DisableDualstack: cfg.DisableDualstack,
- SSEConfig: sseCfg,
- PutUserMetadata: map[string]string{awsStorageClassHeader: cfg.StorageClass},
+ Bucket: cfg.BucketName,
+ Endpoint: cfg.Endpoint,
+ Region: cfg.Region,
+ AccessKey: cfg.AccessKeyID,
+ SecretKey: cfg.SecretAccessKey.String(),
+ SessionToken: cfg.SessionToken.String(),
+ Insecure: cfg.Insecure,
+ PutUserMetadata: putUserMetadata,
+ SendContentMd5: cfg.SendContentMd5,
+ SSEConfig: sseCfg,
+ DisableDualstack: !cfg.DualstackEnabled,
+ ListObjectsVersion: cfg.ListObjectsVersion,
+ BucketLookupType: cfg.BucketLookupType,
+ AWSSDKAuth: cfg.NativeAWSAuthEnabled,
+ PartSize: cfg.PartSize,
HTTPConfig: s3.HTTPConfig{
IdleConnTimeout: model.Duration(cfg.HTTP.IdleConnTimeout),
ResponseHeaderTimeout: model.Duration(cfg.HTTP.ResponseHeaderTimeout),
@@ -59,6 +71,16 @@ func newS3Config(cfg Config) (s3.Config, error) {
MaxIdleConnsPerHost: cfg.HTTP.MaxIdleConnsPerHost,
MaxConnsPerHost: cfg.HTTP.MaxConnsPerHost,
Transport: cfg.HTTP.Transport,
+ TLSConfig: exthttp.TLSConfig{
+ CAFile: cfg.HTTP.TLSConfig.CAPath,
+ CertFile: cfg.HTTP.TLSConfig.CertPath,
+ KeyFile: cfg.HTTP.TLSConfig.KeyPath,
+ ServerName: cfg.HTTP.TLSConfig.ServerName,
+ },
+ },
+ TraceConfig: s3.TraceConfig{
+ Enable: cfg.TraceConfig.Enabled,
},
+ STSEndpoint: cfg.STSEndpoint,
}, nil
}
diff --git a/pkg/storage/bucket/s3/config.go b/pkg/storage/bucket/s3/config.go
index 32db169f450f6..792f93f752b32 100644
--- a/pkg/storage/bucket/s3/config.go
+++ b/pkg/storage/bucket/s3/config.go
@@ -5,23 +5,20 @@ import (
"flag"
"fmt"
"net/http"
+ "slices"
"strings"
+ "time"
+ s3_service "github.com/aws/aws-sdk-go/service/s3"
"github.com/grafana/dskit/flagext"
"github.com/minio/minio-go/v7/pkg/encrypt"
"github.com/pkg/errors"
"github.com/thanos-io/objstore/providers/s3"
- bucket_http "github.com/grafana/loki/v3/pkg/storage/bucket/http"
- "github.com/grafana/loki/v3/pkg/storage/common/aws"
"github.com/grafana/loki/v3/pkg/util"
)
const (
- // Signature Version 2 is being turned off (deprecated) in Amazon S3. Amazon S3 will then only accept API requests that are signed using Signature Version 4.
- // https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingAWSSDK.html#UsingAWSSDK-sig2-deprecation
- SignatureVersionV4 = "v4"
-
// SSEKMS config type constant to configure S3 server side encryption using KMS
// https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html
SSEKMS = "SSE-KMS"
@@ -32,41 +29,99 @@ const (
)
var (
- supportedSignatureVersions = []string{SignatureVersionV4}
- supportedSSETypes = []string{SSEKMS, SSES3}
- errUnsupportedSignatureVersion = errors.New("unsupported signature version")
- errUnsupportedSSEType = errors.New("unsupported S3 SSE type")
- errInvalidSSEContext = errors.New("invalid S3 SSE encryption context")
+ supportedSSETypes = []string{SSEKMS, SSES3}
+ supportedStorageClasses = s3_service.ObjectStorageClass_Values()
+ supportedBucketLookupTypes = thanosS3BucketLookupTypesValues()
+
+ errUnsupportedSSEType = errors.New("unsupported S3 SSE type")
+ errUnsupportedStorageClass = fmt.Errorf("unsupported S3 storage class (supported values: %s)", strings.Join(supportedStorageClasses, ", "))
+ errInvalidSSEContext = errors.New("invalid S3 SSE encryption context")
+	errInvalidEndpointPrefix   = errors.New("the endpoint must not be prefixed with the bucket name")
+ errInvalidSTSEndpoint = errors.New("sts-endpoint must be a valid url")
)
+var thanosS3BucketLookupTypes = map[string]s3.BucketLookupType{
+ s3.AutoLookup.String(): s3.AutoLookup,
+ s3.VirtualHostLookup.String(): s3.VirtualHostLookup,
+ s3.PathLookup.String(): s3.PathLookup,
+}
+
+func thanosS3BucketLookupTypesValues() (list []string) {
+ for k := range thanosS3BucketLookupTypes {
+ list = append(list, k)
+ }
+ // sort the list for consistent output in help, where it's used
+ slices.Sort(list)
+ return list
+}
+
// HTTPConfig stores the http.Transport configuration for the s3 minio client.
type HTTPConfig struct {
- bucket_http.Config `yaml:",inline"`
+ IdleConnTimeout time.Duration `yaml:"idle_conn_timeout" category:"advanced"`
+ ResponseHeaderTimeout time.Duration `yaml:"response_header_timeout" category:"advanced"`
+ InsecureSkipVerify bool `yaml:"insecure_skip_verify" category:"advanced"`
+ TLSHandshakeTimeout time.Duration `yaml:"tls_handshake_timeout" category:"advanced"`
+ ExpectContinueTimeout time.Duration `yaml:"expect_continue_timeout" category:"advanced"`
+ MaxIdleConns int `yaml:"max_idle_connections" category:"advanced"`
+ MaxIdleConnsPerHost int `yaml:"max_idle_connections_per_host" category:"advanced"`
+ MaxConnsPerHost int `yaml:"max_connections_per_host" category:"advanced"`
// Allow upstream callers to inject a round tripper
Transport http.RoundTripper `yaml:"-"`
+
+ TLSConfig TLSConfig `yaml:",inline"`
+}
+
+// TLSConfig configures the options for TLS connections.
+type TLSConfig struct {
+ CAPath string `yaml:"tls_ca_path" category:"advanced"`
+ CertPath string `yaml:"tls_cert_path" category:"advanced"`
+ KeyPath string `yaml:"tls_key_path" category:"advanced"`
+ ServerName string `yaml:"tls_server_name" category:"advanced"`
}
// RegisterFlagsWithPrefix registers the flags for s3 storage with the provided prefix
func (cfg *HTTPConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
- cfg.Config.RegisterFlagsWithPrefix(prefix+"s3.", f)
+ f.DurationVar(&cfg.IdleConnTimeout, prefix+"s3.http.idle-conn-timeout", 90*time.Second, "The time an idle connection will remain idle before closing.")
+	f.DurationVar(&cfg.ResponseHeaderTimeout, prefix+"s3.http.response-header-timeout", 2*time.Minute, "The amount of time the client will wait for a server's response headers.")
+ f.BoolVar(&cfg.InsecureSkipVerify, prefix+"s3.http.insecure-skip-verify", false, "If the client connects to S3 via HTTPS and this option is enabled, the client will accept any certificate and hostname.")
+ f.DurationVar(&cfg.TLSHandshakeTimeout, prefix+"s3.tls-handshake-timeout", 10*time.Second, "Maximum time to wait for a TLS handshake. 0 means no limit.")
+ f.DurationVar(&cfg.ExpectContinueTimeout, prefix+"s3.expect-continue-timeout", 1*time.Second, "The time to wait for a server's first response headers after fully writing the request headers if the request has an Expect header. 0 to send the request body immediately.")
+ f.IntVar(&cfg.MaxIdleConns, prefix+"s3.max-idle-connections", 100, "Maximum number of idle (keep-alive) connections across all hosts. 0 means no limit.")
+ f.IntVar(&cfg.MaxIdleConnsPerHost, prefix+"s3.max-idle-connections-per-host", 100, "Maximum number of idle (keep-alive) connections to keep per-host. If 0, a built-in default value is used.")
+ f.IntVar(&cfg.MaxConnsPerHost, prefix+"s3.max-connections-per-host", 0, "Maximum number of connections per host. 0 means no limit.")
+ cfg.TLSConfig.RegisterFlagsWithPrefix(prefix, f)
+}
+
+// RegisterFlagsWithPrefix registers the flags for s3 storage with the provided prefix.
+func (cfg *TLSConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
+ f.StringVar(&cfg.CAPath, prefix+"s3.http.tls-ca-path", "", "Path to the CA certificates to validate server certificate against. If not set, the host's root CA certificates are used.")
+ f.StringVar(&cfg.CertPath, prefix+"s3.http.tls-cert-path", "", "Path to the client certificate, which will be used for authenticating with the server. Also requires the key path to be configured.")
+ f.StringVar(&cfg.KeyPath, prefix+"s3.http.tls-key-path", "", "Path to the key for the client certificate. Also requires the client certificate to be configured.")
+ f.StringVar(&cfg.ServerName, prefix+"s3.http.tls-server-name", "", "Override the expected name on the server certificate.")
}
// Config holds the config options for an S3 backend
type Config struct {
- Endpoint string `yaml:"endpoint"`
- Region string `yaml:"region"`
- BucketName string `yaml:"bucket_name"`
- SecretAccessKey flagext.Secret `yaml:"secret_access_key"`
- SessionToken flagext.Secret `yaml:"session_token"`
- AccessKeyID string `yaml:"access_key_id"`
- Insecure bool `yaml:"insecure"`
- DisableDualstack bool `yaml:"disable_dualstack"`
- SignatureVersion string `yaml:"signature_version"`
- StorageClass string `yaml:"storage_class"`
+ Endpoint string `yaml:"endpoint"`
+ Region string `yaml:"region"`
+ BucketName string `yaml:"bucket_name"`
+ SecretAccessKey flagext.Secret `yaml:"secret_access_key"`
+ AccessKeyID string `yaml:"access_key_id"`
+ SessionToken flagext.Secret `yaml:"session_token"`
+ Insecure bool `yaml:"insecure" category:"advanced"`
+ ListObjectsVersion string `yaml:"list_objects_version" category:"advanced"`
+ BucketLookupType s3.BucketLookupType `yaml:"bucket_lookup_type" category:"advanced"`
+ DualstackEnabled bool `yaml:"dualstack_enabled" category:"experimental"`
+ StorageClass string `yaml:"storage_class" category:"experimental"`
+ NativeAWSAuthEnabled bool `yaml:"native_aws_auth_enabled" category:"experimental"`
+ PartSize uint64 `yaml:"part_size" category:"experimental"`
+ SendContentMd5 bool `yaml:"send_content_md5" category:"experimental"`
+ STSEndpoint string `yaml:"sts_endpoint"`
- SSE SSEConfig `yaml:"sse"`
- HTTP HTTPConfig `yaml:"http"`
+ SSE SSEConfig `yaml:"sse"`
+ HTTP HTTPConfig `yaml:"http"`
+ TraceConfig TraceConfig `yaml:"trace"`
}
// RegisterFlags registers the flags for s3 storage with the provided prefix
@@ -83,21 +138,32 @@ func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
f.StringVar(&cfg.Region, prefix+"s3.region", "", "S3 region. If unset, the client will issue a S3 GetBucketLocation API call to autodetect it.")
f.StringVar(&cfg.Endpoint, prefix+"s3.endpoint", "", "The S3 bucket endpoint. It could be an AWS S3 endpoint listed at https://docs.aws.amazon.com/general/latest/gr/s3.html or the address of an S3-compatible service in hostname:port format.")
f.BoolVar(&cfg.Insecure, prefix+"s3.insecure", false, "If enabled, use http:// for the S3 endpoint instead of https://. This could be useful in local dev/test environments while using an S3-compatible backend storage, like Minio.")
- f.BoolVar(&cfg.DisableDualstack, prefix+"s3.disable-dualstack", false, "Disable forcing S3 dualstack endpoint usage.")
- f.StringVar(&cfg.SignatureVersion, prefix+"s3.signature-version", SignatureVersionV4, fmt.Sprintf("The signature version to use for authenticating against S3. Supported values are: %s.", strings.Join(supportedSignatureVersions, ", ")))
- f.StringVar(&cfg.StorageClass, prefix+"s3.storage-class", aws.StorageClassStandard, "The S3 storage class to use. Details can be found at https://aws.amazon.com/s3/storage-classes/.")
+ f.StringVar(&cfg.ListObjectsVersion, prefix+"s3.list-objects-version", "", "Use a specific version of the S3 list object API. Supported values are v1 or v2. Default is unset.")
+ f.StringVar(&cfg.StorageClass, prefix+"s3.storage-class", "", "The S3 storage class to use, not set by default. Details can be found at https://aws.amazon.com/s3/storage-classes/. Supported values are: "+strings.Join(supportedStorageClasses, ", "))
+ f.BoolVar(&cfg.NativeAWSAuthEnabled, prefix+"s3.native-aws-auth-enabled", false, "If enabled, it will use the default authentication methods of the AWS SDK for go based on known environment variables and known AWS config files.")
+ f.Uint64Var(&cfg.PartSize, prefix+"s3.part-size", 0, "The minimum file size in bytes used for multipart uploads. If 0, the value is optimally computed for each object.")
+ f.BoolVar(&cfg.SendContentMd5, prefix+"s3.send-content-md5", false, "If enabled, a Content-MD5 header is sent with S3 Put Object requests. Consumes more resources to compute the MD5, but may improve compatibility with object storage services that do not support checksums.")
+ f.Var(newBucketLookupTypeValue(s3.AutoLookup, &cfg.BucketLookupType), prefix+"s3.bucket-lookup-type", fmt.Sprintf("Bucket lookup style type, used to access bucket in S3-compatible service. Default is auto. Supported values are: %s.", strings.Join(supportedBucketLookupTypes, ", ")))
+ f.BoolVar(&cfg.DualstackEnabled, prefix+"s3.dualstack-enabled", true, "When enabled, direct all AWS S3 requests to the dual-stack IPv4/IPv6 endpoint for the configured region.")
+ f.StringVar(&cfg.STSEndpoint, prefix+"s3.sts-endpoint", "", "Accessing S3 resources using temporary, secure credentials provided by AWS Security Token Service.")
cfg.SSE.RegisterFlagsWithPrefix(prefix+"s3.sse.", f)
cfg.HTTP.RegisterFlagsWithPrefix(prefix, f)
+ cfg.TraceConfig.RegisterFlagsWithPrefix(prefix+"s3.trace.", f)
}
// Validate config and returns error on failure
func (cfg *Config) Validate() error {
- if !util.StringsContain(supportedSignatureVersions, cfg.SignatureVersion) {
- return errUnsupportedSignatureVersion
+ if cfg.Endpoint != "" {
+ endpoint := strings.Split(cfg.Endpoint, ".")
+ if cfg.BucketName != "" && endpoint[0] != "" && endpoint[0] == cfg.BucketName {
+ return errInvalidEndpointPrefix
+ }
}
-
- if err := aws.ValidateStorageClass(cfg.StorageClass); err != nil {
- return err
+ if cfg.STSEndpoint != "" && !util.IsValidURL(cfg.STSEndpoint) {
+ return errInvalidSTSEndpoint
+ }
+ if !slices.Contains(supportedStorageClasses, cfg.StorageClass) && cfg.StorageClass != "" {
+ return errUnsupportedStorageClass
}
return cfg.SSE.Validate()
@@ -191,3 +257,35 @@ func parseKMSEncryptionContext(data string) (map[string]string, error) {
err := errors.Wrap(json.Unmarshal([]byte(data), &decoded), "unable to parse KMS encryption context")
return decoded, err
}
+
+type TraceConfig struct {
+ Enabled bool `yaml:"enabled" category:"advanced"`
+}
+
+func (cfg *TraceConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
+ f.BoolVar(&cfg.Enabled, prefix+"enabled", false, "When enabled, low-level S3 HTTP operation information is logged at the debug level.")
+}
+
+// bucketLookupTypeValue is an adapter between s3.BucketLookupType and flag.Value.
+type bucketLookupTypeValue s3.BucketLookupType
+
+func newBucketLookupTypeValue(value s3.BucketLookupType, p *s3.BucketLookupType) *bucketLookupTypeValue {
+ *p = value
+ return (*bucketLookupTypeValue)(p)
+}
+
+func (v *bucketLookupTypeValue) String() string {
+ if v == nil {
+ return s3.AutoLookup.String()
+ }
+ return s3.BucketLookupType(*v).String()
+}
+
+func (v *bucketLookupTypeValue) Set(s string) error {
+ t, ok := thanosS3BucketLookupTypes[s]
+ if !ok {
+ return fmt.Errorf("unsupported bucket lookup type: %s", s)
+ }
+ *v = bucketLookupTypeValue(t)
+ return nil
+}
diff --git a/pkg/storage/bucket/s3/config_test.go b/pkg/storage/bucket/s3/config_test.go
index 3f32e8f847936..078353b68bd86 100644
--- a/pkg/storage/bucket/s3/config_test.go
+++ b/pkg/storage/bucket/s3/config_test.go
@@ -1,127 +1,23 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+// Provenance-includes-location: https://github.com/cortexproject/cortex/blob/master/pkg/storage/bucket/s3/config_test.go
+// Provenance-includes-license: Apache-2.0
+// Provenance-includes-copyright: The Cortex Authors.
+
package s3
import (
+ "bytes"
"encoding/base64"
- "fmt"
"net/http"
- "strings"
"testing"
- "time"
+ s3_service "github.com/aws/aws-sdk-go/service/s3"
"github.com/grafana/dskit/flagext"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- "gopkg.in/yaml.v2"
-
- bucket_http "github.com/grafana/loki/v3/pkg/storage/bucket/http"
- "github.com/grafana/loki/v3/pkg/storage/common/aws"
+ "gopkg.in/yaml.v3"
)
-// defaultConfig should match the default flag values defined in RegisterFlagsWithPrefix.
-var defaultConfig = Config{
- SignatureVersion: SignatureVersionV4,
- StorageClass: aws.StorageClassStandard,
- HTTP: HTTPConfig{
- Config: bucket_http.Config{
- IdleConnTimeout: 90 * time.Second,
- ResponseHeaderTimeout: 2 * time.Minute,
- InsecureSkipVerify: false,
- TLSHandshakeTimeout: 10 * time.Second,
- ExpectContinueTimeout: 1 * time.Second,
- MaxIdleConns: 100,
- MaxIdleConnsPerHost: 100,
- MaxConnsPerHost: 0,
- },
- },
-}
-
-func TestConfig(t *testing.T) {
- t.Parallel()
-
- tests := map[string]struct {
- config string
- expectedConfig Config
- expectedErr error
- }{
- "default config": {
- config: "",
- expectedConfig: defaultConfig,
- expectedErr: nil,
- },
- "custom config": {
- config: `
-endpoint: test-endpoint
-region: test-region
-bucket_name: test-bucket-name
-secret_access_key: test-secret-access-key
-access_key_id: test-access-key-id
-insecure: true
-signature_version: test-signature-version
-storage_class: test-storage-class
-disable_dualstack: true
-sse:
- type: test-type
- kms_key_id: test-kms-key-id
- kms_encryption_context: test-kms-encryption-context
-http:
- idle_conn_timeout: 2s
- response_header_timeout: 3s
- insecure_skip_verify: true
- tls_handshake_timeout: 4s
- expect_continue_timeout: 5s
- max_idle_connections: 6
- max_idle_connections_per_host: 7
- max_connections_per_host: 8
-`,
- expectedConfig: Config{
- Endpoint: "test-endpoint",
- Region: "test-region",
- BucketName: "test-bucket-name",
- SecretAccessKey: flagext.SecretWithValue("test-secret-access-key"),
- AccessKeyID: "test-access-key-id",
- Insecure: true,
- SignatureVersion: "test-signature-version",
- StorageClass: "test-storage-class",
- DisableDualstack: true,
- SSE: SSEConfig{
- Type: "test-type",
- KMSKeyID: "test-kms-key-id",
- KMSEncryptionContext: "test-kms-encryption-context",
- },
- HTTP: HTTPConfig{
- Config: bucket_http.Config{
- IdleConnTimeout: 2 * time.Second,
- ResponseHeaderTimeout: 3 * time.Second,
- InsecureSkipVerify: true,
- TLSHandshakeTimeout: 4 * time.Second,
- ExpectContinueTimeout: 5 * time.Second,
- MaxIdleConns: 6,
- MaxIdleConnsPerHost: 7,
- MaxConnsPerHost: 8,
- },
- },
- },
- expectedErr: nil,
- },
- "invalid type": {
- config: `insecure: foo`,
- expectedConfig: defaultConfig,
- expectedErr: &yaml.TypeError{Errors: []string{"line 1: cannot unmarshal !!str `foo` into bool"}},
- },
- }
-
- for testName, testData := range tests {
- t.Run(testName, func(t *testing.T) {
- cfg := Config{}
- flagext.DefaultValues(&cfg)
-
- err := yaml.Unmarshal([]byte(testData.config), &cfg)
- require.Equal(t, testData.expectedErr, err)
- require.Equal(t, testData.expectedConfig, cfg)
- })
- }
-}
-
func TestSSEConfig_Validate(t *testing.T) {
tests := map[string]struct {
setup func() *SSEConfig
@@ -169,6 +65,85 @@ func TestSSEConfig_Validate(t *testing.T) {
}
}
+func TestConfig_Validate(t *testing.T) {
+ tests := map[string]struct {
+ setup func() *Config
+ expected error
+ }{
+ "should pass with default config": {
+ setup: func() *Config {
+ sseCfg := &SSEConfig{}
+ flagext.DefaultValues(sseCfg)
+ cfg := &Config{
+ Endpoint: "s3.eu-central-1.amazonaws.com",
+ BucketName: "mimir-block",
+ SSE: *sseCfg,
+ StorageClass: s3_service.StorageClassStandard,
+ }
+ return cfg
+ },
+ },
+ "should fail if invalid storage class is set": {
+ setup: func() *Config {
+ return &Config{
+ StorageClass: "foo",
+ }
+ },
+ expected: errUnsupportedStorageClass,
+ },
+ "should fail on invalid endpoint prefix": {
+ setup: func() *Config {
+ return &Config{
+ Endpoint: "mimir-blocks.s3.eu-central-1.amazonaws.com",
+ BucketName: "mimir-blocks",
+ StorageClass: s3_service.StorageClassStandard,
+ }
+ },
+ expected: errInvalidEndpointPrefix,
+ },
+ "should pass if native_aws_auth_enabled is set": {
+ setup: func() *Config {
+ return &Config{
+ NativeAWSAuthEnabled: true,
+ }
+ },
+ },
+ "should pass with using sts endpoint": {
+ setup: func() *Config {
+ sseCfg := &SSEConfig{}
+ flagext.DefaultValues(sseCfg)
+ cfg := &Config{
+ BucketName: "mimir-block",
+ SSE: *sseCfg,
+ StorageClass: s3_service.StorageClassStandard,
+ STSEndpoint: "https://sts.eu-central-1.amazonaws.com",
+ }
+ return cfg
+ },
+ },
+ "should not pass with using sts endpoint as its using an invalid url": {
+ setup: func() *Config {
+ sseCfg := &SSEConfig{}
+ flagext.DefaultValues(sseCfg)
+ cfg := &Config{
+ BucketName: "mimir-block",
+ SSE: *sseCfg,
+ StorageClass: s3_service.StorageClassStandard,
+ STSEndpoint: "sts.eu-central-1.amazonaws.com",
+ }
+ return cfg
+ },
+ expected: errInvalidSTSEndpoint,
+ },
+ }
+
+ for testName, testData := range tests {
+ t.Run(testName, func(t *testing.T) {
+ assert.Equal(t, testData.expected, testData.setup().Validate())
+ })
+ }
+}
+
func TestSSEConfig_BuildMinioConfig(t *testing.T) {
tests := map[string]struct {
cfg *SSEConfig
@@ -225,31 +200,32 @@ func TestParseKMSEncryptionContext(t *testing.T) {
assert.Equal(t, expected, actual)
}
-func TestConfig_Validate(t *testing.T) {
- tests := map[string]struct {
- cfg Config
- expectedErr error
- }{
- "should fail if invalid signature version is set": {
- Config{SignatureVersion: "foo"},
- errUnsupportedSignatureVersion,
- },
- "should pass if valid signature version is set": {
- defaultConfig,
- nil,
- },
- "should fail if invalid storage class is set": {
- Config{SignatureVersion: SignatureVersionV4, StorageClass: "foo"},
- fmt.Errorf("unsupported S3 storage class: foo. Supported values: %s", strings.Join(aws.SupportedStorageClasses, ", ")),
- },
- "should pass if valid storage signature version is set": {
- Config{SignatureVersion: SignatureVersionV4, StorageClass: aws.StorageClassStandardInfrequentAccess},
- nil,
- },
- }
+func TestConfigParsesCredentialsInlineWithSessionToken(t *testing.T) {
+ var cfg = Config{}
+ yamlCfg := `
+access_key_id: access key id
+secret_access_key: secret access key
+session_token: session token
+`
+ err := yaml.Unmarshal([]byte(yamlCfg), &cfg)
+ require.NoError(t, err)
+
+ require.Equal(t, cfg.AccessKeyID, "access key id")
+ require.Equal(t, cfg.SecretAccessKey.String(), "secret access key")
+ require.Equal(t, cfg.SessionToken.String(), "session token")
+}
- for name, test := range tests {
- actual := test.cfg.Validate()
- assert.Equal(t, test.expectedErr, actual, name)
+func TestConfigRedactsCredentials(t *testing.T) {
+ cfg := Config{
+ AccessKeyID: "access key id",
+ SecretAccessKey: flagext.SecretWithValue("secret access key"),
+ SessionToken: flagext.SecretWithValue("session token"),
}
+
+ output, err := yaml.Marshal(cfg)
+ require.NoError(t, err)
+
+ require.True(t, bytes.Contains(output, []byte("access key id")))
+ require.False(t, bytes.Contains(output, []byte("secret access id")))
+ require.False(t, bytes.Contains(output, []byte("session token")))
}
diff --git a/pkg/storage/chunk/client/aws/s3_storage_client.go b/pkg/storage/chunk/client/aws/s3_storage_client.go
index 9ab8c9116339f..65817f38c9d9f 100644
--- a/pkg/storage/chunk/client/aws/s3_storage_client.go
+++ b/pkg/storage/chunk/client/aws/s3_storage_client.go
@@ -563,7 +563,7 @@ func isContextErr(err error) bool {
}
// IsStorageTimeoutErr returns true if error means that object cannot be retrieved right now due to server-side timeouts.
-func (a *S3ObjectClient) IsStorageTimeoutErr(err error) bool {
+func IsStorageTimeoutErr(err error) bool {
// TODO(dannyk): move these out to be generic
// context errors are all client-side
if isContextErr(err) {
@@ -599,7 +599,7 @@ func (a *S3ObjectClient) IsStorageTimeoutErr(err error) bool {
}
// IsStorageThrottledErr returns true if error means that object cannot be retrieved right now due to throttling.
-func (a *S3ObjectClient) IsStorageThrottledErr(err error) bool {
+func IsStorageThrottledErr(err error) bool {
if rerr, ok := err.(awserr.RequestFailure); ok {
// https://docs.aws.amazon.com/sdkref/latest/guide/feature-retry-behavior.html
@@ -609,6 +609,11 @@ func (a *S3ObjectClient) IsStorageThrottledErr(err error) bool {
return false
}
+
+func IsRetryableErr(err error) bool {
+ return IsStorageTimeoutErr(err) || IsStorageThrottledErr(err)
+}
+
func (a *S3ObjectClient) IsRetryableErr(err error) bool {
- return a.IsStorageTimeoutErr(err) || a.IsStorageThrottledErr(err)
+ return IsRetryableErr(err)
}
diff --git a/pkg/storage/chunk/client/aws/s3_thanos_object_client.go b/pkg/storage/chunk/client/aws/s3_thanos_object_client.go
new file mode 100644
index 0000000000000..e00ded920d552
--- /dev/null
+++ b/pkg/storage/chunk/client/aws/s3_thanos_object_client.go
@@ -0,0 +1,44 @@
+package aws
+
+import (
+ "context"
+
+ "github.com/go-kit/log"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/thanos-io/objstore"
+
+ "github.com/grafana/loki/v3/pkg/storage/bucket"
+ "github.com/grafana/loki/v3/pkg/storage/chunk/client"
+ "github.com/grafana/loki/v3/pkg/storage/chunk/client/hedging"
+)
+
+func NewS3ThanosObjectClient(ctx context.Context, cfg bucket.Config, component string, logger log.Logger, hedgingCfg hedging.Config) (client.ObjectClient, error) {
+ b, err := newS3ThanosObjectClient(ctx, cfg, component, logger, false, hedgingCfg)
+ if err != nil {
+ return nil, err
+ }
+
+ var hedged objstore.Bucket
+ if hedgingCfg.At != 0 {
+ hedged, err = newS3ThanosObjectClient(ctx, cfg, component, logger, true, hedgingCfg)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ o := bucket.NewObjectClientAdapter(b, hedged, logger, bucket.WithRetryableErrFunc(IsRetryableErr))
+ return o, nil
+}
+
+func newS3ThanosObjectClient(ctx context.Context, cfg bucket.Config, component string, logger log.Logger, hedging bool, hedgingCfg hedging.Config) (objstore.Bucket, error) {
+ if hedging {
+		hedgedTransport, err := hedgingCfg.RoundTripperWithRegisterer(nil, prometheus.WrapRegistererWithPrefix("loki_", prometheus.DefaultRegisterer))
+ if err != nil {
+ return nil, err
+ }
+
+		cfg.S3.HTTP.Transport = hedgedTransport
+ }
+
+ return bucket.NewClient(ctx, bucket.S3, cfg, component, logger)
+}
diff --git a/pkg/storage/factory.go b/pkg/storage/factory.go
index 79135abd26d00..7f4046a47d868 100644
--- a/pkg/storage/factory.go
+++ b/pkg/storage/factory.go
@@ -654,6 +654,10 @@ func internalNewObjectClient(storeName, component string, cfg Config, clientMetr
if cfg.CongestionControl.Enabled {
s3Cfg.BackoffConfig.MaxRetries = 1
}
+
+ if cfg.UseThanosObjstore {
+ return aws.NewS3ThanosObjectClient(context.Background(), cfg.ObjectStore, component, util_log.Logger, cfg.Hedging)
+ }
return aws.NewS3ObjectClient(s3Cfg, cfg.Hedging)
case types.StorageTypeAlibabaCloud:
diff --git a/pkg/util/http.go b/pkg/util/http.go
index c3c64ea1e3a86..3fdfca6df24f1 100644
--- a/pkg/util/http.go
+++ b/pkg/util/http.go
@@ -298,3 +298,12 @@ func FlagFromValues(values url.Values, key string, d bool) bool {
return d
}
}
+
+func IsValidURL(endpoint string) bool {
+ u, err := url.Parse(endpoint)
+ if err != nil {
+ return false
+ }
+
+ return u.Scheme != "" && u.Host != ""
+}
|
feat
|
AWS backend using thanos.io/objstore (#11221)
|
848c31fce938a97a56ec1058b052e156ad980944
|
2025-01-16 21:06:56
|
Robert Fratto
|
chore(dataobj): temporarily disable sorting (#15799)
| false
|
diff --git a/pkg/dataobj/dataobj_test.go b/pkg/dataobj/dataobj_test.go
index 67245ca199a99..31c89ab3ddab2 100644
--- a/pkg/dataobj/dataobj_test.go
+++ b/pkg/dataobj/dataobj_test.go
@@ -96,7 +96,10 @@ func Test(t *testing.T) {
actual, err := result.Collect(reader.Streams(context.Background(), objects[0]))
require.NoError(t, err)
- require.Equal(t, sortStreams(t, streams), actual)
+
+ // TODO(rfratto): reenable once sorting is reintroduced.
+ _ = actual
+ // require.Equal(t, sortStreams(t, streams), actual)
})
}
diff --git a/pkg/dataobj/internal/sections/logs/logs.go b/pkg/dataobj/internal/sections/logs/logs.go
index 8a1387431bc04..ce7038d28b486 100644
--- a/pkg/dataobj/internal/sections/logs/logs.go
+++ b/pkg/dataobj/internal/sections/logs/logs.go
@@ -173,10 +173,9 @@ func (l *Logs) EncodeTo(enc *encoding.Encoder) error {
// 2. Move some columns into an aggregated column which holds multiple label
// keys and values.
- // Create a sorted dataset for us to encode.
- dset, err := l.sort()
+ dset, err := l.buildDataset()
if err != nil {
- return fmt.Errorf("sorting logs: %w", err)
+ return fmt.Errorf("building dataset: %w", err)
}
cols, err := result.Collect(dset.ListColumns(context.Background())) // dset is in memory; "real" context not needed.
if err != nil {
@@ -194,7 +193,7 @@ func (l *Logs) EncodeTo(enc *encoding.Encoder) error {
}()
// Encode our columns. The slice order here *must* match the order in
- // [Logs.sort]!
+ // [Logs.buildDataset]!
{
errs := make([]error, 0, len(cols))
errs = append(errs, encodeColumn(logsEnc, logsmd.COLUMN_TYPE_STREAM_ID, cols[0]))
@@ -211,7 +210,7 @@ func (l *Logs) EncodeTo(enc *encoding.Encoder) error {
return logsEnc.Commit()
}
-func (l *Logs) sort() (dataset.Dataset, error) {
+func (l *Logs) buildDataset() (dataset.Dataset, error) {
// Our columns are ordered as follows:
//
// 1. StreamID
@@ -242,9 +241,18 @@ func (l *Logs) sort() (dataset.Dataset, error) {
messages, _ := l.messages.Flush()
columns = append(columns, messages)
- // dset is in memory, so we don't need a "real" context in dataset.Sort.
- dset := dataset.FromMemory(columns)
- return dataset.Sort(context.Background(), dset, []dataset.Column{streamID, timestamp}, l.pageSize)
+ // TODO(rfratto): We need to be able to sort the columns first by StreamID
+ // and then by timestamp, but as it is now this is way too slow; sorting a
+ // 20MB dataset took several minutes due to the number of page loads
+ // happening across streams.
+ //
+ // Sorting can be made more efficient by:
+ //
+ // 1. Separating streams into separate datasets while appending
+ // 2. Sorting each stream separately
+ // 3. Combining sorted streams into a single dataset, which will already be
+ // sorted.
+ return dataset.FromMemory(columns), nil
}
func encodeColumn(enc *encoding.LogsEncoder, columnType logsmd.ColumnType, column dataset.Column) error {
diff --git a/pkg/dataobj/internal/sections/logs/logs_test.go b/pkg/dataobj/internal/sections/logs/logs_test.go
index 4b9b1b37f7de6..abf204b833252 100644
--- a/pkg/dataobj/internal/sections/logs/logs_test.go
+++ b/pkg/dataobj/internal/sections/logs/logs_test.go
@@ -15,6 +15,8 @@ import (
)
func Test(t *testing.T) {
+ t.Skip("Disabled until sorting is reimplemented")
+
records := []logs.Record{
{
StreamID: 1,
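
The TODO in `buildDataset` above sketches a three-step plan: split records per stream while appending, sort each stream independently, then combine the already-sorted streams. The combine step is a classic k-way merge. Below is a minimal, self-contained sketch of that step, under the simplifying assumption that each stream reduces to an ascending slice of int64 timestamps; the real implementation would merge dataset rows keyed by stream ID and timestamp:

```go
package main

import (
	"container/heap"
	"fmt"
)

// cursor tracks the next unread element of one already-sorted stream.
type cursor struct {
	vals []int64 // e.g. timestamps of a single stream, sorted ascending
	pos  int
}

type mergeHeap []*cursor

func (h mergeHeap) Len() int           { return len(h) }
func (h mergeHeap) Less(i, j int) bool { return h[i].vals[h[i].pos] < h[j].vals[h[j].pos] }
func (h mergeHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }
func (h *mergeHeap) Push(x any)        { *h = append(*h, x.(*cursor)) }
func (h *mergeHeap) Pop() any {
	old := *h
	c := old[len(old)-1]
	*h = old[:len(old)-1]
	return c
}

// mergeSorted combines per-stream sorted slices into one sorted output,
// touching each element once instead of re-sorting the combined set.
func mergeSorted(streams ...[]int64) []int64 {
	h := &mergeHeap{}
	for _, s := range streams {
		if len(s) > 0 {
			*h = append(*h, &cursor{vals: s})
		}
	}
	heap.Init(h)
	var out []int64
	for h.Len() > 0 {
		c := (*h)[0]
		out = append(out, c.vals[c.pos])
		c.pos++
		if c.pos == len(c.vals) {
			heap.Pop(h) // stream exhausted
		} else {
			heap.Fix(h, 0) // head advanced; restore heap order
		}
	}
	return out
}

func main() {
	fmt.Println(mergeSorted([]int64{1, 4, 9}, []int64{2, 3, 8}, []int64{5})) // [1 2 3 4 5 8 9]
}
```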
|
chore
|
temporarily disable sorting (#15799)
|
d5ecf9aa7b2bd05a8ef00030c7c0f8a043996114
|
2024-03-26 15:51:48
|
dependabot[bot]
|
chore(deps): bump google.golang.org/protobuf from 1.32.0 to 1.33.0 in /operator (#12207)
| false
|
diff --git a/operator/go.mod b/operator/go.mod
index f580ea5b6d6d2..4e6b1d65bd67f 100644
--- a/operator/go.mod
+++ b/operator/go.mod
@@ -150,7 +150,7 @@ require (
google.golang.org/genproto/googleapis/api v0.0.0-20231127180814-3a041ad873d4 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f // indirect
google.golang.org/grpc v1.59.0 // indirect
- google.golang.org/protobuf v1.32.0 // indirect
+ google.golang.org/protobuf v1.33.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/apiextensions-apiserver v0.28.4 // indirect
diff --git a/operator/go.sum b/operator/go.sum
index 220e1e2ebb5e9..6d861a1180a9d 100644
--- a/operator/go.sum
+++ b/operator/go.sum
@@ -1916,8 +1916,8 @@ google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I=
-google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
+google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
chore
|
bump google.golang.org/protobuf from 1.32.0 to 1.33.0 in /operator (#12207)
|
7553775d7234c9782254587eecef99c81da5d034
|
2024-07-25 18:43:21
|
benclive
|
chore: Remove client header interceptors from metastore client (#13659)
| false
|
diff --git a/docs/sources/shared/configuration.md b/docs/sources/shared/configuration.md
index c0e830fce7757..0e1687d06cc78 100644
--- a/docs/sources/shared/configuration.md
+++ b/docs/sources/shared/configuration.md
@@ -115,8 +115,8 @@ Pass the `-config.expand-env` flag at the command line to enable this way of set
[querier: <querier>]
querier_rf1:
- # Enable the RF1 querier. If set, replaces the usual querier with a RF-1
- # querier when using 'ALL' target.
+ # Enable the RF1 querier. If set, replaces the usual querier with an RF-1
+ # querier.
# CLI flag: -querier-rf1.enabled
[enabled: <boolean> | default = false]
diff --git a/pkg/ingester-rf1/metastore/client/client.go b/pkg/ingester-rf1/metastore/client/client.go
index 044ad613d7ed5..5275fbd5fedeb 100644
--- a/pkg/ingester-rf1/metastore/client/client.go
+++ b/pkg/ingester-rf1/metastore/client/client.go
@@ -13,7 +13,7 @@ import (
"github.com/prometheus/client_golang/prometheus"
"google.golang.org/grpc"
- metastorepb "github.com/grafana/loki/v3/pkg/ingester-rf1/metastore/metastorepb"
+ "github.com/grafana/loki/v3/pkg/ingester-rf1/metastore/metastorepb"
"github.com/grafana/loki/v3/pkg/util/constants"
)
@@ -106,12 +106,10 @@ const grpcServiceConfig = `{
func instrumentation(latency *prometheus.HistogramVec) ([]grpc.UnaryClientInterceptor, []grpc.StreamClientInterceptor) {
var unaryInterceptors []grpc.UnaryClientInterceptor
unaryInterceptors = append(unaryInterceptors, otgrpc.OpenTracingClientInterceptor(opentracing.GlobalTracer()))
- unaryInterceptors = append(unaryInterceptors, middleware.ClientUserHeaderInterceptor)
unaryInterceptors = append(unaryInterceptors, middleware.UnaryClientInstrumentInterceptor(latency))
var streamInterceptors []grpc.StreamClientInterceptor
streamInterceptors = append(streamInterceptors, otgrpc.OpenTracingStreamClientInterceptor(opentracing.GlobalTracer()))
- streamInterceptors = append(streamInterceptors, middleware.StreamClientUserHeaderInterceptor)
streamInterceptors = append(streamInterceptors, middleware.StreamClientInstrumentInterceptor(latency))
return unaryInterceptors, streamInterceptors
diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go
index 3b08c0beb0a86..14c7290bfa94b 100644
--- a/pkg/loki/loki.go
+++ b/pkg/loki/loki.go
@@ -752,7 +752,7 @@ func (t *Loki) setupModuleManager() error {
Write: {Ingester, IngesterRF1, Distributor, PatternIngester},
Backend: {QueryScheduler, Ruler, Compactor, IndexGateway, BloomGateway, BloomCompactor},
- All: {QueryScheduler, QueryFrontend, Querier, Ingester, IngesterRF1, PatternIngester, Distributor, Ruler, Compactor},
+ All: {QueryScheduler, QueryFrontend, Querier, Ingester, IngesterRF1, PatternIngester, Distributor, Ruler, Compactor, Metastore},
}
if t.Cfg.Querier.PerRequestLimitsEnabled {
diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go
index 03637e2e598da..1ede7ee806b79 100644
--- a/pkg/loki/modules.go
+++ b/pkg/loki/modules.go
@@ -1814,6 +1814,12 @@ func (t *Loki) initAnalytics() (services.Service, error) {
}
func (t *Loki) initMetastore() (services.Service, error) {
+ if !t.Cfg.IngesterRF1.Enabled {
+ return nil, nil
+ }
+ if t.Cfg.isTarget(All) {
+ t.Cfg.MetastoreClient.MetastoreAddress = fmt.Sprintf("localhost:%s", t.Cfg.Server.GRPCListenAddress)
+ }
m, err := metastore.New(t.Cfg.Metastore, log.With(util_log.Logger, "component", "metastore"), prometheus.DefaultRegisterer, t.health)
if err != nil {
return nil, err
diff --git a/pkg/querier-rf1/querier.go b/pkg/querier-rf1/querier.go
index d4952ea59745c..9504fe23482ab 100644
--- a/pkg/querier-rf1/querier.go
+++ b/pkg/querier-rf1/querier.go
@@ -57,7 +57,7 @@ type Config struct {
// RegisterFlags register flags.
func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
cfg.Engine.RegisterFlagsWithPrefix("querier-rf1", f)
- f.BoolVar(&cfg.Enabled, "querier-rf1.enabled", false, "Enable the RF1 querier. If set, replaces the usual querier with a RF-1 querier when using 'ALL' target.")
+ f.BoolVar(&cfg.Enabled, "querier-rf1.enabled", false, "Enable the RF1 querier. If set, replaces the usual querier with an RF-1 querier.")
f.DurationVar(&cfg.ExtraQueryDelay, "querier-rf1.extra-query-delay", 0, "Time to wait before sending more than the minimum successful query requests.")
f.IntVar(&cfg.MaxConcurrent, "querier-rf1.max-concurrent", 4, "The maximum number of queries that can be simultaneously processed by the querier.")
f.BoolVar(&cfg.PerRequestLimitsEnabled, "querier-rf1.per-request-limits-enabled", false, "When true, querier limits sent via a header are enforced.")
|
chore
|
Remove client header interceptors from metastore client (#13659)
|
a7e0e48245bf02f520d59af4eeecf6985fa0a8db
|
2019-08-15 20:07:39
|
Robert Fratto
|
ksonnet: update ingester config to transfer chunks on rollout (#894)
| false
|
diff --git a/production/ksonnet/loki/config.libsonnet b/production/ksonnet/loki/config.libsonnet
index 2186eb209355c..29bdf24caa146 100644
--- a/production/ksonnet/loki/config.libsonnet
+++ b/production/ksonnet/loki/config.libsonnet
@@ -107,6 +107,7 @@
ingester: {
chunk_idle_period: '15m',
chunk_block_size: 262144,
+ max_transfer_retries: 60,
lifecycler: {
ring: {
@@ -125,8 +126,8 @@
num_tokens: 512,
heartbeat_period: '5s',
- join_after: '10s',
- claim_on_rollout: false,
+ join_after: '30s',
+ claim_on_rollout: true,
interface_names: ['eth0'],
},
},
|
ksonnet
|
update ingester config to transfer chunks on rollout (#894)
|
813f0d2d0b7ad410c35d76d6bd3cce0018eb21e8
|
2019-07-08 19:41:52
|
sh0rez
|
feat(docker): multi-arch Dockerfile (#668)
| false
|
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000000000..029a640a26731
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,53 @@
+# These may be overwritten by --build-arg to build promtail or debug images
+ARG APP=loki
+ARG TYPE=production
+
+# ca-certificates
+FROM alpine:3.9 as ssl
+RUN apk add --update --no-cache ca-certificates
+
+# use grafana/loki-build-image to compile binaries
+FROM grafana/loki-build-image as golang
+ARG GOARCH="amd64"
+COPY . /go/src/github.com/grafana/loki
+WORKDIR /go/src/github.com/grafana/loki
+RUN touch loki-build-image/.uptodate &&\
+ mkdir /build
+
+# production image
+FROM golang as builder-production
+ARG APP
+RUN make BUILD_IN_CONTAINER=false cmd/${APP}/${APP} &&\
+ mv cmd/${APP}/${APP} /build/${APP}
+
+FROM scratch as production
+COPY --from=ssl /etc/ssl /etc/ssl
+COPY --from=builder-production /build/${APP} /usr/bin/${APP}
+
+# debug image (only arm64 supported, because of go-delve/delve#118)
+FROM golang as builder-debug
+ARG APP
+RUN go get github.com/go-delve/delve/cmd/dlv &&\
+ make BUILD_IN_CONTAINER=false cmd/promtail/promtail-debug &&\
+ mv cmd/${APP}/${APP}-debug /build/app-debug &&\
+ mv cmd/${APP}/dlv /build/dlv
+
+FROM alpine:3.9 as debug
+COPY --from=ssl /etc/ssl /etc/ssl
+COPY --from=builder-debug /build/app-debug /usr/bin/app-debug
+COPY --from=builder-debug /build/dlv /usr/bin/dlv
+RUN apk add --no-cache libc6-compat
+EXPOSE 40000
+ENTRYPOINT ["/usr/bin/dlv", "--listen=:40000", "--headless=true", "--api-version=2", "exec", "/usr/bin/app-debug", "--"]
+
+# final image with configuration
+FROM ${TYPE} as promtail
+COPY cmd/promtail/promtail-local-config.yaml cmd/promtail/promtail-docker-config.yaml /etc/promtail/
+ENTRYPOINT ["/usr/bin/promtail"]
+
+FROM ${TYPE} as loki
+COPY cmd/loki/loki-local-config.yaml /etc/loki/local-config.yaml
+EXPOSE 80
+ENTRYPOINT ["/usr/bin/loki"]
+
+FROM ${APP}
|
feat
|
multi-arch Dockerfile (#668)
|
f3b72c8b591e1cfc49a2688316382dcb9ff22b3d
|
2022-09-27 21:42:39
|
Dylan Guedes
|
promtail: Change max support compressed line size to be 2MB (#7268)
| false
|
diff --git a/clients/pkg/promtail/targets/file/decompresser.go b/clients/pkg/promtail/targets/file/decompresser.go
index af1b1429e2752..9171c0f2f401d 100644
--- a/clients/pkg/promtail/targets/file/decompresser.go
+++ b/clients/pkg/promtail/targets/file/decompresser.go
@@ -189,8 +189,9 @@ func (t *decompressor) readLines() {
level.Info(t.logger).Log("msg", "successfully mounted reader", "path", t.path, "ext", filepath.Ext(t.path))
- maxLoglineSize := 4096
- buffer := make([]byte, maxLoglineSize)
+ bufferSize := 4096
+ buffer := make([]byte, bufferSize)
+ maxLoglineSize := 2000000 // 2 MB
scanner := bufio.NewScanner(r)
scanner.Buffer(buffer, maxLoglineSize)
for line := 1; ; line++ {
diff --git a/docs/sources/clients/promtail/_index.md b/docs/sources/clients/promtail/_index.md
index 2fc65bf271586..964939663c4fd 100644
--- a/docs/sources/clients/promtail/_index.md
+++ b/docs/sources/clients/promtail/_index.md
@@ -43,7 +43,7 @@ relies on file extensions. If a discovered file has an expected compression file
extension, Promtail will **lazily** decompress the compressed file and push the
parsed data to Loki. Important details are:
* It relies on the `\n` character to separate the data into different log lines.
-* The max expected log line is 4096 bytes within the compressed file.
+* The max expected log line is 2 MB within the compressed file.
* The data is decompressed in blocks of 4096 bytes. i.e: it first fetches a block of 4096 bytes
from the compressed file and process it. After processing this block and pushing the data to Loki,
it fetches the following 4096 bytes, and so on.
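
The patch above relies on `bufio.Scanner`'s two-level buffering: the scanner starts from the supplied initial buffer and grows it on demand up to the `max` argument, returning `bufio.ErrTooLong` for any line beyond that. A minimal standalone illustration of those two knobs:

```go
package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	// Start with a small 4 KiB buffer; let the scanner grow it up to 2 MB
	// for unusually long lines, mirroring the decompressor change above.
	const bufferSize = 4096
	const maxLoglineSize = 2000000 // 2 MB

	r := strings.NewReader(strings.Repeat("x", 100000) + "\n") // one 100 KB line
	scanner := bufio.NewScanner(r)
	scanner.Buffer(make([]byte, bufferSize), maxLoglineSize)

	for scanner.Scan() {
		fmt.Println("line length:", len(scanner.Bytes()))
	}
	if err := scanner.Err(); err != nil {
		fmt.Println("scan error:", err) // would be bufio.ErrTooLong past 2 MB
	}
}
```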
|
promtail
|
Change max support compressed line size to be 2MB (#7268)
|
e95cc6cc0992155f72232c7a73636396984799a6
|
2024-03-12 13:04:36
|
Owen Diehl
|
chore(blooms): Adds basic tracing implementation to bloom-gw (#12175)
| false
|
diff --git a/pkg/bloomgateway/bloomgateway.go b/pkg/bloomgateway/bloomgateway.go
index 96b08636286e2..26f4d27d951cc 100644
--- a/pkg/bloomgateway/bloomgateway.go
+++ b/pkg/bloomgateway/bloomgateway.go
@@ -44,13 +44,13 @@ package bloomgateway
import (
"context"
"fmt"
- "sync"
"time"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/grafana/dskit/services"
"github.com/grafana/dskit/tenant"
+ "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"go.uber.org/atomic"
@@ -76,12 +76,6 @@ var (
responsesPool = queue.NewSlicePool[v1.Output](1<<6, 1<<16, 2)
)
-// SyncMap is a map structure which can be synchronized using the RWMutex
-type SyncMap[k comparable, v any] struct {
- sync.RWMutex
- Map map[k]v
-}
-
type Gateway struct {
services.Service
@@ -200,6 +194,9 @@ func (g *Gateway) stopping(_ error) error {
// FilterChunkRefs implements BloomGatewayServer
func (g *Gateway) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunkRefRequest) (*logproto.FilterChunkRefResponse, error) {
+ sp, ctx := opentracing.StartSpanFromContext(ctx, "bloomgateway.FilterChunkRefs")
+ defer sp.Finish()
+
tenantID, err := tenant.TenantID(ctx)
if err != nil {
return nil, err
@@ -237,6 +234,12 @@ func (g *Gateway) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunk
}, nil
}
+ sp.LogKV(
+ "filters", len(filters),
+ "days", len(seriesByDay),
+ "series_requested", len(req.Refs),
+ )
+
tasks := make([]Task, 0, len(seriesByDay))
responses := make([][]v1.Output, 0, len(seriesByDay))
for _, seriesForDay := range seriesByDay {
@@ -265,19 +268,26 @@ func (g *Gateway) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunk
// request on the first error, there can be cases where the request context
// is not done yet and the consumeTask() function wants to send to the
// tasksCh, but nobody reads from it any more.
+ queueStart := time.Now()
tasksCh := make(chan Task, len(tasks))
for _, task := range tasks {
task := task
task.enqueueTime = time.Now()
level.Info(logger).Log("msg", "enqueue task", "task", task.ID, "table", task.table, "series", len(task.series))
- g.queue.Enqueue(tenantID, nil, task, func() {
+
+ // TODO(owen-d): gracefully handle full queues
+ if err := g.queue.Enqueue(tenantID, nil, task, func() {
// When enqueuing, we also add the task to the pending tasks
_ = g.pendingTasks.Inc()
- })
+ }); err != nil {
+ return nil, errors.Wrap(err, "failed to enqueue task")
+ }
// TODO(owen-d): use `concurrency` lib, bound parallelism
go g.consumeTask(ctx, task, tasksCh)
}
+ sp.LogKV("enqueue_duration", time.Since(queueStart).String())
+
remaining := len(tasks)
preFilterSeries := len(req.Refs)
@@ -301,6 +311,8 @@ func (g *Gateway) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunk
}
}
+ sp.LogKV("msg", "received all responses")
+
filtered := filterChunkRefs(req, responses)
// free up the responses
diff --git a/pkg/bloomgateway/processor.go b/pkg/bloomgateway/processor.go
index 1790f4ba87beb..c8d6327ee425d 100644
--- a/pkg/bloomgateway/processor.go
+++ b/pkg/bloomgateway/processor.go
@@ -7,6 +7,7 @@ import (
"github.com/go-kit/log"
"github.com/go-kit/log/level"
+ "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
@@ -122,7 +123,24 @@ func (p *processor) processBlock(_ context.Context, blockQuerier *v1.BlockQuerie
tokenizer := v1.NewNGramTokenizer(schema.NGramLen(), 0)
iters := make([]v1.PeekingIterator[v1.Request], 0, len(tasks))
+
+ // collect spans & run single defer to avoid blowing call stack
+ // if there are many tasks
+ spans := make([]opentracing.Span, 0, len(tasks))
+ defer func() {
+ for _, sp := range spans {
+ sp.Finish()
+ }
+ }()
+
for _, task := range tasks {
+ // add spans for each task context for this block
+ sp, _ := opentracing.StartSpanFromContext(task.ctx, "bloomgateway.ProcessBlock")
+ spans = append(spans, sp)
+ md, _ := blockQuerier.Metadata()
+ blk := bloomshipper.BlockRefFrom(task.Tenant, task.table.String(), md)
+ sp.LogKV("block", blk.String())
+
it := v1.NewPeekingIter(task.RequestIter(tokenizer))
iters = append(iters, it)
}
diff --git a/pkg/storage/stores/shipper/bloomshipper/client.go b/pkg/storage/stores/shipper/bloomshipper/client.go
index 67b39245c024b..53271bf951c83 100644
--- a/pkg/storage/stores/shipper/bloomshipper/client.go
+++ b/pkg/storage/stores/shipper/bloomshipper/client.go
@@ -196,16 +196,22 @@ func (c ClosableReadSeekerAdapter) Close() error {
return nil
}
+func BlockRefFrom(tenant, table string, md v1.BlockMetadata) BlockRef {
+ return BlockRef{
+ Ref: Ref{
+ TenantID: tenant,
+ TableName: table,
+ Bounds: md.Series.Bounds,
+ StartTimestamp: md.Series.FromTs,
+ EndTimestamp: md.Series.ThroughTs,
+ Checksum: md.Checksum,
+ },
+ }
+}
+
func BlockFrom(tenant, table string, blk *v1.Block) (Block, error) {
md, _ := blk.Metadata()
- ref := Ref{
- TenantID: tenant,
- TableName: table,
- Bounds: md.Series.Bounds,
- StartTimestamp: md.Series.FromTs,
- EndTimestamp: md.Series.ThroughTs,
- Checksum: md.Checksum,
- }
+ ref := BlockRefFrom(tenant, table, md)
// TODO(owen-d): pool
buf := bytes.NewBuffer(nil)
@@ -218,7 +224,7 @@ func BlockFrom(tenant, table string, blk *v1.Block) (Block, error) {
reader := bytes.NewReader(buf.Bytes())
return Block{
- BlockRef: BlockRef{Ref: ref},
+ BlockRef: ref,
Data: ClosableReadSeekerAdapter{reader},
}, nil
}
diff --git a/pkg/storage/stores/shipper/bloomshipper/store.go b/pkg/storage/stores/shipper/bloomshipper/store.go
index 887bbdb1b8f99..715f451b0441d 100644
--- a/pkg/storage/stores/shipper/bloomshipper/store.go
+++ b/pkg/storage/stores/shipper/bloomshipper/store.go
@@ -3,7 +3,7 @@ package bloomshipper
import (
"context"
"fmt"
- "path/filepath"
+ "path"
"sort"
"github.com/go-kit/log"
@@ -57,7 +57,7 @@ func (b *bloomStoreEntry) ResolveMetas(ctx context.Context, params MetaSearchPar
var refs []MetaRef
tables := tablesForRange(b.cfg, params.Interval)
for _, table := range tables {
- prefix := filepath.Join(rootFolder, table, params.TenantID, metasFolder)
+ prefix := path.Join(rootFolder, table, params.TenantID, metasFolder)
level.Debug(b.fetcher.logger).Log(
"msg", "listing metas",
"store", b.cfg.From,
|
chore
|
Adds basic tracing implementation to bloom-gw (#12175)
|
e2aee2a9c32817486acb601e773d9d08090e19b9
|
2020-11-06 12:41:52
|
Cyril Tovena
|
logql: Improve template format (#2822)
| false
|
diff --git a/docs/sources/logql/_index.md b/docs/sources/logql/_index.md
index 51bb049a753e5..f353bc676dd85 100644
--- a/docs/sources/logql/_index.md
+++ b/docs/sources/logql/_index.md
@@ -295,7 +295,7 @@ Will extract and rewrite the log line to only contains the query and the duratio
You can use double quoted string for the template or single backtick \``\{{.label_name}}`\` to avoid the need to escape special characters.
-See [functions](#Template-functions) to learn about available functions in the template format.
+See [functions](functions/) to learn about available functions in the template format.
#### Labels Format Expression
@@ -303,89 +303,14 @@ The `| label_format` expression can renamed, modify or add labels. It takes as p
When both side are label identifiers, for example `dst=src`, the operation will rename the `src` label into `dst`.
-The left side can alternatively be a template string (double quoted or backtick), for example `dst="{{.status}} {{.query}}"`, in which case the `dst` label value will be replace by the result of the [text/template](https://golang.org/pkg/text/template/) evaluation. This is the same template engine as the `| line_format` expression, this mean labels are available as variables and you can use the same list of [functions](#Template-functions).
+The left side can alternatively be a template string (double quoted or backtick), for example `dst="{{.status}} {{.query}}"`, in which case the `dst` label value is replaced by the result of the [text/template](https://golang.org/pkg/text/template/) evaluation. This is the same template engine as the `| line_format` expression, which means labels are available as variables and you can use the same list of [functions](functions/).
-In both case if the destination label doesn't exist a new one will be created.
+In both cases, if the destination label doesn't exist, then a new one is created.
The renaming form `dst=src` will _drop_ the `src` label after remapping it to the `dst` label. However, the _template_ form will preserve the referenced labels, such that `dst="{{.src}}"` results in both `dst` and `src` having the same value.
> A single label name can only appear once per expression. This means `| label_format foo=bar,foo="new"` is not allowed but you can use two expressions for the desired effect: `| label_format foo=bar | label_format foo="new"`
-#### Template functions
-
-The text template format used in `| line_format` and `| label_format` support functions the following list of functions.
-
-##### ToLower & ToUpper
-
-Convert the entire string to lowercase or uppercase:
-
-Examples:
-
-```template
-"{{.request_method | ToLower}}"
-"{{.request_method | ToUpper}}"
-`{{ToUpper "This is a string" | ToLower}}`
-```
-
-##### Replace
-
-Perform simple string replacement.
-
-It takes three arguments:
-
-- string to replace
-- string to replace with
-- source string
-
-Example:
-
-```template
-`"This is a string" | Replace " " "-"`
-```
-
-The above will produce `This-is-a-string`
-
-##### Trim
-
-`Trim` returns a slice of the string s with all leading and
-trailing Unicode code points contained in cutset removed.
-
-`TrimLeft` and `TrimRight` are the same as `Trim` except that it respectively trim only leading and trailing characters.
-
-```template
-`{{ Trim .query ",. " }}`
-`{{ TrimLeft .uri ":" }}`
-`{{ TrimRight .path "/" }}`
-```
-
-`TrimSpace` TrimSpace returns string s with all leading
-and trailing white space removed, as defined by Unicode.
-
-```template
-{{ TrimSpace .latency }}
-```
-
-`TrimPrefix` and `TrimSuffix` will trim respectively the prefix or suffix supplied.
-
-```template
-{{ TrimPrefix .path "/" }}
-```
-
-##### Regex
-
-`regexReplaceAll` returns a copy of the input string, replacing matches of the Regexp with the replacement string replacement. Inside string replacement, $ signs are interpreted as in Expand, so for instance $1 represents the text of the first sub-match. See the golang [docs](https://golang.org/pkg/regexp/#Regexp.ReplaceAll) for detailed examples.
-
-```template
-`{{ regexReplaceAllLiteral "(a*)bc" .some_label "${1}a" }}`
-```
-
-`regexReplaceAllLiteral` returns a copy of the input string, replacing matches of the Regexp with the replacement string replacement The replacement string is substituted directly, without using Expand.
-
-```template
-`{{ regexReplaceAllLiteral "(ts=)" .timestamp "timestamp=" }}`
-```
-
-You can combine multiple function using pipe, for example if you want to strip out spaces and make the request method in capital you would write the following template `{{ .request_method | TrimSpace | ToUpper }}`.
### Log Queries Examples
diff --git a/docs/sources/logql/functions.md b/docs/sources/logql/functions.md
new file mode 100644
index 0000000000000..1728aea06053a
--- /dev/null
+++ b/docs/sources/logql/functions.md
@@ -0,0 +1,370 @@
+---
+title: Template functions
+---
+
+The [text template](https://golang.org/pkg/text/template) format used in `| line_format` and `| label_format` supports the usage of functions.
+
+All labels are added as variables in the template engine. They can be referenced using their label name prefixed by a `.` (e.g. `.label_name`). For example, the following template will output the value of the path label:
+
+```template
+{{ .path }}
+```
+
+You can take advantage of [pipelines](https://golang.org/pkg/text/template/#hdr-Pipelines) to join together multiple functions.
+In a chained pipeline, the result of each command is passed as the last argument of the following command.
+
+Example:
+
+```template
+{{ .path | replace " " "_" | trunc 5 | upper }}
+```
+
+## ToLower and ToUpper
+
+This function converts the entire string to lowercase or uppercase.
+
+Signatures:
+
+- `ToLower(string) string`
+- `ToUpper(string) string`
+
+Examples:
+
+```template
+"{{.request_method | ToLower}}"
+"{{.request_method | ToUpper}}"
+`{{ToUpper "This is a string" | ToLower}}`
+```
+
+> **Note:** In Loki 2.1 you can also use the [`lower`](#lower) and [`upper`](#upper) shortcuts respectively, e.g. `{{.request_method | lower }}`.
+
+## Replace string
+
+> **Note:** In Loki 2.1 [`replace`](#replace) (as opposed to `Replace`) is available with a different signature that is easier to chain within a pipeline.
+
+Use this function to perform a simple string replacement.
+
+Signature:
+
+`Replace(s, old, new string, n int) string`
+
+It takes four arguments:
+
+- `s` source string
+- `old` string to replace
+- `new` string to replace with
+- `n` the maximum number of replacements (-1 for all)
+
+Example:
+
+```template
+`{{ Replace "This is a string" " " "-" -1 }}`
+```
+
+This results in `This-is-a-string`.
+
+## Trim, TrimLeft, TrimRight, and TrimSpace
+
+> **Note:** In Loki 2.1 [trim](#trim), [trimAll](#trimAll), [trimSuffix](#trimSuffix) and [trimPrefix](#trimPrefix) have been added with a different signature for better pipeline chaining.
+
+`Trim` returns a slice of the string s with all leading and
+trailing Unicode code points contained in cutset removed.
+
+Signature: `Trim(value, cutset string) string`
+
+`TrimLeft` and `TrimRight` are the same as `Trim` except that they trim only leading and trailing characters respectively.
+
+```template
+`{{ Trim .query ",. " }}`
+`{{ TrimLeft .uri ":" }}`
+`{{ TrimRight .path "/" }}`
+```
+
+`TrimSpace` returns string s with all leading
+and trailing white space removed, as defined by Unicode.
+
+Signature: `TrimSpace(value string) string`
+
+```template
+{{ TrimSpace .latency }}
+```
+
+`TrimPrefix` and `TrimSuffix` will trim respectively the prefix or suffix supplied.
+
+Signature:
+
+- `TrimPrefix(value string, prefix string) string`
+- `TrimSuffix(value string, suffix string) string`
+
+```template
+{{ TrimPrefix .path "/" }}
+```
+
+## regexReplaceAll and regexReplaceAllLiteral
+
+`regexReplaceAll` returns a copy of the input string, replacing matches of the Regexp with the replacement string. Inside the replacement string, $ signs are interpreted as in Expand, so for instance $1 represents the text of the first sub-match. See the golang [Regexp.replaceAll documentation](https://golang.org/pkg/regexp/#Regexp.ReplaceAll) for more examples.
+
+```template
+`{{ regexReplaceAllLiteral "(a*)bc" .some_label "${1}a" }}`
+```
+
+The `regexReplaceAllLiteral` function returns a copy of the input string, replacing matches of the Regexp with the replacement string. The replacement is substituted directly, without using Expand.
+
+```template
+`{{ regexReplaceAllLiteral "(ts=)" .timestamp "timestamp=" }}`
+```
+
+You can combine multiple functions using a pipe. For example, to strip out spaces and upper-case the request method, you would write the following template: `{{ .request_method | TrimSpace | ToUpper }}`.
+
+## lower
+
+> Added in Loki 2.1
+
+Use this function to convert to lower case.
+
+Signature:
+
+`lower(string) string`
+
+Examples:
+
+```template
+"{{ .request_method | lower }}"
+`{{ lower "HELLO"}}`
+```
+
+The last example will return `hello`.
+
+## upper
+
+> Added in Loki 2.1
+
+Use this function to convert to upper case.
+
+Signature:
+
+`upper(string) string`
+
+Examples:
+
+```template
+"{{ .request_method | upper }}"
+`{{ upper "hello"}}`
+```
+
+This results in `HELLO`.
+
+## title
+
+> **Note:** Added in Loki 2.1.
+
+Convert to title case.
+
+Signature:
+
+`title(string) string`
+
+Examples:
+
+```template
+"{{.request_method | title}}"
+`{{ title "hello world"}}`
+```
+
+The last example will return `Hello World`.
+
+## trunc
+
+> **Note:** Added in Loki 2.1.
+
+Truncate a string and add no suffix.
+
+Signature:
+
+`trunc(count int,value string) string`
+
+Examples:
+
+```template
+"{{ .path | trunc 2 }}"
+`{{ trunc 5 "hello world"}}` // output: hello
+`{{ trunc -5 "hello world"}}` // output: world
+```
+
+## substr
+
+> **Note:** Added in Loki 2.1.
+
+Get a substring from a string.
+
+Signature:
+
+`substr(start int,end int,value string) string`
+
+If start is < 0, this calls value[:end].
+If start is >= 0 and end < 0 or end is bigger than the value's length, this calls value[start:].
+Otherwise, this calls value[start:end].
+
+Examples:
+
+```template
+"{{ .path | substr 2 5 }}"
+`{{ substr 0 5 "hello world"}}` // output: hello
+`{{ substr 6 11 "hello world"}}` // output: world
+```
+
+## replace
+
+> **Note:** Added in Loki 2.1.
+
+This function performs simple string replacement.
+
+Signature: `replace(old string, new string, src string) string`
+
+It takes three arguments:
+
+- `old` string to replace
+- `new` string to replace with
+- `src` source string
+
+Examples:
+
+```template
+{{ .cluster | replace "-cluster" "" }}
+{{ replace "hello" "world" "hello world" }}
+```
+
+The last example will return `world world`.
+
+## trim
+
+> **Note:** Added in Loki 2.1.
+
+The trim function removes space from either side of a string.
+
+Signature: `trim(string) string`
+
+Examples:
+
+```template
+{{ .ip | trim }}
+{{ trim " hello " }} // output: hello
+```
+
+## trimAll
+
+> **Note:** Added in Loki 2.1.
+
+Use this function to remove given characters from the front or back of a string.
+
+Signature: `trimAll(chars string,src string) string`
+
+Examples:
+
+```template
+{{ .path | trimAll "/" }}
+{{ trimAll "$" "$5.00" }} // output: 5.00
+```
+
+## trimSuffix
+
+> **Note:** Added in Loki 2.1.
+
+Use this function to trim just the suffix from a string.
+
+Signature: `trimSuffix(suffix string, src string) string`
+
+Examples:
+
+```template
+{{ .path | trimSuffix "/" }}
+{{ trimSuffix "-" "hello-" }} // output: hello
+```
+
+## trimPrefix
+
+> **Note:** Added in Loki 2.1.
+
+Use this function to trim just the prefix from a string.
+
+Signature: `trimPrefix(prefix string, src string) string`
+
+Examples:
+
+```template
+{{ .path | trimPrefix "/" }}
+{{ trimPrefix "-" "-hello" }} // output: hello
+```
+
+## indent
+
+> **Note:** Added in Loki 2.1.
+
+The indent function indents every line in a given string to the specified indent width. This is useful when aligning multi-line strings.
+
+Signature: `indent(spaces int,src string) string`
+
+```template
+{{ indent 4 .query }}
+```
+
+This indents each line contained in `.query` by four (4) spaces.
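+
+For example:
+
+```template
+{{ "foo\n bar" | indent 4 }} // output: "    foo\n     bar"
+```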
+
+## nindent
+
+> **Note:** Added in Loki 2.1.
+
+The nindent function is the same as the indent function, but prepends a new line to the beginning of the string.
+
+Signature: `nindent(spaces int,src string) string`
+
+```template
+{{ nindent 4 .query }}
+```
+
+This indents every line of text by four (4) spaces and adds a new line to the beginning.
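+
+For example:
+
+```template
+{{ "foo" | nindent 2 }} // output: "\n  foo"
+```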
+
+## repeat
+
+> **Note:** Added in Loki 2.1.
+
+Use this function to repeat a string multiple times.
+
+Signature: `repeat(c int,value string) string`
+
+```template
+{{ repeat 3 "hello" }} // output: hellohellohello
+```
+
+## contains
+
+> **Note:** Added in Loki 2.1.
+
+Use this function to test whether one string is contained within another.
+
+Signature: `contains(s string, src string) bool`
+
+Examples:
+
+```template
+{{ if .err | contains "ErrTimeout" }} timeout {{end}}
+{{ if contains "he" "hello" }} yes {{end}}
+```
+
+## hasPrefix and hasSuffix
+
+> **Note:** Added in Loki 2.1.
+
+The `hasPrefix` and `hasSuffix` functions test whether a string has a given prefix or suffix.
+
+Signatures:
+
+- `hasPrefix(prefix string, src string) bool`
+- `hasSuffix(suffix string, src string) bool`
+
+Examples:
+
+```template
+{{ if .err | hasSuffix "Timeout" }} timeout {{end}}
+{{ if hasPrefix "he" "hello" }} yes {{end}}
+```
diff --git a/pkg/logql/log/error.go b/pkg/logql/log/error.go
index 04dd20ecdcd15..a518b08c5e590 100644
--- a/pkg/logql/log/error.go
+++ b/pkg/logql/log/error.go
@@ -6,6 +6,7 @@ var (
errLogfmt = "LogfmtParserErr"
errSampleExtraction = "SampleExtractionErr"
errLabelFilter = "LabelFilterErr"
+ errTemplateFormat = "TemplateFormatErr"
ErrorLabel = "__error__"
)
diff --git a/pkg/logql/log/fmt.go b/pkg/logql/log/fmt.go
index 231fa37719043..745aed711974c 100644
--- a/pkg/logql/log/fmt.go
+++ b/pkg/logql/log/fmt.go
@@ -14,6 +14,7 @@ var (
// Available map of functions for the text template engine.
functionMap = template.FuncMap{
+	// Old functions, deprecated.
"ToLower": strings.ToLower,
"ToUpper": strings.ToUpper,
"Replace": strings.Replace,
@@ -23,6 +24,28 @@ var (
"TrimPrefix": strings.TrimPrefix,
"TrimSuffix": strings.TrimSuffix,
"TrimSpace": strings.TrimSpace,
+
+	// New functions ported from https://github.com/Masterminds/sprig/
+	// These functions take the string as the last parameter, allowing pipe chaining.
+	// Example: .mylabel | lower | substring 0 5
+ "lower": strings.ToLower,
+ "upper": strings.ToUpper,
+ "title": strings.Title,
+ "trunc": trunc,
+ "substr": substring,
+ "contains": contains,
+ "hasPrefix": hasPrefix,
+ "hasSuffix": hasSuffix,
+ "indent": indent,
+ "nindent": nindent,
+ "replace": replace,
+ "repeat": repeat,
+ "trim": strings.TrimSpace,
+ "trimAll": trimAll,
+ "trimSuffix": trimSuffix,
+ "trimPrefix": trimPrefix,
+
+ // regex functions
"regexReplaceAll": func(regex string, s string, repl string) string {
r := regexp.MustCompile(regex)
return r.ReplaceAllString(s, repl)
@@ -51,10 +74,12 @@ func NewFormatter(tmpl string) (*LineFormatter, error) {
}, nil
}
-func (lf *LineFormatter) Process(_ []byte, lbs *LabelsBuilder) ([]byte, bool) {
+func (lf *LineFormatter) Process(line []byte, lbs *LabelsBuilder) ([]byte, bool) {
lf.buf.Reset()
- // todo(cyriltovena): handle error
- _ = lf.Template.Execute(lf.buf, lbs.Labels().Map())
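+	// On failure, keep the original line and surface the error via the __error__ label.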
+ if err := lf.Template.Execute(lf.buf, lbs.Labels().Map()); err != nil {
+ lbs.SetErr(errTemplateFormat)
+ return line, true
+ }
// todo(cyriltovena): we might want to reuse the input line or a bytes buffer.
res := make([]byte, len(lf.buf.Bytes()))
copy(res, lf.buf.Bytes())
@@ -150,12 +175,72 @@ func (lf *LabelsFormatter) Process(l []byte, lbs *LabelsBuilder) ([]byte, bool)
continue
}
lf.buf.Reset()
- //todo (cyriltovena): handle error
if data == nil {
data = lbs.Labels().Map()
}
- _ = f.tmpl.Execute(lf.buf, data)
+ if err := f.tmpl.Execute(lf.buf, data); err != nil {
+ lbs.SetErr(errTemplateFormat)
+ continue
+ }
lbs.Set(f.Name, lf.buf.String())
}
return l, true
}
+
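+// trunc truncates s to the first c runes; a negative c keeps the last -c runes.
+// Strings shorter than the requested length are returned unchanged.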
+func trunc(c int, s string) string {
+ runes := []rune(s)
+ l := len(runes)
+ if c < 0 && l+c > 0 {
+ return string(runes[l+c:])
+ }
+ if c >= 0 && l > c {
+ return string(runes[:c])
+ }
+ return s
+}
+
+// substring creates a substring of the given string.
+//
+// If start is < 0, this calls string[:end].
+//
+// If start is >= 0 and end < 0 or end is greater than the length of s, this calls string[start:].
+//
+// Otherwise, this calls string[start:end].
+func substring(start, end int, s string) string {
+ runes := []rune(s)
+ l := len(runes)
+ if end > l {
+ end = l
+ }
+ if start > l {
+ start = l
+ }
+ if start < 0 {
+ if end < 0 {
+ return ""
+ }
+ return string(runes[:end])
+ }
+ if end < 0 {
+ return string(runes[start:])
+ }
+ if start > end {
+ return ""
+ }
+ return string(runes[start:end])
+}
+
+func contains(substr string, str string) bool { return strings.Contains(str, substr) }
+func hasPrefix(substr string, str string) bool { return strings.HasPrefix(str, substr) }
+func hasSuffix(substr string, str string) bool { return strings.HasSuffix(str, substr) }
+func repeat(count int, str string) string { return strings.Repeat(str, count) }
+func replace(old, new, src string) string { return strings.Replace(src, old, new, -1) }
+func trimAll(a, b string) string { return strings.Trim(b, a) }
+func trimSuffix(a, b string) string { return strings.TrimSuffix(b, a) }
+func trimPrefix(a, b string) string { return strings.TrimPrefix(b, a) }
+func indent(spaces int, v string) string {
+ pad := strings.Repeat(" ", spaces)
+ return pad + strings.Replace(v, "\n", "\n"+pad, -1)
+}
+
+func nindent(spaces int, v string) string { return "\n" + indent(spaces, v) }
diff --git a/pkg/logql/log/fmt_test.go b/pkg/logql/log/fmt_test.go
index a7bbf8889f085..784c1434983ff 100644
--- a/pkg/logql/log/fmt_test.go
+++ b/pkg/logql/log/fmt_test.go
@@ -1,6 +1,7 @@
package log
import (
+ "fmt"
"sort"
"testing"
@@ -24,6 +25,115 @@ func Test_lineFormatter_Format(t *testing.T) {
[]byte("fooblipbuzzblop"),
labels.Labels{{Name: "foo", Value: "blip"}, {Name: "bar", Value: "blop"}},
},
+ {
+ "Replace",
+ newMustLineFormatter(`foo{{.foo}}buzz{{ Replace .bar "blop" "bar" -1 }}`),
+ labels.Labels{{Name: "foo", Value: "blip"}, {Name: "bar", Value: "blop"}},
+ []byte("fooblipbuzzbar"),
+ labels.Labels{{Name: "foo", Value: "blip"}, {Name: "bar", Value: "blop"}},
+ },
+ {
+ "replace",
+ newMustLineFormatter(`foo{{.foo}}buzz{{ .bar | replace "blop" "bar" }}`),
+ labels.Labels{{Name: "foo", Value: "blip"}, {Name: "bar", Value: "blop"}},
+ []byte("fooblipbuzzbar"),
+ labels.Labels{{Name: "foo", Value: "blip"}, {Name: "bar", Value: "blop"}},
+ },
+ {
+ "title",
+ newMustLineFormatter(`{{.foo | title }}`),
+ labels.Labels{{Name: "foo", Value: "blip"}, {Name: "bar", Value: "blop"}},
+ []byte("Blip"),
+ labels.Labels{{Name: "foo", Value: "blip"}, {Name: "bar", Value: "blop"}},
+ },
+ {
+ "substr and trunc",
+ newMustLineFormatter(
+ `{{.foo | substr 1 3 }} {{ .bar | trunc 1 }} {{ .bar | trunc 3 }}`,
+ ),
+ labels.Labels{{Name: "foo", Value: "blip"}, {Name: "bar", Value: "blop"}},
+ []byte("li b blo"),
+ labels.Labels{{Name: "foo", Value: "blip"}, {Name: "bar", Value: "blop"}},
+ },
+ {
+ "trim",
+ newMustLineFormatter(
+ `{{.foo | trim }} {{ .bar | trimAll "op" }} {{ .bar | trimPrefix "b" }} {{ .bar | trimSuffix "p" }}`,
+ ),
+ labels.Labels{{Name: "foo", Value: " blip "}, {Name: "bar", Value: "blop"}},
+ []byte("blip bl lop blo"),
+ labels.Labels{{Name: "foo", Value: " blip "}, {Name: "bar", Value: "blop"}},
+ },
+ {
+ "lower and upper",
+ newMustLineFormatter(`{{.foo | lower }} {{ .bar | upper }}`),
+ labels.Labels{{Name: "foo", Value: "BLIp"}, {Name: "bar", Value: "blop"}},
+ []byte("blip BLOP"),
+ labels.Labels{{Name: "foo", Value: "BLIp"}, {Name: "bar", Value: "blop"}},
+ },
+ {
+ "repeat",
+ newMustLineFormatter(`{{ "foo" | repeat 3 }}`),
+ labels.Labels{{Name: "foo", Value: "BLIp"}, {Name: "bar", Value: "blop"}},
+ []byte("foofoofoo"),
+ labels.Labels{{Name: "foo", Value: "BLIp"}, {Name: "bar", Value: "blop"}},
+ },
+ {
+ "indent",
+ newMustLineFormatter(`{{ "foo\n bar" | indent 4 }}`),
+ labels.Labels{{Name: "foo", Value: "BLIp"}, {Name: "bar", Value: "blop"}},
+ []byte(" foo\n bar"),
+ labels.Labels{{Name: "foo", Value: "BLIp"}, {Name: "bar", Value: "blop"}},
+ },
+ {
+ "nindent",
+ newMustLineFormatter(`{{ "foo" | nindent 2 }}`),
+ labels.Labels{{Name: "foo", Value: "BLIp"}, {Name: "bar", Value: "blop"}},
+ []byte("\n foo"),
+ labels.Labels{{Name: "foo", Value: "BLIp"}, {Name: "bar", Value: "blop"}},
+ },
+ {
+ "contains",
+ newMustLineFormatter(`{{ if .foo | contains "p"}}yes{{end}}-{{ if .foo | contains "z"}}no{{end}}`),
+ labels.Labels{{Name: "foo", Value: "BLIp"}, {Name: "bar", Value: "blop"}},
+ []byte("yes-"),
+ labels.Labels{{Name: "foo", Value: "BLIp"}, {Name: "bar", Value: "blop"}},
+ },
+ {
+ "hasPrefix",
+ newMustLineFormatter(`{{ if .foo | hasPrefix "BL" }}yes{{end}}-{{ if .foo | hasPrefix "p"}}no{{end}}`),
+ labels.Labels{{Name: "foo", Value: "BLIp"}, {Name: "bar", Value: "blop"}},
+ []byte("yes-"),
+ labels.Labels{{Name: "foo", Value: "BLIp"}, {Name: "bar", Value: "blop"}},
+ },
+ {
+ "hasSuffix",
+ newMustLineFormatter(`{{ if .foo | hasSuffix "Ip" }}yes{{end}}-{{ if .foo | hasSuffix "pw"}}no{{end}}`),
+ labels.Labels{{Name: "foo", Value: "BLIp"}, {Name: "bar", Value: "blop"}},
+ []byte("yes-"),
+ labels.Labels{{Name: "foo", Value: "BLIp"}, {Name: "bar", Value: "blop"}},
+ },
+ {
+ "regexReplaceAll",
+ newMustLineFormatter(`{{ regexReplaceAll "(p)" .foo "t" }}`),
+ labels.Labels{{Name: "foo", Value: "BLIp"}, {Name: "bar", Value: "blop"}},
+ []byte("BLIt"),
+ labels.Labels{{Name: "foo", Value: "BLIp"}, {Name: "bar", Value: "blop"}},
+ },
+ {
+ "regexReplaceAllLiteral",
+ newMustLineFormatter(`{{ regexReplaceAllLiteral "(p)" .foo "${1}" }}`),
+ labels.Labels{{Name: "foo", Value: "BLIp"}, {Name: "bar", Value: "blop"}},
+ []byte("BLI${1}"),
+ labels.Labels{{Name: "foo", Value: "BLIp"}, {Name: "bar", Value: "blop"}},
+ },
+ {
+ "err",
+ newMustLineFormatter(`{{.foo Replace "foo"}}`),
+ labels.Labels{{Name: "foo", Value: "blip"}, {Name: "bar", Value: "blop"}},
+ nil,
+ labels.Labels{{Name: ErrorLabel, Value: errTemplateFormat}, {Name: "foo", Value: "blip"}, {Name: "bar", Value: "blop"}},
+ },
{
"missing",
newMustLineFormatter("foo {{.foo}}buzz{{ .bar }}"),
@@ -41,6 +151,8 @@ func Test_lineFormatter_Format(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
+ sort.Sort(tt.lbs)
+ sort.Sort(tt.wantLbs)
builder := NewBaseLabelsBuilder().ForLabels(tt.lbs, tt.lbs.Hash())
builder.Reset()
outLine, _ := tt.fmter.Process(nil, builder)
@@ -129,3 +241,52 @@ func Test_validate(t *testing.T) {
})
}
}
+
+func Test_trunc(t *testing.T) {
+ tests := []struct {
+ s string
+ c int
+ want string
+ }{
+ {"Hello, 世界", -1, "界"},
+ {"Hello, 世界", 1, "H"},
+ {"Hello, 世界", 0, ""},
+ {"Hello, 世界", 20, "Hello, 世界"},
+ {"Hello, 世界", -20, "Hello, 世界"},
+ }
+ for _, tt := range tests {
+ t.Run(fmt.Sprintf("%s%d", tt.s, tt.c), func(t *testing.T) {
+ if got := trunc(tt.c, tt.s); got != tt.want {
+ t.Errorf("trunc() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func Test_substring(t *testing.T) {
+ tests := []struct {
+ start int
+ end int
+ s string
+ want string
+ }{
+ {1, 8, "Hello, 世界", "ello, 世"},
+ {-10, 8, "Hello, 世界", "Hello, 世"},
+ {1, 10, "Hello, 世界", "ello, 世界"},
+ {-1, 10, "Hello, 世界", "Hello, 世界"},
+ {-1, 1, "Hello, 世界", "H"},
+ {-1, -1, "Hello, 世界", ""},
+ {20, -1, "Hello, 世界", ""},
+ {1, 1, "Hello, 世界", ""},
+ {5, 1, "Hello, 世界", ""},
+ {3, -1, "Hello, 世界", "lo, 世界"},
+ }
+ for _, tt := range tests {
+ t.Run(tt.s, func(t *testing.T) {
+ if got := substring(tt.start, tt.end, tt.s); got != tt.want {
+ t.Errorf("substring() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
|
logql
|
Improve template format (#2822)
|
f43dd58459239057cf8ca0dbe53b5e7ef89c7ae9
|
2023-06-06 22:30:40
|
Periklis Tsirakidis
|
operator: Fix timeout config constructor when only tenants limits (#9623)
| false
|
diff --git a/operator/CHANGELOG.md b/operator/CHANGELOG.md
index 753c93d882e61..73a980f3f93e2 100644
--- a/operator/CHANGELOG.md
+++ b/operator/CHANGELOG.md
@@ -1,5 +1,6 @@
## Main
+- [9623](https://github.com/grafana/loki/pull/9623) **periklis**: Fix timeout config constructor when only tenants limits
- [9457](https://github.com/grafana/loki/pull/9457) **Red-GV**: Set seccomp profile to runtime default
- [9448](https://github.com/grafana/loki/pull/9448) **btaani**: Include runtime-config in compiling the SHA1 checksum
- [9511](https://github.com/grafana/loki/pull/9511) **xperimental**: Do not update status after setting degraded condition
diff --git a/operator/internal/manifests/options.go b/operator/internal/manifests/options.go
index 24e07e375ef47..4f4e32f947dbe 100644
--- a/operator/internal/manifests/options.go
+++ b/operator/internal/manifests/options.go
@@ -135,7 +135,7 @@ func NewTimeoutConfig(s *lokiv1.LimitsSpec) (TimeoutConfig, error) {
}
queryTimeout := lokiDefaultQueryTimeout
- if s.Global.QueryLimits != nil && s.Global.QueryLimits.QueryTimeout != "" {
+ if s.Global != nil && s.Global.QueryLimits != nil && s.Global.QueryLimits.QueryTimeout != "" {
var err error
globalQueryTimeout, err := time.ParseDuration(s.Global.QueryLimits.QueryTimeout)
if err != nil {
diff --git a/operator/internal/manifests/options_test.go b/operator/internal/manifests/options_test.go
index 6d49649620437..48e9853f7870a 100644
--- a/operator/internal/manifests/options_test.go
+++ b/operator/internal/manifests/options_test.go
@@ -95,6 +95,45 @@ func TestNewTimeoutConfig_ReturnsCustomConfig_WhenLimitsSpecNotEmpty_UseMaxTenan
require.Equal(t, want, got)
}
+func TestNewTimeoutConfig_ReturnsCustomConfig_WhenTenantLimitsSpecOnly_ReturnsUseMaxTenantQueryTimeout(t *testing.T) {
+ s := lokiv1.LokiStack{
+ Spec: lokiv1.LokiStackSpec{
+ Limits: &lokiv1.LimitsSpec{
+ Tenants: map[string]lokiv1.LimitsTemplateSpec{
+ "tenant-a": {
+ QueryLimits: &lokiv1.QueryLimitSpec{
+ QueryTimeout: "10m",
+ },
+ },
+ "tenant-b": {
+ QueryLimits: &lokiv1.QueryLimitSpec{
+ QueryTimeout: "20m",
+ },
+ },
+ },
+ },
+ },
+ }
+
+ got, err := NewTimeoutConfig(s.Spec.Limits)
+ require.NoError(t, err)
+
+ want := TimeoutConfig{
+ Loki: config.HTTPTimeoutConfig{
+ IdleTimeout: 30 * time.Second,
+ ReadTimeout: 2 * time.Minute,
+ WriteTimeout: 21 * time.Minute,
+ },
+ Gateway: GatewayTimeoutConfig{
+ ReadTimeout: 2*time.Minute + gatewayReadDuration,
+ WriteTimeout: 21*time.Minute + gatewayWriteDuration,
+ UpstreamWriteTimeout: 21 * time.Minute,
+ },
+ }
+
+ require.Equal(t, want, got)
+}
+
func TestNewTimeoutConfig_ReturnsDefaults_WhenGlobalQueryTimeoutParseError(t *testing.T) {
s := lokiv1.LokiStack{
Spec: lokiv1.LokiStackSpec{
|
operator
|
Fix timeout config constructor when only tenants limits (#9623)
|
89a70fbc05fa181abf8b102e7bf45b9a5a4e6001
|
2025-02-26 23:55:39
|
renovate[bot]
|
chore(deps): update dependency eslint-plugin-react-hooks to v5 (main) (#16467)
| false
|
diff --git a/pkg/ui/frontend/package-lock.json b/pkg/ui/frontend/package-lock.json
index ce5562167dfcb..41daa1b9bd946 100644
--- a/pkg/ui/frontend/package-lock.json
+++ b/pkg/ui/frontend/package-lock.json
@@ -62,7 +62,7 @@
"autoprefixer": "^10.4.20",
"depcheck": "^1.4.7",
"eslint": "^8.38.0",
- "eslint-plugin-react-hooks": "^4.6.0",
+ "eslint-plugin-react-hooks": "^5.0.0",
"eslint-plugin-react-refresh": "^0.4.5",
"postcss": "^8.5.1",
"tailwindcss": "^3.4.1",
@@ -4051,16 +4051,16 @@
}
},
"node_modules/eslint-plugin-react-hooks": {
- "version": "4.6.2",
- "resolved": "https://registry.npmjs.org/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-4.6.2.tgz",
- "integrity": "sha512-QzliNJq4GinDBcD8gPB5v0wh6g8q3SUi6EFF0x8N/BL9PoVs0atuGc47ozMRyOWAKdwaZ5OnbOEa3WR+dSGKuQ==",
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-5.1.0.tgz",
+ "integrity": "sha512-mpJRtPgHN2tNAvZ35AMfqeB3Xqeo273QxrHJsbBEPWODRM4r0yB6jfoROqKEYrOn27UtRPpcpHc2UqyBSuUNTw==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=10"
},
"peerDependencies": {
- "eslint": "^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0-0"
+ "eslint": "^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0-0 || ^9.0.0"
}
},
"node_modules/eslint-plugin-react-refresh": {
diff --git a/pkg/ui/frontend/package.json b/pkg/ui/frontend/package.json
index 4159d85fb249e..c25a3acbe8315 100644
--- a/pkg/ui/frontend/package.json
+++ b/pkg/ui/frontend/package.json
@@ -64,7 +64,7 @@
"autoprefixer": "^10.4.20",
"depcheck": "^1.4.7",
"eslint": "^8.38.0",
- "eslint-plugin-react-hooks": "^4.6.0",
+ "eslint-plugin-react-hooks": "^5.0.0",
"eslint-plugin-react-refresh": "^0.4.5",
"postcss": "^8.5.1",
"tailwindcss": "^3.4.1",
|
chore
|
update dependency eslint-plugin-react-hooks to v5 (main) (#16467)
|
275c97cec7f70e68c56192c565d53a6c2a18ff78
|
2024-10-23 01:18:43
|
renovate[bot]
|
fix(deps): update module github.com/fsouza/fake-gcs-server to v1.50.2 (#14313)
| false
|
diff --git a/go.mod b/go.mod
index d0c6a2e47ab0e..6a9f827250564 100644
--- a/go.mod
+++ b/go.mod
@@ -7,7 +7,7 @@ toolchain go1.23.1
require (
cloud.google.com/go/bigtable v1.33.0
cloud.google.com/go/pubsub v1.45.0
- cloud.google.com/go/storage v1.43.0
+ cloud.google.com/go/storage v1.44.0
github.com/Azure/azure-pipeline-go v0.2.3
github.com/Azure/azure-storage-blob-go v0.14.0
github.com/Azure/go-autorest/autorest/adal v0.9.24
@@ -36,7 +36,7 @@ require (
github.com/fatih/color v1.16.0
github.com/felixge/fgprof v0.9.5
github.com/fluent/fluent-bit-go v0.0.0-20230731091245-a7a013e2473c
- github.com/fsouza/fake-gcs-server v1.7.0
+ github.com/fsouza/fake-gcs-server v1.50.2
github.com/go-kit/log v0.2.1
github.com/go-logfmt/logfmt v0.6.0
github.com/go-redis/redis/v8 v8.11.5
@@ -65,11 +65,11 @@ require (
github.com/jmespath/go-jmespath v0.4.0
github.com/joncrlsn/dque v0.0.0-20211108142734-c2ef48c5192a
github.com/json-iterator/go v1.1.12
- github.com/klauspost/compress v1.17.10
+ github.com/klauspost/compress v1.17.11
github.com/klauspost/pgzip v1.2.6
github.com/leodido/go-syslog/v4 v4.1.0
github.com/mattn/go-ieproxy v0.0.12
- github.com/minio/minio-go/v7 v7.0.77
+ github.com/minio/minio-go/v7 v7.0.78
github.com/mitchellh/go-wordwrap v1.0.1
github.com/mitchellh/mapstructure v1.5.0
github.com/modern-go/reflect2 v1.0.2
@@ -98,12 +98,12 @@ require (
go.etcd.io/bbolt v1.3.11
go.uber.org/atomic v1.11.0
go.uber.org/goleak v1.3.0
- golang.org/x/crypto v0.27.0
- golang.org/x/net v0.29.0
+ golang.org/x/crypto v0.28.0
+ golang.org/x/net v0.30.0
golang.org/x/sync v0.8.0
- golang.org/x/sys v0.25.0
- golang.org/x/time v0.6.0
- google.golang.org/api v0.197.0
+ golang.org/x/sys v0.26.0
+ golang.org/x/time v0.7.0
+ google.golang.org/api v0.201.0
google.golang.org/grpc v1.67.1
gopkg.in/alecthomas/kingpin.v2 v2.2.6
gopkg.in/yaml.v2 v2.4.0
@@ -151,19 +151,22 @@ require (
go4.org/netipx v0.0.0-20230125063823-8449b0a6169f
golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8
golang.org/x/oauth2 v0.23.0
- golang.org/x/text v0.18.0
- google.golang.org/protobuf v1.34.2
+ golang.org/x/text v0.19.0
+ google.golang.org/protobuf v1.35.1
gotest.tools v2.2.0+incompatible
k8s.io/apimachinery v0.29.3
k8s.io/utils v0.0.0-20240902221715-702e33fdd3c3
)
require (
- cel.dev/expr v0.16.0 // indirect
- cloud.google.com/go/auth v0.9.3 // indirect
+ cel.dev/expr v0.16.1 // indirect
+ cloud.google.com/go/auth v0.9.8 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect
cloud.google.com/go/monitoring v1.21.1 // indirect
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect
+ github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1 // indirect
+ github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 // indirect
+ github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1 // indirect
github.com/benbjohnson/immutable v0.4.0 // indirect
github.com/coreos/etcd v3.3.27+incompatible // indirect
github.com/coreos/pkg v0.0.0-20220810130054-c7d1c02cb6cf // indirect
@@ -173,25 +176,29 @@ require (
github.com/go-ini/ini v1.67.0 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/goccy/go-json v0.10.3 // indirect
+ github.com/gorilla/handlers v1.5.2 // indirect
github.com/hashicorp/go-msgpack/v2 v2.1.2 // indirect
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/moby/sys/userns v0.1.0 // indirect
github.com/ncw/swift v1.0.53 // indirect
github.com/pires/go-proxyproto v0.7.0 // indirect
+ github.com/pkg/xattr v0.4.10 // indirect
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/tklauser/go-sysconf v0.3.12 // indirect
github.com/tklauser/numcpus v0.6.1 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
+ go.opentelemetry.io/contrib/detectors/gcp v1.29.0 // indirect
go.opentelemetry.io/otel/sdk v1.29.0 // indirect
go.opentelemetry.io/otel/sdk/metric v1.29.0 // indirect
+ google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a // indirect
)
require (
- cloud.google.com/go v0.115.1 // indirect
- cloud.google.com/go/compute/metadata v0.5.0 // indirect
+ cloud.google.com/go v0.116.0 // indirect
+ cloud.google.com/go/compute/metadata v0.5.2 // indirect
cloud.google.com/go/iam v1.2.1 // indirect
cloud.google.com/go/longrunning v0.6.1 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 // indirect
@@ -231,7 +238,7 @@ require (
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect
- github.com/cncf/xds/go v0.0.0-20240822171458-6449f94b4d59 // indirect
+ github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 // indirect
github.com/containerd/log v0.1.0 // indirect
github.com/coreos/go-semver v0.3.0 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
@@ -358,11 +365,11 @@ require (
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.21.0 // indirect
golang.org/x/mod v0.19.0 // indirect
- golang.org/x/term v0.24.0 // indirect
+ golang.org/x/term v0.25.0 // indirect
golang.org/x/tools v0.23.0 // indirect
- google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
+ google.golang.org/genproto v0.0.0-20241007155032-5fefd90f89a9 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9 // indirect
gopkg.in/fsnotify/fsnotify.v1 v1.4.7 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
diff --git a/go.sum b/go.sum
index e5b40372ef457..c4be2fc1151b3 100644
--- a/go.sum
+++ b/go.sum
@@ -1,5 +1,5 @@
-cel.dev/expr v0.16.0 h1:yloc84fytn4zmJX2GU3TkXGsaieaV7dQ057Qs4sIG2Y=
-cel.dev/expr v0.16.0/go.mod h1:TRSuuV7DlVCE/uwv5QbAiW/v8l5O8C4eEPHeu7gf7Sg=
+cel.dev/expr v0.16.1 h1:NR0+oFYzR1CqLFhTAqg3ql59G9VfN8fKq1TCHJ6gq1g=
+cel.dev/expr v0.16.1/go.mod h1:AsGA5zb3WruAEQeQng1RZdGEXmBj0jvMWh6l5SnNuC8=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw=
@@ -34,15 +34,15 @@ cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w9
cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc=
cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU=
cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA=
-cloud.google.com/go v0.115.1 h1:Jo0SM9cQnSkYfp44+v+NQXHpcHqlnRJk2qxh6yvxxxQ=
-cloud.google.com/go v0.115.1/go.mod h1:DuujITeaufu3gL68/lOFIirVNJwQeyf5UXyi+Wbgknc=
+cloud.google.com/go v0.116.0 h1:B3fRrSDkLRt5qSHWe40ERJvhvnQwdZiHu0bJOpldweE=
+cloud.google.com/go v0.116.0/go.mod h1:cEPSRWPzZEswwdr9BxE6ChEn01dWlTaF05LiC2Xs70U=
cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI=
cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4=
cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ=
cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o=
cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY=
-cloud.google.com/go/auth v0.9.3 h1:VOEUIAADkkLtyfr3BLa3R8Ed/j6w1jTBmARx+wb5w5U=
-cloud.google.com/go/auth v0.9.3/go.mod h1:7z6VY+7h3KUdRov5F1i8NDP5ZzWKYmEPO842BgCsmTk=
+cloud.google.com/go/auth v0.9.8 h1:+CSJ0Gw9iVeSENVCKJoLHhdUykDgXSc4Qn+gu2BRtR8=
+cloud.google.com/go/auth v0.9.8/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI=
cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY=
cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc=
cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0=
@@ -64,8 +64,8 @@ cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6m
cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s=
cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU=
cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U=
-cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY=
-cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY=
+cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo=
+cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k=
cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0=
cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs=
cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM=
@@ -86,10 +86,12 @@ cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J
cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY=
cloud.google.com/go/iam v1.2.1 h1:QFct02HRb7H12J/3utj0qf5tobFh9V4vR6h9eX5EBRU=
cloud.google.com/go/iam v1.2.1/go.mod h1:3VUIJDPpwT6p/amXRC5GY8fCCh70lxPygguVtI0Z4/g=
-cloud.google.com/go/kms v1.19.1 h1:NPE8zjJuMpECvHsx8lsMwQuWWIdJc6iIDHLJGC/J4bw=
-cloud.google.com/go/kms v1.19.1/go.mod h1:GRbd2v6e9rAVs+IwOIuePa3xcCm7/XpGNyWtBwwOdRc=
+cloud.google.com/go/kms v1.20.0 h1:uKUvjGqbBlI96xGE669hcVnEMw1Px/Mvfa62dhM5UrY=
+cloud.google.com/go/kms v1.20.0/go.mod h1:/dMbFF1tLLFnQV44AoI2GlotbjowyUfgVwezxW291fM=
cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic=
cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8=
+cloud.google.com/go/logging v1.11.0 h1:v3ktVzXMV7CwHq1MBF65wcqLMA7i+z3YxbUsoK7mOKs=
+cloud.google.com/go/logging v1.11.0/go.mod h1:5LDiJC/RxTt+fHc1LAt20R9TKiUTReDg6RuuFOZ67+A=
cloud.google.com/go/longrunning v0.6.1 h1:lOLTFxYpr8hcRtcwWir5ITh1PAKUD/sG2lKrTSYjyMc=
cloud.google.com/go/longrunning v0.6.1/go.mod h1:nHISoOZpBcmlwbJmiVk5oDRz0qG/ZxPynEGs1iZ79s0=
cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4=
@@ -130,9 +132,11 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y=
cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc=
-cloud.google.com/go/storage v1.43.0 h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyXFs=
-cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0=
+cloud.google.com/go/storage v1.44.0 h1:abBzXf4UJKMmQ04xxJf9dYM/fNl24KHoTuBjyJDX2AI=
+cloud.google.com/go/storage v1.44.0/go.mod h1:wpPblkIuMP5jCB/E48Pz9zIo2S/zD8g+ITmxKkPCITE=
cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw=
+cloud.google.com/go/trace v1.11.1 h1:UNqdP+HYYtnm6lb91aNA5JQ0X14GnxkABGlfz2PzPew=
+cloud.google.com/go/trace v1.11.1/go.mod h1:IQKNQuBzH72EGaXEodKlNJrWykGZxet2zgjtS60OtjA=
cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU=
cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0=
cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo=
@@ -248,6 +252,14 @@ github.com/DataDog/sketches-go v1.4.6/go.mod h1:7Y8GN8Jf66DLyDhc94zuWA3uHEt/7ttt
github.com/DataDog/zstd v1.3.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
github.com/DmitriyVTitov/size v1.5.0 h1:/PzqxYrOyOUX1BXj6J9OuVRVGe+66VL4D9FlUaW515g=
github.com/DmitriyVTitov/size v1.5.0/go.mod h1:le6rNI4CoLQV1b9gzp1+3d7hMAD/uu2QcJ+aYbNgiU0=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1 h1:pB2F2JKCj1Znmp2rwxxt1J0Fg0wezTMgWYk5Mpbi1kg=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1/go.mod h1:itPGVDKf9cC/ov4MdvJ2QZ0khw4bfoo9jzwTJlaxy2k=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 h1:UQ0AhxogsIRZDkElkblfnwjc3IaltCm2HUMvezQaL7s=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1/go.mod h1:jyqM3eLpJ3IbIFDTKVz2rF9T/xWGW0rIriGwnz8l9Tk=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.48.1 h1:oTX4vsorBZo/Zdum6OKPA4o7544hm6smoRv1QjpTwGo=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.48.1/go.mod h1:0wEl7vrAD8mehJyohS9HZy+WyEOaQO2mJx86Cvh93kM=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1 h1:8nn+rsCvTq9axyEh382S0PFLBeaFwNsT43IrPWzctRU=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1/go.mod h1:viRWSEhtMZqz1rhwmOVKkWl6SwmVowfL9O2YR5gI2PE=
github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM=
github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo=
github.com/IBM/go-sdk-core/v5 v5.17.5 h1:AjGC7xNee5tgDIjndekBDW5AbypdERHSgib3EZ1KNsA=
@@ -459,8 +471,8 @@ github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWH
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20240822171458-6449f94b4d59 h1:fLZ97KE86ELjEYJCEUVzmbhfzDxHHGwBrDVMd4XL6Bs=
-github.com/cncf/xds/go v0.0.0-20240822171458-6449f94b4d59/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
+github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 h1:QVw89YDxXxEe+l8gU8ETbOasdwEV+avkR75ZzsVV9WI=
+github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
@@ -644,8 +656,8 @@ github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
-github.com/fsouza/fake-gcs-server v1.7.0 h1:Un0BXUXrRWYSmYyC1Rqm2e2WJfTPyDy/HGMz31emTi8=
-github.com/fsouza/fake-gcs-server v1.7.0/go.mod h1:5XIRs4YvwNbNoz+1JF8j6KLAyDh7RHGAyAK3EP2EsNk=
+github.com/fsouza/fake-gcs-server v1.50.2 h1:ulrS1pavCOCbMZfN5ZPgBRMFWclON9xDsuLBniXtQoE=
+github.com/fsouza/fake-gcs-server v1.50.2/go.mod h1:VU6Zgei4647KuT4XER8WHv5Hcj2NIySndyG8gfvwckA=
github.com/fullstorydev/emulators/storage v0.0.0-20240401123056-edc69752f474 h1:TufioMBjkJ6/Oqmlye/ReuxHFS35HyLmypj/BNy/8GY=
github.com/fullstorydev/emulators/storage v0.0.0-20240401123056-edc69752f474/go.mod h1:PQwxF4UU8wuL+srGxr3BOhIW5zXqgucwVlO/nPZLsxw=
github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0=
@@ -1031,8 +1043,9 @@ github.com/gophercloud/gophercloud v1.13.0 h1:8iY9d1DAbzMW6Vok1AxbbK5ZaUjzMp0tdy
github.com/gophercloud/gophercloud v1.13.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
+github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE=
+github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w=
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
-github.com/gorilla/mux v1.7.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
@@ -1306,8 +1319,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.17.10 h1:oXAz+Vh0PMUvJczoi+flxpnBEPxoER1IaAnU/NMPtT0=
-github.com/klauspost/compress v1.17.10/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
+github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
+github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM=
@@ -1416,8 +1429,8 @@ github.com/miekg/dns v1.1.61/go.mod h1:mnAarhS3nWaW+NVP2wTkYVIZyHNJ098SJZUki3eyk
github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721/go.mod h1:Ickgr2WtCLZ2MDGd4Gr0geeCH5HybhRJbonOgQpvSxc=
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
-github.com/minio/minio-go/v7 v7.0.77 h1:GaGghJRg9nwDVlNbwYjSDJT1rqltQkBFDsypWX1v3Bw=
-github.com/minio/minio-go/v7 v7.0.77/go.mod h1:AVM3IUN6WwKzmwBxVdjzhH8xq+f57JSbbvzqvUzR6eg=
+github.com/minio/minio-go/v7 v7.0.78 h1:LqW2zy52fxnI4gg8C2oZviTaKHcBV36scS+RzJnxUFs=
+github.com/minio/minio-go/v7 v7.0.78/go.mod h1:84gmIilaX4zcvAWWzJ5Z1WI5axN+hAbM5w25xf8xvC0=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ=
@@ -1591,6 +1604,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
+github.com/pkg/xattr v0.4.10 h1:Qe0mtiNFHQZ296vRgUjRCoPHPqH7VdTOrZx3g0T+pGA=
+github.com/pkg/xattr v0.4.10/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU=
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo=
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=
github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
@@ -1927,6 +1942,8 @@ go.opentelemetry.io/collector/pdata v1.12.0 h1:Xx5VK1p4VO0md8MWm2icwC1MnJ7f8EimK
go.opentelemetry.io/collector/pdata v1.12.0/go.mod h1:MYeB0MmMAxeM0hstCFrCqWLzdyeYySim2dG6pDT6nYI=
go.opentelemetry.io/collector/semconv v0.105.0 h1:8p6dZ3JfxFTjbY38d8xlQGB1TQ3nPUvs+D0RERniZ1g=
go.opentelemetry.io/collector/semconv v0.105.0/go.mod h1:yMVUCNoQPZVq/IPfrHrnntZTWsLf5YGZ7qwKulIl5hw=
+go.opentelemetry.io/contrib/detectors/gcp v1.29.0 h1:TiaiXB4DpGD3sdzNlYQxruQngn5Apwzi1X0DRhuGvDQ=
+go.opentelemetry.io/contrib/detectors/gcp v1.29.0/go.mod h1:GW2aWZNwR2ZxDLdv8OyC2G8zkRoQBuURgV7RPQgcPoU=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk=
@@ -2010,8 +2027,8 @@ golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0
golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
-golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A=
-golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70=
+golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw=
+golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -2139,8 +2156,8 @@ golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
-golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo=
-golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0=
+golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4=
+golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -2289,6 +2306,7 @@ golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -2310,8 +2328,8 @@ golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34=
-golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
+golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -2321,8 +2339,8 @@ golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4=
-golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM=
-golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8=
+golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24=
+golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M=
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -2340,8 +2358,8 @@ golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
-golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224=
-golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
+golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
+golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/time v0.0.0-20161028155119-f51c12702a4d/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -2350,8 +2368,8 @@ golang.org/x/time v0.0.0-20190921001708-c4c64cad1fd0/go.mod h1:tRJNPiyCQ0inRvYxb
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U=
-golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
+golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -2451,7 +2469,6 @@ gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6d
gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc=
google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
-google.golang.org/api v0.3.2/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
@@ -2497,8 +2514,8 @@ google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOI
google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI=
google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s=
-google.golang.org/api v0.197.0 h1:x6CwqQLsFiA5JKAiGyGBjc2bNtHtLddhJCE2IKuhhcQ=
-google.golang.org/api v0.197.0/go.mod h1:AuOuo20GoQ331nq7DquGHlU6d+2wN2fZ8O0ta60nRNw=
+google.golang.org/api v0.201.0 h1:+7AD9JNM3tREtawRMu8sOjSbb8VYcYXJG/2eEOmfDu0=
+google.golang.org/api v0.201.0/go.mod h1:HVY0FCHVs89xIW9fzf/pBvOEm+OolHa86G/txFezyq4=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -2609,12 +2626,12 @@ google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+S
google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
google.golang.org/genproto v0.0.0-20220921223823-23cae91e6737/go.mod h1:2r/26NEF3bFmT3eC3aZreahSal0C3Shl8Gi6vyDYqOQ=
-google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 h1:BulPr26Jqjnd4eYDVe+YvyR7Yc2vJGkO5/0UxD0/jZU=
-google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:hL97c3SYopEHblzpxRL4lSs523++l8DYxGM1FQiYmb4=
-google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1:hjSy6tcFQZ171igDaN5QHOw2n6vx40juYbC/x67CEhc=
-google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
+google.golang.org/genproto v0.0.0-20241007155032-5fefd90f89a9 h1:nFS3IivktIU5Mk6KQa+v6RKkHUpdQpphqGNLxqNnbEk=
+google.golang.org/genproto v0.0.0-20241007155032-5fefd90f89a9/go.mod h1:tEzYTYZxbmVNOu0OAFH9HzdJtLn6h4Aj89zzlBCdHms=
+google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f h1:jTm13A2itBi3La6yTGqn8bVSrc3ZZ1r8ENHlIXBfnRA=
+google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f/go.mod h1:CLGoBuH1VHxAUXVPP8FfPwPEVJB6lz3URE5mY2SuayE=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9 h1:QCqS/PdaHTSWGvupk2F/ehwHtGc0/GYkT+3GAcR1CCc=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI=
google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
@@ -2660,6 +2677,8 @@ google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCD
google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E=
google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
+google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a h1:UIpYSuWdWHSzjwcAFRLjKcPXFZVVLXGEM23W+NWqipw=
+google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a/go.mod h1:9i1T9n4ZinTUZGgzENMi8MDDgbGC5mqTS75JAv6xN3A=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -2675,8 +2694,8 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
-google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
+google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
+google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/DataDog/dd-trace-go.v1 v1.19.0/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
diff --git a/pkg/storage/chunk/client/gcp/fixtures.go b/pkg/storage/chunk/client/gcp/fixtures.go
index 3fc03fb6e0158..a9ab2d0afb127 100644
--- a/pkg/storage/chunk/client/gcp/fixtures.go
+++ b/pkg/storage/chunk/client/gcp/fixtures.go
@@ -49,19 +49,11 @@ func (f *fixture) Clients() (
}
f.gcssrv = fakestorage.NewServer(nil)
- /*
- // Note: fake-gcs-server upgrade does not work in the `dist` tooling builds.
- // Leave at v1.7.0 until the issue is resolved.
- // Example failure: https://github.com/grafana/loki/actions/runs/10744853958/job/29802951861
- // Open issue: https://github.com/fsouza/fake-gcs-server/issues/1739
- // Once the issue is resolved, this code block can be used to replace the
- // `CreateBucket` call below.
- opts := fakestorage.CreateBucketOpts{
- Name: "chunks",
- }
- f.gcssrv.CreateBucketWithOpts(opts)
- */
- f.gcssrv.CreateBucket("chunks")
+
+ opts := fakestorage.CreateBucketOpts{
+ Name: "chunks",
+ }
+ f.gcssrv.CreateBucketWithOpts(opts)
conn, err := grpc.NewClient(f.btsrv.Addr, grpc.WithTransportCredentials(insecure.NewCredentials()))
if err != nil {
diff --git a/vendor/cel.dev/expr/BUILD.bazel b/vendor/cel.dev/expr/BUILD.bazel
index f631b6df06d13..0bbe9ed7736c4 100644
--- a/vendor/cel.dev/expr/BUILD.bazel
+++ b/vendor/cel.dev/expr/BUILD.bazel
@@ -1,3 +1,34 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
package(default_visibility = ["//visibility:public"])
licenses(["notice"]) # Apache 2.0
+
+go_library(
+ name = "expr",
+ srcs = [
+ "checked.pb.go",
+ "eval.pb.go",
+ "explain.pb.go",
+ "syntax.pb.go",
+ "value.pb.go",
+ ],
+ importpath = "cel.dev/expr",
+ visibility = ["//visibility:public"],
+ deps = [
+ "//proto/cel/expr:google_rpc_status_go_proto",
+ "@org_golang_google_protobuf//reflect/protoreflect",
+ "@org_golang_google_protobuf//runtime/protoimpl",
+ "@org_golang_google_protobuf//types/known/anypb",
+ "@org_golang_google_protobuf//types/known/durationpb",
+ "@org_golang_google_protobuf//types/known/emptypb",
+ "@org_golang_google_protobuf//types/known/structpb",
+ "@org_golang_google_protobuf//types/known/timestamppb",
+ ],
+)
+
+alias(
+ name = "go_default_library",
+ actual = ":expr",
+ visibility = ["//visibility:public"],
+)
diff --git a/vendor/cloud.google.com/go/.release-please-manifest-individual.json b/vendor/cloud.google.com/go/.release-please-manifest-individual.json
index 841b543576d16..39ed1f94745e7 100644
--- a/vendor/cloud.google.com/go/.release-please-manifest-individual.json
+++ b/vendor/cloud.google.com/go/.release-please-manifest-individual.json
@@ -1,18 +1,18 @@
{
"ai": "0.8.2",
"aiplatform": "1.68.0",
- "auth": "0.8.1",
+ "auth": "0.9.7",
"auth/oauth2adapt": "0.2.4",
- "bigquery": "1.62.0",
- "bigtable": "1.29.0",
- "datastore": "1.17.1",
+ "bigquery": "1.63.1",
+ "bigtable": "1.33.0",
+ "datastore": "1.19.0",
"errorreporting": "0.3.1",
- "firestore": "1.16.0",
+ "firestore": "1.17.0",
"logging": "1.11.0",
"profiler": "0.4.1",
- "pubsub": "1.41.0",
+ "pubsub": "1.44.0",
"pubsublite": "1.8.2",
- "spanner": "1.66.0",
- "storage": "1.43.0",
- "vertexai": "0.12.0"
+ "spanner": "1.69.0",
+ "storage": "1.44.0",
+ "vertexai": "0.13.1"
}
diff --git a/vendor/cloud.google.com/go/.release-please-manifest-submodules.json b/vendor/cloud.google.com/go/.release-please-manifest-submodules.json
index 8ac2f38f99937..edbdcf47fd969 100644
--- a/vendor/cloud.google.com/go/.release-please-manifest-submodules.json
+++ b/vendor/cloud.google.com/go/.release-please-manifest-submodules.json
@@ -1,149 +1,150 @@
{
- "accessapproval": "1.7.12",
- "accesscontextmanager": "1.8.12",
- "advisorynotifications": "1.4.6",
- "alloydb": "1.10.7",
- "analytics": "0.24.0",
- "apigateway": "1.6.12",
- "apigeeconnect": "1.6.12",
- "apigeeregistry": "0.8.10",
- "apikeys": "1.1.12",
- "appengine": "1.8.12",
- "apphub": "0.1.6",
- "apps": "0.4.7",
- "area120": "0.8.12",
- "artifactregistry": "1.14.14",
- "asset": "1.19.6",
- "assuredworkloads": "1.11.12",
- "automl": "1.13.12",
- "backupdr": "1.0.4",
- "baremetalsolution": "1.2.11",
- "batch": "1.9.3",
- "beyondcorp": "1.0.11",
- "billing": "1.18.10",
- "binaryauthorization": "1.8.8",
- "certificatemanager": "1.8.6",
- "channel": "1.17.12",
- "chat": "0.3.1",
- "cloudbuild": "1.16.6",
- "cloudcontrolspartner": "1.0.4",
- "clouddms": "1.7.11",
- "cloudprofiler": "0.3.6",
- "cloudquotas": "1.0.4",
- "cloudtasks": "1.12.13",
- "commerce": "1.0.5",
- "compute": "1.27.5",
- "compute/metadata": "0.5.0",
- "confidentialcomputing": "1.6.1",
- "config": "1.0.5",
- "contactcenterinsights": "1.13.7",
- "container": "1.38.1",
- "containeranalysis": "0.12.2",
- "datacatalog": "1.21.1",
- "dataflow": "0.9.12",
- "dataform": "0.9.9",
- "datafusion": "1.7.12",
- "datalabeling": "0.8.12",
- "dataplex": "1.18.3",
- "dataproc": "2.5.4",
- "dataqna": "0.8.12",
- "datastream": "1.10.11",
- "deploy": "1.21.1",
- "developerconnect": "0.1.4",
- "dialogflow": "1.56.0",
- "discoveryengine": "1.12.0",
- "dlp": "1.17.0",
- "documentai": "1.32.0",
- "domains": "0.9.12",
- "edgecontainer": "1.2.6",
- "edgenetwork": "1.1.3",
- "essentialcontacts": "1.6.13",
- "eventarc": "1.13.11",
- "filestore": "1.8.8",
- "functions": "1.17.0",
- "gkebackup": "1.5.5",
- "gkeconnect": "0.8.12",
- "gkehub": "0.14.12",
- "gkemulticloud": "1.2.5",
- "grafeas": "0.3.10",
- "gsuiteaddons": "1.6.12",
- "iam": "1.1.13",
- "iap": "1.9.11",
- "identitytoolkit": "0.1.4",
- "ids": "1.4.12",
- "iot": "1.7.12",
- "kms": "1.18.5",
- "language": "1.13.1",
- "lifesciences": "0.9.12",
- "longrunning": "0.5.12",
- "managedidentities": "1.6.12",
- "managedkafka": "0.1.6",
- "maps": "1.11.7",
- "mediatranslation": "0.8.12",
- "memcache": "1.10.12",
- "metastore": "1.13.11",
- "migrationcenter": "1.0.5",
- "monitoring": "1.20.4",
- "netapp": "1.2.1",
- "networkconnectivity": "1.14.11",
- "networkmanagement": "1.13.7",
- "networksecurity": "0.9.12",
- "networkservices": "0.1.6",
- "notebooks": "1.11.10",
- "optimization": "1.6.10",
- "orchestration": "1.9.7",
- "orgpolicy": "1.12.8",
- "osconfig": "1.13.3",
- "oslogin": "1.13.8",
- "parallelstore": "0.5.1",
- "phishingprotection": "0.8.12",
- "policysimulator": "0.2.10",
- "policytroubleshooter": "1.10.10",
- "privatecatalog": "0.9.12",
- "privilegedaccessmanager": "0.1.1",
- "rapidmigrationassessment": "1.0.12",
- "recaptchaenterprise": "2.14.3",
- "recommendationengine": "0.8.12",
- "recommender": "1.12.8",
- "redis": "1.16.5",
- "resourcemanager": "1.9.12",
- "resourcesettings": "1.7.5",
- "retail": "1.17.5",
- "run": "1.4.1",
- "scheduler": "1.10.13",
- "secretmanager": "1.13.6",
- "securesourcemanager": "1.1.1",
- "security": "1.17.5",
- "securitycenter": "1.34.0",
- "securitycentermanagement": "1.0.4",
- "securityposture": "0.1.8",
- "servicecontrol": "1.13.7",
- "servicedirectory": "1.11.12",
- "servicehealth": "1.0.5",
- "servicemanagement": "1.9.13",
- "serviceusage": "1.8.11",
- "shell": "1.7.12",
- "shopping": "0.8.7",
- "speech": "1.24.1",
- "storageinsights": "1.0.12",
- "storagetransfer": "1.10.11",
- "streetview": "0.1.5",
- "support": "1.0.11",
- "talent": "1.6.13",
- "telcoautomation": "1.0.4",
- "texttospeech": "1.7.12",
- "tpu": "1.6.12",
- "trace": "1.10.12",
- "translate": "1.11.0",
- "video": "1.22.1",
- "videointelligence": "1.11.12",
- "vision": "2.8.7",
- "visionai": "0.2.5",
- "vmmigration": "1.7.12",
- "vmwareengine": "1.2.1",
- "vpcaccess": "1.7.12",
- "webrisk": "1.9.12",
- "websecurityscanner": "1.6.12",
- "workflows": "1.12.11",
- "workstations": "1.0.5"
+ "accessapproval": "1.8.1",
+ "accesscontextmanager": "1.9.1",
+ "advisorynotifications": "1.5.1",
+ "alloydb": "1.12.1",
+ "analytics": "0.25.1",
+ "apigateway": "1.7.1",
+ "apigeeconnect": "1.7.1",
+ "apigeeregistry": "0.9.1",
+ "apihub": "0.1.1",
+ "apikeys": "1.2.1",
+ "appengine": "1.9.1",
+ "apphub": "0.2.1",
+ "apps": "0.5.1",
+ "area120": "0.9.1",
+ "artifactregistry": "1.15.1",
+ "asset": "1.20.2",
+ "assuredworkloads": "1.12.1",
+ "automl": "1.14.1",
+ "backupdr": "1.1.1",
+ "baremetalsolution": "1.3.1",
+ "batch": "1.11.0",
+ "beyondcorp": "1.1.1",
+ "billing": "1.19.1",
+ "binaryauthorization": "1.9.1",
+ "certificatemanager": "1.9.1",
+ "channel": "1.18.1",
+ "chat": "0.6.0",
+ "cloudbuild": "1.18.0",
+ "cloudcontrolspartner": "1.2.0",
+ "clouddms": "1.8.1",
+ "cloudprofiler": "0.4.1",
+ "cloudquotas": "1.1.1",
+ "cloudtasks": "1.13.1",
+ "commerce": "1.1.1",
+ "compute": "1.28.1",
+ "compute/metadata": "0.5.2",
+ "confidentialcomputing": "1.7.1",
+ "config": "1.1.1",
+ "contactcenterinsights": "1.14.1",
+ "container": "1.40.0",
+ "containeranalysis": "0.13.1",
+ "datacatalog": "1.22.1",
+ "dataflow": "0.10.1",
+ "dataform": "0.10.1",
+ "datafusion": "1.8.1",
+ "datalabeling": "0.9.1",
+ "dataplex": "1.19.1",
+ "dataproc": "2.9.0",
+ "dataqna": "0.9.1",
+ "datastream": "1.11.1",
+ "deploy": "1.22.1",
+ "developerconnect": "0.2.1",
+ "dialogflow": "1.58.0",
+ "discoveryengine": "1.14.0",
+ "dlp": "1.19.0",
+ "documentai": "1.34.0",
+ "domains": "0.10.1",
+ "edgecontainer": "1.3.1",
+ "edgenetwork": "1.2.1",
+ "essentialcontacts": "1.7.1",
+ "eventarc": "1.14.1",
+ "filestore": "1.9.1",
+ "functions": "1.19.1",
+ "gkebackup": "1.6.1",
+ "gkeconnect": "0.11.1",
+ "gkehub": "0.15.1",
+ "gkemulticloud": "1.4.0",
+ "grafeas": "0.3.11",
+ "gsuiteaddons": "1.7.1",
+ "iam": "1.2.1",
+ "iap": "1.10.1",
+ "identitytoolkit": "0.2.1",
+ "ids": "1.5.1",
+ "iot": "1.8.1",
+ "kms": "1.20.0",
+ "language": "1.14.1",
+ "lifesciences": "0.10.1",
+ "longrunning": "0.6.1",
+ "managedidentities": "1.7.1",
+ "managedkafka": "0.2.1",
+ "maps": "1.14.0",
+ "mediatranslation": "0.9.1",
+ "memcache": "1.11.1",
+ "metastore": "1.14.1",
+ "migrationcenter": "1.1.1",
+ "monitoring": "1.21.1",
+ "netapp": "1.4.0",
+ "networkconnectivity": "1.15.1",
+ "networkmanagement": "1.14.1",
+ "networksecurity": "0.10.1",
+ "networkservices": "0.2.1",
+ "notebooks": "1.12.1",
+ "optimization": "1.7.1",
+ "orchestration": "1.11.0",
+ "orgpolicy": "1.14.0",
+ "osconfig": "1.14.1",
+ "oslogin": "1.14.1",
+ "parallelstore": "0.6.1",
+ "phishingprotection": "0.9.1",
+ "policysimulator": "0.3.1",
+ "policytroubleshooter": "1.11.1",
+ "privatecatalog": "0.10.1",
+ "privilegedaccessmanager": "0.2.1",
+ "rapidmigrationassessment": "1.1.1",
+ "recaptchaenterprise": "2.17.1",
+ "recommendationengine": "0.9.1",
+ "recommender": "1.13.1",
+ "redis": "1.17.1",
+ "resourcemanager": "1.10.1",
+ "resourcesettings": "1.8.1",
+ "retail": "1.18.1",
+ "run": "1.5.1",
+ "scheduler": "1.11.1",
+ "secretmanager": "1.14.1",
+ "securesourcemanager": "1.2.1",
+ "security": "1.18.1",
+ "securitycenter": "1.35.1",
+ "securitycentermanagement": "1.1.1",
+ "securityposture": "0.2.1",
+ "servicecontrol": "1.14.1",
+ "servicedirectory": "1.12.1",
+ "servicehealth": "1.1.1",
+ "servicemanagement": "1.10.1",
+ "serviceusage": "1.9.1",
+ "shell": "1.8.1",
+ "shopping": "0.10.0",
+ "speech": "1.25.1",
+ "storageinsights": "1.1.1",
+ "storagetransfer": "1.11.1",
+ "streetview": "0.2.1",
+ "support": "1.1.1",
+ "talent": "1.7.1",
+ "telcoautomation": "1.1.1",
+ "texttospeech": "1.8.1",
+ "tpu": "1.7.1",
+ "trace": "1.11.1",
+ "translate": "1.12.1",
+ "video": "1.23.1",
+ "videointelligence": "1.12.1",
+ "vision": "2.9.1",
+ "visionai": "0.4.1",
+ "vmmigration": "1.8.1",
+ "vmwareengine": "1.3.1",
+ "vpcaccess": "1.8.1",
+ "webrisk": "1.10.1",
+ "websecurityscanner": "1.7.1",
+ "workflows": "1.13.1",
+ "workstations": "1.1.1"
}
diff --git a/vendor/cloud.google.com/go/.release-please-manifest.json b/vendor/cloud.google.com/go/.release-please-manifest.json
index 7b1015e63fc8b..c8f1da56d86db 100644
--- a/vendor/cloud.google.com/go/.release-please-manifest.json
+++ b/vendor/cloud.google.com/go/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "0.115.1"
+ ".": "0.116.0"
}
diff --git a/vendor/cloud.google.com/go/CHANGES.md b/vendor/cloud.google.com/go/CHANGES.md
index d48e0cfba4798..adc725ca1a72d 100644
--- a/vendor/cloud.google.com/go/CHANGES.md
+++ b/vendor/cloud.google.com/go/CHANGES.md
@@ -1,5 +1,12 @@
# Changes
+## [0.116.0](https://github.com/googleapis/google-cloud-go/compare/v0.115.1...v0.116.0) (2024-10-09)
+
+
+### Features
+
+* **genai:** Add tokenizer package ([#10699](https://github.com/googleapis/google-cloud-go/issues/10699)) ([214af16](https://github.com/googleapis/google-cloud-go/commit/214af1604bf3837f68e96dbf81c1331b90c9375f))
+
## [0.115.1](https://github.com/googleapis/google-cloud-go/compare/v0.115.0...v0.115.1) (2024-08-13)
diff --git a/vendor/cloud.google.com/go/README.md b/vendor/cloud.google.com/go/README.md
index 99514979018e0..63db0209c7dbe 100644
--- a/vendor/cloud.google.com/go/README.md
+++ b/vendor/cloud.google.com/go/README.md
@@ -28,12 +28,16 @@ For an updated list of all of our released APIs please see our
## [Go Versions Supported](#supported-versions)
+**Note:** As of Jan 1, 2025 the Cloud Client Libraries for Go will support the
+two most-recent major Go releases -- the same [policy](https://go.dev/doc/devel/release#policy)
+the Go programming language follows.
+
Our libraries are compatible with at least the three most recent, major Go
releases. They are currently compatible with:
+- Go 1.23
- Go 1.22
- Go 1.21
-- Go 1.20
## Authorization
@@ -56,14 +60,14 @@ client, err := storage.NewClient(ctx, option.WithCredentialsFile("path/to/keyfil
```
You can exert more control over authorization by using the
-[`golang.org/x/oauth2`](https://pkg.go.dev/golang.org/x/oauth2) package to
-create an `oauth2.TokenSource`. Then pass
-[`option.WithTokenSource`](https://pkg.go.dev/google.golang.org/api/option#WithTokenSource)
+[credentials](https://pkg.go.dev/cloud.google.com/go/auth/credentials) package to
+create an [auth.Credentials](https://pkg.go.dev/cloud.google.com/go/auth#Credentials).
+Then pass [`option.WithAuthCredentials`](https://pkg.go.dev/google.golang.org/api/option#WithAuthCredentials)
to the `NewClient` function:
```go
-tokenSource := ...
-client, err := storage.NewClient(ctx, option.WithTokenSource(tokenSource))
+creds := ...
+client, err := storage.NewClient(ctx, option.WithAuthCredentials(creds))
```
## Contributing
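
To make the new flow concrete, here is a minimal end-to-end sketch of the credentials-based client construction described above (the bucket name and scope are placeholders; error handling is abbreviated):

```go
package main

import (
	"context"
	"log"

	"cloud.google.com/go/auth/credentials"
	"cloud.google.com/go/storage"
	"google.golang.org/api/option"
)

func main() {
	ctx := context.Background()
	// Detect Application Default Credentials, scoped for Cloud Storage.
	creds, err := credentials.DetectDefault(&credentials.DetectOptions{
		Scopes: []string{"https://www.googleapis.com/auth/devstorage.read_write"},
	})
	if err != nil {
		log.Fatal(err)
	}
	// auth.Credentials replaces the old oauth2.TokenSource plumbing.
	client, err := storage.NewClient(ctx, option.WithAuthCredentials(creds))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
	_ = client.Bucket("my-bucket") // placeholder bucket name
}
```
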
diff --git a/vendor/cloud.google.com/go/auth/CHANGES.md b/vendor/cloud.google.com/go/auth/CHANGES.md
index 5584c350b0a7e..c81df7392784a 100644
--- a/vendor/cloud.google.com/go/auth/CHANGES.md
+++ b/vendor/cloud.google.com/go/auth/CHANGES.md
@@ -1,5 +1,43 @@
# Changelog
+## [0.9.8](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.7...auth/v0.9.8) (2024-10-09)
+
+
+### Bug Fixes
+
+* **auth:** Restore OpenTelemetry handling in transports ([#10968](https://github.com/googleapis/google-cloud-go/issues/10968)) ([08c6d04](https://github.com/googleapis/google-cloud-go/commit/08c6d04901c1a20e219b2d86df41dbaa6d7d7b55)), refs [#10962](https://github.com/googleapis/google-cloud-go/issues/10962)
+* **auth:** Try talk to plaintext S2A if credentials can not be found for mTLS-S2A ([#10941](https://github.com/googleapis/google-cloud-go/issues/10941)) ([0f0bf2d](https://github.com/googleapis/google-cloud-go/commit/0f0bf2d18c97dd8b65bcf0099f0802b5631c6287))
+
+## [0.9.7](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.6...auth/v0.9.7) (2024-10-01)
+
+
+### Bug Fixes
+
+* **auth:** Restore support for non-default service accounts for DirectPath ([#10937](https://github.com/googleapis/google-cloud-go/issues/10937)) ([a38650e](https://github.com/googleapis/google-cloud-go/commit/a38650edbf420223077498cafa537aec74b37aad)), refs [#10907](https://github.com/googleapis/google-cloud-go/issues/10907)
+
+## [0.9.6](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.5...auth/v0.9.6) (2024-09-30)
+
+
+### Bug Fixes
+
+* **auth:** Make aws credentials provider retrieve fresh credentials ([#10920](https://github.com/googleapis/google-cloud-go/issues/10920)) ([250fbf8](https://github.com/googleapis/google-cloud-go/commit/250fbf87d858d865e399a241b7e537c4ff0c3dd8))
+
+## [0.9.5](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.4...auth/v0.9.5) (2024-09-25)
+
+
+### Bug Fixes
+
+* **auth:** Restore support for GOOGLE_CLOUD_UNIVERSE_DOMAIN env ([#10915](https://github.com/googleapis/google-cloud-go/issues/10915)) ([94caaaa](https://github.com/googleapis/google-cloud-go/commit/94caaaa061362d0e00ef6214afcc8a0a3e7ebfb2))
+* **auth:** Skip directpath credentials overwrite when it's not on GCE ([#10833](https://github.com/googleapis/google-cloud-go/issues/10833)) ([7e5e8d1](https://github.com/googleapis/google-cloud-go/commit/7e5e8d10b761b0a6e43e19a028528db361bc07b1))
+* **auth:** Use new context for non-blocking token refresh ([#10919](https://github.com/googleapis/google-cloud-go/issues/10919)) ([cf7102d](https://github.com/googleapis/google-cloud-go/commit/cf7102d33a21be1e5a9d47a49456b3a57c43b350))
+
+## [0.9.4](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.3...auth/v0.9.4) (2024-09-11)
+
+
+### Bug Fixes
+
+* **auth:** Enable self-signed JWT for non-GDU universe domain ([#10831](https://github.com/googleapis/google-cloud-go/issues/10831)) ([f9869f7](https://github.com/googleapis/google-cloud-go/commit/f9869f7903cfd34d1b97c25d0dc5669d2c5138e6))
+
## [0.9.3](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.2...auth/v0.9.3) (2024-09-03)
diff --git a/vendor/cloud.google.com/go/auth/README.md b/vendor/cloud.google.com/go/auth/README.md
index 36de276a0743e..6fe4f0763e318 100644
--- a/vendor/cloud.google.com/go/auth/README.md
+++ b/vendor/cloud.google.com/go/auth/README.md
@@ -1,4 +1,40 @@
-# auth
+# Google Auth Library for Go
-This module is currently EXPERIMENTAL and under active development. It is not
-yet intended to be used.
+[](https://pkg.go.dev/cloud.google.com/go/auth)
+
+## Install
+
+``` bash
+go get cloud.google.com/go/auth@latest
+```
+
+## Usage
+
+The most common way this library is used is transitively, by default, from any
+of our Go client libraries.
+
+### Notable use-cases
+
+- To create a credential directly please see examples in the
+ [credentials](https://pkg.go.dev/cloud.google.com/go/auth/credentials)
+ package.
+- To create an authenticated HTTP client please see examples in the
+ [httptransport](https://pkg.go.dev/cloud.google.com/go/auth/httptransport)
+ package.
+- To create an authenticated gRPC connection please see examples in the
+ [grpctransport](https://pkg.go.dev/cloud.google.com/go/auth/grpctransport)
+ package.
+- To create an ID token please see examples in the
+ [idtoken](https://pkg.go.dev/cloud.google.com/go/auth/credentials/idtoken)
+ package.
+
+## Contributing
+
+Contributions are welcome. Please, see the
+[CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md)
+document for details.
+
+Please note that this project is released with a Contributor Code of Conduct.
+By participating in this project you agree to abide by its terms.
+See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct)
+for more information.
diff --git a/vendor/cloud.google.com/go/auth/auth.go b/vendor/cloud.google.com/go/auth/auth.go
index bc37ea85fb51f..314bd292e3f5e 100644
--- a/vendor/cloud.google.com/go/auth/auth.go
+++ b/vendor/cloud.google.com/go/auth/auth.go
@@ -328,7 +328,9 @@ func (c *cachedTokenProvider) tokenNonBlocking(ctx context.Context) (*Token, err
defer c.mu.Unlock()
return c.cachedToken, nil
case stale:
- c.tokenAsync(ctx)
+ // Call tokenAsync with a new Context because the user-provided context
+ // may have a short timeout incompatible with async token refresh.
+ c.tokenAsync(context.Background())
// Return the stale token immediately to not block customer requests to Cloud services.
c.mu.Lock()
defer c.mu.Unlock()
@@ -343,13 +345,14 @@ func (c *cachedTokenProvider) tokenState() tokenState {
c.mu.Lock()
defer c.mu.Unlock()
t := c.cachedToken
+ now := timeNow()
if t == nil || t.Value == "" {
return invalid
} else if t.Expiry.IsZero() {
return fresh
- } else if timeNow().After(t.Expiry.Round(0)) {
+ } else if now.After(t.Expiry.Round(0)) {
return invalid
- } else if timeNow().After(t.Expiry.Round(0).Add(-c.expireEarly)) {
+ } else if now.After(t.Expiry.Round(0).Add(-c.expireEarly)) {
return stale
}
return fresh
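
The hoisted `now` sample above guarantees both boundary checks see the same instant. A simplified, standalone restatement of the three-state classification (a sketch, not the vendored code; expiry rounding omitted):

```go
package main

import (
	"fmt"
	"time"
)

type tokenState int

const (
	fresh   tokenState = iota // usable, no refresh needed
	stale                     // usable, but refresh asynchronously
	invalid                   // must refresh synchronously
)

// classify mirrors the shape of cachedTokenProvider.tokenState: one "now"
// sample is compared against expiry and the early-expiry boundary.
func classify(now, expiry time.Time, expireEarly time.Duration) tokenState {
	if expiry.IsZero() {
		return fresh // tokens without an expiry never go stale
	}
	if now.After(expiry) {
		return invalid
	}
	if now.After(expiry.Add(-expireEarly)) {
		return stale
	}
	return fresh
}

func main() {
	now := time.Now()
	fmt.Println(classify(now, now.Add(10*time.Minute), 3*time.Minute)) // fresh (0)
	fmt.Println(classify(now, now.Add(2*time.Minute), 3*time.Minute))  // stale (1)
	fmt.Println(classify(now, now.Add(-time.Minute), 3*time.Minute))   // invalid (2)
}
```
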
diff --git a/vendor/cloud.google.com/go/auth/credentials/filetypes.go b/vendor/cloud.google.com/go/auth/credentials/filetypes.go
index cf56b025a237b..6591b181132f7 100644
--- a/vendor/cloud.google.com/go/auth/credentials/filetypes.go
+++ b/vendor/cloud.google.com/go/auth/credentials/filetypes.go
@@ -124,8 +124,14 @@ func resolveUniverseDomain(optsUniverseDomain, fileUniverseDomain string) string
}
func handleServiceAccount(f *credsfile.ServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) {
+ ud := resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain)
if opts.UseSelfSignedJWT {
return configureSelfSignedJWT(f, opts)
+ } else if ud != "" && ud != internalauth.DefaultUniverseDomain {
+ // For non-GDU universe domains, token exchange is impossible and services
+ // must support self-signed JWTs.
+ opts.UseSelfSignedJWT = true
+ return configureSelfSignedJWT(f, opts)
}
opts2LO := &auth.Options2LO{
Email: f.ClientEmail,
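
A hedged sketch of a caller reaching the new branch; the universe domain below is hypothetical, and the field names match this vendored version of the credentials package:

```go
package main

import (
	"log"

	"cloud.google.com/go/auth/credentials"
)

func main() {
	creds, err := credentials.DetectDefault(&credentials.DetectOptions{
		CredentialsFile: "/path/to/sa.json", // service account JSON with "universe_domain" set
		Scopes:          []string{"https://www.googleapis.com/auth/cloud-platform"},
		UniverseDomain:  "example-universe.goog", // hypothetical non-GDU domain
		// UseSelfSignedJWT is forced on internally for non-GDU domains,
		// since token exchange is unavailable there.
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = creds
}
```
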
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go
index a34f6b06f8460..d8b5d4fdeb9e0 100644
--- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go
@@ -94,32 +94,30 @@ func (sp *awsSubjectProvider) subjectToken(ctx context.Context) (string, error)
if sp.RegionalCredVerificationURL == "" {
sp.RegionalCredVerificationURL = defaultRegionalCredentialVerificationURL
}
- if sp.requestSigner == nil {
- headers := make(map[string]string)
- if sp.shouldUseMetadataServer() {
- awsSessionToken, err := sp.getAWSSessionToken(ctx)
- if err != nil {
- return "", err
- }
-
- if awsSessionToken != "" {
- headers[awsIMDSv2SessionTokenHeader] = awsSessionToken
- }
- }
-
- awsSecurityCredentials, err := sp.getSecurityCredentials(ctx, headers)
+ headers := make(map[string]string)
+ if sp.shouldUseMetadataServer() {
+ awsSessionToken, err := sp.getAWSSessionToken(ctx)
if err != nil {
return "", err
}
- if sp.region, err = sp.getRegion(ctx, headers); err != nil {
- return "", err
- }
- sp.requestSigner = &awsRequestSigner{
- RegionName: sp.region,
- AwsSecurityCredentials: awsSecurityCredentials,
+
+ if awsSessionToken != "" {
+ headers[awsIMDSv2SessionTokenHeader] = awsSessionToken
}
}
+ awsSecurityCredentials, err := sp.getSecurityCredentials(ctx, headers)
+ if err != nil {
+ return "", err
+ }
+ if sp.region, err = sp.getRegion(ctx, headers); err != nil {
+ return "", err
+ }
+ sp.requestSigner = &awsRequestSigner{
+ RegionName: sp.region,
+ AwsSecurityCredentials: awsSecurityCredentials,
+ }
+
// Generate the signed request to AWS STS GetCallerIdentity API.
// Use the required regional endpoint. Otherwise, the request will fail.
req, err := http.NewRequestWithContext(ctx, "POST", strings.Replace(sp.RegionalCredVerificationURL, "{region}", sp.region, 1), nil)
diff --git a/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go b/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go
index b62a8ae4d5d70..6ae29de6c2789 100644
--- a/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go
+++ b/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go
@@ -17,6 +17,7 @@ package credentials
import (
"context"
"crypto/rsa"
+ "errors"
"fmt"
"strings"
"time"
@@ -35,6 +36,9 @@ var (
// configureSelfSignedJWT uses the private key in the service account to create
// a JWT without making a network call.
func configureSelfSignedJWT(f *credsfile.ServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) {
+ if len(opts.scopes()) == 0 && opts.Audience == "" {
+ return nil, errors.New("credentials: both scopes and audience are empty")
+ }
pk, err := internal.ParseKey([]byte(f.PrivateKey))
if err != nil {
return nil, fmt.Errorf("credentials: could not parse key: %w", err)
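
The new guard turns a silent misconfiguration into an immediate error: a self-signed JWT needs either scopes or an audience to be usable. A minimal sketch of the failing call (assuming detection reaches the service-account path):

```go
package main

import (
	"fmt"

	"cloud.google.com/go/auth/credentials"
)

func main() {
	// Neither Scopes nor Audience is set: the new guard rejects this up front
	// instead of minting an unusable JWT.
	_, err := credentials.DetectDefault(&credentials.DetectOptions{
		CredentialsFile:  "/path/to/sa.json",
		UseSelfSignedJWT: true,
	})
	fmt.Println(err) // credentials: both scopes and audience are empty
}
```
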
diff --git a/vendor/cloud.google.com/go/auth/grpctransport/directpath.go b/vendor/cloud.google.com/go/auth/grpctransport/directpath.go
index efc91c2b0c355..8696df1487fc6 100644
--- a/vendor/cloud.google.com/go/auth/grpctransport/directpath.go
+++ b/vendor/cloud.google.com/go/auth/grpctransport/directpath.go
@@ -22,7 +22,7 @@ import (
"strings"
"cloud.google.com/go/auth"
- "cloud.google.com/go/compute/metadata"
+ "cloud.google.com/go/auth/internal/compute"
"google.golang.org/grpc"
grpcgoogle "google.golang.org/grpc/credentials/google"
)
@@ -55,7 +55,7 @@ func checkDirectPathEndPoint(endpoint string) bool {
return true
}
-func isTokenProviderDirectPathCompatible(tp auth.TokenProvider, _ *Options) bool {
+func isTokenProviderDirectPathCompatible(tp auth.TokenProvider, o *Options) bool {
if tp == nil {
return false
}
@@ -66,6 +66,9 @@ func isTokenProviderDirectPathCompatible(tp auth.TokenProvider, _ *Options) bool
if tok == nil {
return false
}
+ if o.InternalOptions != nil && o.InternalOptions.EnableNonDefaultSAForDirectPath {
+ return true
+ }
if tok.MetadataString("auth.google.tokenSource") != "compute-metadata" {
return false
}
@@ -91,7 +94,7 @@ func isDirectPathXdsUsed(o *Options) bool {
// configuration allows the use of direct path. If it does not the provided
// grpcOpts and endpoint are returned.
func configureDirectPath(grpcOpts []grpc.DialOption, opts *Options, endpoint string, creds *auth.Credentials) ([]grpc.DialOption, string) {
- if isDirectPathEnabled(endpoint, opts) && metadata.OnGCE() && isTokenProviderDirectPathCompatible(creds, opts) {
+ if isDirectPathEnabled(endpoint, opts) && compute.OnComputeEngine() && isTokenProviderDirectPathCompatible(creds, opts) {
// Overwrite all of the previously specific DialOptions, DirectPath uses its own set of credentials and certificates.
grpcOpts = []grpc.DialOption{
grpc.WithCredentialsBundle(grpcgoogle.NewDefaultCredentialsWithOptions(grpcgoogle.DefaultCredentialsOptions{PerRPCCreds: &grpcCredentialsProvider{creds: creds}}))}
diff --git a/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go b/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go
index 21488e29f0b71..42d4cbe3062ed 100644
--- a/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go
+++ b/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go
@@ -22,15 +22,19 @@ import (
"errors"
"fmt"
"net/http"
+ "os"
+ "sync"
"cloud.google.com/go/auth"
"cloud.google.com/go/auth/credentials"
"cloud.google.com/go/auth/internal"
"cloud.google.com/go/auth/internal/transport"
"go.opencensus.io/plugin/ocgrpc"
+ "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
"google.golang.org/grpc"
grpccreds "google.golang.org/grpc/credentials"
grpcinsecure "google.golang.org/grpc/credentials/insecure"
+ "google.golang.org/grpc/stats"
)
const (
@@ -48,6 +52,27 @@ var (
timeoutDialerOption grpc.DialOption
)
+// otelStatsHandler is a singleton otelgrpc.clientHandler to be used across
+// all dial connections to avoid the memory leak documented in
+// https://github.com/open-telemetry/opentelemetry-go-contrib/issues/4226
+//
+// TODO: When this module depends on a version of otelgrpc containing the fix,
+// replace this singleton with inline usage for simplicity.
+// The fix should be in https://github.com/open-telemetry/opentelemetry-go/pull/5797.
+var (
+ initOtelStatsHandlerOnce sync.Once
+ otelStatsHandler stats.Handler
+)
+
+// otelGRPCStatsHandler returns singleton otelStatsHandler for reuse across all
+// dial connections.
+func otelGRPCStatsHandler() stats.Handler {
+ initOtelStatsHandlerOnce.Do(func() {
+ otelStatsHandler = otelgrpc.NewClientHandler()
+ })
+ return otelStatsHandler
+}
+
// ClientCertProvider is a function that returns a TLS client certificate to be
// used when opening TLS connections. It follows the same semantics as
// [crypto/tls.Config.GetClientCertificate].
@@ -294,6 +319,7 @@ func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, er
// gRPC stats handler.
// This assumes that gRPC options are processed in order, left to right.
grpcOpts = addOCStatsHandler(grpcOpts, opts)
+ grpcOpts = addOpenTelemetryStatsHandler(grpcOpts, opts)
grpcOpts = append(grpcOpts, opts.GRPCDialOpts...)
return grpc.NewClient(endpoint, grpcOpts...)
@@ -330,15 +356,23 @@ type grpcCredentialsProvider struct {
clientUniverseDomain string
}
-// getClientUniverseDomain returns the default service domain for a given Cloud universe.
-// The default value is "googleapis.com". This is the universe domain
-// configured for the client, which will be compared to the universe domain
-// that is separately configured for the credentials.
+// getClientUniverseDomain returns the default service domain for a given Cloud
+// universe, with the following precedence:
+//
+// 1. A non-empty option.WithUniverseDomain or similar client option.
+// 2. A non-empty environment variable GOOGLE_CLOUD_UNIVERSE_DOMAIN.
+// 3. The default value "googleapis.com".
+//
+// This is the universe domain configured for the client, which will be compared
+// to the universe domain that is separately configured for the credentials.
func (c *grpcCredentialsProvider) getClientUniverseDomain() string {
- if c.clientUniverseDomain == "" {
- return internal.DefaultUniverseDomain
+ if c.clientUniverseDomain != "" {
+ return c.clientUniverseDomain
+ }
+ if envUD := os.Getenv(internal.UniverseDomainEnvVar); envUD != "" {
+ return envUD
}
- return c.clientUniverseDomain
+ return internal.DefaultUniverseDomain
}
func (c *grpcCredentialsProvider) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
@@ -389,3 +423,10 @@ func addOCStatsHandler(dialOpts []grpc.DialOption, opts *Options) []grpc.DialOpt
}
return append(dialOpts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{}))
}
+
+func addOpenTelemetryStatsHandler(dialOpts []grpc.DialOption, opts *Options) []grpc.DialOption {
+ if opts.DisableTelemetry {
+ return dialOpts
+ }
+ return append(dialOpts, grpc.WithStatsHandler(otelGRPCStatsHandler()))
+}
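
The otelStatsHandler singleton above is plain sync.Once memoization, used so every dialed connection shares one stats handler instead of allocating a new one per dial (the source of the linked leak). The pattern in isolation:

```go
package main

import (
	"fmt"
	"sync"
)

// expensiveHandler stands in for otelgrpc.NewClientHandler(), which leaks
// memory when allocated once per connection (see the linked otel issue).
type expensiveHandler struct{}

var (
	once    sync.Once
	handler *expensiveHandler
)

// sharedHandler lazily builds the handler exactly once and reuses it.
func sharedHandler() *expensiveHandler {
	once.Do(func() { handler = &expensiveHandler{} })
	return handler
}

func main() {
	// Every dial reuses the same handler instance.
	fmt.Println(sharedHandler() == sharedHandler()) // true
}
```
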
diff --git a/vendor/cloud.google.com/go/auth/httptransport/transport.go b/vendor/cloud.google.com/go/auth/httptransport/transport.go
index 274bb01254c92..63498ee792be9 100644
--- a/vendor/cloud.google.com/go/auth/httptransport/transport.go
+++ b/vendor/cloud.google.com/go/auth/httptransport/transport.go
@@ -19,6 +19,7 @@ import (
"crypto/tls"
"net"
"net/http"
+ "os"
"time"
"cloud.google.com/go/auth"
@@ -27,6 +28,7 @@ import (
"cloud.google.com/go/auth/internal/transport"
"cloud.google.com/go/auth/internal/transport/cert"
"go.opencensus.io/plugin/ochttp"
+ "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
"golang.org/x/net/http2"
)
@@ -41,6 +43,9 @@ func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, err
headers: headers,
}
var trans http.RoundTripper = ht
+ // Give OpenTelemetry precedence over OpenCensus in case user configuration
+ // causes both to write the same header (`X-Cloud-Trace-Context`).
+ trans = addOpenTelemetryTransport(trans, opts)
trans = addOCTransport(trans, opts)
switch {
case opts.DisableAuthentication:
@@ -162,6 +167,13 @@ func (t *headerTransport) RoundTrip(req *http.Request) (*http.Response, error) {
return rt.RoundTrip(&newReq)
}
+func addOpenTelemetryTransport(trans http.RoundTripper, opts *Options) http.RoundTripper {
+ if opts.DisableTelemetry {
+ return trans
+ }
+ return otelhttp.NewTransport(trans)
+}
+
func addOCTransport(trans http.RoundTripper, opts *Options) http.RoundTripper {
if opts.DisableTelemetry {
return trans
@@ -178,13 +190,23 @@ type authTransport struct {
clientUniverseDomain string
}
-// getClientUniverseDomain returns the universe domain configured for the client.
-// The default value is "googleapis.com".
+// getClientUniverseDomain returns the default service domain for a given Cloud
+// universe, with the following precedence:
+//
+// 1. A non-empty option.WithUniverseDomain or similar client option.
+// 2. A non-empty environment variable GOOGLE_CLOUD_UNIVERSE_DOMAIN.
+// 3. The default value "googleapis.com".
+//
+// This is the universe domain configured for the client, which will be compared
+// to the universe domain that is separately configured for the credentials.
func (t *authTransport) getClientUniverseDomain() string {
- if t.clientUniverseDomain == "" {
- return internal.DefaultUniverseDomain
+ if t.clientUniverseDomain != "" {
+ return t.clientUniverseDomain
+ }
+ if envUD := os.Getenv(internal.UniverseDomainEnvVar); envUD != "" {
+ return envUD
}
- return t.clientUniverseDomain
+ return internal.DefaultUniverseDomain
}
// RoundTrip authorizes and authenticates the request with an
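
Both the gRPC and HTTP transports now resolve the client universe domain with the same three-step precedence. Restated as a standalone helper for clarity (a sketch, not the vendored method):

```go
package main

import (
	"fmt"
	"os"
)

const defaultUniverseDomain = "googleapis.com"

// resolveUniverseDomain mirrors the documented precedence: explicit client
// option, then GOOGLE_CLOUD_UNIVERSE_DOMAIN, then the default.
func resolveUniverseDomain(optionValue string) string {
	if optionValue != "" {
		return optionValue
	}
	if env := os.Getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN"); env != "" {
		return env
	}
	return defaultUniverseDomain
}

func main() {
	os.Setenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN", "example-universe.goog")
	fmt.Println(resolveUniverseDomain(""))                 // example-universe.goog
	fmt.Println(resolveUniverseDomain("override.example")) // override.example
}
```
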
diff --git a/vendor/cloud.google.com/go/auth/internal/compute/compute.go b/vendor/cloud.google.com/go/auth/internal/compute/compute.go
new file mode 100644
index 0000000000000..651bd61fbbc24
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/compute/compute.go
@@ -0,0 +1,66 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package compute
+
+import (
+ "log"
+ "runtime"
+ "strings"
+ "sync"
+)
+
+var (
+ vmOnGCEOnce sync.Once
+ vmOnGCE bool
+)
+
+// OnComputeEngine returns whether the client is running on GCE.
+//
+// This is a copy of the gRPC internal googlecloud.OnGCE() func at:
+// https://github.com/grpc/grpc-go/blob/master/internal/googlecloud/googlecloud.go
+// The functionality is similar to the metadata.OnGCE() func at:
+// https://github.com/xmenxk/google-cloud-go/blob/main/compute/metadata/metadata.go
+//
+// The difference is that OnComputeEngine() does not perform an HTTP or DNS check on the metadata server.
+// In particular, OnComputeEngine() will return false on Serverless.
+func OnComputeEngine() bool {
+ vmOnGCEOnce.Do(func() {
+ mf, err := manufacturer()
+ if err != nil {
+ log.Printf("Failed to read manufacturer, vmOnGCE=false: %v", err)
+ return
+ }
+ vmOnGCE = isRunningOnGCE(mf, runtime.GOOS)
+ })
+ return vmOnGCE
+}
+
+// isRunningOnGCE checks whether the local system, without doing a network request, is
+// running on GCP.
+func isRunningOnGCE(manufacturer []byte, goos string) bool {
+ name := string(manufacturer)
+ switch goos {
+ case "linux":
+ name = strings.TrimSpace(name)
+ return name == "Google" || name == "Google Compute Engine"
+ case "windows":
+ name = strings.Replace(name, " ", "", -1)
+ name = strings.Replace(name, "\n", "", -1)
+ name = strings.Replace(name, "\r", "", -1)
+ return name == "Google"
+ default:
+ return false
+ }
+}
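
To show which manufacturer strings the new check accepts, here is the Linux/Windows matching logic restated as a runnable snippet (the vendored package is internal, so this reproduces the helper rather than importing it):

```go
package main

import (
	"fmt"
	"strings"
)

// isRunningOnGCE restates the check added above: on Linux the trimmed DMI
// product name must be exactly "Google" or "Google Compute Engine".
func isRunningOnGCE(manufacturer []byte, goos string) bool {
	name := string(manufacturer)
	switch goos {
	case "linux":
		name = strings.TrimSpace(name)
		return name == "Google" || name == "Google Compute Engine"
	case "windows":
		name = strings.Replace(name, " ", "", -1)
		name = strings.Replace(name, "\n", "", -1)
		name = strings.Replace(name, "\r", "", -1)
		return name == "Google"
	default:
		return false
	}
}

func main() {
	fmt.Println(isRunningOnGCE([]byte("Google Compute Engine\n"), "linux")) // true
	fmt.Println(isRunningOnGCE([]byte("QEMU\n"), "linux"))                  // false
	fmt.Println(isRunningOnGCE([]byte("Google\r\n"), "windows"))            // true
}
```
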
diff --git a/vendor/cloud.google.com/go/auth/internal/compute/manufacturer.go b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer.go
new file mode 100644
index 0000000000000..af490bf4f490c
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer.go
@@ -0,0 +1,22 @@
+//go:build !(linux || windows)
+// +build !linux,!windows
+
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package compute
+
+func manufacturer() ([]byte, error) {
+ return nil, nil
+}
diff --git a/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_linux.go b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_linux.go
new file mode 100644
index 0000000000000..d92178df86c27
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_linux.go
@@ -0,0 +1,23 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package compute
+
+import "os"
+
+const linuxProductNameFile = "/sys/class/dmi/id/product_name"
+
+func manufacturer() ([]byte, error) {
+ return os.ReadFile(linuxProductNameFile)
+}
diff --git a/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_windows.go b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_windows.go
new file mode 100644
index 0000000000000..16be9df3064bd
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_windows.go
@@ -0,0 +1,46 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package compute
+
+import (
+ "errors"
+ "os/exec"
+ "regexp"
+ "strings"
+)
+
+const (
+ windowsCheckCommand = "powershell.exe"
+ windowsCheckCommandArgs = "Get-WmiObject -Class Win32_BIOS"
+ powershellOutputFilter = "Manufacturer"
+ windowsManufacturerRegex = ":(.*)"
+)
+
+func manufacturer() ([]byte, error) {
+ cmd := exec.Command(windowsCheckCommand, windowsCheckCommandArgs)
+ out, err := cmd.Output()
+ if err != nil {
+ return nil, err
+ }
+ for _, line := range strings.Split(strings.TrimSuffix(string(out), "\n"), "\n") {
+ if strings.HasPrefix(line, powershellOutputFilter) {
+ re := regexp.MustCompile(windowsManufacturerRegex)
+ name := re.FindString(line)
+ name = strings.TrimLeft(name, ":")
+ return []byte(name), nil
+ }
+ }
+ return nil, errors.New("cannot determine the machine's manufacturer")
+}
diff --git a/vendor/cloud.google.com/go/auth/internal/internal.go b/vendor/cloud.google.com/go/auth/internal/internal.go
index 4308345eda335..66a51f19c73e4 100644
--- a/vendor/cloud.google.com/go/auth/internal/internal.go
+++ b/vendor/cloud.google.com/go/auth/internal/internal.go
@@ -38,8 +38,11 @@ const (
// QuotaProjectEnvVar is the environment variable for setting the quota
// project.
QuotaProjectEnvVar = "GOOGLE_CLOUD_QUOTA_PROJECT"
- projectEnvVar = "GOOGLE_CLOUD_PROJECT"
- maxBodySize = 1 << 20
+ // UniverseDomainEnvVar is the environment variable for setting the default
+ // service domain for a given Cloud universe.
+ UniverseDomainEnvVar = "GOOGLE_CLOUD_UNIVERSE_DOMAIN"
+ projectEnvVar = "GOOGLE_CLOUD_PROJECT"
+ maxBodySize = 1 << 20
// DefaultUniverseDomain is the default value for universe domain.
// Universe domain is the default service domain for a given Cloud universe.
diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cba.go b/vendor/cloud.google.com/go/auth/internal/transport/cba.go
index 26e037c1a374b..f606888f12048 100644
--- a/vendor/cloud.google.com/go/auth/internal/transport/cba.go
+++ b/vendor/cloud.google.com/go/auth/internal/transport/cba.go
@@ -133,7 +133,11 @@ func GetGRPCTransportCredsAndEndpoint(opts *Options) (credentials.TransportCrede
transportCredsForS2A, err = loadMTLSMDSTransportCreds(mtlsMDSRoot, mtlsMDSKey)
if err != nil {
log.Printf("Loading MTLS MDS credentials failed: %v", err)
- return defaultTransportCreds, config.endpoint, nil
+ if config.s2aAddress != "" {
+ s2aAddr = config.s2aAddress
+ } else {
+ return defaultTransportCreds, config.endpoint, nil
+ }
}
} else if config.s2aAddress != "" {
s2aAddr = config.s2aAddress
@@ -177,7 +181,11 @@ func GetHTTPTransportConfig(opts *Options) (cert.Provider, func(context.Context,
transportCredsForS2A, err = loadMTLSMDSTransportCreds(mtlsMDSRoot, mtlsMDSKey)
if err != nil {
log.Printf("Loading MTLS MDS credentials failed: %v", err)
- return config.clientCertSource, nil, nil
+ if config.s2aAddress != "" {
+ s2aAddr = config.s2aAddress
+ } else {
+ return config.clientCertSource, nil, nil
+ }
}
} else if config.s2aAddress != "" {
s2aAddr = config.s2aAddress
diff --git a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md
index 9594e1e2793c6..da7db19b1c6d1 100644
--- a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md
+++ b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md
@@ -1,5 +1,19 @@
# Changes
+## [0.5.2](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.5.1...compute/metadata/v0.5.2) (2024-09-20)
+
+
+### Bug Fixes
+
+* **compute/metadata:** Close Response Body for failed request ([#10891](https://github.com/googleapis/google-cloud-go/issues/10891)) ([e91d45e](https://github.com/googleapis/google-cloud-go/commit/e91d45e4757a9e354114509ba9800085d9e0ff1f))
+
+## [0.5.1](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.5.0...compute/metadata/v0.5.1) (2024-09-12)
+
+
+### Bug Fixes
+
+* **compute/metadata:** Check error chain for retryable error ([#10840](https://github.com/googleapis/google-cloud-go/issues/10840)) ([2bdedef](https://github.com/googleapis/google-cloud-go/commit/2bdedeff621b223d63cebc4355fcf83bc68412cd))
+
## [0.5.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.4.0...compute/metadata/v0.5.0) (2024-07-10)
diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go
index 345080b729790..c160b4786bbdf 100644
--- a/vendor/cloud.google.com/go/compute/metadata/metadata.go
+++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go
@@ -456,6 +456,9 @@ func (c *Client) getETag(ctx context.Context, suffix string) (value, etag string
code = res.StatusCode
}
if delay, shouldRetry := retryer.Retry(code, reqErr); shouldRetry {
+ if res != nil && res.Body != nil {
+ res.Body.Close()
+ }
if err := sleep(ctx, delay); err != nil {
return "", "", err
}
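
The one-line fix above matters because an unread, unclosed response body pins its connection in the keep-alive pool across every retry. The general shape of the pattern, as a hedged sketch (a fixed backoff stands in for the client's real retryer):

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

// getWithRetry illustrates close-before-retry: any response that will not be
// returned to the caller has its body closed before the next attempt.
func getWithRetry(ctx context.Context, url string, attempts int) (*http.Response, error) {
	var lastErr error
	for i := 0; i < attempts; i++ {
		req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
		if err != nil {
			return nil, err
		}
		res, err := http.DefaultClient.Do(req)
		if err == nil && res.StatusCode < 500 {
			return res, nil // caller owns the body from here
		}
		if res != nil && res.Body != nil {
			res.Body.Close() // release the connection before retrying
		}
		lastErr = err
		select {
		case <-time.After(100 * time.Millisecond): // stand-in for the real backoff
		case <-ctx.Done():
			return nil, ctx.Err()
		}
	}
	return nil, fmt.Errorf("all %d attempts failed, last error: %v", attempts, lastErr)
}

func main() {
	if res, err := getWithRetry(context.Background(), "http://metadata.google.internal/", 2); err == nil {
		res.Body.Close()
	}
}
```
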
diff --git a/vendor/cloud.google.com/go/compute/metadata/retry_linux.go b/vendor/cloud.google.com/go/compute/metadata/retry_linux.go
index bb412f8917e31..2e53f01230090 100644
--- a/vendor/cloud.google.com/go/compute/metadata/retry_linux.go
+++ b/vendor/cloud.google.com/go/compute/metadata/retry_linux.go
@@ -17,10 +17,15 @@
package metadata
-import "syscall"
+import (
+ "errors"
+ "syscall"
+)
func init() {
// Initialize syscallRetryable to return true on transient socket-level
// errors. These errors are specific to Linux.
- syscallRetryable = func(err error) bool { return err == syscall.ECONNRESET || err == syscall.ECONNREFUSED }
+ syscallRetryable = func(err error) bool {
+ return errors.Is(err, syscall.ECONNRESET) || errors.Is(err, syscall.ECONNREFUSED)
+ }
}
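
The move from `==` to errors.Is is load-bearing: syscall errors from the net package arrive wrapped (typically *net.OpError around *os.SyscallError), and plain equality never sees through the wrapping. A small unix-flavored demonstration:

```go
package main

import (
	"errors"
	"fmt"
	"net"
	"os"
	"syscall"
)

func main() {
	// How a connection reset typically surfaces from the net package:
	// the Errno is wrapped twice.
	var err error = &net.OpError{
		Op:  "read",
		Net: "tcp",
		Err: os.NewSyscallError("read", syscall.ECONNRESET),
	}

	fmt.Println(err == syscall.ECONNRESET)          // false: compares only the outer value
	fmt.Println(errors.Is(err, syscall.ECONNRESET)) // true: walks the Unwrap chain
}
```
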
diff --git a/vendor/cloud.google.com/go/doc.go b/vendor/cloud.google.com/go/doc.go
index 133ff68553f70..8644f614c864c 100644
--- a/vendor/cloud.google.com/go/doc.go
+++ b/vendor/cloud.google.com/go/doc.go
@@ -79,12 +79,15 @@ are also provided in all auto-generated libraries: for example,
cloud.google.com/go/secretmanager/apiv1 provides DefaultAuthScopes. Example:
ctx := context.Background()
- // https://pkg.go.dev/golang.org/x/oauth2/google
- creds, err := google.CredentialsFromJSON(ctx, []byte("JSON creds"), secretmanager.DefaultAuthScopes()...)
+ // https://pkg.go.dev/cloud.google.com/go/auth/credentials
+ creds, err := credentials.DetectDefault(&credentials.DetectOptions{
+ Scopes: secretmanager.DefaultAuthScopes(),
+		CredentialsJSON: []byte("JSON creds"),
+	})
if err != nil {
// TODO: handle error.
}
- client, err := secretmanager.NewClient(ctx, option.WithCredentials(creds))
+ client, err := secretmanager.NewClient(ctx, option.WithAuthCredentials(creds))
if err != nil {
// TODO: handle error.
}
diff --git a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json
index 54acae7cdc2ed..6b58b6a6f3b55 100644
--- a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json
+++ b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json
@@ -179,6 +179,16 @@
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/apihub/apiv1": {
+ "api_shortname": "apihub",
+ "distribution_name": "cloud.google.com/go/apihub/apiv1",
+ "description": "API hub API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apihub/latest/apiv1",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/apikeys/apiv2": {
"api_shortname": "apikeys",
"distribution_name": "cloud.google.com/go/apikeys/apiv2",
@@ -559,6 +569,16 @@
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/bigquery/storage/apiv1alpha": {
+ "api_shortname": "bigquerystorage",
+ "distribution_name": "cloud.google.com/go/bigquery/storage/apiv1alpha",
+ "description": "BigQuery Storage API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/storage/apiv1alpha",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/bigquery/storage/apiv1beta1": {
"api_shortname": "bigquerystorage",
"distribution_name": "cloud.google.com/go/bigquery/storage/apiv1beta1",
@@ -1009,6 +1029,16 @@
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/datastore/apiv1": {
+ "api_shortname": "datastore",
+ "distribution_name": "cloud.google.com/go/datastore/apiv1",
+ "description": "Cloud Datastore API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datastore/latest/apiv1",
+ "release_level": "stable",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/datastream/apiv1": {
"api_shortname": "datastream",
"distribution_name": "cloud.google.com/go/datastream/apiv1",
@@ -1329,6 +1359,16 @@
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/gkeconnect/gateway/apiv1": {
+ "api_shortname": "connectgateway",
+ "distribution_name": "cloud.google.com/go/gkeconnect/gateway/apiv1",
+ "description": "Connect Gateway API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gkeconnect/latest/gateway/apiv1",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/gkeconnect/gateway/apiv1beta1": {
"api_shortname": "connectgateway",
"distribution_name": "cloud.google.com/go/gkeconnect/gateway/apiv1beta1",
@@ -1352,7 +1392,7 @@
"cloud.google.com/go/gkemulticloud/apiv1": {
"api_shortname": "gkemulticloud",
"distribution_name": "cloud.google.com/go/gkemulticloud/apiv1",
- "description": "Anthos Multi-Cloud API",
+ "description": "GKE Multi-Cloud API",
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gkemulticloud/latest/apiv1",
@@ -1552,7 +1592,7 @@
"cloud.google.com/go/managedkafka/apiv1": {
"api_shortname": "managedkafka",
"distribution_name": "cloud.google.com/go/managedkafka/apiv1",
- "description": "Apache Kafka for BigQuery API",
+ "description": "Managed Service for Apache Kafka API",
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/managedkafka/latest/apiv1",
@@ -1569,6 +1609,16 @@
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/maps/areainsights/apiv1": {
+ "api_shortname": "areainsights",
+ "distribution_name": "cloud.google.com/go/maps/areainsights/apiv1",
+ "description": "Places Insights API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/areainsights/apiv1",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/maps/fleetengine/apiv1": {
"api_shortname": "fleetengine",
"distribution_name": "cloud.google.com/go/maps/fleetengine/apiv1",
diff --git a/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json b/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json
index 1e924a8340e69..73021df5391d6 100644
--- a/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json
+++ b/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json
@@ -27,6 +27,9 @@
"apigeeregistry": {
"component": "apigeeregistry"
},
+ "apihub": {
+ "component": "apihub"
+ },
"apikeys": {
"component": "apikeys"
},
diff --git a/vendor/cloud.google.com/go/storage/CHANGES.md b/vendor/cloud.google.com/go/storage/CHANGES.md
index e9fb55585b97c..0e2e2e6948f85 100644
--- a/vendor/cloud.google.com/go/storage/CHANGES.md
+++ b/vendor/cloud.google.com/go/storage/CHANGES.md
@@ -1,6 +1,51 @@
# Changes
+## [1.44.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.43.0...storage/v1.44.0) (2024-10-03)
+
+
+### Features
+
+* **storage/dataflux:** Add dataflux interface ([#10748](https://github.com/googleapis/google-cloud-go/issues/10748)) ([cb7b0a1](https://github.com/googleapis/google-cloud-go/commit/cb7b0a1b285de9d4182155a123747419232dd35f))
+* **storage/dataflux:** Add range_splitter [#10748](https://github.com/googleapis/google-cloud-go/issues/10748) ([#10899](https://github.com/googleapis/google-cloud-go/issues/10899)) ([d49da26](https://github.com/googleapis/google-cloud-go/commit/d49da26be7dc52fad37c392c2876f62b1a5625a2))
+* **storage/dataflux:** Add worksteal algorithm to fast-listing ([#10913](https://github.com/googleapis/google-cloud-go/issues/10913)) ([015b52c](https://github.com/googleapis/google-cloud-go/commit/015b52c345df75408be3edcfda96d37145794f9f))
+* **storage/internal:** Add managed folder to testIamPermissions method ([2f0aec8](https://github.com/googleapis/google-cloud-go/commit/2f0aec894179304d234be6c792d82cf4336b6d0a))
+* **storage/transfermanager:** Add option to StripPrefix on directory download ([#10894](https://github.com/googleapis/google-cloud-go/issues/10894)) ([607534c](https://github.com/googleapis/google-cloud-go/commit/607534cdd5edf2d15d3de891cf6a0b6cbaa7d545))
+* **storage/transfermanager:** Add SkipIfExists option ([#10893](https://github.com/googleapis/google-cloud-go/issues/10893)) ([7daa1bd](https://github.com/googleapis/google-cloud-go/commit/7daa1bdc78844adac80f6378b1f6f2dd415b80a8))
+* **storage/transfermanager:** Checksum full object downloads ([#10569](https://github.com/googleapis/google-cloud-go/issues/10569)) ([c366c90](https://github.com/googleapis/google-cloud-go/commit/c366c908534ef09442f1f3e8a4f74bd545a474fb))
+* **storage:** Add direct google access side-effect imports by default ([#10757](https://github.com/googleapis/google-cloud-go/issues/10757)) ([9ad8324](https://github.com/googleapis/google-cloud-go/commit/9ad83248a7049c82580bc45d9685c329811bce88))
+* **storage:** Add full object checksum to reader.Attrs ([#10538](https://github.com/googleapis/google-cloud-go/issues/10538)) ([245d2ea](https://github.com/googleapis/google-cloud-go/commit/245d2eaddb4862da7c8d1892d5d462bf390adb2b))
+* **storage:** Add support for Go 1.23 iterators ([84461c0](https://github.com/googleapis/google-cloud-go/commit/84461c0ba464ec2f951987ba60030e37c8a8fc18))
+* **storage:** Add update time in bucketAttrs ([#10710](https://github.com/googleapis/google-cloud-go/issues/10710)) ([5f06ae1](https://github.com/googleapis/google-cloud-go/commit/5f06ae1a331c46ded47c96c205b3f1be92d64d29)), refs [#9361](https://github.com/googleapis/google-cloud-go/issues/9361)
+* **storage:** GA gRPC client ([#10859](https://github.com/googleapis/google-cloud-go/issues/10859)) ([c7a55a2](https://github.com/googleapis/google-cloud-go/commit/c7a55a26c645905317fe27505d503c338f50ee34))
+* **storage:** Introduce gRPC client-side metrics ([#10639](https://github.com/googleapis/google-cloud-go/issues/10639)) ([437bcb1](https://github.com/googleapis/google-cloud-go/commit/437bcb1e0b514959648eed36ba3963aa4fbeffc8))
+* **storage:** Support IncludeFoldersAsPrefixes for gRPC ([#10767](https://github.com/googleapis/google-cloud-go/issues/10767)) ([65bcc59](https://github.com/googleapis/google-cloud-go/commit/65bcc59a6c0753f8fbd66c8792bc69300e95ec62))
+
+
+### Bug Fixes
+
+* **storage/transfermanager:** Correct Attrs.StartOffset for sharded downloads ([#10512](https://github.com/googleapis/google-cloud-go/issues/10512)) ([01a5cbb](https://github.com/googleapis/google-cloud-go/commit/01a5cbba6d9d9f425f045b58fa16d8c85804c29c))
+* **storage:** Add retryalways policy to encryption test ([#10644](https://github.com/googleapis/google-cloud-go/issues/10644)) ([59cfd12](https://github.com/googleapis/google-cloud-go/commit/59cfd12ce5650279c99787da4a273db1e3253c76)), refs [#10567](https://github.com/googleapis/google-cloud-go/issues/10567)
+* **storage:** Add unknown host to retriable errors ([#10619](https://github.com/googleapis/google-cloud-go/issues/10619)) ([4ec0452](https://github.com/googleapis/google-cloud-go/commit/4ec0452a393341b1036ac6e1e7287843f097d978))
+* **storage:** Bump dependencies ([2ddeb15](https://github.com/googleapis/google-cloud-go/commit/2ddeb1544a53188a7592046b98913982f1b0cf04))
+* **storage:** Bump google.golang.org/[email protected] ([8ecc4e9](https://github.com/googleapis/google-cloud-go/commit/8ecc4e9622e5bbe9b90384d5848ab816027226c5))
+* **storage:** Check for grpc NotFound error in HMAC test ([#10645](https://github.com/googleapis/google-cloud-go/issues/10645)) ([3c8e88a](https://github.com/googleapis/google-cloud-go/commit/3c8e88a085bab3142dfff6ef9a8e49c29a5c877d))
+* **storage:** Disable grpc metrics using emulator ([#10870](https://github.com/googleapis/google-cloud-go/issues/10870)) ([35ad73d](https://github.com/googleapis/google-cloud-go/commit/35ad73d3be5485ac592e2ef1ea6c0854f1eff4a0))
+* **storage:** Retry gRPC DEADLINE_EXCEEDED errors ([#10635](https://github.com/googleapis/google-cloud-go/issues/10635)) ([0018415](https://github.com/googleapis/google-cloud-go/commit/0018415295a5fd964b923db6a4785e9eed46a2e2))
+* **storage:** Update dependencies ([257c40b](https://github.com/googleapis/google-cloud-go/commit/257c40bd6d7e59730017cf32bda8823d7a232758))
+* **storage:** Update google.golang.org/api to v0.191.0 ([5b32644](https://github.com/googleapis/google-cloud-go/commit/5b32644eb82eb6bd6021f80b4fad471c60fb9d73))
+
+
+### Performance Improvements
+
+* **storage:** GRPC zerocopy codec ([#10888](https://github.com/googleapis/google-cloud-go/issues/10888)) ([aeba28f](https://github.com/googleapis/google-cloud-go/commit/aeba28ffffcd82ac5540e45247112bdacc5c530d))
+
+
+### Documentation
+
+* **storage/internal:** Clarify possible objectAccessControl roles ([2f0aec8](https://github.com/googleapis/google-cloud-go/commit/2f0aec894179304d234be6c792d82cf4336b6d0a))
+* **storage/internal:** Update dual-region bucket link ([2f0aec8](https://github.com/googleapis/google-cloud-go/commit/2f0aec894179304d234be6c792d82cf4336b6d0a))
+
## [1.43.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.42.0...storage/v1.43.0) (2024-07-03)
diff --git a/vendor/cloud.google.com/go/storage/bucket.go b/vendor/cloud.google.com/go/storage/bucket.go
index d582a60d0e837..3eded017831eb 100644
--- a/vendor/cloud.google.com/go/storage/bucket.go
+++ b/vendor/cloud.google.com/go/storage/bucket.go
@@ -416,6 +416,10 @@ type BucketAttrs struct {
// This field is read-only.
Created time.Time
+ // Updated is the time at which the bucket was last modified.
+ // This field is read-only.
+ Updated time.Time
+
// VersioningEnabled reports whether this bucket has versioning enabled.
VersioningEnabled bool
@@ -824,6 +828,7 @@ func newBucket(b *raw.Bucket) (*BucketAttrs, error) {
DefaultEventBasedHold: b.DefaultEventBasedHold,
StorageClass: b.StorageClass,
Created: convertTime(b.TimeCreated),
+ Updated: convertTime(b.Updated),
VersioningEnabled: b.Versioning != nil && b.Versioning.Enabled,
ACL: toBucketACLRules(b.Acl),
DefaultObjectACL: toObjectACLRules(b.DefaultObjectAcl),
@@ -861,6 +866,7 @@ func newBucketFromProto(b *storagepb.Bucket) *BucketAttrs {
DefaultEventBasedHold: b.GetDefaultEventBasedHold(),
StorageClass: b.GetStorageClass(),
Created: b.GetCreateTime().AsTime(),
+ Updated: b.GetUpdateTime().AsTime(),
VersioningEnabled: b.GetVersioning().GetEnabled(),
ACL: toBucketACLRulesFromProto(b.GetAcl()),
DefaultObjectACL: toObjectACLRulesFromProto(b.GetDefaultObjectAcl()),
diff --git a/vendor/cloud.google.com/go/storage/client.go b/vendor/cloud.google.com/go/storage/client.go
index bbe89276a432c..aebba22517579 100644
--- a/vendor/cloud.google.com/go/storage/client.go
+++ b/vendor/cloud.google.com/go/storage/client.go
@@ -122,7 +122,7 @@ type settings struct {
gax []gax.CallOption
// idempotent indicates if the call is idempotent or not when considering
- // if the call should be retired or not.
+ // if the call should be retried or not.
idempotent bool
// clientOption is a set of option.ClientOption to be used during client
@@ -132,6 +132,8 @@ type settings struct {
// userProject is the user project that should be billed for the request.
userProject string
+
+ metricsContext *metricsContext
}
func initSettings(opts ...storageOption) *settings {
diff --git a/vendor/cloud.google.com/go/storage/doc.go b/vendor/cloud.google.com/go/storage/doc.go
index c274c762ea4ec..4fcfb7326487b 100644
--- a/vendor/cloud.google.com/go/storage/doc.go
+++ b/vendor/cloud.google.com/go/storage/doc.go
@@ -331,14 +331,14 @@ to add a [custom audit logging] header:
// Use client as usual with the context and the additional headers will be sent.
client.Bucket("my-bucket").Attrs(ctx)
-# Experimental gRPC API
+# gRPC API
-This package includes support for the Cloud Storage gRPC API, which is currently
-in preview. This implementation uses gRPC rather than the current JSON & XML
-APIs to make requests to Cloud Storage. Kindly contact the Google Cloud Storage gRPC
-team at [email protected] with a list of GCS buckets you would like to
-allowlist to access this API. The Go Storage gRPC library is not yet generally
-available, so it may be subject to breaking changes.
+This package includes support for the Cloud Storage gRPC API. The
+implementation uses gRPC rather than the default
+JSON & XML APIs to make requests to Cloud Storage.
+The Go Storage gRPC client is generally available.
+The Notifications, Service Account HMAC,
+and GetServiceAccount RPCs are not supported through the gRPC client.
To create a client which will use gRPC, use the alternate constructor:
@@ -349,15 +349,43 @@ To create a client which will use gRPC, use the alternate constructor:
}
// Use client as usual.
-If the application is running within GCP, users may get better performance by
-enabling Direct Google Access (enabling requests to skip some proxy steps). To enable,
-set the environment variable `GOOGLE_CLOUD_ENABLE_DIRECT_PATH_XDS=true` and add
-the following side-effect imports to your application:
+Using the gRPC API inside GCP with a bucket in the same region can allow for
+[Direct Connectivity] (enabling requests to skip some proxy steps and reducing
+response latency). A warning is emitted if the gRPC client is used outside
+GCP, noting that Direct Connectivity could not be initialized. Direct
+Connectivity is not required to access the gRPC API.
- import (
- _ "google.golang.org/grpc/balancer/rls"
- _ "google.golang.org/grpc/xds/googledirectpath"
- )
+Dependencies for the gRPC API may slightly increase the size of binaries for
+applications depending on this package. If you are not using gRPC, you can use
+the build tag `disable_grpc_modules` to opt out of these dependencies and
+reduce the binary size.
+
+The gRPC client emits metrics by default and will export the
+gRPC telemetry discussed in [gRFC/66] and [gRFC/78] to
+[Google Cloud Monitoring]. The metrics are accessible through the Cloud
+Monitoring API and you incur no additional cost for publishing them. Google
+Cloud Support can use this information to more quickly diagnose problems
+related to GCS and gRPC.
+Sending this data does not incur any billing charges, and requires minimal
+CPU (a single RPC every minute) and memory (a few KiB to batch the
+telemetry).
+
+You can view the metrics through the Cloud Monitoring
+[metric explorer] under the prefix `storage.googleapis.com/client`. Metrics are
+emitted every minute.
+
+You can disable metrics when creating a new gRPC client by passing
+[WithDisabledClientMetrics].
+
+The metrics exporter uses the Cloud Monitoring API, which determines the
+project ID and credentials as follows:
+
+* The project ID is determined using the OTel Resource Detector for the
+environment; otherwise it falls back to the project provided by
+[google.FindCredentials].
+
+* Credentials are determined using [Application Default Credentials]. The
+principal must have the `roles/monitoring.metricWriter` role granted; if not,
+a warning is logged. Subsequent warnings are silenced to prevent noisy logs.
# Storage Control API
@@ -366,6 +394,11 @@ and Managed Folder operations) are supported via the autogenerated Storage Contr
client, which is available as a subpackage in this module. See package docs at
[cloud.google.com/go/storage/control/apiv2] or reference the [Storage Control API] docs.
+[Application Default Credentials]: https://cloud.google.com/docs/authentication/application-default-credentials
+[google.FindCredentials]: https://pkg.go.dev/golang.org/x/oauth2/google#FindDefaultCredentials
+[gRFC/66]: https://github.com/grpc/proposal/blob/master/A66-otel-stats.md
+[gRFC/78]: https://github.com/grpc/proposal/blob/master/A78-grpc-metrics-wrr-pf-xds.md
+[Google Cloud Monitoring]: https://cloud.google.com/monitoring/docs
[Cloud Storage IAM docs]: https://cloud.google.com/storage/docs/access-control/iam
[XML POST Object docs]: https://cloud.google.com/storage/docs/xml-api/post-object
[Cloud Storage retry docs]: https://cloud.google.com/storage/docs/retry-strategy
@@ -375,5 +408,7 @@ client, which is available as a subpackage in this module. See package docs at
[IAM Service Account Credentials API]: https://console.developers.google.com/apis/api/iamcredentials.googleapis.com/overview
[custom audit logging]: https://cloud.google.com/storage/docs/audit-logging#add-custom-metadata
[Storage Control API]: https://cloud.google.com/storage/docs/reference/rpc/google.storage.control.v2
+[metric explorer]: https://console.cloud.google.com/projectselector/monitoring/metrics-explorer
+[Direct Connectivity]: https://cloud.google.com/vpc-service-controls/docs/set-up-private-connectivity#direct-connectivity
*/
package storage // import "cloud.google.com/go/storage"
diff --git a/vendor/cloud.google.com/go/storage/dynamic_delay.go b/vendor/cloud.google.com/go/storage/dynamic_delay.go
new file mode 100644
index 0000000000000..5d4c42fb82bfc
--- /dev/null
+++ b/vendor/cloud.google.com/go/storage/dynamic_delay.go
@@ -0,0 +1,154 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+ "fmt"
+ "math"
+ "sync"
+ "time"
+)
+
+// dynamicDelay dynamically calculates the delay at a fixed percentile, based on
+// delay samples.
+//
+// dynamicDelay is goroutine-safe.
+type dynamicDelay struct {
+ increaseFactor float64
+ decreaseFactor float64
+ minDelay time.Duration
+ maxDelay time.Duration
+ value time.Duration
+
+ // Guards the value
+ mu *sync.RWMutex
+}
+
+// NewDynamicDelay returns a dynamicDelay.
+//
+// targetPercentile is the desired percentile to be computed. For example, a
+// targetPercentile of 0.99 computes the delay at the 99th percentile. Must be
+// in the range [0, 1].
+//
+// increaseRate (must be > 0) determines how many increase calls it takes for
+// Value to double.
+//
+// initialDelay is the start value of the delay.
+//
+// decrease can never lower the delay past minDelay, increase can never raise
+// the delay past maxDelay.
+func newDynamicDelay(targetPercentile float64, increaseRate float64, initialDelay, minDelay, maxDelay time.Duration) (*dynamicDelay, error) {
+ if targetPercentile < 0 || targetPercentile > 1 {
+ return nil, fmt.Errorf("invalid targetPercentile (%v): must be within [0, 1]", targetPercentile)
+ }
+ if increaseRate <= 0 {
+ return nil, fmt.Errorf("invalid increaseRate (%v): must be > 0", increaseRate)
+ }
+ if minDelay >= maxDelay {
+ return nil, fmt.Errorf("invalid minDelay (%v) and maxDelay (%v) combination: minDelay must be smaller than maxDelay", minDelay, maxDelay)
+ }
+ if initialDelay < minDelay {
+ initialDelay = minDelay
+ }
+ if initialDelay > maxDelay {
+ initialDelay = maxDelay
+ }
+
+ // Compute increaseFactor and decreaseFactor such that:
+ // (increaseFactor ^ (1 - targetPercentile)) * (decreaseFactor ^ targetPercentile) = 1
+ increaseFactor := math.Exp(math.Log(2) / increaseRate)
+ if increaseFactor < 1.001 {
+ increaseFactor = 1.001
+ }
+ decreaseFactor := math.Exp(-math.Log(increaseFactor) * (1 - targetPercentile) / targetPercentile)
+ if decreaseFactor > 0.9999 {
+ decreaseFactor = 0.9999
+ }
+
+ return &dynamicDelay{
+ increaseFactor: increaseFactor,
+ decreaseFactor: decreaseFactor,
+ minDelay: minDelay,
+ maxDelay: maxDelay,
+ value: initialDelay,
+ mu: &sync.RWMutex{},
+ }, nil
+}
+
+func (d *dynamicDelay) unsafeIncrease() {
+ v := time.Duration(float64(d.value) * d.increaseFactor)
+ if v > d.maxDelay {
+ d.value = d.maxDelay
+ } else {
+ d.value = v
+ }
+}
+
+// increase notes that the operation took longer than the delay returned by Value.
+func (d *dynamicDelay) increase() {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+
+ d.unsafeIncrease()
+}
+
+func (d *dynamicDelay) unsafeDecrease() {
+ v := time.Duration(float64(d.value) * d.decreaseFactor)
+ if v < d.minDelay {
+ d.value = d.minDelay
+ } else {
+ d.value = v
+ }
+}
+
+// decrease notes that the operation completed before the delay returned by getValue.
+func (d *dynamicDelay) decrease() {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+
+ d.unsafeDecrease()
+}
+
+// update updates the delay value depending on the specified latency.
+func (d *dynamicDelay) update(latency time.Duration) {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+
+ if latency > d.value {
+ d.unsafeIncrease()
+ } else {
+ d.unsafeDecrease()
+ }
+}
+
+// getValue returns the desired delay to wait before retry the operation.
+func (d *dynamicDelay) getValue() time.Duration {
+ d.mu.RLock()
+ defer d.mu.RUnlock()
+
+ return d.value
+}
+
+// PrintDelay prints the state of delay, helpful in debugging.
+func (d *dynamicDelay) printDelay() {
+ d.mu.RLock()
+ defer d.mu.RUnlock()
+
+ fmt.Println("IncreaseFactor: ", d.increaseFactor)
+ fmt.Println("DecreaseFactor: ", d.decreaseFactor)
+ fmt.Println("MinDelay: ", d.minDelay)
+ fmt.Println("MaxDelay: ", d.maxDelay)
+ fmt.Println("Value: ", d.value)
+}
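+
+// A hypothetical usage sketch (not part of the package API): track the delay
+// at a target percentile for a repeated operation and use it as the wait
+// before the next attempt.
+//
+//	d, _ := newDynamicDelay(0.99, 15, time.Second, time.Millisecond, time.Minute)
+//	start := time.Now()
+//	doSomeOperation() // hypothetical operation being timed
+//	d.update(time.Since(start))
+//	wait := d.getValue() // delay to apply before retrying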
diff --git a/vendor/cloud.google.com/go/storage/grpc_client.go b/vendor/cloud.google.com/go/storage/grpc_client.go
index d81a17b6b04d6..eb327a3eeb48e 100644
--- a/vendor/cloud.google.com/go/storage/grpc_client.go
+++ b/vendor/cloud.google.com/go/storage/grpc_client.go
@@ -16,11 +16,12 @@ package storage
import (
"context"
- "encoding/base64"
+ "encoding/binary"
"errors"
"fmt"
"hash/crc32"
"io"
+ "log"
"net/url"
"os"
@@ -36,6 +37,7 @@ import (
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/encoding"
+ "google.golang.org/grpc/mem"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/encoding/protowire"
@@ -95,10 +97,11 @@ func defaultGRPCOptions() []option.ClientOption {
option.WithEndpoint(host),
option.WithGRPCDialOption(grpc.WithInsecure()),
option.WithoutAuthentication(),
+ WithDisabledClientMetrics(),
)
} else {
// Only enable DirectPath when the emulator is not being targeted.
- defaults = append(defaults, internaloption.EnableDirectPath(true))
+ defaults = append(defaults, internaloption.EnableDirectPath(true), internaloption.EnableDirectPathXds())
}
return defaults
@@ -124,6 +127,15 @@ func newGRPCStorageClient(ctx context.Context, opts ...storageOption) (storageCl
return nil, errors.New("storage: GRPC is incompatible with any option that specifies an API for reads")
}
+ if !config.disableClientMetrics {
+ // Do not fail client creation if enabling metrics fails.
+ if metricsContext, err := enableClientMetrics(ctx, s); err == nil {
+ s.metricsContext = metricsContext
+ s.clientOption = append(s.clientOption, metricsContext.clientOpts...)
+ } else {
+ log.Printf("Failed to enable client metrics: %v", err)
+ }
+ }
g, err := gapic.NewClient(ctx, s.clientOption...)
if err != nil {
return nil, err
@@ -136,26 +148,17 @@ func newGRPCStorageClient(ctx context.Context, opts ...storageOption) (storageCl
}
func (c *grpcStorageClient) Close() error {
+ if c.settings.metricsContext != nil {
+ c.settings.metricsContext.close()
+ }
return c.raw.Close()
}
// Top-level methods.
+// GetServiceAccount is not supported in the gRPC client.
func (c *grpcStorageClient) GetServiceAccount(ctx context.Context, project string, opts ...storageOption) (string, error) {
- s := callSettings(c.settings, opts...)
- req := &storagepb.GetServiceAccountRequest{
- Project: toProjectResource(project),
- }
- var resp *storagepb.ServiceAccount
- err := run(ctx, func(ctx context.Context) error {
- var err error
- resp, err = c.raw.GetServiceAccount(ctx, req, s.gax...)
- return err
- }, s.retry, s.idempotent)
- if err != nil {
- return "", err
- }
- return resp.EmailAddress, err
+ return "", errMethodNotSupported
}
func (c *grpcStorageClient) CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, enableObjectRetention *bool, opts ...storageOption) (*BucketAttrs, error) {
@@ -432,16 +435,12 @@ func (c *grpcStorageClient) ListObjects(ctx context.Context, bucket string, q *Q
MatchGlob: it.query.MatchGlob,
ReadMask: q.toFieldMask(), // a nil Query still results in a "*" FieldMask
SoftDeleted: it.query.SoftDeleted,
+ IncludeFoldersAsPrefixes: it.query.IncludeFoldersAsPrefixes,
}
if s.userProject != "" {
ctx = setUserProjectMetadata(ctx, s.userProject)
}
fetch := func(pageSize int, pageToken string) (token string, err error) {
- // IncludeFoldersAsPrefixes is not supported for gRPC
- // TODO: remove this when support is added in the proto.
- if it.query.IncludeFoldersAsPrefixes {
- return "", status.Errorf(codes.Unimplemented, "storage: IncludeFoldersAsPrefixes is not supported in gRPC")
- }
var objects []*storagepb.Object
var gitr *gapic.ObjectIterator
err = run(it.ctx, func(ctx context.Context) error {
@@ -959,37 +958,48 @@ func (c *grpcStorageClient) RewriteObject(ctx context.Context, req *rewriteObjec
return r, nil
}
-// bytesCodec is a grpc codec which permits receiving messages as either
-// protobuf messages, or as raw []bytes.
-type bytesCodec struct {
- encoding.Codec
+// Custom codec to be used for unmarshaling ReadObjectResponse messages.
+// This is used to avoid a copy of object data in proto.Unmarshal.
+type bytesCodecV2 struct {
}
-func (bytesCodec) Marshal(v any) ([]byte, error) {
+var _ encoding.CodecV2 = bytesCodecV2{}
+
+// Marshal is used to encode messages to send for bytesCodecV2. Since we are only
+// using this to send ReadObjectRequest messages we don't need to recycle buffers
+// here.
+func (bytesCodecV2) Marshal(v any) (mem.BufferSlice, error) {
vv, ok := v.(proto.Message)
if !ok {
return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v)
}
- return proto.Marshal(vv)
+ var data mem.BufferSlice
+ buf, err := proto.Marshal(vv)
+ if err != nil {
+ return nil, err
+ }
+ data = append(data, mem.SliceBuffer(buf))
+ return data, nil
}
-func (bytesCodec) Unmarshal(data []byte, v any) error {
+// Unmarshal is used for data received for ReadObjectResponse. We want to preserve
+// the mem.BufferSlice in most cases rather than copying and calling proto.Unmarshal.
+func (bytesCodecV2) Unmarshal(data mem.BufferSlice, v any) error {
switch v := v.(type) {
- case *[]byte:
- // If gRPC could recycle the data []byte after unmarshaling (through
- // buffer pools), we would need to make a copy here.
+ case *mem.BufferSlice:
*v = data
+ // Pick up a reference to the data so that it is not freed while decoding.
+ data.Ref()
return nil
case proto.Message:
- return proto.Unmarshal(data, v)
+ buf := data.MaterializeToBuffer(mem.DefaultBufferPool())
+ return proto.Unmarshal(buf.ReadOnlyData(), v)
default:
- return fmt.Errorf("can not unmarshal type %T", v)
+ return fmt.Errorf("cannot unmarshal type %T, want proto.Message or mem.BufferSlice", v)
}
}
-func (bytesCodec) Name() string {
- // If this isn't "", then gRPC sets the content-subtype of the call to this
- // value and we get errors.
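+// Name returns an empty string. If the name were non-empty, gRPC would set
+// the content-subtype of the call to that value and requests would fail.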
+func (bytesCodecV2) Name() string {
return ""
}
@@ -1000,7 +1010,7 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange
s := callSettings(c.settings, opts...)
s.gax = append(s.gax, gax.WithGRPCOptions(
- grpc.ForceCodec(bytesCodec{}),
+ grpc.ForceCodecV2(bytesCodecV2{}),
))
if s.userProject != "" {
@@ -1018,8 +1028,6 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange
req.Generation = params.gen
}
- var databuf []byte
-
// Define a function that initiates a Read with offset and length, assuming
// we have already read seen bytes.
reopen := func(seen int64) (*readStreamResponse, context.CancelFunc, error) {
@@ -1045,18 +1053,19 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange
}
var stream storagepb.Storage_ReadObjectClient
- var msg *storagepb.ReadObjectResponse
var err error
+ var decoder *readResponseDecoder
err = run(cc, func(ctx context.Context) error {
- stream, err = c.raw.ReadObject(cc, req, s.gax...)
+ stream, err = c.raw.ReadObject(ctx, req, s.gax...)
if err != nil {
return err
}
// Receive the message into databuf as a wire-encoded message so we can
// use a custom decoder to avoid an extra copy at the protobuf layer.
- err := stream.RecvMsg(&databuf)
+ databufs := mem.BufferSlice{}
+ err := stream.RecvMsg(&databufs)
// These types of errors show up on the Recv call, rather than the
// initialization of the stream via ReadObject above.
if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound {
@@ -1066,22 +1075,26 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange
return err
}
// Use a custom decoder that uses protobuf unmarshalling for all
- // fields except the checksummed data.
- // Subsequent receives in Read calls will skip all protobuf
- // unmarshalling and directly read the content from the gRPC []byte
- // response, since only the first call will contain other fields.
- msg, err = readFullObjectResponse(databuf)
-
+ // fields except the object data. Object data is handled separately
+ // to avoid a copy.
+ decoder = &readResponseDecoder{
+ databufs: databufs,
+ }
+ err = decoder.readFullObjectResponse()
return err
}, s.retry, s.idempotent)
if err != nil {
// Close the stream context we just created to ensure we don't leak
// resources.
cancel()
+ // Free any buffers.
+ if decoder != nil && decoder.databufs != nil {
+ decoder.databufs.Free()
+ }
return nil, nil, err
}
- return &readStreamResponse{stream, msg}, cancel, nil
+ return &readStreamResponse{stream, decoder}, cancel, nil
}
res, cancel, err := reopen(0)
@@ -1091,7 +1104,7 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange
// The first message was Recv'd on stream open, use it to populate the
// object metadata.
- msg := res.response
+ msg := res.decoder.msg
obj := msg.GetMetadata()
// This is the size of the entire object, even if only a range was requested.
size := obj.GetSize()
@@ -1101,9 +1114,11 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange
wantCRC uint32
checkCRC bool
)
- if checksums := msg.GetObjectChecksums(); checksums != nil && checksums.Crc32C != nil && params.offset == 0 && params.length < 0 {
+ if checksums := msg.GetObjectChecksums(); checksums != nil && checksums.Crc32C != nil {
+ if params.offset == 0 && params.length < 0 {
+ checkCRC = true
+ }
wantCRC = checksums.GetCrc32C()
- checkCRC = true
}
r = &Reader{
@@ -1115,18 +1130,17 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange
LastModified: obj.GetUpdateTime().AsTime(),
Metageneration: obj.GetMetageneration(),
Generation: obj.GetGeneration(),
+ CRC32C: wantCRC,
},
reader: &gRPCReader{
stream: res.stream,
reopen: reopen,
cancel: cancel,
size: size,
- // Store the content from the first Recv in the
- // client buffer for reading later.
- leftovers: msg.GetChecksummedData().GetContent(),
+ // Preserve the decoder to read out object data when Read/WriteTo is called.
+ currMsg: res.decoder,
settings: s,
zeroRange: params.length == 0,
- databuf: databuf,
wantCRC: wantCRC,
checkCRC: checkCRC,
},
@@ -1293,213 +1307,53 @@ func (c *grpcStorageClient) TestIamPermissions(ctx context.Context, resource str
return res.Permissions, nil
}
-// HMAC Key methods.
+// HMAC Key methods are not implemented in the gRPC client.
func (c *grpcStorageClient) GetHMACKey(ctx context.Context, project, accessID string, opts ...storageOption) (*HMACKey, error) {
- s := callSettings(c.settings, opts...)
- req := &storagepb.GetHmacKeyRequest{
- AccessId: accessID,
- Project: toProjectResource(project),
- }
- if s.userProject != "" {
- ctx = setUserProjectMetadata(ctx, s.userProject)
- }
- var metadata *storagepb.HmacKeyMetadata
- err := run(ctx, func(ctx context.Context) error {
- var err error
- metadata, err = c.raw.GetHmacKey(ctx, req, s.gax...)
- return err
- }, s.retry, s.idempotent)
- if err != nil {
- return nil, err
- }
- return toHMACKeyFromProto(metadata), nil
+ return nil, errMethodNotSupported
}
func (c *grpcStorageClient) ListHMACKeys(ctx context.Context, project, serviceAccountEmail string, showDeletedKeys bool, opts ...storageOption) *HMACKeysIterator {
- s := callSettings(c.settings, opts...)
- req := &storagepb.ListHmacKeysRequest{
- Project: toProjectResource(project),
- ServiceAccountEmail: serviceAccountEmail,
- ShowDeletedKeys: showDeletedKeys,
- }
- if s.userProject != "" {
- ctx = setUserProjectMetadata(ctx, s.userProject)
- }
it := &HMACKeysIterator{
ctx: ctx,
- projectID: project,
- retry: s.retry,
+ projectID: "",
+ retry: nil,
}
- fetch := func(pageSize int, pageToken string) (token string, err error) {
- var hmacKeys []*storagepb.HmacKeyMetadata
- err = run(it.ctx, func(ctx context.Context) error {
- gitr := c.raw.ListHmacKeys(ctx, req, s.gax...)
- hmacKeys, token, err = gitr.InternalFetch(pageSize, pageToken)
- return err
- }, s.retry, s.idempotent)
- if err != nil {
- return "", err
- }
- for _, hkmd := range hmacKeys {
- hk := toHMACKeyFromProto(hkmd)
- it.hmacKeys = append(it.hmacKeys, hk)
- }
-
- return token, nil
+ fetch := func(_ int, _ string) (token string, err error) {
+ return "", errMethodNotSupported
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
fetch,
- func() int { return len(it.hmacKeys) - it.index },
- func() interface{} {
- prev := it.hmacKeys
- it.hmacKeys = it.hmacKeys[:0]
- it.index = 0
- return prev
- })
+ func() int { return 0 },
+ func() interface{} { return nil },
+ )
return it
}
func (c *grpcStorageClient) UpdateHMACKey(ctx context.Context, project, serviceAccountEmail, accessID string, attrs *HMACKeyAttrsToUpdate, opts ...storageOption) (*HMACKey, error) {
- s := callSettings(c.settings, opts...)
- hk := &storagepb.HmacKeyMetadata{
- AccessId: accessID,
- Project: toProjectResource(project),
- ServiceAccountEmail: serviceAccountEmail,
- State: string(attrs.State),
- Etag: attrs.Etag,
- }
- var paths []string
- fieldMask := &fieldmaskpb.FieldMask{
- Paths: paths,
- }
- if attrs.State != "" {
- fieldMask.Paths = append(fieldMask.Paths, "state")
- }
- req := &storagepb.UpdateHmacKeyRequest{
- HmacKey: hk,
- UpdateMask: fieldMask,
- }
- if s.userProject != "" {
- ctx = setUserProjectMetadata(ctx, s.userProject)
- }
- var metadata *storagepb.HmacKeyMetadata
- err := run(ctx, func(ctx context.Context) error {
- var err error
- metadata, err = c.raw.UpdateHmacKey(ctx, req, s.gax...)
- return err
- }, s.retry, s.idempotent)
- if err != nil {
- return nil, err
- }
- return toHMACKeyFromProto(metadata), nil
+ return nil, errMethodNotSupported
}
func (c *grpcStorageClient) CreateHMACKey(ctx context.Context, project, serviceAccountEmail string, opts ...storageOption) (*HMACKey, error) {
- s := callSettings(c.settings, opts...)
- req := &storagepb.CreateHmacKeyRequest{
- Project: toProjectResource(project),
- ServiceAccountEmail: serviceAccountEmail,
- }
- if s.userProject != "" {
- ctx = setUserProjectMetadata(ctx, s.userProject)
- }
- var res *storagepb.CreateHmacKeyResponse
- err := run(ctx, func(ctx context.Context) error {
- var err error
- res, err = c.raw.CreateHmacKey(ctx, req, s.gax...)
- return err
- }, s.retry, s.idempotent)
- if err != nil {
- return nil, err
- }
- key := toHMACKeyFromProto(res.Metadata)
- key.Secret = base64.StdEncoding.EncodeToString(res.SecretKeyBytes)
-
- return key, nil
+ return nil, errMethodNotSupported
}
func (c *grpcStorageClient) DeleteHMACKey(ctx context.Context, project string, accessID string, opts ...storageOption) error {
- s := callSettings(c.settings, opts...)
- req := &storagepb.DeleteHmacKeyRequest{
- AccessId: accessID,
- Project: toProjectResource(project),
- }
- if s.userProject != "" {
- ctx = setUserProjectMetadata(ctx, s.userProject)
- }
- return run(ctx, func(ctx context.Context) error {
- return c.raw.DeleteHmacKey(ctx, req, s.gax...)
- }, s.retry, s.idempotent)
+ return errMethodNotSupported
}
-// Notification methods.
+// Notification methods are not implemented in the gRPC client.
func (c *grpcStorageClient) ListNotifications(ctx context.Context, bucket string, opts ...storageOption) (n map[string]*Notification, err error) {
- ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.ListNotifications")
- defer func() { trace.EndSpan(ctx, err) }()
-
- s := callSettings(c.settings, opts...)
- if s.userProject != "" {
- ctx = setUserProjectMetadata(ctx, s.userProject)
- }
- req := &storagepb.ListNotificationConfigsRequest{
- Parent: bucketResourceName(globalProjectAlias, bucket),
- }
- var notifications []*storagepb.NotificationConfig
- err = run(ctx, func(ctx context.Context) error {
- gitr := c.raw.ListNotificationConfigs(ctx, req, s.gax...)
- for {
- // PageSize is not set and fallbacks to the API default pageSize of 100.
- items, nextPageToken, err := gitr.InternalFetch(int(req.GetPageSize()), req.GetPageToken())
- if err != nil {
- return err
- }
- notifications = append(notifications, items...)
- // If there are no more results, nextPageToken is empty and err is nil.
- if nextPageToken == "" {
- return err
- }
- req.PageToken = nextPageToken
- }
- }, s.retry, s.idempotent)
- if err != nil {
- return nil, err
- }
-
- return notificationsToMapFromProto(notifications), nil
+ return nil, errMethodNotSupported
}
func (c *grpcStorageClient) CreateNotification(ctx context.Context, bucket string, n *Notification, opts ...storageOption) (ret *Notification, err error) {
- ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.CreateNotification")
- defer func() { trace.EndSpan(ctx, err) }()
-
- s := callSettings(c.settings, opts...)
- req := &storagepb.CreateNotificationConfigRequest{
- Parent: bucketResourceName(globalProjectAlias, bucket),
- NotificationConfig: toProtoNotification(n),
- }
- var pbn *storagepb.NotificationConfig
- err = run(ctx, func(ctx context.Context) error {
- var err error
- pbn, err = c.raw.CreateNotificationConfig(ctx, req, s.gax...)
- return err
- }, s.retry, s.idempotent)
- if err != nil {
- return nil, err
- }
- return toNotificationFromProto(pbn), err
+ return nil, errMethodNotSupported
}
func (c *grpcStorageClient) DeleteNotification(ctx context.Context, bucket string, id string, opts ...storageOption) (err error) {
- ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.DeleteNotification")
- defer func() { trace.EndSpan(ctx, err) }()
-
- s := callSettings(c.settings, opts...)
- req := &storagepb.DeleteNotificationConfigRequest{Name: id}
- return run(ctx, func(ctx context.Context) error {
- return c.raw.DeleteNotificationConfig(ctx, req, s.gax...)
- }, s.retry, s.idempotent)
+ return errMethodNotSupported
}
// setUserProjectMetadata appends a project ID to the outgoing Context metadata
@@ -1512,8 +1366,8 @@ func setUserProjectMetadata(ctx context.Context, project string) context.Context
}
type readStreamResponse struct {
- stream storagepb.Storage_ReadObjectClient
- response *storagepb.ReadObjectResponse
+ stream storagepb.Storage_ReadObjectClient
+ decoder *readResponseDecoder
}
type gRPCReader struct {
@@ -1522,7 +1376,7 @@ type gRPCReader struct {
stream storagepb.Storage_ReadObjectClient
reopen func(seen int64) (*readStreamResponse, context.CancelFunc, error)
leftovers []byte
- databuf []byte
+ currMsg *readResponseDecoder // decoder for the current message
cancel context.CancelFunc
settings *settings
checkCRC bool // should we check the CRC?
@@ -1565,18 +1419,21 @@ func (r *gRPCReader) Read(p []byte) (int, error) {
}
var n int
- // Read leftovers and return what was available to conform to the Reader
+
+ // If there is data remaining in the current message, return what was
+ // available to conform to the Reader
// interface: https://pkg.go.dev/io#Reader.
- if len(r.leftovers) > 0 {
- n = copy(p, r.leftovers)
+ if !r.currMsg.done {
+ n = r.currMsg.readAndUpdateCRC(p, func(b []byte) {
+ r.updateCRC(b)
+ })
r.seen += int64(n)
- r.updateCRC(p[:n])
- r.leftovers = r.leftovers[n:]
return n, nil
}
// Attempt to Recv the next message on the stream.
- content, err := r.recv()
+ // This will update r.currMsg with the decoder for the new message.
+ err := r.recv()
if err != nil {
return 0, err
}
@@ -1588,16 +1445,11 @@ func (r *gRPCReader) Read(p []byte) (int, error) {
// present in the response here.
// TODO: Figure out if we need to support decompressive transcoding
// https://cloud.google.com/storage/docs/transcoding.
- n = copy(p[n:], content)
- leftover := len(content) - n
- if leftover > 0 {
- // Wasn't able to copy all of the data in the message, store for
- // future Read calls.
- r.leftovers = content[n:]
- }
- r.seen += int64(n)
- r.updateCRC(p[:n])
+ n = r.currMsg.readAndUpdateCRC(p, func(b []byte) {
+ r.updateCRC(b)
+ })
+ r.seen += int64(n)
return n, nil
}
@@ -1624,14 +1476,14 @@ func (r *gRPCReader) WriteTo(w io.Writer) (int64, error) {
// Track bytes written during before call.
var alreadySeen = r.seen
- // Write any leftovers to the stream. There will be some leftovers from the
+ // Write any already received message to the stream. There will be some leftovers from the
// original NewRangeReader call.
- if len(r.leftovers) > 0 {
- // Write() will write the entire leftovers slice unless there is an error.
- written, err := w.Write(r.leftovers)
+ if r.currMsg != nil && !r.currMsg.done {
+ written, err := r.currMsg.writeToAndUpdateCRC(w, func(b []byte) {
+ r.updateCRC(b)
+ })
r.seen += int64(written)
- r.updateCRC(r.leftovers)
- r.leftovers = nil
+ r.currMsg = nil
if err != nil {
return r.seen - alreadySeen, err
}
@@ -1642,7 +1494,7 @@ func (r *gRPCReader) WriteTo(w io.Writer) (int64, error) {
// Attempt to receive the next message on the stream.
// Will terminate with io.EOF once data has all come through.
// recv() handles stream reopening and retry logic so no need for retries here.
- msg, err := r.recv()
+ err := r.recv()
if err != nil {
if err == io.EOF {
// We are done; check the checksum if necessary and return.
@@ -1658,9 +1510,10 @@ func (r *gRPCReader) WriteTo(w io.Writer) (int64, error) {
// present in the response here.
// TODO: Figure out if we need to support decompressive transcoding
// https://cloud.google.com/storage/docs/transcoding.
- written, err := w.Write(msg)
+ written, err := r.currMsg.writeToAndUpdateCRC(w, func(b []byte) {
+ r.updateCRC(b)
+ })
r.seen += int64(written)
- r.updateCRC(msg)
if err != nil {
return r.seen - alreadySeen, err
}
@@ -1669,12 +1522,13 @@ func (r *gRPCReader) WriteTo(w io.Writer) (int64, error) {
}
// Close cancels the read stream's context in order for it to be closed and
-// collected.
+// collected, and frees any currently in use buffers.
func (r *gRPCReader) Close() error {
if r.cancel != nil {
r.cancel()
}
r.stream = nil
+ r.currMsg = nil
return nil
}
@@ -1689,8 +1543,9 @@ func (r *gRPCReader) Close() error {
//
// The last error received is the one that is returned, which could be from
// an attempt to reopen the stream.
-func (r *gRPCReader) recv() ([]byte, error) {
- err := r.stream.RecvMsg(&r.databuf)
+func (r *gRPCReader) recv() error {
+ databufs := mem.BufferSlice{}
+ err := r.stream.RecvMsg(&databufs)
var shouldRetry = ShouldRetry
if r.settings.retry != nil && r.settings.retry.shouldRetry != nil {
@@ -1700,16 +1555,16 @@ func (r *gRPCReader) recv() ([]byte, error) {
// This will "close" the existing stream and immediately attempt to
// reopen the stream, but will backoff if further attempts are necessary.
// Reopening the stream Recvs the first message, so if retrying is
- // successful, the next logical chunk will be returned.
- msg, err := r.reopenStream()
- return msg.GetChecksummedData().GetContent(), err
+ // successful, r.currMsg will be updated to include the new data.
+ return r.reopenStream()
}
if err != nil {
- return nil, err
+ return err
}
- return readObjectResponseContent(r.databuf)
+ r.currMsg = &readResponseDecoder{databufs: databufs}
+ return r.currMsg.readFullObjectResponse()
}
// ReadObjectResponse field and subfield numbers.
@@ -1722,21 +1577,297 @@ const (
metadataField = protowire.Number(4)
)
-// readObjectResponseContent returns the checksummed_data.content field of a
-// ReadObjectResponse message, or an error if the message is invalid.
-// This can be used on recvs of objects after the first recv, since only the
-// first message will contain non-data fields.
-func readObjectResponseContent(b []byte) ([]byte, error) {
- checksummedData, err := readProtoBytes(b, checksummedDataField)
+// readResponseDecoder is a wrapper on the raw message, used to decode one message
+// without copying object data. It also has methods to write out the resulting object
+// data to the user application.
+type readResponseDecoder struct {
+ databufs mem.BufferSlice // raw bytes of the message being processed
+ // Decoding offsets
+	off uint64 // offset in the message relative to the data as a whole
+ currBuf int // index of the current buffer being processed
+ currOff uint64 // offset in the current buffer
+ // Processed data
+ msg *storagepb.ReadObjectResponse // processed response message with all fields other than object data populated
+ dataOffsets bufferSliceOffsets // offsets of the object data in the message.
+ done bool // true if the data has been completely read.
+}
+
+type bufferSliceOffsets struct {
+ startBuf, endBuf int // indices of start and end buffers of object data in the msg
+ startOff, endOff uint64 // offsets within these buffers where the data starts and ends.
+ currBuf int // index of current buffer being read out to the user application.
+ currOff uint64 // offset of read in current buffer.
+}
+
+// peek ahead 10 bytes from the current offset in the databufs. This will return a
+// slice of the current buffer if the bytes are all in one buffer, but will copy
+// the bytes into a new buffer if they are split across buffers. Use this
+// to allow protowire methods to be used to parse tags & fixed values.
+// The max length of a varint tag is 10 bytes, see
+// https://protobuf.dev/programming-guides/encoding/#varints . Other int types
+// are shorter.
+func (d *readResponseDecoder) peek() []byte {
+ b := d.databufs[d.currBuf].ReadOnlyData()
+ // Check if the tag will fit in the current buffer. If not, copy the next 10
+ // bytes into a new buffer to ensure that we can read the tag correctly
+ // without it being divided between buffers.
+ tagBuf := b[d.currOff:]
+ remainingInBuf := len(tagBuf)
+ // If we have less than 10 bytes remaining and are not in the final buffer,
+ // copy up to 10 bytes ahead from the next buffer.
+ if remainingInBuf < binary.MaxVarintLen64 && d.currBuf != len(d.databufs)-1 {
+ tagBuf = d.copyNextBytes(10)
+ }
+ return tagBuf
+}
+
+// Copies up to the next n bytes into a new buffer, or fewer if fewer bytes remain in the
+// buffers overall. Does not advance offsets.
+func (d *readResponseDecoder) copyNextBytes(n int) []byte {
+ remaining := n
+ if r := d.databufs.Len() - int(d.off); r < remaining {
+ remaining = r
+ }
+ currBuf := d.currBuf
+ currOff := d.currOff
+ var buf []byte
+ for remaining > 0 {
+ b := d.databufs[currBuf].ReadOnlyData()
+ remainingInCurr := len(b[currOff:])
+ if remainingInCurr < remaining {
+ buf = append(buf, b[currOff:]...)
+ remaining -= remainingInCurr
+ currBuf++
+ currOff = 0
+ } else {
+ buf = append(buf, b[currOff:currOff+uint64(remaining)]...)
+ remaining = 0
+ }
+ }
+ return buf
+}
+
+// Advance current buffer & byte offset in the decoding by n bytes. Returns an error if we
+// go past the end of the data.
+func (d *readResponseDecoder) advanceOffset(n uint64) error {
+ remaining := n
+ for remaining > 0 {
+ remainingInCurr := uint64(d.databufs[d.currBuf].Len()) - d.currOff
+ if remainingInCurr <= remaining {
+ remaining -= remainingInCurr
+ d.currBuf++
+ d.currOff = 0
+ } else {
+ d.currOff += remaining
+ remaining = 0
+ }
+ }
+ // If we have advanced past the end of the buffers, something went wrong.
+ if (d.currBuf == len(d.databufs) && d.currOff > 0) || d.currBuf > len(d.databufs) {
+ return errors.New("decoding: truncated message, cannot advance offset")
+ }
+ d.off += n
+ return nil
+}
+
+// This copies object data from the message into the buffer and returns the number of
+// bytes copied. The data offsets are incremented in the message. The updateCRC
+// function is called on the copied bytes.
+func (d *readResponseDecoder) readAndUpdateCRC(p []byte, updateCRC func([]byte)) int {
+ // For a completely empty message, just return 0
+ if len(d.databufs) == 0 {
+ return 0
+ }
+ databuf := d.databufs[d.dataOffsets.currBuf]
+ startOff := d.dataOffsets.currOff
+ var b []byte
+ if d.dataOffsets.currBuf == d.dataOffsets.endBuf {
+ b = databuf.ReadOnlyData()[startOff:d.dataOffsets.endOff]
+ } else {
+ b = databuf.ReadOnlyData()[startOff:]
+ }
+ n := copy(p, b)
+ updateCRC(b[:n])
+ d.dataOffsets.currOff += uint64(n)
+
+ // We've read all the data from this message. Free the underlying buffers.
+ if d.dataOffsets.currBuf == d.dataOffsets.endBuf && d.dataOffsets.currOff == d.dataOffsets.endOff {
+ d.done = true
+ d.databufs.Free()
+ }
+ // We are at the end of the current buffer
+ if d.dataOffsets.currBuf != d.dataOffsets.endBuf && d.dataOffsets.currOff == uint64(databuf.Len()) {
+ d.dataOffsets.currOff = 0
+ d.dataOffsets.currBuf++
+ }
+ return n
+}
+
+func (d *readResponseDecoder) writeToAndUpdateCRC(w io.Writer, updateCRC func([]byte)) (int64, error) {
+ // For a completely empty message, just return 0
+ if len(d.databufs) == 0 {
+ return 0, nil
+ }
+ var written int64
+ for !d.done {
+ databuf := d.databufs[d.dataOffsets.currBuf]
+ startOff := d.dataOffsets.currOff
+ var b []byte
+ if d.dataOffsets.currBuf == d.dataOffsets.endBuf {
+ b = databuf.ReadOnlyData()[startOff:d.dataOffsets.endOff]
+ } else {
+ b = databuf.ReadOnlyData()[startOff:]
+ }
+ var n int
+ // Write all remaining data from the current buffer
+ n, err := w.Write(b)
+ written += int64(n)
+ updateCRC(b)
+ if err != nil {
+ return written, err
+ }
+ d.dataOffsets.currOff = 0
+ // We've read all the data from this message.
+ if d.dataOffsets.currBuf == d.dataOffsets.endBuf {
+ d.done = true
+ d.databufs.Free()
+ } else {
+ d.dataOffsets.currBuf++
+ }
+ }
+ return written, nil
+}
+
+// Consume the next available tag in the input data and return the field number and type.
+// Advances the relevant offsets in the data.
+func (d *readResponseDecoder) consumeTag() (protowire.Number, protowire.Type, error) {
+ tagBuf := d.peek()
+
+ // Consume the next tag. This will tell us which field is next in the
+ // buffer, its type, and how much space it takes up.
+ fieldNum, fieldType, tagLength := protowire.ConsumeTag(tagBuf)
+ if tagLength < 0 {
+ return 0, 0, protowire.ParseError(tagLength)
+ }
+ // Update the offsets and current buffer depending on the tag length.
+ if err := d.advanceOffset(uint64(tagLength)); err != nil {
+ return 0, 0, fmt.Errorf("consuming tag: %w", err)
+ }
+ return fieldNum, fieldType, nil
+}
+
+// Consume a varint that represents the length of a bytes field. Return the length of
+// the data, and advance the offsets by the length of the varint.
+func (d *readResponseDecoder) consumeVarint() (uint64, error) {
+ tagBuf := d.peek()
+
+	// Consume the next varint. This will tell us its value and how much
+	// space it takes up.
+ dataLength, tagLength := protowire.ConsumeVarint(tagBuf)
+ if tagLength < 0 {
+ return 0, protowire.ParseError(tagLength)
+ }
+
+ // Update the offsets and current buffer depending on the tag length.
+ d.advanceOffset(uint64(tagLength))
+ return dataLength, nil
+}
+
+func (d *readResponseDecoder) consumeFixed32() (uint32, error) {
+ valueBuf := d.peek()
+
+	// Consume the next fixed32 value. This will tell us its value and how
+	// much space it takes up.
+ value, tagLength := protowire.ConsumeFixed32(valueBuf)
+ if tagLength < 0 {
+ return 0, protowire.ParseError(tagLength)
+ }
+
+ // Update the offsets and current buffer depending on the tag length.
+ d.advanceOffset(uint64(tagLength))
+ return value, nil
+}
+
+func (d *readResponseDecoder) consumeFixed64() (uint64, error) {
+ valueBuf := d.peek()
+
+	// Consume the next fixed64 value. This will tell us its value and how
+	// much space it takes up.
+ value, tagLength := protowire.ConsumeFixed64(valueBuf)
+ if tagLength < 0 {
+ return 0, protowire.ParseError(tagLength)
+ }
+
+ // Update the offsets and current buffer depending on the tag length.
+ d.advanceOffset(uint64(tagLength))
+ return value, nil
+}
+
+// Consume the next field's value without returning it. This is used to skip
+// any values which are not going to be used.
+func (d *readResponseDecoder) consumeFieldValue(fieldNum protowire.Number, fieldType protowire.Type) error {
+	// Reimplement protowire.ConsumeFieldValue without the extra case for
+	// groups (which are complicated and not a thing in proto3).
+ var err error
+ switch fieldType {
+ case protowire.VarintType:
+ _, err = d.consumeVarint()
+ case protowire.Fixed32Type:
+ _, err = d.consumeFixed32()
+ case protowire.Fixed64Type:
+ _, err = d.consumeFixed64()
+ case protowire.BytesType:
+ _, err = d.consumeBytes()
+ default:
+ return fmt.Errorf("unknown field type %v in field %v", fieldType, fieldNum)
+ }
if err != nil {
- return b, fmt.Errorf("invalid ReadObjectResponse.ChecksummedData: %v", err)
+ return fmt.Errorf("consuming field %v of type %v: %w", fieldNum, fieldType, err)
}
- content, err := readProtoBytes(checksummedData, checksummedDataContentField)
+
+ return nil
+}
+
+// Consume a bytes field from the input. Returns offsets for the data in the buffer slices
+// and an error.
+func (d *readResponseDecoder) consumeBytes() (bufferSliceOffsets, error) {
+ // m is the length of the data past the tag.
+ m, err := d.consumeVarint()
if err != nil {
- return content, fmt.Errorf("invalid ReadObjectResponse.ChecksummedData.Content: %v", err)
+ return bufferSliceOffsets{}, fmt.Errorf("consuming bytes field: %w", err)
+ }
+ offsets := bufferSliceOffsets{
+ startBuf: d.currBuf,
+ startOff: d.currOff,
+ currBuf: d.currBuf,
+ currOff: d.currOff,
}
- return content, nil
+	// Advance offsets past the length of the bytes field and capture where we end.
+ d.advanceOffset(m)
+ offsets.endBuf = d.currBuf
+ offsets.endOff = d.currOff
+ return offsets, nil
+}
+
+// Consume a bytes field from the input and copy into a new buffer if
+// necessary (if the data is split across buffers in databuf). This can be
+// used to leverage proto.Unmarshal for small bytes fields (i.e. anything
+// except object data).
+func (d *readResponseDecoder) consumeBytesCopy() ([]byte, error) {
+ // m is the length of the bytes data.
+ m, err := d.consumeVarint()
+ if err != nil {
+ return nil, fmt.Errorf("consuming varint: %w", err)
+ }
+ // Copy the data into a buffer and advance the offset
+ b := d.copyNextBytes(int(m))
+ if err := d.advanceOffset(m); err != nil {
+ return nil, fmt.Errorf("advancing offset: %w", err)
+ }
+ return b, nil
}
// readFullObjectResponse returns the ReadObjectResponse that is encoded in the
@@ -1746,21 +1877,17 @@ func readObjectResponseContent(b []byte) ([]byte, error) {
// This function is essentially identical to proto.Unmarshal, except it aliases
// the data in the input []byte. If the proto library adds a feature to
// Unmarshal that does that, this function can be dropped.
-func readFullObjectResponse(b []byte) (*storagepb.ReadObjectResponse, error) {
+func (d *readResponseDecoder) readFullObjectResponse() error {
msg := &storagepb.ReadObjectResponse{}
// Loop over the entire message, extracting fields as we go. This does not
// handle field concatenation, in which the contents of a single field
// are split across multiple protobuf tags.
- off := 0
- for off < len(b) {
- // Consume the next tag. This will tell us which field is next in the
- // buffer, its type, and how much space it takes up.
- fieldNum, fieldType, fieldLength := protowire.ConsumeTag(b[off:])
- if fieldLength < 0 {
- return nil, protowire.ParseError(fieldLength)
+ for d.off < uint64(d.databufs.Len()) {
+ fieldNum, fieldType, err := d.consumeTag()
+ if err != nil {
+ return fmt.Errorf("consuming next tag: %w", err)
}
- off += fieldLength
// Unmarshal the field according to its type. Only fields that are not
// nil will be present.
@@ -1769,142 +1896,95 @@ func readFullObjectResponse(b []byte) (*storagepb.ReadObjectResponse, error) {
// The ChecksummedData field was found. Initialize the struct.
msg.ChecksummedData = &storagepb.ChecksummedData{}
- // Get the bytes corresponding to the checksummed data.
- fieldContent, n := protowire.ConsumeBytes(b[off:])
- if n < 0 {
- return nil, fmt.Errorf("invalid ReadObjectResponse.ChecksummedData: %v", protowire.ParseError(n))
+ bytesFieldLen, err := d.consumeVarint()
+ if err != nil {
+ return fmt.Errorf("consuming bytes: %v", err)
}
- off += n
-
- // Get the nested fields. We need to do this manually as it contains
- // the object content bytes.
- contentOff := 0
- for contentOff < len(fieldContent) {
- gotNum, gotTyp, n := protowire.ConsumeTag(fieldContent[contentOff:])
- if n < 0 {
- return nil, protowire.ParseError(n)
+
+ var contentEndOff = d.off + bytesFieldLen
+ for d.off < contentEndOff {
+ gotNum, gotTyp, err := d.consumeTag()
+ if err != nil {
+ return fmt.Errorf("consuming checksummedData tag: %w", err)
}
- contentOff += n
switch {
case gotNum == checksummedDataContentField && gotTyp == protowire.BytesType:
- // Get the content bytes.
- bytes, n := protowire.ConsumeBytes(fieldContent[contentOff:])
- if n < 0 {
- return nil, fmt.Errorf("invalid ReadObjectResponse.ChecksummedData.Content: %v", protowire.ParseError(n))
+ // Get the offsets of the content bytes.
+ d.dataOffsets, err = d.consumeBytes()
+ if err != nil {
+ return fmt.Errorf("invalid ReadObjectResponse.ChecksummedData.Content: %w", err)
}
- msg.ChecksummedData.Content = bytes
- contentOff += n
case gotNum == checksummedDataCRC32CField && gotTyp == protowire.Fixed32Type:
- v, n := protowire.ConsumeFixed32(fieldContent[contentOff:])
- if n < 0 {
- return nil, fmt.Errorf("invalid ReadObjectResponse.ChecksummedData.Crc32C: %v", protowire.ParseError(n))
+ v, err := d.consumeFixed32()
+ if err != nil {
+ return fmt.Errorf("invalid ReadObjectResponse.ChecksummedData.Crc32C: %w", err)
}
msg.ChecksummedData.Crc32C = &v
- contentOff += n
default:
- n = protowire.ConsumeFieldValue(gotNum, gotTyp, fieldContent[contentOff:])
- if n < 0 {
- return nil, protowire.ParseError(n)
+ err := d.consumeFieldValue(gotNum, gotTyp)
+ if err != nil {
+ return fmt.Errorf("invalid field in ReadObjectResponse.ChecksummedData: %w", err)
}
- contentOff += n
}
}
case fieldNum == objectChecksumsField && fieldType == protowire.BytesType:
// The field was found. Initialize the struct.
msg.ObjectChecksums = &storagepb.ObjectChecksums{}
-
- // Get the bytes corresponding to the checksums.
- bytes, n := protowire.ConsumeBytes(b[off:])
- if n < 0 {
- return nil, fmt.Errorf("invalid ReadObjectResponse.ObjectChecksums: %v", protowire.ParseError(n))
+ // Consume the bytes and copy them into a single buffer if they are split across buffers.
+ buf, err := d.consumeBytesCopy()
+ if err != nil {
+ return fmt.Errorf("invalid ReadObjectResponse.ObjectChecksums: %v", err)
}
- off += n
-
// Unmarshal.
- if err := proto.Unmarshal(bytes, msg.ObjectChecksums); err != nil {
- return nil, err
+ if err := proto.Unmarshal(buf, msg.ObjectChecksums); err != nil {
+ return err
}
case fieldNum == contentRangeField && fieldType == protowire.BytesType:
msg.ContentRange = &storagepb.ContentRange{}
-
- bytes, n := protowire.ConsumeBytes(b[off:])
- if n < 0 {
- return nil, fmt.Errorf("invalid ReadObjectResponse.ContentRange: %v", protowire.ParseError(n))
+ buf, err := d.consumeBytesCopy()
+ if err != nil {
+ return fmt.Errorf("invalid ReadObjectResponse.ContentRange: %v", err)
}
- off += n
-
- if err := proto.Unmarshal(bytes, msg.ContentRange); err != nil {
- return nil, err
+ if err := proto.Unmarshal(buf, msg.ContentRange); err != nil {
+ return err
}
case fieldNum == metadataField && fieldType == protowire.BytesType:
msg.Metadata = &storagepb.Object{}
- bytes, n := protowire.ConsumeBytes(b[off:])
- if n < 0 {
- return nil, fmt.Errorf("invalid ReadObjectResponse.Metadata: %v", protowire.ParseError(n))
+ buf, err := d.consumeBytesCopy()
+ if err != nil {
+ return fmt.Errorf("invalid ReadObjectResponse.Metadata: %v", err)
}
- off += n
- if err := proto.Unmarshal(bytes, msg.Metadata); err != nil {
- return nil, err
+ if err := proto.Unmarshal(buf, msg.Metadata); err != nil {
+ return err
}
default:
- fieldLength = protowire.ConsumeFieldValue(fieldNum, fieldType, b[off:])
- if fieldLength < 0 {
- return nil, fmt.Errorf("default: %v", protowire.ParseError(fieldLength))
- }
- off += fieldLength
- }
- }
-
- return msg, nil
-}
-
-// readProtoBytes returns the contents of the protobuf field with number num
-// and type bytes from a wire-encoded message. If the field cannot be found,
-// the returned slice will be nil and no error will be returned.
-//
-// It does not handle field concatenation, in which the contents of a single field
-// are split across multiple protobuf tags. Encoded data containing split fields
-// of this form is technically permissable, but uncommon.
-func readProtoBytes(b []byte, num protowire.Number) ([]byte, error) {
- off := 0
- for off < len(b) {
- gotNum, gotTyp, n := protowire.ConsumeTag(b[off:])
- if n < 0 {
- return nil, protowire.ParseError(n)
- }
- off += n
- if gotNum == num && gotTyp == protowire.BytesType {
- b, n := protowire.ConsumeBytes(b[off:])
- if n < 0 {
- return nil, protowire.ParseError(n)
+ err := d.consumeFieldValue(fieldNum, fieldType)
+ if err != nil {
+ return fmt.Errorf("invalid field in ReadObjectResponse: %w", err)
}
- return b, nil
}
- n = protowire.ConsumeFieldValue(gotNum, gotTyp, b[off:])
- if n < 0 {
- return nil, protowire.ParseError(n)
- }
- off += n
}
- return nil, nil
+ d.msg = msg
+ return nil
}
// reopenStream "closes" the existing stream and attempts to reopen a stream and
// sets the Reader's stream and cancelStream properties in the process.
-func (r *gRPCReader) reopenStream() (*storagepb.ReadObjectResponse, error) {
+func (r *gRPCReader) reopenStream() error {
// Close existing stream and initialize new stream with updated offset.
r.Close()
res, cancel, err := r.reopen(r.seen)
if err != nil {
- return nil, err
+ return err
}
r.stream = res.stream
+ r.currMsg = res.decoder
r.cancel = cancel
- return res.response, nil
+ return nil
}
func newGRPCWriter(c *grpcStorageClient, params *openWriterParams, r io.Reader) *gRPCWriter {
diff --git a/vendor/cloud.google.com/go/storage/grpc_dp.go b/vendor/cloud.google.com/go/storage/grpc_dp.go
new file mode 100644
index 0000000000000..d3422733497f5
--- /dev/null
+++ b/vendor/cloud.google.com/go/storage/grpc_dp.go
@@ -0,0 +1,22 @@
+//go:build !disable_grpc_modules
+
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+ _ "google.golang.org/grpc/balancer/rls"
+ _ "google.golang.org/grpc/xds/googledirectpath"
+)
diff --git a/vendor/cloud.google.com/go/storage/grpc_metrics.go b/vendor/cloud.google.com/go/storage/grpc_metrics.go
new file mode 100644
index 0000000000000..460a9d0a2b8f9
--- /dev/null
+++ b/vendor/cloud.google.com/go/storage/grpc_metrics.go
@@ -0,0 +1,275 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+ "context"
+ "fmt"
+ "log"
+ "strings"
+ "time"
+
+ mexporter "github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric"
+ "github.com/google/uuid"
+ "go.opentelemetry.io/contrib/detectors/gcp"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/metric"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+ "go.opentelemetry.io/otel/sdk/resource"
+ "google.golang.org/api/option"
+ "google.golang.org/api/transport"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/stats/opentelemetry"
+)
+
+const (
+ monitoredResourceName = "storage.googleapis.com/Client"
+ metricPrefix = "storage.googleapis.com/client/"
+)
+
+func latencyHistogramBoundaries() []float64 {
+ boundaries := []float64{}
+ boundary := 0.0
+ increment := 0.002
+ // 2ms buckets for first 100ms, so we can have higher resolution for uploads and downloads in the 100 KiB range
+ for i := 0; i < 50; i++ {
+ boundaries = append(boundaries, boundary)
+ // increment by 2ms
+ boundary += increment
+ }
+ // For the remaining buckets do 10 10ms, 10 20ms, and so on, up until 5 minutes
+ for i := 0; i < 150 && boundary < 300; i++ {
+ boundaries = append(boundaries, boundary)
+ if i != 0 && i%10 == 0 {
+ increment *= 2
+ }
+ boundary += increment
+ }
+ return boundaries
+}
+
+func sizeHistogramBoundaries() []float64 {
+ kb := 1024.0
+ mb := 1024.0 * kb
+ gb := 1024.0 * mb
+ boundaries := []float64{}
+ boundary := 0.0
+ increment := 128 * kb
+ // 128 KiB increments up to 4MiB, then exponential growth
+ for len(boundaries) < 200 && boundary <= 16*gb {
+ boundaries = append(boundaries, boundary)
+ boundary += increment
+ if boundary >= 4*mb {
+ increment *= 2
+ }
+ }
+ return boundaries
+}
+
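+// metricFormatter maps an OTel metric name to a Cloud Monitoring metric type;
+// for example, "grpc.client.attempt.duration" becomes
+// "storage.googleapis.com/client/grpc/client/attempt/duration".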
+func metricFormatter(m metricdata.Metrics) string {
+ return metricPrefix + strings.ReplaceAll(string(m.Name), ".", "/")
+}
+
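+// gcpAttributeExpectedDefaults returns fallback values for resource
+// attributes that the GCP detector may leave unset; newPreparedResource
+// fills these in when the detected value is missing or empty.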
+func gcpAttributeExpectedDefaults() []attribute.KeyValue {
+ return []attribute.KeyValue{
+ {Key: "location", Value: attribute.StringValue("global")},
+ {Key: "cloud_platform", Value: attribute.StringValue("unknown")},
+ {Key: "host_id", Value: attribute.StringValue("unknown")}}
+}
+
+// Added to help with tests
+type preparedResource struct {
+ projectToUse string
+ resource *resource.Resource
+}
+
+func newPreparedResource(ctx context.Context, project string, resourceOptions []resource.Option) (*preparedResource, error) {
+ detectedAttrs, err := resource.New(ctx, resourceOptions...)
+ if err != nil {
+ return nil, err
+ }
+ preparedResource := &preparedResource{}
+ s := detectedAttrs.Set()
+ p, present := s.Value("cloud.account.id")
+ if present {
+ preparedResource.projectToUse = p.AsString()
+ } else {
+ preparedResource.projectToUse = project
+ }
+ updates := []attribute.KeyValue{}
+ for _, kv := range gcpAttributeExpectedDefaults() {
+ if val, present := s.Value(kv.Key); !present || val.AsString() == "" {
+ updates = append(updates, attribute.KeyValue{Key: kv.Key, Value: kv.Value})
+ }
+ }
+ r, err := resource.New(
+ ctx,
+ resource.WithAttributes(
+ attribute.KeyValue{Key: "gcp.resource_type", Value: attribute.StringValue(monitoredResourceName)},
+ attribute.KeyValue{Key: "instance_id", Value: attribute.StringValue(uuid.New().String())},
+ attribute.KeyValue{Key: "project_id", Value: attribute.StringValue(project)},
+ attribute.KeyValue{Key: "api", Value: attribute.StringValue("grpc")},
+ ),
+ resource.WithAttributes(detectedAttrs.Attributes()...),
+ // Last duplicate key / value wins
+ resource.WithAttributes(updates...),
+ )
+ if err != nil {
+ return nil, err
+ }
+ preparedResource.resource = r
+ return preparedResource, nil
+}
+
+type metricsContext struct {
+ // project used by exporter
+ project string
+ // client options passed to gRPC channels
+ clientOpts []option.ClientOption
+ // instance of metric reader used by gRPC client-side metrics
+ provider *metric.MeterProvider
+ // clean func to call when closing gRPC client
+ close func()
+}
+
+func createHistogramView(name string, boundaries []float64) metric.View {
+ return metric.NewView(metric.Instrument{
+ Name: name,
+ Kind: metric.InstrumentKindHistogram,
+ }, metric.Stream{
+ Name: name,
+ Aggregation: metric.AggregationExplicitBucketHistogram{Boundaries: boundaries},
+ })
+}
+
+func newGRPCMetricContext(ctx context.Context, project string) (*metricsContext, error) {
+ preparedResource, err := newPreparedResource(ctx, project, []resource.Option{resource.WithDetectors(gcp.NewDetector())})
+ if err != nil {
+ return nil, err
+ }
+	// The implementation requires a project ID; if one cannot be determined
+	// from the user credentials, fail, stating that gRPC metrics require a
+	// project ID.
+ if project == "" && preparedResource.projectToUse != "" {
+ return nil, fmt.Errorf("google cloud project is required to start client-side metrics")
+ }
+	// If projectToUse isn't the same as the project provided to the Storage
+	// client, emit a log stating which project the metrics are written to.
+ if project != preparedResource.projectToUse {
+		log.Printf("The Project ID configured for metrics is %s, but the Project ID of the storage client is %s. Make sure that the service account in use has the required metric writing role (roles/monitoring.metricWriter) in project %s or metrics will not be written.", preparedResource.projectToUse, project, preparedResource.projectToUse)
+ }
+ meOpts := []mexporter.Option{
+ mexporter.WithProjectID(preparedResource.projectToUse),
+ mexporter.WithMetricDescriptorTypeFormatter(metricFormatter),
+ mexporter.WithCreateServiceTimeSeries(),
+ mexporter.WithMonitoredResourceDescription(monitoredResourceName, []string{"project_id", "location", "cloud_platform", "host_id", "instance_id", "api"})}
+ exporter, err := mexporter.New(meOpts...)
+ if err != nil {
+ return nil, err
+ }
+ // Metric views update histogram boundaries to be relevant to GCS
+ // otherwise default OTel histogram boundaries are used.
+ metricViews := []metric.View{
+ createHistogramView("grpc.client.attempt.duration", latencyHistogramBoundaries()),
+ createHistogramView("grpc.client.attempt.rcvd_total_compressed_message_size", sizeHistogramBoundaries()),
+ createHistogramView("grpc.client.attempt.sent_total_compressed_message_size", sizeHistogramBoundaries()),
+ }
+ provider := metric.NewMeterProvider(
+ metric.WithReader(metric.NewPeriodicReader(&exporterLogSuppressor{exporter: exporter}, metric.WithInterval(time.Minute))),
+ metric.WithResource(preparedResource.resource),
+ metric.WithView(metricViews...),
+ )
+ mo := opentelemetry.MetricsOptions{
+ MeterProvider: provider,
+ Metrics: opentelemetry.DefaultMetrics().Add(
+ "grpc.lb.wrr.rr_fallback",
+ "grpc.lb.wrr.endpoint_weight_not_yet_usable",
+ "grpc.lb.wrr.endpoint_weight_stale",
+ "grpc.lb.wrr.endpoint_weights",
+ "grpc.lb.rls.cache_entries",
+ "grpc.lb.rls.cache_size",
+ "grpc.lb.rls.default_target_picks",
+ "grpc.lb.rls.target_picks",
+ "grpc.lb.rls.failed_picks"),
+ OptionalLabels: []string{"grpc.lb.locality"},
+ }
+ opts := []option.ClientOption{
+ option.WithGRPCDialOption(opentelemetry.DialOption(opentelemetry.Options{MetricsOptions: mo})),
+ option.WithGRPCDialOption(grpc.WithDefaultCallOptions(grpc.StaticMethodCallOption{})),
+ }
+ context := &metricsContext{
+ project: preparedResource.projectToUse,
+ clientOpts: opts,
+ provider: provider,
+ close: createShutdown(ctx, provider),
+ }
+ return context, nil
+}
+
+func enableClientMetrics(ctx context.Context, s *settings) (*metricsContext, error) {
+ var project string
+ c, err := transport.Creds(ctx, s.clientOption...)
+ if err == nil {
+ project = c.ProjectID
+ }
+ // Enable client-side metrics for gRPC
+ metricsContext, err := newGRPCMetricContext(ctx, project)
+ if err != nil {
+ return nil, fmt.Errorf("gRPC Metrics: %w", err)
+ }
+ return metricsContext, nil
+}
+
+func createShutdown(ctx context.Context, provider *metric.MeterProvider) func() {
+ return func() {
+ provider.Shutdown(ctx)
+ }
+}
+
+// exporterLogSuppressor suppresses repeated export errors after the initial
+// permission error has been surfaced, to prevent chatty logs.
+type exporterLogSuppressor struct {
+ exporter metric.Exporter
+ emittedFailure bool
+}
+
+// Export implements the OTel SDK metric.Exporter interface and prevents noisy
+// logs caused by a lack of credentials after the initial failure.
+// https://pkg.go.dev/go.opentelemetry.io/otel/sdk/[email protected]#Exporter
+func (e *exporterLogSuppressor) Export(ctx context.Context, rm *metricdata.ResourceMetrics) error {
+ if err := e.exporter.Export(ctx, rm); err != nil && !e.emittedFailure {
+ if strings.Contains(err.Error(), "PermissionDenied") {
+ e.emittedFailure = true
+			return fmt.Errorf("gRPC metrics failed due to a permission issue: %w", err)
+ }
+ return err
+ }
+ return nil
+}
+
+func (e *exporterLogSuppressor) Temporality(k metric.InstrumentKind) metricdata.Temporality {
+ return e.exporter.Temporality(k)
+}
+
+func (e *exporterLogSuppressor) Aggregation(k metric.InstrumentKind) metric.Aggregation {
+ return e.exporter.Aggregation(k)
+}
+
+func (e *exporterLogSuppressor) ForceFlush(ctx context.Context) error {
+ return e.exporter.ForceFlush(ctx)
+}
+
+func (e *exporterLogSuppressor) Shutdown(ctx context.Context) error {
+ return e.exporter.Shutdown(ctx)
+}
diff --git a/vendor/cloud.google.com/go/storage/hmac.go b/vendor/cloud.google.com/go/storage/hmac.go
index f7811a5d140f0..2387fd33c703f 100644
--- a/vendor/cloud.google.com/go/storage/hmac.go
+++ b/vendor/cloud.google.com/go/storage/hmac.go
@@ -20,7 +20,6 @@ import (
"fmt"
"time"
- "cloud.google.com/go/storage/internal/apiv2/storagepb"
"google.golang.org/api/iterator"
raw "google.golang.org/api/storage/v1"
)
@@ -103,6 +102,7 @@ func (c *Client) HMACKeyHandle(projectID, accessID string) *HMACKeyHandle {
//
// Options such as UserProjectForHMACKeys can be used to set the
// userProject to be billed against for operations.
+// Note: gRPC is not supported.
func (hkh *HMACKeyHandle) Get(ctx context.Context, opts ...HMACKeyOption) (*HMACKey, error) {
desc := new(hmacKeyDesc)
for _, opt := range opts {
@@ -118,6 +118,7 @@ func (hkh *HMACKeyHandle) Get(ctx context.Context, opts ...HMACKeyOption) (*HMAC
// Delete invokes an RPC to delete the key referenced by accessID, on Google Cloud Storage.
// Only inactive HMAC keys can be deleted.
// After deletion, a key cannot be used to authenticate requests.
+// Note: gRPC is not supported.
func (hkh *HMACKeyHandle) Delete(ctx context.Context, opts ...HMACKeyOption) error {
desc := new(hmacKeyDesc)
for _, opt := range opts {
@@ -158,23 +159,8 @@ func toHMACKeyFromRaw(hk *raw.HmacKey, updatedTimeCanBeNil bool) (*HMACKey, erro
return hmKey, nil
}
-func toHMACKeyFromProto(pbmd *storagepb.HmacKeyMetadata) *HMACKey {
- if pbmd == nil {
- return nil
- }
-
- return &HMACKey{
- AccessID: pbmd.GetAccessId(),
- ID: pbmd.GetId(),
- State: HMACState(pbmd.GetState()),
- ProjectID: pbmd.GetProject(),
- CreatedTime: convertProtoTime(pbmd.GetCreateTime()),
- UpdatedTime: convertProtoTime(pbmd.GetUpdateTime()),
- ServiceAccountEmail: pbmd.GetServiceAccountEmail(),
- }
-}
-
// CreateHMACKey invokes an RPC for Google Cloud Storage to create a new HMACKey.
+// Note: gRPC is not supported.
func (c *Client) CreateHMACKey(ctx context.Context, projectID, serviceAccountEmail string, opts ...HMACKeyOption) (*HMACKey, error) {
if projectID == "" {
return nil, errors.New("storage: expecting a non-blank projectID")
@@ -203,6 +189,7 @@ type HMACKeyAttrsToUpdate struct {
}
// Update mutates the HMACKey referred to by accessID.
+// Note: gRPC is not supported.
func (h *HMACKeyHandle) Update(ctx context.Context, au HMACKeyAttrsToUpdate, opts ...HMACKeyOption) (*HMACKey, error) {
if au.State != Active && au.State != Inactive {
return nil, fmt.Errorf("storage: invalid state %q for update, must be either %q or %q", au.State, Active, Inactive)
@@ -237,6 +224,7 @@ type HMACKeysIterator struct {
// ListHMACKeys returns an iterator for listing HMACKeys.
//
// Note: This iterator is not safe for concurrent operations without explicit synchronization.
+// Note: gRPC is not supported.
func (c *Client) ListHMACKeys(ctx context.Context, projectID string, opts ...HMACKeyOption) *HMACKeysIterator {
desc := new(hmacKeyDesc)
for _, opt := range opts {
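Since the notes above mark the whole HMAC key surface as JSON/HTTP-only, a hedged usage sketch with the plain HTTP-based client; the project ID and access ID are placeholders, and ambient credentials are assumed:

package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	// HMAC key operations are only available over the HTTP/JSON transport,
	// per the "gRPC is not supported" notes above.
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Project ID and access ID are placeholders.
	hkh := client.HMACKeyHandle("my-project", "GOOG1EXAMPLEACCESSID")
	key, err := hkh.Get(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(key.State, key.ServiceAccountEmail)
}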
diff --git a/vendor/cloud.google.com/go/storage/http_client.go b/vendor/cloud.google.com/go/storage/http_client.go
index 0e213a6632a3e..82d9566b9f36d 100644
--- a/vendor/cloud.google.com/go/storage/http_client.go
+++ b/vendor/cloud.google.com/go/storage/http_client.go
@@ -857,14 +857,7 @@ func (c *httpStorageClient) newRangeReaderXML(ctx context.Context, params *newRa
reopen := readerReopen(ctx, req.Header, params, s,
func(ctx context.Context) (*http.Response, error) {
- // Set custom headers passed in via the context. This is only required for XML;
- // for gRPC & JSON this is handled in the GAPIC and Apiary layers respectively.
- ctxHeaders := callctx.HeadersFromContext(ctx)
- for k, vals := range ctxHeaders {
- for _, v := range vals {
- req.Header.Set(k, v)
- }
- }
+ setHeadersFromCtx(ctx, req.Header)
return c.hc.Do(req.WithContext(ctx))
},
func() error { return setConditionsHeaders(req.Header, params.conds) },
@@ -1422,18 +1415,20 @@ func parseReadResponse(res *http.Response, params *newRangeReaderParams, reopen
}
} else {
size = res.ContentLength
- // Check the CRC iff all of the following hold:
- // - We asked for content (length != 0).
- // - We got all the content (status != PartialContent).
- // - The server sent a CRC header.
- // - The Go http stack did not uncompress the file.
- // - We were not served compressed data that was uncompressed on download.
- // The problem with the last two cases is that the CRC will not match -- GCS
- // computes it on the compressed contents, but we compute it on the
- // uncompressed contents.
- if params.length != 0 && !res.Uncompressed && !uncompressedByServer(res) {
- crc, checkCRC = parseCRC32c(res)
- }
+ }
+
+ // Check the CRC iff all of the following hold:
+ // - We asked for content (length != 0).
+ // - We got all the content (status != PartialContent).
+ // - The server sent a CRC header.
+ // - The Go http stack did not uncompress the file.
+ // - We were not served compressed data that was uncompressed on download.
+ // The problem with the last two cases is that the CRC will not match -- GCS
+ // computes it on the compressed contents, but we compute it on the
+ // uncompressed contents.
+ crc, checkCRC = parseCRC32c(res)
+ if params.length == 0 || res.StatusCode == http.StatusPartialContent || res.Uncompressed || uncompressedByServer(res) {
+ checkCRC = false
}
remain := res.ContentLength
@@ -1470,6 +1465,8 @@ func parseReadResponse(res *http.Response, params *newRangeReaderParams, reopen
StartOffset: startOffset,
Generation: params.gen,
Metageneration: metaGen,
+ CRC32C: crc,
+ Decompressed: res.Uncompressed || uncompressedByServer(res),
}
return &Reader{
Attrs: attrs,
@@ -1484,3 +1481,30 @@ func parseReadResponse(res *http.Response, params *newRangeReaderParams, reopen
},
}, nil
}
+
+// setHeadersFromCtx sets custom headers passed in via the context on the header,
+// replacing any header with the same key (which avoids duplicating invocation headers).
+// This is only required for XML; for gRPC & JSON requests this is handled in
+// the GAPIC and Apiary layers respectively.
+func setHeadersFromCtx(ctx context.Context, header http.Header) {
+ ctxHeaders := callctx.HeadersFromContext(ctx)
+ for k, vals := range ctxHeaders {
+ // Merge x-goog-api-client values into a single space-separated value.
+ if strings.EqualFold(k, xGoogHeaderKey) {
+ alreadySetValues := header.Values(xGoogHeaderKey)
+ vals = append(vals, alreadySetValues...)
+
+ if len(vals) > 0 {
+ xGoogHeader := vals[0]
+ for _, v := range vals[1:] {
+ xGoogHeader = strings.Join([]string{xGoogHeader, v}, " ")
+ }
+ header.Set(k, xGoogHeader)
+ }
+ } else {
+ for _, v := range vals {
+ header.Set(k, v)
+ }
+ }
+ }
+}
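The merge logic above collapses context-supplied and already-set x-goog-api-client values into one space-separated header value, with the context values first. A small self-contained sketch of the same semantics; the helper name mergeXGoog is hypothetical, not part of the package:

package main

import (
	"fmt"
	"net/http"
	"strings"
)

// mergeXGoog mirrors the x-goog-api-client handling above: context-supplied
// values come first, any already-set values are appended, and the result is
// joined into a single space-separated header value.
func mergeXGoog(header http.Header, ctxVals []string) {
	vals := append(ctxVals, header.Values("x-goog-api-client")...)
	if len(vals) > 0 {
		header.Set("x-goog-api-client", strings.Join(vals, " "))
	}
}

func main() {
	h := http.Header{}
	h.Set("x-goog-api-client", "gl-go/1.22.0") // invocation header already set
	mergeXGoog(h, []string{"gccl/1.43.0"})     // context-supplied value
	fmt.Println(h.Get("x-goog-api-client"))    // gccl/1.43.0 gl-go/1.22.0
}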
diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary_go123.go b/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary_go123.go
new file mode 100644
index 0000000000000..f2822035c41b4
--- /dev/null
+++ b/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary_go123.go
@@ -0,0 +1,50 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
+
+//go:build go1.23
+
+package storage
+
+import (
+ "iter"
+
+ storagepb "cloud.google.com/go/storage/internal/apiv2/storagepb"
+ "github.com/googleapis/gax-go/v2/iterator"
+)
+
+// All returns an iterator. If an error is returned by the iterator, the
+// iterator will stop after that iteration.
+func (it *BucketIterator) All() iter.Seq2[*storagepb.Bucket, error] {
+ return iterator.RangeAdapter(it.Next)
+}
+
+// All returns an iterator. If an error is returned by the iterator, the
+// iterator will stop after that iteration.
+func (it *HmacKeyMetadataIterator) All() iter.Seq2[*storagepb.HmacKeyMetadata, error] {
+ return iterator.RangeAdapter(it.Next)
+}
+
+// All returns an iterator. If an error is returned by the iterator, the
+// iterator will stop after that iteration.
+func (it *NotificationConfigIterator) All() iter.Seq2[*storagepb.NotificationConfig, error] {
+ return iterator.RangeAdapter(it.Next)
+}
+
+// All returns an iterator. If an error is returned by the iterator, the
+// iterator will stop after that iteration.
+func (it *ObjectIterator) All() iter.Seq2[*storagepb.Object, error] {
+ return iterator.RangeAdapter(it.Next)
+}
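These Go 1.23 range-over-func adapters let callers range directly over an iterator instead of looping on Next. A hedged sketch of the object listing case; note this apiv2 package is internal to the storage module, so the import is for illustration only, and the bucket name and credentials are placeholders:

package main

import (
	"context"
	"fmt"
	"log"

	// Internal to cloud.google.com/go/storage; shown for illustration only.
	storage "cloud.google.com/go/storage/internal/apiv2"
	storagepb "cloud.google.com/go/storage/internal/apiv2/storagepb"
)

func main() {
	ctx := context.Background()
	c, err := storage.NewClient(ctx) // assumes ambient credentials
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	it := c.ListObjects(ctx, &storagepb.ListObjectsRequest{
		Parent: "projects/_/buckets/my-bucket", // placeholder bucket
	})
	// All() yields (item, error) pairs and stops after the first error.
	for obj, err := range it.All() {
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(obj.GetName())
	}
}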
diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go b/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go
index 5e2a8f0ad5bec..869f3b1fbcd73 100644
--- a/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go
+++ b/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go
@@ -17,19 +17,15 @@
// Package storage is an auto-generated package for the
// Cloud Storage API.
//
-// Stop. This folder is likely not what you are looking for. This folder
-// contains protocol buffer definitions for an API only accessible to select
-// customers. Customers not participating should not depend on this file.
-// Please contact Google Cloud sales if you are interested. Unless told
-// otherwise by a Google Cloud representative, do not use or otherwise rely
-// on any of the contents of this folder. If you would like to use Cloud
-// Storage, please consult our official documentation (at
+// This folder contains protocol buffer definitions for an API only
+// accessible to select customers. Customers not participating should not
+// depend on this file. Please contact Google Cloud sales if you are
+// interested. Unless told otherwise by a Google Cloud representative, do not
+// use or otherwise rely on any of the contents of this folder. If you would
+// like to use Cloud Storage, please consult our official documentation (at
// https://cloud.google.com/storage/docs/apis) for details on our XML and
// JSON APIs, or else consider one of our client libraries (at
-// https://cloud.google.com/storage/docs/reference/libraries). This API
-// defined in this folder is unreleased and may shut off, break, or fail at
-// any time for any users who are not registered as a part of a private
-// preview program.
+// https://cloud.google.com/storage/docs/reference/libraries).
//
// # General documentation
//
diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go b/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go
index 82ec5db902b93..5611f1e9e7095 100644
--- a/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go
+++ b/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go
@@ -50,10 +50,6 @@ type CallOptions struct {
SetIamPolicy []gax.CallOption
TestIamPermissions []gax.CallOption
UpdateBucket []gax.CallOption
- DeleteNotificationConfig []gax.CallOption
- GetNotificationConfig []gax.CallOption
- CreateNotificationConfig []gax.CallOption
- ListNotificationConfigs []gax.CallOption
ComposeObject []gax.CallOption
DeleteObject []gax.CallOption
RestoreObject []gax.CallOption
@@ -73,6 +69,10 @@ type CallOptions struct {
GetHmacKey []gax.CallOption
ListHmacKeys []gax.CallOption
UpdateHmacKey []gax.CallOption
+ DeleteNotificationConfig []gax.CallOption
+ GetNotificationConfig []gax.CallOption
+ CreateNotificationConfig []gax.CallOption
+ ListNotificationConfigs []gax.CallOption
}
func defaultGRPCClientOptions() []option.ClientOption {
@@ -84,6 +84,7 @@ func defaultGRPCClientOptions() []option.ClientOption {
internaloption.WithDefaultAudience("https://storage.googleapis.com/"),
internaloption.WithDefaultScopes(DefaultAuthScopes()...),
internaloption.EnableJwtWithScope(),
+ internaloption.EnableNewAuthLibrary(),
option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
grpc.MaxCallRecvMsgSize(math.MaxInt32))),
}
@@ -208,7 +209,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- DeleteNotificationConfig: []gax.CallOption{
+ ComposeObject: []gax.CallOption{
gax.WithTimeout(60000 * time.Millisecond),
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
@@ -221,7 +222,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- GetNotificationConfig: []gax.CallOption{
+ DeleteObject: []gax.CallOption{
gax.WithTimeout(60000 * time.Millisecond),
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
@@ -234,7 +235,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- CreateNotificationConfig: []gax.CallOption{
+ RestoreObject: []gax.CallOption{
gax.WithTimeout(60000 * time.Millisecond),
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
@@ -247,7 +248,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- ListNotificationConfigs: []gax.CallOption{
+ CancelResumableWrite: []gax.CallOption{
gax.WithTimeout(60000 * time.Millisecond),
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
@@ -260,7 +261,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- ComposeObject: []gax.CallOption{
+ GetObject: []gax.CallOption{
gax.WithTimeout(60000 * time.Millisecond),
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
@@ -273,8 +274,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- DeleteObject: []gax.CallOption{
- gax.WithTimeout(60000 * time.Millisecond),
+ ReadObject: []gax.CallOption{
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
@@ -286,7 +286,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- RestoreObject: []gax.CallOption{
+ UpdateObject: []gax.CallOption{
gax.WithTimeout(60000 * time.Millisecond),
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
@@ -299,8 +299,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- CancelResumableWrite: []gax.CallOption{
- gax.WithTimeout(60000 * time.Millisecond),
+ WriteObject: []gax.CallOption{
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
@@ -312,8 +311,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- GetObject: []gax.CallOption{
- gax.WithTimeout(60000 * time.Millisecond),
+ BidiWriteObject: []gax.CallOption{
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
@@ -325,7 +323,8 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- ReadObject: []gax.CallOption{
+ ListObjects: []gax.CallOption{
+ gax.WithTimeout(60000 * time.Millisecond),
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
@@ -337,7 +336,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- UpdateObject: []gax.CallOption{
+ RewriteObject: []gax.CallOption{
gax.WithTimeout(60000 * time.Millisecond),
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
@@ -350,7 +349,8 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- WriteObject: []gax.CallOption{
+ StartResumableWrite: []gax.CallOption{
+ gax.WithTimeout(60000 * time.Millisecond),
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
@@ -362,7 +362,8 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- BidiWriteObject: []gax.CallOption{
+ QueryWriteStatus: []gax.CallOption{
+ gax.WithTimeout(60000 * time.Millisecond),
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
@@ -374,7 +375,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- ListObjects: []gax.CallOption{
+ GetServiceAccount: []gax.CallOption{
gax.WithTimeout(60000 * time.Millisecond),
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
@@ -387,7 +388,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- RewriteObject: []gax.CallOption{
+ CreateHmacKey: []gax.CallOption{
gax.WithTimeout(60000 * time.Millisecond),
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
@@ -400,7 +401,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- StartResumableWrite: []gax.CallOption{
+ DeleteHmacKey: []gax.CallOption{
gax.WithTimeout(60000 * time.Millisecond),
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
@@ -413,7 +414,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- QueryWriteStatus: []gax.CallOption{
+ GetHmacKey: []gax.CallOption{
gax.WithTimeout(60000 * time.Millisecond),
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
@@ -426,7 +427,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- GetServiceAccount: []gax.CallOption{
+ ListHmacKeys: []gax.CallOption{
gax.WithTimeout(60000 * time.Millisecond),
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
@@ -439,7 +440,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- CreateHmacKey: []gax.CallOption{
+ UpdateHmacKey: []gax.CallOption{
gax.WithTimeout(60000 * time.Millisecond),
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
@@ -452,7 +453,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- DeleteHmacKey: []gax.CallOption{
+ DeleteNotificationConfig: []gax.CallOption{
gax.WithTimeout(60000 * time.Millisecond),
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
@@ -465,7 +466,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- GetHmacKey: []gax.CallOption{
+ GetNotificationConfig: []gax.CallOption{
gax.WithTimeout(60000 * time.Millisecond),
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
@@ -478,7 +479,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- ListHmacKeys: []gax.CallOption{
+ CreateNotificationConfig: []gax.CallOption{
gax.WithTimeout(60000 * time.Millisecond),
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
@@ -491,7 +492,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- UpdateHmacKey: []gax.CallOption{
+ ListNotificationConfigs: []gax.CallOption{
gax.WithTimeout(60000 * time.Millisecond),
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
@@ -521,10 +522,6 @@ type internalClient interface {
SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error)
TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest, ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error)
UpdateBucket(context.Context, *storagepb.UpdateBucketRequest, ...gax.CallOption) (*storagepb.Bucket, error)
- DeleteNotificationConfig(context.Context, *storagepb.DeleteNotificationConfigRequest, ...gax.CallOption) error
- GetNotificationConfig(context.Context, *storagepb.GetNotificationConfigRequest, ...gax.CallOption) (*storagepb.NotificationConfig, error)
- CreateNotificationConfig(context.Context, *storagepb.CreateNotificationConfigRequest, ...gax.CallOption) (*storagepb.NotificationConfig, error)
- ListNotificationConfigs(context.Context, *storagepb.ListNotificationConfigsRequest, ...gax.CallOption) *NotificationConfigIterator
ComposeObject(context.Context, *storagepb.ComposeObjectRequest, ...gax.CallOption) (*storagepb.Object, error)
DeleteObject(context.Context, *storagepb.DeleteObjectRequest, ...gax.CallOption) error
RestoreObject(context.Context, *storagepb.RestoreObjectRequest, ...gax.CallOption) (*storagepb.Object, error)
@@ -544,6 +541,10 @@ type internalClient interface {
GetHmacKey(context.Context, *storagepb.GetHmacKeyRequest, ...gax.CallOption) (*storagepb.HmacKeyMetadata, error)
ListHmacKeys(context.Context, *storagepb.ListHmacKeysRequest, ...gax.CallOption) *HmacKeyMetadataIterator
UpdateHmacKey(context.Context, *storagepb.UpdateHmacKeyRequest, ...gax.CallOption) (*storagepb.HmacKeyMetadata, error)
+ DeleteNotificationConfig(context.Context, *storagepb.DeleteNotificationConfigRequest, ...gax.CallOption) error
+ GetNotificationConfig(context.Context, *storagepb.GetNotificationConfigRequest, ...gax.CallOption) (*storagepb.NotificationConfig, error)
+ CreateNotificationConfig(context.Context, *storagepb.CreateNotificationConfigRequest, ...gax.CallOption) (*storagepb.NotificationConfig, error)
+ ListNotificationConfigs(context.Context, *storagepb.ListNotificationConfigsRequest, ...gax.CallOption) *NotificationConfigIterator
}
// Client is a client for interacting with Cloud Storage API.
@@ -641,11 +642,13 @@ func (c *Client) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyReques
return c.internalClient.SetIamPolicy(ctx, req, opts...)
}
-// TestIamPermissions tests a set of permissions on the given bucket or object to see which, if
-// any, are held by the caller.
+// TestIamPermissions tests a set of permissions on the given bucket, object, or managed folder
+// to see which, if any, are held by the caller.
// The resource field in the request should be
-// projects/_/buckets/{bucket} for a bucket or
-// projects/_/buckets/{bucket}/objects/{object} for an object.
+// projects/_/buckets/{bucket} for a bucket,
+// projects/_/buckets/{bucket}/objects/{object} for an object, or
+// projects/_/buckets/{bucket}/managedFolders/{managedFolder}
+// for a managed folder.
func (c *Client) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) {
return c.internalClient.TestIamPermissions(ctx, req, opts...)
}
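With the managed-folder routing added to the gRPC client below, TestIamPermissions also accepts a managed folder resource name. A hedged sketch; the resource and permission values are placeholders, and the internal apiv2 import is illustrative:

package main

import (
	"context"
	"fmt"
	"log"

	iampb "cloud.google.com/go/iam/apiv1/iampb"
	storage "cloud.google.com/go/storage/internal/apiv2"
)

func main() {
	ctx := context.Background()
	c, err := storage.NewClient(ctx) // assumes ambient credentials
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	resp, err := c.TestIamPermissions(ctx, &iampb.TestIamPermissionsRequest{
		// Managed folder resource names are routed via the regex added below.
		Resource:    "projects/_/buckets/my-bucket/managedFolders/my-folder",
		Permissions: []string{"storage.objects.list"},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.GetPermissions())
}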
@@ -655,29 +658,6 @@ func (c *Client) UpdateBucket(ctx context.Context, req *storagepb.UpdateBucketRe
return c.internalClient.UpdateBucket(ctx, req, opts...)
}
-// DeleteNotificationConfig permanently deletes a NotificationConfig.
-func (c *Client) DeleteNotificationConfig(ctx context.Context, req *storagepb.DeleteNotificationConfigRequest, opts ...gax.CallOption) error {
- return c.internalClient.DeleteNotificationConfig(ctx, req, opts...)
-}
-
-// GetNotificationConfig view a NotificationConfig.
-func (c *Client) GetNotificationConfig(ctx context.Context, req *storagepb.GetNotificationConfigRequest, opts ...gax.CallOption) (*storagepb.NotificationConfig, error) {
- return c.internalClient.GetNotificationConfig(ctx, req, opts...)
-}
-
-// CreateNotificationConfig creates a NotificationConfig for a given bucket.
-// These NotificationConfigs, when triggered, publish messages to the
-// specified Pub/Sub topics. See
-// https://cloud.google.com/storage/docs/pubsub-notifications (at https://cloud.google.com/storage/docs/pubsub-notifications).
-func (c *Client) CreateNotificationConfig(ctx context.Context, req *storagepb.CreateNotificationConfigRequest, opts ...gax.CallOption) (*storagepb.NotificationConfig, error) {
- return c.internalClient.CreateNotificationConfig(ctx, req, opts...)
-}
-
-// ListNotificationConfigs retrieves a list of NotificationConfigs for a given bucket.
-func (c *Client) ListNotificationConfigs(ctx context.Context, req *storagepb.ListNotificationConfigsRequest, opts ...gax.CallOption) *NotificationConfigIterator {
- return c.internalClient.ListNotificationConfigs(ctx, req, opts...)
-}
-
// ComposeObject concatenates a list of existing objects into a new object in the same
// bucket.
func (c *Client) ComposeObject(ctx context.Context, req *storagepb.ComposeObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) {
@@ -849,35 +829,78 @@ func (c *Client) QueryWriteStatus(ctx context.Context, req *storagepb.QueryWrite
}
// GetServiceAccount retrieves the name of a project’s Google Cloud Storage service account.
+//
+// Deprecated: GetServiceAccount may be removed in a future version.
func (c *Client) GetServiceAccount(ctx context.Context, req *storagepb.GetServiceAccountRequest, opts ...gax.CallOption) (*storagepb.ServiceAccount, error) {
return c.internalClient.GetServiceAccount(ctx, req, opts...)
}
// CreateHmacKey creates a new HMAC key for the given service account.
+//
+// Deprecated: CreateHmacKey may be removed in a future version.
func (c *Client) CreateHmacKey(ctx context.Context, req *storagepb.CreateHmacKeyRequest, opts ...gax.CallOption) (*storagepb.CreateHmacKeyResponse, error) {
return c.internalClient.CreateHmacKey(ctx, req, opts...)
}
// DeleteHmacKey deletes a given HMAC key. Key must be in an INACTIVE state.
+//
+// Deprecated: DeleteHmacKey may be removed in a future version.
func (c *Client) DeleteHmacKey(ctx context.Context, req *storagepb.DeleteHmacKeyRequest, opts ...gax.CallOption) error {
return c.internalClient.DeleteHmacKey(ctx, req, opts...)
}
// GetHmacKey gets an existing HMAC key metadata for the given id.
+//
+// Deprecated: GetHmacKey may be removed in a future version.
func (c *Client) GetHmacKey(ctx context.Context, req *storagepb.GetHmacKeyRequest, opts ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) {
return c.internalClient.GetHmacKey(ctx, req, opts...)
}
// ListHmacKeys lists HMAC keys under a given project with the additional filters provided.
+//
+// Deprecated: ListHmacKeys may be removed in a future version.
func (c *Client) ListHmacKeys(ctx context.Context, req *storagepb.ListHmacKeysRequest, opts ...gax.CallOption) *HmacKeyMetadataIterator {
return c.internalClient.ListHmacKeys(ctx, req, opts...)
}
// UpdateHmacKey updates a given HMAC key state between ACTIVE and INACTIVE.
+//
+// Deprecated: UpdateHmacKey may be removed in a future version.
func (c *Client) UpdateHmacKey(ctx context.Context, req *storagepb.UpdateHmacKeyRequest, opts ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) {
return c.internalClient.UpdateHmacKey(ctx, req, opts...)
}
+// DeleteNotificationConfig permanently deletes a NotificationConfig.
+//
+// Deprecated: DeleteNotificationConfig may be removed in a future version.
+func (c *Client) DeleteNotificationConfig(ctx context.Context, req *storagepb.DeleteNotificationConfigRequest, opts ...gax.CallOption) error {
+ return c.internalClient.DeleteNotificationConfig(ctx, req, opts...)
+}
+
+// GetNotificationConfig views a NotificationConfig.
+//
+// Deprecated: GetNotificationConfig may be removed in a future version.
+func (c *Client) GetNotificationConfig(ctx context.Context, req *storagepb.GetNotificationConfigRequest, opts ...gax.CallOption) (*storagepb.NotificationConfig, error) {
+ return c.internalClient.GetNotificationConfig(ctx, req, opts...)
+}
+
+// CreateNotificationConfig creates a NotificationConfig for a given bucket.
+// These NotificationConfigs, when triggered, publish messages to the
+// specified Pub/Sub topics. See
+// https://cloud.google.com/storage/docs/pubsub-notifications (at https://cloud.google.com/storage/docs/pubsub-notifications).
+//
+// Deprecated: CreateNotificationConfig may be removed in a future version.
+func (c *Client) CreateNotificationConfig(ctx context.Context, req *storagepb.CreateNotificationConfigRequest, opts ...gax.CallOption) (*storagepb.NotificationConfig, error) {
+ return c.internalClient.CreateNotificationConfig(ctx, req, opts...)
+}
+
+// ListNotificationConfigs retrieves a list of NotificationConfigs for a given bucket.
+//
+// Deprecated: ListNotificationConfigs may be removed in a future version.
+func (c *Client) ListNotificationConfigs(ctx context.Context, req *storagepb.ListNotificationConfigsRequest, opts ...gax.CallOption) *NotificationConfigIterator {
+ return c.internalClient.ListNotificationConfigs(ctx, req, opts...)
+}
+
// gRPCClient is a client for interacting with Cloud Storage API over gRPC transport.
//
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
@@ -1198,6 +1221,9 @@ func (c *gRPCClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamP
if reg := regexp.MustCompile("(?P<bucket>projects/[^/]+/buckets/[^/]+)/objects(?:/.*)?"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 {
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])
}
+ if reg := regexp.MustCompile("(?P<bucket>projects/[^/]+/buckets/[^/]+)/managedFolders(?:/.*)?"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 {
+ routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])
+ }
for headerName, headerValue := range routingHeadersMap {
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
}
@@ -1246,138 +1272,6 @@ func (c *gRPCClient) UpdateBucket(ctx context.Context, req *storagepb.UpdateBuck
return resp, nil
}
-func (c *gRPCClient) DeleteNotificationConfig(ctx context.Context, req *storagepb.DeleteNotificationConfigRequest, opts ...gax.CallOption) error {
- routingHeaders := ""
- routingHeadersMap := make(map[string]string)
- if reg := regexp.MustCompile("(?P<bucket>projects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 {
- routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])
- }
- for headerName, headerValue := range routingHeadersMap {
- routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
- }
- routingHeaders = strings.TrimSuffix(routingHeaders, "&")
- hds := []string{"x-goog-request-params", routingHeaders}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).DeleteNotificationConfig[0:len((*c.CallOptions).DeleteNotificationConfig):len((*c.CallOptions).DeleteNotificationConfig)], opts...)
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- _, err = c.client.DeleteNotificationConfig(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- return err
-}
-
-func (c *gRPCClient) GetNotificationConfig(ctx context.Context, req *storagepb.GetNotificationConfigRequest, opts ...gax.CallOption) (*storagepb.NotificationConfig, error) {
- routingHeaders := ""
- routingHeadersMap := make(map[string]string)
- if reg := regexp.MustCompile("(?P<bucket>projects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 {
- routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])
- }
- for headerName, headerValue := range routingHeadersMap {
- routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
- }
- routingHeaders = strings.TrimSuffix(routingHeaders, "&")
- hds := []string{"x-goog-request-params", routingHeaders}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).GetNotificationConfig[0:len((*c.CallOptions).GetNotificationConfig):len((*c.CallOptions).GetNotificationConfig)], opts...)
- var resp *storagepb.NotificationConfig
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.client.GetNotificationConfig(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *gRPCClient) CreateNotificationConfig(ctx context.Context, req *storagepb.CreateNotificationConfigRequest, opts ...gax.CallOption) (*storagepb.NotificationConfig, error) {
- routingHeaders := ""
- routingHeadersMap := make(map[string]string)
- if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 {
- routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])
- }
- for headerName, headerValue := range routingHeadersMap {
- routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
- }
- routingHeaders = strings.TrimSuffix(routingHeaders, "&")
- hds := []string{"x-goog-request-params", routingHeaders}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).CreateNotificationConfig[0:len((*c.CallOptions).CreateNotificationConfig):len((*c.CallOptions).CreateNotificationConfig)], opts...)
- var resp *storagepb.NotificationConfig
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.client.CreateNotificationConfig(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *gRPCClient) ListNotificationConfigs(ctx context.Context, req *storagepb.ListNotificationConfigsRequest, opts ...gax.CallOption) *NotificationConfigIterator {
- routingHeaders := ""
- routingHeadersMap := make(map[string]string)
- if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 {
- routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])
- }
- for headerName, headerValue := range routingHeadersMap {
- routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
- }
- routingHeaders = strings.TrimSuffix(routingHeaders, "&")
- hds := []string{"x-goog-request-params", routingHeaders}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).ListNotificationConfigs[0:len((*c.CallOptions).ListNotificationConfigs):len((*c.CallOptions).ListNotificationConfigs)], opts...)
- it := &NotificationConfigIterator{}
- req = proto.Clone(req).(*storagepb.ListNotificationConfigsRequest)
- it.InternalFetch = func(pageSize int, pageToken string) ([]*storagepb.NotificationConfig, string, error) {
- resp := &storagepb.ListNotificationConfigsResponse{}
- if pageToken != "" {
- req.PageToken = pageToken
- }
- if pageSize > math.MaxInt32 {
- req.PageSize = math.MaxInt32
- } else if pageSize != 0 {
- req.PageSize = int32(pageSize)
- }
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.client.ListNotificationConfigs(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, "", err
- }
-
- it.Response = resp
- return resp.GetNotificationConfigs(), resp.GetNextPageToken(), nil
- }
- fetch := func(pageSize int, pageToken string) (string, error) {
- items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
- if err != nil {
- return "", err
- }
- it.items = append(it.items, items...)
- return nextPageToken, nil
- }
-
- it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
- it.pageInfo.MaxSize = int(req.GetPageSize())
- it.pageInfo.Token = req.GetPageToken()
-
- return it
-}
-
func (c *gRPCClient) ComposeObject(ctx context.Context, req *storagepb.ComposeObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) {
routingHeaders := ""
routingHeadersMap := make(map[string]string)
@@ -1917,3 +1811,135 @@ func (c *gRPCClient) UpdateHmacKey(ctx context.Context, req *storagepb.UpdateHma
}
return resp, nil
}
+
+func (c *gRPCClient) DeleteNotificationConfig(ctx context.Context, req *storagepb.DeleteNotificationConfigRequest, opts ...gax.CallOption) error {
+ routingHeaders := ""
+ routingHeadersMap := make(map[string]string)
+ if reg := regexp.MustCompile("(?P<bucket>projects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 {
+ routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])
+ }
+ for headerName, headerValue := range routingHeadersMap {
+ routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
+ }
+ routingHeaders = strings.TrimSuffix(routingHeaders, "&")
+ hds := []string{"x-goog-request-params", routingHeaders}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).DeleteNotificationConfig[0:len((*c.CallOptions).DeleteNotificationConfig):len((*c.CallOptions).DeleteNotificationConfig)], opts...)
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ _, err = c.client.DeleteNotificationConfig(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ return err
+}
+
+func (c *gRPCClient) GetNotificationConfig(ctx context.Context, req *storagepb.GetNotificationConfigRequest, opts ...gax.CallOption) (*storagepb.NotificationConfig, error) {
+ routingHeaders := ""
+ routingHeadersMap := make(map[string]string)
+ if reg := regexp.MustCompile("(?P<bucket>projects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 {
+ routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])
+ }
+ for headerName, headerValue := range routingHeadersMap {
+ routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
+ }
+ routingHeaders = strings.TrimSuffix(routingHeaders, "&")
+ hds := []string{"x-goog-request-params", routingHeaders}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).GetNotificationConfig[0:len((*c.CallOptions).GetNotificationConfig):len((*c.CallOptions).GetNotificationConfig)], opts...)
+ var resp *storagepb.NotificationConfig
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.client.GetNotificationConfig(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *gRPCClient) CreateNotificationConfig(ctx context.Context, req *storagepb.CreateNotificationConfigRequest, opts ...gax.CallOption) (*storagepb.NotificationConfig, error) {
+ routingHeaders := ""
+ routingHeadersMap := make(map[string]string)
+ if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 {
+ routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])
+ }
+ for headerName, headerValue := range routingHeadersMap {
+ routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
+ }
+ routingHeaders = strings.TrimSuffix(routingHeaders, "&")
+ hds := []string{"x-goog-request-params", routingHeaders}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).CreateNotificationConfig[0:len((*c.CallOptions).CreateNotificationConfig):len((*c.CallOptions).CreateNotificationConfig)], opts...)
+ var resp *storagepb.NotificationConfig
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.client.CreateNotificationConfig(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *gRPCClient) ListNotificationConfigs(ctx context.Context, req *storagepb.ListNotificationConfigsRequest, opts ...gax.CallOption) *NotificationConfigIterator {
+ routingHeaders := ""
+ routingHeadersMap := make(map[string]string)
+ if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 {
+ routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])
+ }
+ for headerName, headerValue := range routingHeadersMap {
+ routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
+ }
+ routingHeaders = strings.TrimSuffix(routingHeaders, "&")
+ hds := []string{"x-goog-request-params", routingHeaders}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).ListNotificationConfigs[0:len((*c.CallOptions).ListNotificationConfigs):len((*c.CallOptions).ListNotificationConfigs)], opts...)
+ it := &NotificationConfigIterator{}
+ req = proto.Clone(req).(*storagepb.ListNotificationConfigsRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*storagepb.NotificationConfig, string, error) {
+ resp := &storagepb.ListNotificationConfigsResponse{}
+ if pageToken != "" {
+ req.PageToken = pageToken
+ }
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else if pageSize != 0 {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.client.ListNotificationConfigs(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+
+ it.Response = resp
+ return resp.GetNotificationConfigs(), resp.GetNextPageToken(), nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.GetPageSize())
+ it.pageInfo.Token = req.GetPageToken()
+
+ return it
+}
diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go b/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go
index aeb7512f4ab3a..5c0e784517a77 100644
--- a/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go
+++ b/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go
@@ -177,7 +177,7 @@ func (x ServiceConstants_Values) Number() protoreflect.EnumNumber {
// Deprecated: Use ServiceConstants_Values.Descriptor instead.
func (ServiceConstants_Values) EnumDescriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{42, 0}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{39, 0}
}
// Request message for DeleteBucket.
@@ -743,18 +743,41 @@ func (x *UpdateBucketRequest) GetUpdateMask() *fieldmaskpb.FieldMask {
return nil
}
-// Request message for DeleteNotificationConfig.
-type DeleteNotificationConfigRequest struct {
+// Request message for ComposeObject.
+type ComposeObjectRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // Required. The parent bucket of the NotificationConfig.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Required. Properties of the resulting object.
+ Destination *Object `protobuf:"bytes,1,opt,name=destination,proto3" json:"destination,omitempty"`
+ // The list of source objects that will be concatenated into a single object.
+ SourceObjects []*ComposeObjectRequest_SourceObject `protobuf:"bytes,2,rep,name=source_objects,json=sourceObjects,proto3" json:"source_objects,omitempty"`
+ // Apply a predefined set of access controls to the destination object.
+ // Valid values are "authenticatedRead", "bucketOwnerFullControl",
+ // "bucketOwnerRead", "private", "projectPrivate", or "publicRead".
+ DestinationPredefinedAcl string `protobuf:"bytes,9,opt,name=destination_predefined_acl,json=destinationPredefinedAcl,proto3" json:"destination_predefined_acl,omitempty"`
+ // Makes the operation conditional on whether the object's current generation
+ // matches the given value. Setting to 0 makes the operation succeed only if
+ // there are no live versions of the object.
+ IfGenerationMatch *int64 `protobuf:"varint,4,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"`
+ // Makes the operation conditional on whether the object's current
+ // metageneration matches the given value.
+ IfMetagenerationMatch *int64 `protobuf:"varint,5,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"`
+ // Resource name of the Cloud KMS key, of the form
+ // `projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key`,
+ // that will be used to encrypt the object. Overrides the object
+ // metadata's `kms_key_name` value, if any.
+ KmsKey string `protobuf:"bytes,6,opt,name=kms_key,json=kmsKey,proto3" json:"kms_key,omitempty"`
+ // A set of parameters common to Storage API requests concerning an object.
+ CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,7,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
+ // The checksums of the complete object. This will be validated against the
+ // combined checksums of the component objects.
+ ObjectChecksums *ObjectChecksums `protobuf:"bytes,10,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"`
}
-func (x *DeleteNotificationConfigRequest) Reset() {
- *x = DeleteNotificationConfigRequest{}
+func (x *ComposeObjectRequest) Reset() {
+ *x = ComposeObjectRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_google_storage_v2_storage_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -762,13 +785,13 @@ func (x *DeleteNotificationConfigRequest) Reset() {
}
}
-func (x *DeleteNotificationConfigRequest) String() string {
+func (x *ComposeObjectRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*DeleteNotificationConfigRequest) ProtoMessage() {}
+func (*ComposeObjectRequest) ProtoMessage() {}
-func (x *DeleteNotificationConfigRequest) ProtoReflect() protoreflect.Message {
+func (x *ComposeObjectRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_storage_v2_storage_proto_msgTypes[7]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -780,32 +803,104 @@ func (x *DeleteNotificationConfigRequest) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use DeleteNotificationConfigRequest.ProtoReflect.Descriptor instead.
-func (*DeleteNotificationConfigRequest) Descriptor() ([]byte, []int) {
+// Deprecated: Use ComposeObjectRequest.ProtoReflect.Descriptor instead.
+func (*ComposeObjectRequest) Descriptor() ([]byte, []int) {
return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{7}
}
-func (x *DeleteNotificationConfigRequest) GetName() string {
+func (x *ComposeObjectRequest) GetDestination() *Object {
if x != nil {
- return x.Name
+ return x.Destination
+ }
+ return nil
+}
+
+func (x *ComposeObjectRequest) GetSourceObjects() []*ComposeObjectRequest_SourceObject {
+ if x != nil {
+ return x.SourceObjects
+ }
+ return nil
+}
+
+func (x *ComposeObjectRequest) GetDestinationPredefinedAcl() string {
+ if x != nil {
+ return x.DestinationPredefinedAcl
}
return ""
}
-// Request message for GetNotificationConfig.
-type GetNotificationConfigRequest struct {
+func (x *ComposeObjectRequest) GetIfGenerationMatch() int64 {
+ if x != nil && x.IfGenerationMatch != nil {
+ return *x.IfGenerationMatch
+ }
+ return 0
+}
+
+func (x *ComposeObjectRequest) GetIfMetagenerationMatch() int64 {
+ if x != nil && x.IfMetagenerationMatch != nil {
+ return *x.IfMetagenerationMatch
+ }
+ return 0
+}
+
+func (x *ComposeObjectRequest) GetKmsKey() string {
+ if x != nil {
+ return x.KmsKey
+ }
+ return ""
+}
+
+func (x *ComposeObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
+ if x != nil {
+ return x.CommonObjectRequestParams
+ }
+ return nil
+}
+
+func (x *ComposeObjectRequest) GetObjectChecksums() *ObjectChecksums {
+ if x != nil {
+ return x.ObjectChecksums
+ }
+ return nil
+}
+
+// Message for deleting an object.
+// `bucket` and `object` **must** be set.
+type DeleteObjectRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // Required. The parent bucket of the NotificationConfig.
- // Format:
- // `projects/{project}/buckets/{bucket}/notificationConfigs/{notificationConfig}`
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Required. Name of the bucket in which the object resides.
+ Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"`
+ // Required. The name of the finalized object to delete.
+ // Note: If you want to delete an unfinalized resumable upload please use
+ // `CancelResumableWrite`.
+ Object string `protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"`
+ // If present, permanently deletes a specific revision of this object (as
+ // opposed to the latest version, the default).
+ Generation int64 `protobuf:"varint,4,opt,name=generation,proto3" json:"generation,omitempty"`
+ // Makes the operation conditional on whether the object's current generation
+ // matches the given value. Setting to 0 makes the operation succeed only if
+ // there are no live versions of the object.
+ IfGenerationMatch *int64 `protobuf:"varint,5,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"`
+ // Makes the operation conditional on whether the object's live generation
+ // does not match the given value. If no live object exists, the precondition
+ // fails. Setting to 0 makes the operation succeed only if there is a live
+ // version of the object.
+ IfGenerationNotMatch *int64 `protobuf:"varint,6,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"`
+ // Makes the operation conditional on whether the object's current
+ // metageneration matches the given value.
+ IfMetagenerationMatch *int64 `protobuf:"varint,7,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"`
+ // Makes the operation conditional on whether the object's current
+ // metageneration does not match the given value.
+ IfMetagenerationNotMatch *int64 `protobuf:"varint,8,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"`
+ // A set of parameters common to Storage API requests concerning an object.
+ CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,10,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
}
-func (x *GetNotificationConfigRequest) Reset() {
- *x = GetNotificationConfigRequest{}
+func (x *DeleteObjectRequest) Reset() {
+ *x = DeleteObjectRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_google_storage_v2_storage_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -813,13 +908,13 @@ func (x *GetNotificationConfigRequest) Reset() {
}
}
-func (x *GetNotificationConfigRequest) String() string {
+func (x *DeleteObjectRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*GetNotificationConfigRequest) ProtoMessage() {}
+func (*DeleteObjectRequest) ProtoMessage() {}
-func (x *GetNotificationConfigRequest) ProtoReflect() protoreflect.Message {
+func (x *DeleteObjectRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_storage_v2_storage_proto_msgTypes[8]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -831,32 +926,105 @@ func (x *GetNotificationConfigRequest) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use GetNotificationConfigRequest.ProtoReflect.Descriptor instead.
-func (*GetNotificationConfigRequest) Descriptor() ([]byte, []int) {
+// Deprecated: Use DeleteObjectRequest.ProtoReflect.Descriptor instead.
+func (*DeleteObjectRequest) Descriptor() ([]byte, []int) {
return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{8}
}
-func (x *GetNotificationConfigRequest) GetName() string {
+func (x *DeleteObjectRequest) GetBucket() string {
if x != nil {
- return x.Name
+ return x.Bucket
}
return ""
}
-// Request message for CreateNotificationConfig.
-type CreateNotificationConfigRequest struct {
+func (x *DeleteObjectRequest) GetObject() string {
+ if x != nil {
+ return x.Object
+ }
+ return ""
+}
+
+func (x *DeleteObjectRequest) GetGeneration() int64 {
+ if x != nil {
+ return x.Generation
+ }
+ return 0
+}
+
+func (x *DeleteObjectRequest) GetIfGenerationMatch() int64 {
+ if x != nil && x.IfGenerationMatch != nil {
+ return *x.IfGenerationMatch
+ }
+ return 0
+}
+
+func (x *DeleteObjectRequest) GetIfGenerationNotMatch() int64 {
+ if x != nil && x.IfGenerationNotMatch != nil {
+ return *x.IfGenerationNotMatch
+ }
+ return 0
+}
+
+func (x *DeleteObjectRequest) GetIfMetagenerationMatch() int64 {
+ if x != nil && x.IfMetagenerationMatch != nil {
+ return *x.IfMetagenerationMatch
+ }
+ return 0
+}
+
+func (x *DeleteObjectRequest) GetIfMetagenerationNotMatch() int64 {
+ if x != nil && x.IfMetagenerationNotMatch != nil {
+ return *x.IfMetagenerationNotMatch
+ }
+ return 0
+}
+
+func (x *DeleteObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
+ if x != nil {
+ return x.CommonObjectRequestParams
+ }
+ return nil
+}
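The precondition fields documented above are proto3 optionals, so they take pointers; proto.Int64 is a convenient helper for building them. A hedged sketch; the bucket, object, and generation values are placeholders:

package main

import (
	"fmt"

	storagepb "cloud.google.com/go/storage/internal/apiv2/storagepb"
	"google.golang.org/protobuf/proto"
)

func main() {
	req := &storagepb.DeleteObjectRequest{
		Bucket:            "projects/_/buckets/my-bucket", // placeholder bucket
		Object:            "my-object.txt",                // placeholder object
		IfGenerationMatch: proto.Int64(1712345678901234),  // delete only this live generation
	}
	// The generated getter dereferences the optional field safely.
	fmt.Println(req.GetIfGenerationMatch())
}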
+
+// Message for restoring an object.
+// `bucket`, `object`, and `generation` **must** be set.
+type RestoreObjectRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // Required. The bucket to which this NotificationConfig belongs.
- Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
- // Required. Properties of the NotificationConfig to be inserted.
- NotificationConfig *NotificationConfig `protobuf:"bytes,2,opt,name=notification_config,json=notificationConfig,proto3" json:"notification_config,omitempty"`
+ // Required. Name of the bucket in which the object resides.
+ Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"`
+ // Required. The name of the object to restore.
+ Object string `protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"`
+ // Required. The specific revision of the object to restore.
+ Generation int64 `protobuf:"varint,3,opt,name=generation,proto3" json:"generation,omitempty"`
+ // Makes the operation conditional on whether the object's current generation
+ // matches the given value. Setting to 0 makes the operation succeed only if
+ // there are no live versions of the object.
+ IfGenerationMatch *int64 `protobuf:"varint,4,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"`
+ // Makes the operation conditional on whether the object's live generation
+ // does not match the given value. If no live object exists, the precondition
+ // fails. Setting to 0 makes the operation succeed only if there is a live
+ // version of the object.
+ IfGenerationNotMatch *int64 `protobuf:"varint,5,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"`
+ // Makes the operation conditional on whether the object's current
+ // metageneration matches the given value.
+ IfMetagenerationMatch *int64 `protobuf:"varint,6,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"`
+ // Makes the operation conditional on whether the object's current
+ // metageneration does not match the given value.
+ IfMetagenerationNotMatch *int64 `protobuf:"varint,7,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"`
+ // If false or unset, the bucket's default object ACL will be used.
+ // If true, copy the source object's access controls.
+ // Return an error if bucket has UBLA enabled.
+ CopySourceAcl *bool `protobuf:"varint,9,opt,name=copy_source_acl,json=copySourceAcl,proto3,oneof" json:"copy_source_acl,omitempty"`
+ // A set of parameters common to Storage API requests concerning an object.
+ CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,8,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
}
-func (x *CreateNotificationConfigRequest) Reset() {
- *x = CreateNotificationConfigRequest{}
+func (x *RestoreObjectRequest) Reset() {
+ *x = RestoreObjectRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_google_storage_v2_storage_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -864,13 +1032,13 @@ func (x *CreateNotificationConfigRequest) Reset() {
}
}
-func (x *CreateNotificationConfigRequest) String() string {
+func (x *RestoreObjectRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*CreateNotificationConfigRequest) ProtoMessage() {}
+func (*RestoreObjectRequest) ProtoMessage() {}
-func (x *CreateNotificationConfigRequest) ProtoReflect() protoreflect.Message {
+func (x *RestoreObjectRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_storage_v2_storage_proto_msgTypes[9]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -882,47 +1050,88 @@ func (x *CreateNotificationConfigRequest) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use CreateNotificationConfigRequest.ProtoReflect.Descriptor instead.
-func (*CreateNotificationConfigRequest) Descriptor() ([]byte, []int) {
+// Deprecated: Use RestoreObjectRequest.ProtoReflect.Descriptor instead.
+func (*RestoreObjectRequest) Descriptor() ([]byte, []int) {
return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{9}
}
-func (x *CreateNotificationConfigRequest) GetParent() string {
+func (x *RestoreObjectRequest) GetBucket() string {
if x != nil {
- return x.Parent
+ return x.Bucket
}
return ""
}
-func (x *CreateNotificationConfigRequest) GetNotificationConfig() *NotificationConfig {
+func (x *RestoreObjectRequest) GetObject() string {
if x != nil {
- return x.NotificationConfig
+ return x.Object
+ }
+ return ""
+}
+
+func (x *RestoreObjectRequest) GetGeneration() int64 {
+ if x != nil {
+ return x.Generation
+ }
+ return 0
+}
+
+func (x *RestoreObjectRequest) GetIfGenerationMatch() int64 {
+ if x != nil && x.IfGenerationMatch != nil {
+ return *x.IfGenerationMatch
+ }
+ return 0
+}
+
+func (x *RestoreObjectRequest) GetIfGenerationNotMatch() int64 {
+ if x != nil && x.IfGenerationNotMatch != nil {
+ return *x.IfGenerationNotMatch
+ }
+ return 0
+}
+
+func (x *RestoreObjectRequest) GetIfMetagenerationMatch() int64 {
+ if x != nil && x.IfMetagenerationMatch != nil {
+ return *x.IfMetagenerationMatch
+ }
+ return 0
+}
+
+func (x *RestoreObjectRequest) GetIfMetagenerationNotMatch() int64 {
+ if x != nil && x.IfMetagenerationNotMatch != nil {
+ return *x.IfMetagenerationNotMatch
+ }
+ return 0
+}
+
+func (x *RestoreObjectRequest) GetCopySourceAcl() bool {
+ if x != nil && x.CopySourceAcl != nil {
+ return *x.CopySourceAcl
+ }
+ return false
+}
+
+func (x *RestoreObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
+ if x != nil {
+ return x.CommonObjectRequestParams
}
return nil
}
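A corresponding sketch for restoring a soft-deleted generation, under the same assumed imports; note from the comments above that `copy_source_acl` is rejected on buckets with UBLA enabled:

func buildRestoreRequest(gen int64) *storagepb.RestoreObjectRequest {
	return &storagepb.RestoreObjectRequest{
		Bucket:        "projects/_/buckets/my-bucket", // illustrative
		Object:        "logs/app.log",
		Generation:    gen,              // required: the soft-deleted revision to bring back
		CopySourceAcl: proto.Bool(true), // carry over the source object's access controls
	}
}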
-// Request message for ListNotifications.
-type ListNotificationConfigsRequest struct {
+// Message for canceling an in-progress resumable upload.
+// `upload_id` **must** be set.
+type CancelResumableWriteRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // Required. Name of a Google Cloud Storage bucket.
- Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
- // The maximum number of NotificationConfigs to return. The service may
- // return fewer than this value. The default value is 100. Specifying a value
- // above 100 will result in a page_size of 100.
- PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
- // A page token, received from a previous `ListNotificationConfigs` call.
- // Provide this to retrieve the subsequent page.
- //
- // When paginating, all other parameters provided to `ListNotificationConfigs`
- // must match the call that provided the page token.
- PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+ // Required. The upload_id of the resumable upload to cancel. This should be
+ // copied from the `upload_id` field of `StartResumableWriteResponse`.
+ UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3" json:"upload_id,omitempty"`
}
-func (x *ListNotificationConfigsRequest) Reset() {
- *x = ListNotificationConfigsRequest{}
+func (x *CancelResumableWriteRequest) Reset() {
+ *x = CancelResumableWriteRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_google_storage_v2_storage_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -930,13 +1139,13 @@ func (x *ListNotificationConfigsRequest) Reset() {
}
}
-func (x *ListNotificationConfigsRequest) String() string {
+func (x *CancelResumableWriteRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*ListNotificationConfigsRequest) ProtoMessage() {}
+func (*CancelResumableWriteRequest) ProtoMessage() {}
-func (x *ListNotificationConfigsRequest) ProtoReflect() protoreflect.Message {
+func (x *CancelResumableWriteRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_storage_v2_storage_proto_msgTypes[10]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -948,47 +1157,28 @@ func (x *ListNotificationConfigsRequest) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use ListNotificationConfigsRequest.ProtoReflect.Descriptor instead.
-func (*ListNotificationConfigsRequest) Descriptor() ([]byte, []int) {
+// Deprecated: Use CancelResumableWriteRequest.ProtoReflect.Descriptor instead.
+func (*CancelResumableWriteRequest) Descriptor() ([]byte, []int) {
return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{10}
}
-func (x *ListNotificationConfigsRequest) GetParent() string {
- if x != nil {
- return x.Parent
- }
- return ""
-}
-
-func (x *ListNotificationConfigsRequest) GetPageSize() int32 {
- if x != nil {
- return x.PageSize
- }
- return 0
-}
-
-func (x *ListNotificationConfigsRequest) GetPageToken() string {
+func (x *CancelResumableWriteRequest) GetUploadId() string {
if x != nil {
- return x.PageToken
+ return x.UploadId
}
return ""
}
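A short sketch of cancelling an in-progress upload, feeding back the id handed out at upload start (assumed imports as in the earlier sketches):

func buildCancelRequest(resp *storagepb.StartResumableWriteResponse) *storagepb.CancelResumableWriteRequest {
	return &storagepb.CancelResumableWriteRequest{
		UploadId: resp.GetUploadId(), // must be copied from StartResumableWriteResponse
	}
}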
-// The result of a call to ListNotificationConfigs
-type ListNotificationConfigsResponse struct {
+// Empty response message for canceling an in-progress resumable upload, will be
+// extended as needed.
+type CancelResumableWriteResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
-
- // The list of items.
- NotificationConfigs []*NotificationConfig `protobuf:"bytes,1,rep,name=notification_configs,json=notificationConfigs,proto3" json:"notification_configs,omitempty"`
- // A token, which can be sent as `page_token` to retrieve the next page.
- // If this field is omitted, there are no subsequent pages.
- NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
}
-func (x *ListNotificationConfigsResponse) Reset() {
- *x = ListNotificationConfigsResponse{}
+func (x *CancelResumableWriteResponse) Reset() {
+ *x = CancelResumableWriteResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_google_storage_v2_storage_proto_msgTypes[11]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -996,13 +1186,13 @@ func (x *ListNotificationConfigsResponse) Reset() {
}
}
-func (x *ListNotificationConfigsResponse) String() string {
+func (x *CancelResumableWriteResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*ListNotificationConfigsResponse) ProtoMessage() {}
+func (*CancelResumableWriteResponse) ProtoMessage() {}
-func (x *ListNotificationConfigsResponse) ProtoReflect() protoreflect.Message {
+func (x *CancelResumableWriteResponse) ProtoReflect() protoreflect.Message {
mi := &file_google_storage_v2_storage_proto_msgTypes[11]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -1014,60 +1204,69 @@ func (x *ListNotificationConfigsResponse) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use ListNotificationConfigsResponse.ProtoReflect.Descriptor instead.
-func (*ListNotificationConfigsResponse) Descriptor() ([]byte, []int) {
+// Deprecated: Use CancelResumableWriteResponse.ProtoReflect.Descriptor instead.
+func (*CancelResumableWriteResponse) Descriptor() ([]byte, []int) {
return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{11}
}
-func (x *ListNotificationConfigsResponse) GetNotificationConfigs() []*NotificationConfig {
- if x != nil {
- return x.NotificationConfigs
- }
- return nil
-}
-
-func (x *ListNotificationConfigsResponse) GetNextPageToken() string {
- if x != nil {
- return x.NextPageToken
- }
- return ""
-}
-
-// Request message for ComposeObject.
-type ComposeObjectRequest struct {
+// Request message for ReadObject.
+type ReadObjectRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // Required. Properties of the resulting object.
- Destination *Object `protobuf:"bytes,1,opt,name=destination,proto3" json:"destination,omitempty"`
- // The list of source objects that will be concatenated into a single object.
- SourceObjects []*ComposeObjectRequest_SourceObject `protobuf:"bytes,2,rep,name=source_objects,json=sourceObjects,proto3" json:"source_objects,omitempty"`
- // Apply a predefined set of access controls to the destination object.
- // Valid values are "authenticatedRead", "bucketOwnerFullControl",
- // "bucketOwnerRead", "private", "projectPrivate", or "publicRead".
- DestinationPredefinedAcl string `protobuf:"bytes,9,opt,name=destination_predefined_acl,json=destinationPredefinedAcl,proto3" json:"destination_predefined_acl,omitempty"`
+ // Required. The name of the bucket containing the object to read.
+ Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"`
+ // Required. The name of the object to read.
+ Object string `protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"`
+ // If present, selects a specific revision of this object (as opposed
+ // to the latest version, the default).
+ Generation int64 `protobuf:"varint,3,opt,name=generation,proto3" json:"generation,omitempty"`
+ // The offset for the first byte to return in the read, relative to the start
+ // of the object.
+ //
+ // A negative `read_offset` value will be interpreted as the number of bytes
+ // back from the end of the object to be returned. For example, if an object's
+ // length is 15 bytes, a ReadObjectRequest with `read_offset` = -5 and
+ // `read_limit` = 3 would return bytes 10 through 12 of the object. Requesting
+ // a negative offset with magnitude larger than the size of the object will
+ // return the entire object.
+ ReadOffset int64 `protobuf:"varint,4,opt,name=read_offset,json=readOffset,proto3" json:"read_offset,omitempty"`
+ // The maximum number of `data` bytes the server is allowed to return in the
+ // sum of all `Object` messages. A `read_limit` of zero indicates that there
+ // is no limit, and a negative `read_limit` will cause an error.
+ //
+ // If the stream returns fewer bytes than allowed by the `read_limit` and no
+ // error occurred, the stream includes all data from the `read_offset` to the
+ // end of the resource.
+ ReadLimit int64 `protobuf:"varint,5,opt,name=read_limit,json=readLimit,proto3" json:"read_limit,omitempty"`
// Makes the operation conditional on whether the object's current generation
// matches the given value. Setting to 0 makes the operation succeed only if
// there are no live versions of the object.
- IfGenerationMatch *int64 `protobuf:"varint,4,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"`
+ IfGenerationMatch *int64 `protobuf:"varint,6,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"`
+ // Makes the operation conditional on whether the object's live generation
+ // does not match the given value. If no live object exists, the precondition
+ // fails. Setting to 0 makes the operation succeed only if there is a live
+ // version of the object.
+ IfGenerationNotMatch *int64 `protobuf:"varint,7,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"`
// Makes the operation conditional on whether the object's current
// metageneration matches the given value.
- IfMetagenerationMatch *int64 `protobuf:"varint,5,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"`
- // Resource name of the Cloud KMS key, of the form
- // `projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key`,
- // that will be used to encrypt the object. Overrides the object
- // metadata's `kms_key_name` value, if any.
- KmsKey string `protobuf:"bytes,6,opt,name=kms_key,json=kmsKey,proto3" json:"kms_key,omitempty"`
+ IfMetagenerationMatch *int64 `protobuf:"varint,8,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"`
+ // Makes the operation conditional on whether the object's current
+ // metageneration does not match the given value.
+ IfMetagenerationNotMatch *int64 `protobuf:"varint,9,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"`
// A set of parameters common to Storage API requests concerning an object.
- CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,7,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
- // The checksums of the complete object. This will be validated against the
- // combined checksums of the component objects.
- ObjectChecksums *ObjectChecksums `protobuf:"bytes,10,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"`
+ CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,10,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
+ // Mask specifying which fields to read.
+ // The checksummed_data field and its children will always be present.
+ // If no mask is specified, will default to all fields except metadata.owner
+ // and metadata.acl.
+ // * may be used to mean "all fields".
+ ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,12,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"`
}
-func (x *ComposeObjectRequest) Reset() {
- *x = ComposeObjectRequest{}
+func (x *ReadObjectRequest) Reset() {
+ *x = ReadObjectRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_google_storage_v2_storage_proto_msgTypes[12]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -1075,13 +1274,13 @@ func (x *ComposeObjectRequest) Reset() {
}
}
-func (x *ComposeObjectRequest) String() string {
+func (x *ReadObjectRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*ComposeObjectRequest) ProtoMessage() {}
+func (*ReadObjectRequest) ProtoMessage() {}
-func (x *ComposeObjectRequest) ProtoReflect() protoreflect.Message {
+func (x *ReadObjectRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_storage_v2_storage_proto_msgTypes[12]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -1093,104 +1292,129 @@ func (x *ComposeObjectRequest) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use ComposeObjectRequest.ProtoReflect.Descriptor instead.
-func (*ComposeObjectRequest) Descriptor() ([]byte, []int) {
+// Deprecated: Use ReadObjectRequest.ProtoReflect.Descriptor instead.
+func (*ReadObjectRequest) Descriptor() ([]byte, []int) {
return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{12}
}
-func (x *ComposeObjectRequest) GetDestination() *Object {
+func (x *ReadObjectRequest) GetBucket() string {
if x != nil {
- return x.Destination
+ return x.Bucket
}
- return nil
+ return ""
}
-func (x *ComposeObjectRequest) GetSourceObjects() []*ComposeObjectRequest_SourceObject {
+func (x *ReadObjectRequest) GetObject() string {
if x != nil {
- return x.SourceObjects
+ return x.Object
}
- return nil
+ return ""
}
-func (x *ComposeObjectRequest) GetDestinationPredefinedAcl() string {
+func (x *ReadObjectRequest) GetGeneration() int64 {
if x != nil {
- return x.DestinationPredefinedAcl
+ return x.Generation
}
- return ""
+ return 0
}
-func (x *ComposeObjectRequest) GetIfGenerationMatch() int64 {
- if x != nil && x.IfGenerationMatch != nil {
- return *x.IfGenerationMatch
+func (x *ReadObjectRequest) GetReadOffset() int64 {
+ if x != nil {
+ return x.ReadOffset
}
return 0
}
-func (x *ComposeObjectRequest) GetIfMetagenerationMatch() int64 {
- if x != nil && x.IfMetagenerationMatch != nil {
- return *x.IfMetagenerationMatch
+func (x *ReadObjectRequest) GetReadLimit() int64 {
+ if x != nil {
+ return x.ReadLimit
}
return 0
}
-func (x *ComposeObjectRequest) GetKmsKey() string {
- if x != nil {
- return x.KmsKey
+func (x *ReadObjectRequest) GetIfGenerationMatch() int64 {
+ if x != nil && x.IfGenerationMatch != nil {
+ return *x.IfGenerationMatch
}
- return ""
+ return 0
}
-func (x *ComposeObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
- if x != nil {
+func (x *ReadObjectRequest) GetIfGenerationNotMatch() int64 {
+ if x != nil && x.IfGenerationNotMatch != nil {
+ return *x.IfGenerationNotMatch
+ }
+ return 0
+}
+
+func (x *ReadObjectRequest) GetIfMetagenerationMatch() int64 {
+ if x != nil && x.IfMetagenerationMatch != nil {
+ return *x.IfMetagenerationMatch
+ }
+ return 0
+}
+
+func (x *ReadObjectRequest) GetIfMetagenerationNotMatch() int64 {
+ if x != nil && x.IfMetagenerationNotMatch != nil {
+ return *x.IfMetagenerationNotMatch
+ }
+ return 0
+}
+
+func (x *ReadObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
+ if x != nil {
return x.CommonObjectRequestParams
}
return nil
}
-func (x *ComposeObjectRequest) GetObjectChecksums() *ObjectChecksums {
+func (x *ReadObjectRequest) GetReadMask() *fieldmaskpb.FieldMask {
if x != nil {
- return x.ObjectChecksums
+ return x.ReadMask
}
return nil
}
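A sketch mirroring the `read_offset`/`read_limit` example in the field comments above: for a 15-byte object, this request returns bytes 10 through 12 (assumed imports and names as in the earlier sketches):

func buildTailRead() *storagepb.ReadObjectRequest {
	return &storagepb.ReadObjectRequest{
		Bucket:     "projects/_/buckets/my-bucket", // illustrative
		Object:     "logs/app.log",
		ReadOffset: -5, // negative: count back from the end of the object
		ReadLimit:  3,  // cap on total data bytes across the response stream
	}
}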
-// Message for deleting an object.
-// `bucket` and `object` **must** be set.
-type DeleteObjectRequest struct {
+// Request message for GetObject.
+type GetObjectRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Required. Name of the bucket in which the object resides.
Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"`
- // Required. The name of the finalized object to delete.
- // Note: If you want to delete an unfinalized resumable upload please use
- // `CancelResumableWrite`.
+ // Required. Name of the object.
Object string `protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"`
- // If present, permanently deletes a specific revision of this object (as
- // opposed to the latest version, the default).
- Generation int64 `protobuf:"varint,4,opt,name=generation,proto3" json:"generation,omitempty"`
+ // If present, selects a specific revision of this object (as opposed to the
+ // latest version, the default).
+ Generation int64 `protobuf:"varint,3,opt,name=generation,proto3" json:"generation,omitempty"`
+ // If true, return the soft-deleted version of this object.
+ SoftDeleted *bool `protobuf:"varint,11,opt,name=soft_deleted,json=softDeleted,proto3,oneof" json:"soft_deleted,omitempty"`
// Makes the operation conditional on whether the object's current generation
// matches the given value. Setting to 0 makes the operation succeed only if
// there are no live versions of the object.
- IfGenerationMatch *int64 `protobuf:"varint,5,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"`
+ IfGenerationMatch *int64 `protobuf:"varint,4,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"`
// Makes the operation conditional on whether the object's live generation
// does not match the given value. If no live object exists, the precondition
// fails. Setting to 0 makes the operation succeed only if there is a live
// version of the object.
- IfGenerationNotMatch *int64 `protobuf:"varint,6,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"`
+ IfGenerationNotMatch *int64 `protobuf:"varint,5,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"`
// Makes the operation conditional on whether the object's current
// metageneration matches the given value.
- IfMetagenerationMatch *int64 `protobuf:"varint,7,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"`
+ IfMetagenerationMatch *int64 `protobuf:"varint,6,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"`
// Makes the operation conditional on whether the object's current
// metageneration does not match the given value.
- IfMetagenerationNotMatch *int64 `protobuf:"varint,8,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"`
+ IfMetagenerationNotMatch *int64 `protobuf:"varint,7,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"`
// A set of parameters common to Storage API requests concerning an object.
- CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,10,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
+ CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,8,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
+ // Mask specifying which fields to read.
+ // If no mask is specified, will default to all fields except metadata.acl and
+ // metadata.owner.
+ // * may be used to mean "all fields".
+ ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,10,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"`
}
-func (x *DeleteObjectRequest) Reset() {
- *x = DeleteObjectRequest{}
+func (x *GetObjectRequest) Reset() {
+ *x = GetObjectRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_google_storage_v2_storage_proto_msgTypes[13]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -1198,13 +1422,13 @@ func (x *DeleteObjectRequest) Reset() {
}
}
-func (x *DeleteObjectRequest) String() string {
+func (x *GetObjectRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*DeleteObjectRequest) ProtoMessage() {}
+func (*GetObjectRequest) ProtoMessage() {}
-func (x *DeleteObjectRequest) ProtoReflect() protoreflect.Message {
+func (x *GetObjectRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_storage_v2_storage_proto_msgTypes[13]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -1216,105 +1440,107 @@ func (x *DeleteObjectRequest) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use DeleteObjectRequest.ProtoReflect.Descriptor instead.
-func (*DeleteObjectRequest) Descriptor() ([]byte, []int) {
+// Deprecated: Use GetObjectRequest.ProtoReflect.Descriptor instead.
+func (*GetObjectRequest) Descriptor() ([]byte, []int) {
return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{13}
}
-func (x *DeleteObjectRequest) GetBucket() string {
+func (x *GetObjectRequest) GetBucket() string {
if x != nil {
return x.Bucket
}
return ""
}
-func (x *DeleteObjectRequest) GetObject() string {
+func (x *GetObjectRequest) GetObject() string {
if x != nil {
return x.Object
}
return ""
}
-func (x *DeleteObjectRequest) GetGeneration() int64 {
+func (x *GetObjectRequest) GetGeneration() int64 {
if x != nil {
return x.Generation
}
return 0
}
-func (x *DeleteObjectRequest) GetIfGenerationMatch() int64 {
+func (x *GetObjectRequest) GetSoftDeleted() bool {
+ if x != nil && x.SoftDeleted != nil {
+ return *x.SoftDeleted
+ }
+ return false
+}
+
+func (x *GetObjectRequest) GetIfGenerationMatch() int64 {
if x != nil && x.IfGenerationMatch != nil {
return *x.IfGenerationMatch
}
return 0
}
-func (x *DeleteObjectRequest) GetIfGenerationNotMatch() int64 {
+func (x *GetObjectRequest) GetIfGenerationNotMatch() int64 {
if x != nil && x.IfGenerationNotMatch != nil {
return *x.IfGenerationNotMatch
}
return 0
}
-func (x *DeleteObjectRequest) GetIfMetagenerationMatch() int64 {
+func (x *GetObjectRequest) GetIfMetagenerationMatch() int64 {
if x != nil && x.IfMetagenerationMatch != nil {
return *x.IfMetagenerationMatch
}
return 0
}
-func (x *DeleteObjectRequest) GetIfMetagenerationNotMatch() int64 {
+func (x *GetObjectRequest) GetIfMetagenerationNotMatch() int64 {
if x != nil && x.IfMetagenerationNotMatch != nil {
return *x.IfMetagenerationNotMatch
}
return 0
}
-func (x *DeleteObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
+func (x *GetObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
if x != nil {
return x.CommonObjectRequestParams
}
return nil
}
-// Message for restoring an object.
-// `bucket`, `object`, and `generation` **must** be set.
-type RestoreObjectRequest struct {
+func (x *GetObjectRequest) GetReadMask() *fieldmaskpb.FieldMask {
+ if x != nil {
+ return x.ReadMask
+ }
+ return nil
+}
+
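A sketch of fetching metadata for a soft-deleted revision with an explicit read mask. In addition to the earlier assumptions, this uses `fieldmaskpb` from google.golang.org/protobuf/types/known/fieldmaskpb:

func buildGetRequest(gen int64) *storagepb.GetObjectRequest {
	return &storagepb.GetObjectRequest{
		Bucket:      "projects/_/buckets/my-bucket", // illustrative
		Object:      "logs/app.log",
		Generation:  gen,
		SoftDeleted: proto.Bool(true),                             // return the soft-deleted version
		ReadMask:    &fieldmaskpb.FieldMask{Paths: []string{"*"}}, // "*" means all fields, per the comment
	}
}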
+// Response message for ReadObject.
+type ReadObjectResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // Required. Name of the bucket in which the object resides.
- Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"`
- // Required. The name of the object to restore.
- Object string `protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"`
- // Required. The specific revision of the object to restore.
- Generation int64 `protobuf:"varint,3,opt,name=generation,proto3" json:"generation,omitempty"`
- // Makes the operation conditional on whether the object's current generation
- // matches the given value. Setting to 0 makes the operation succeed only if
- // there are no live versions of the object.
- IfGenerationMatch *int64 `protobuf:"varint,4,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"`
- // Makes the operation conditional on whether the object's live generation
- // does not match the given value. If no live object exists, the precondition
- // fails. Setting to 0 makes the operation succeed only if there is a live
- // version of the object.
- IfGenerationNotMatch *int64 `protobuf:"varint,5,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"`
- // Makes the operation conditional on whether the object's current
- // metageneration matches the given value.
- IfMetagenerationMatch *int64 `protobuf:"varint,6,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"`
- // Makes the operation conditional on whether the object's current
- // metageneration does not match the given value.
- IfMetagenerationNotMatch *int64 `protobuf:"varint,7,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"`
- // If false or unset, the bucket's default object ACL will be used.
- // If true, copy the source object's access controls.
- // Return an error if bucket has UBLA enabled.
- CopySourceAcl *bool `protobuf:"varint,9,opt,name=copy_source_acl,json=copySourceAcl,proto3,oneof" json:"copy_source_acl,omitempty"`
- // A set of parameters common to Storage API requests concerning an object.
- CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,8,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
+ // A portion of the data for the object. The service **may** leave `data`
+ // empty for any given `ReadResponse`. This enables the service to inform the
+ // client that the request is still live while it is running an operation to
+ // generate more data.
+ ChecksummedData *ChecksummedData `protobuf:"bytes,1,opt,name=checksummed_data,json=checksummedData,proto3" json:"checksummed_data,omitempty"`
+ // The checksums of the complete object. If the object is downloaded in full,
+ // the client should compute one of these checksums over the downloaded object
+ // and compare it against the value provided here.
+ ObjectChecksums *ObjectChecksums `protobuf:"bytes,2,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"`
+	// If read_offset and/or read_limit was specified on the
+ // ReadObjectRequest, ContentRange will be populated on the first
+ // ReadObjectResponse message of the read stream.
+ ContentRange *ContentRange `protobuf:"bytes,3,opt,name=content_range,json=contentRange,proto3" json:"content_range,omitempty"`
+ // Metadata of the object whose media is being returned.
+ // Only populated in the first response in the stream.
+ Metadata *Object `protobuf:"bytes,4,opt,name=metadata,proto3" json:"metadata,omitempty"`
}
-func (x *RestoreObjectRequest) Reset() {
- *x = RestoreObjectRequest{}
+func (x *ReadObjectResponse) Reset() {
+ *x = ReadObjectResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_google_storage_v2_storage_proto_msgTypes[14]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -1322,13 +1548,13 @@ func (x *RestoreObjectRequest) Reset() {
}
}
-func (x *RestoreObjectRequest) String() string {
+func (x *ReadObjectResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*RestoreObjectRequest) ProtoMessage() {}
+func (*ReadObjectResponse) ProtoMessage() {}
-func (x *RestoreObjectRequest) ProtoReflect() protoreflect.Message {
+func (x *ReadObjectResponse) ProtoReflect() protoreflect.Message {
mi := &file_google_storage_v2_storage_proto_msgTypes[14]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -1340,102 +1566,92 @@ func (x *RestoreObjectRequest) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use RestoreObjectRequest.ProtoReflect.Descriptor instead.
-func (*RestoreObjectRequest) Descriptor() ([]byte, []int) {
+// Deprecated: Use ReadObjectResponse.ProtoReflect.Descriptor instead.
+func (*ReadObjectResponse) Descriptor() ([]byte, []int) {
return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{14}
}
-func (x *RestoreObjectRequest) GetBucket() string {
+func (x *ReadObjectResponse) GetChecksummedData() *ChecksummedData {
if x != nil {
- return x.Bucket
+ return x.ChecksummedData
}
- return ""
+ return nil
}
-func (x *RestoreObjectRequest) GetObject() string {
+func (x *ReadObjectResponse) GetObjectChecksums() *ObjectChecksums {
if x != nil {
- return x.Object
+ return x.ObjectChecksums
}
- return ""
+ return nil
}
-func (x *RestoreObjectRequest) GetGeneration() int64 {
+func (x *ReadObjectResponse) GetContentRange() *ContentRange {
if x != nil {
- return x.Generation
+ return x.ContentRange
}
- return 0
+ return nil
}
-func (x *RestoreObjectRequest) GetIfGenerationMatch() int64 {
- if x != nil && x.IfGenerationMatch != nil {
- return *x.IfGenerationMatch
+func (x *ReadObjectResponse) GetMetadata() *Object {
+ if x != nil {
+ return x.Metadata
}
- return 0
+ return nil
}
-func (x *RestoreObjectRequest) GetIfGenerationNotMatch() int64 {
- if x != nil && x.IfGenerationNotMatch != nil {
- return *x.IfGenerationNotMatch
- }
- return 0
-}
+// Describes an attempt to insert an object, possibly over multiple requests.
+type WriteObjectSpec struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (x *RestoreObjectRequest) GetIfMetagenerationMatch() int64 {
- if x != nil && x.IfMetagenerationMatch != nil {
- return *x.IfMetagenerationMatch
- }
- return 0
+ // Required. Destination object, including its name and its metadata.
+ Resource *Object `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"`
+ // Apply a predefined set of access controls to this object.
+ // Valid values are "authenticatedRead", "bucketOwnerFullControl",
+ // "bucketOwnerRead", "private", "projectPrivate", or "publicRead".
+ PredefinedAcl string `protobuf:"bytes,7,opt,name=predefined_acl,json=predefinedAcl,proto3" json:"predefined_acl,omitempty"`
+ // Makes the operation conditional on whether the object's current
+ // generation matches the given value. Setting to 0 makes the operation
+ // succeed only if there are no live versions of the object.
+ IfGenerationMatch *int64 `protobuf:"varint,3,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"`
+ // Makes the operation conditional on whether the object's live
+ // generation does not match the given value. If no live object exists, the
+ // precondition fails. Setting to 0 makes the operation succeed only if
+ // there is a live version of the object.
+ IfGenerationNotMatch *int64 `protobuf:"varint,4,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"`
+ // Makes the operation conditional on whether the object's current
+ // metageneration matches the given value.
+ IfMetagenerationMatch *int64 `protobuf:"varint,5,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"`
+ // Makes the operation conditional on whether the object's current
+ // metageneration does not match the given value.
+ IfMetagenerationNotMatch *int64 `protobuf:"varint,6,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"`
+ // The expected final object size being uploaded.
+ // If this value is set, closing the stream after writing fewer or more than
+ // `object_size` bytes will result in an OUT_OF_RANGE error.
+ //
+ // This situation is considered a client error, and if such an error occurs
+ // you must start the upload over from scratch, this time sending the correct
+ // number of bytes.
+ ObjectSize *int64 `protobuf:"varint,8,opt,name=object_size,json=objectSize,proto3,oneof" json:"object_size,omitempty"`
}
-func (x *RestoreObjectRequest) GetIfMetagenerationNotMatch() int64 {
- if x != nil && x.IfMetagenerationNotMatch != nil {
- return *x.IfMetagenerationNotMatch
+func (x *WriteObjectSpec) Reset() {
+ *x = WriteObjectSpec{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return 0
}
-func (x *RestoreObjectRequest) GetCopySourceAcl() bool {
- if x != nil && x.CopySourceAcl != nil {
- return *x.CopySourceAcl
- }
- return false
+func (x *WriteObjectSpec) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (x *RestoreObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
- if x != nil {
- return x.CommonObjectRequestParams
- }
- return nil
-}
-
-// Message for canceling an in-progress resumable upload.
-// `upload_id` **must** be set.
-type CancelResumableWriteRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The upload_id of the resumable upload to cancel. This should be
- // copied from the `upload_id` field of `StartResumableWriteResponse`.
- UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3" json:"upload_id,omitempty"`
-}
-
-func (x *CancelResumableWriteRequest) Reset() {
- *x = CancelResumableWriteRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[15]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *CancelResumableWriteRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*CancelResumableWriteRequest) ProtoMessage() {}
+func (*WriteObjectSpec) ProtoMessage() {}
-func (x *CancelResumableWriteRequest) ProtoReflect() protoreflect.Message {
+func (x *WriteObjectSpec) ProtoReflect() protoreflect.Message {
mi := &file_google_storage_v2_storage_proto_msgTypes[15]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -1447,131 +1663,126 @@ func (x *CancelResumableWriteRequest) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use CancelResumableWriteRequest.ProtoReflect.Descriptor instead.
-func (*CancelResumableWriteRequest) Descriptor() ([]byte, []int) {
+// Deprecated: Use WriteObjectSpec.ProtoReflect.Descriptor instead.
+func (*WriteObjectSpec) Descriptor() ([]byte, []int) {
return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{15}
}
-func (x *CancelResumableWriteRequest) GetUploadId() string {
+func (x *WriteObjectSpec) GetResource() *Object {
if x != nil {
- return x.UploadId
+ return x.Resource
}
- return ""
+ return nil
}
-// Empty response message for canceling an in-progress resumable upload, will be
-// extended as needed.
-type CancelResumableWriteResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
+func (x *WriteObjectSpec) GetPredefinedAcl() string {
+ if x != nil {
+ return x.PredefinedAcl
+ }
+ return ""
}
-func (x *CancelResumableWriteResponse) Reset() {
- *x = CancelResumableWriteResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[16]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
+func (x *WriteObjectSpec) GetIfGenerationMatch() int64 {
+ if x != nil && x.IfGenerationMatch != nil {
+ return *x.IfGenerationMatch
}
+ return 0
}
-func (x *CancelResumableWriteResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
+func (x *WriteObjectSpec) GetIfGenerationNotMatch() int64 {
+ if x != nil && x.IfGenerationNotMatch != nil {
+ return *x.IfGenerationNotMatch
+ }
+ return 0
}
-func (*CancelResumableWriteResponse) ProtoMessage() {}
+func (x *WriteObjectSpec) GetIfMetagenerationMatch() int64 {
+ if x != nil && x.IfMetagenerationMatch != nil {
+ return *x.IfMetagenerationMatch
+ }
+ return 0
+}
-func (x *CancelResumableWriteResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[16]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
+func (x *WriteObjectSpec) GetIfMetagenerationNotMatch() int64 {
+ if x != nil && x.IfMetagenerationNotMatch != nil {
+ return *x.IfMetagenerationNotMatch
}
- return mi.MessageOf(x)
+ return 0
}
-// Deprecated: Use CancelResumableWriteResponse.ProtoReflect.Descriptor instead.
-func (*CancelResumableWriteResponse) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{16}
+func (x *WriteObjectSpec) GetObjectSize() int64 {
+ if x != nil && x.ObjectSize != nil {
+ return *x.ObjectSize
+ }
+ return 0
}
-// Request message for ReadObject.
-type ReadObjectRequest struct {
+// Request message for WriteObject.
+type WriteObjectRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // Required. The name of the bucket containing the object to read.
- Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"`
- // Required. The name of the object to read.
- Object string `protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"`
- // If present, selects a specific revision of this object (as opposed
- // to the latest version, the default).
- Generation int64 `protobuf:"varint,3,opt,name=generation,proto3" json:"generation,omitempty"`
- // The offset for the first byte to return in the read, relative to the start
- // of the object.
+ // The first message of each stream should set one of the following.
//
- // A negative `read_offset` value will be interpreted as the number of bytes
- // back from the end of the object to be returned. For example, if an object's
- // length is 15 bytes, a ReadObjectRequest with `read_offset` = -5 and
- // `read_limit` = 3 would return bytes 10 through 12 of the object. Requesting
- // a negative offset with magnitude larger than the size of the object will
- // return the entire object.
- ReadOffset int64 `protobuf:"varint,4,opt,name=read_offset,json=readOffset,proto3" json:"read_offset,omitempty"`
- // The maximum number of `data` bytes the server is allowed to return in the
- // sum of all `Object` messages. A `read_limit` of zero indicates that there
- // is no limit, and a negative `read_limit` will cause an error.
+ // Types that are assignable to FirstMessage:
//
- // If the stream returns fewer bytes than allowed by the `read_limit` and no
- // error occurred, the stream includes all data from the `read_offset` to the
- // end of the resource.
- ReadLimit int64 `protobuf:"varint,5,opt,name=read_limit,json=readLimit,proto3" json:"read_limit,omitempty"`
- // Makes the operation conditional on whether the object's current generation
- // matches the given value. Setting to 0 makes the operation succeed only if
- // there are no live versions of the object.
- IfGenerationMatch *int64 `protobuf:"varint,6,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"`
- // Makes the operation conditional on whether the object's live generation
- // does not match the given value. If no live object exists, the precondition
- // fails. Setting to 0 makes the operation succeed only if there is a live
- // version of the object.
- IfGenerationNotMatch *int64 `protobuf:"varint,7,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"`
- // Makes the operation conditional on whether the object's current
- // metageneration matches the given value.
- IfMetagenerationMatch *int64 `protobuf:"varint,8,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"`
- // Makes the operation conditional on whether the object's current
- // metageneration does not match the given value.
- IfMetagenerationNotMatch *int64 `protobuf:"varint,9,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"`
+ // *WriteObjectRequest_UploadId
+ // *WriteObjectRequest_WriteObjectSpec
+ FirstMessage isWriteObjectRequest_FirstMessage `protobuf_oneof:"first_message"`
+ // Required. The offset from the beginning of the object at which the data
+ // should be written.
+ //
+ // In the first `WriteObjectRequest` of a `WriteObject()` action, it
+ // indicates the initial offset for the `Write()` call. The value **must** be
+ // equal to the `persisted_size` that a call to `QueryWriteStatus()` would
+ // return (0 if this is the first write to the object).
+ //
+ // On subsequent calls, this value **must** be no larger than the sum of the
+ // first `write_offset` and the sizes of all `data` chunks sent previously on
+ // this stream.
+ //
+ // An incorrect value will cause an error.
+ WriteOffset int64 `protobuf:"varint,3,opt,name=write_offset,json=writeOffset,proto3" json:"write_offset,omitempty"`
+ // A portion of the data for the object.
+ //
+ // Types that are assignable to Data:
+ //
+ // *WriteObjectRequest_ChecksummedData
+ Data isWriteObjectRequest_Data `protobuf_oneof:"data"`
+ // Checksums for the complete object. If the checksums computed by the service
+	// don't match the specified checksums, the call will fail. May only be
+ // provided in the first or last request (either with first_message, or
+ // finish_write set).
+ ObjectChecksums *ObjectChecksums `protobuf:"bytes,6,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"`
+ // If `true`, this indicates that the write is complete. Sending any
+ // `WriteObjectRequest`s subsequent to one in which `finish_write` is `true`
+ // will cause an error.
+ // For a non-resumable write (where the upload_id was not set in the first
+ // message), it is an error not to set this field in the final message of the
+ // stream.
+ FinishWrite bool `protobuf:"varint,7,opt,name=finish_write,json=finishWrite,proto3" json:"finish_write,omitempty"`
// A set of parameters common to Storage API requests concerning an object.
- CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,10,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
- // Mask specifying which fields to read.
- // The checksummed_data field and its children will always be present.
- // If no mask is specified, will default to all fields except metadata.owner
- // and metadata.acl.
- // * may be used to mean "all fields".
- ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,12,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"`
+ CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,8,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
}
-func (x *ReadObjectRequest) Reset() {
- *x = ReadObjectRequest{}
+func (x *WriteObjectRequest) Reset() {
+ *x = WriteObjectRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[17]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[16]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *ReadObjectRequest) String() string {
+func (x *WriteObjectRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*ReadObjectRequest) ProtoMessage() {}
+func (*WriteObjectRequest) ProtoMessage() {}
-func (x *ReadObjectRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[17]
+func (x *WriteObjectRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[16]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1582,270 +1793,138 @@ func (x *ReadObjectRequest) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use ReadObjectRequest.ProtoReflect.Descriptor instead.
-func (*ReadObjectRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{17}
+// Deprecated: Use WriteObjectRequest.ProtoReflect.Descriptor instead.
+func (*WriteObjectRequest) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{16}
}
-func (x *ReadObjectRequest) GetBucket() string {
- if x != nil {
- return x.Bucket
+func (m *WriteObjectRequest) GetFirstMessage() isWriteObjectRequest_FirstMessage {
+ if m != nil {
+ return m.FirstMessage
}
- return ""
+ return nil
}
-func (x *ReadObjectRequest) GetObject() string {
- if x != nil {
- return x.Object
+func (x *WriteObjectRequest) GetUploadId() string {
+ if x, ok := x.GetFirstMessage().(*WriteObjectRequest_UploadId); ok {
+ return x.UploadId
}
return ""
}
-func (x *ReadObjectRequest) GetGeneration() int64 {
- if x != nil {
- return x.Generation
+func (x *WriteObjectRequest) GetWriteObjectSpec() *WriteObjectSpec {
+ if x, ok := x.GetFirstMessage().(*WriteObjectRequest_WriteObjectSpec); ok {
+ return x.WriteObjectSpec
}
- return 0
+ return nil
}
-func (x *ReadObjectRequest) GetReadOffset() int64 {
+func (x *WriteObjectRequest) GetWriteOffset() int64 {
if x != nil {
- return x.ReadOffset
+ return x.WriteOffset
}
return 0
}
-func (x *ReadObjectRequest) GetReadLimit() int64 {
- if x != nil {
- return x.ReadLimit
+func (m *WriteObjectRequest) GetData() isWriteObjectRequest_Data {
+ if m != nil {
+ return m.Data
}
- return 0
+ return nil
}
-func (x *ReadObjectRequest) GetIfGenerationMatch() int64 {
- if x != nil && x.IfGenerationMatch != nil {
- return *x.IfGenerationMatch
+func (x *WriteObjectRequest) GetChecksummedData() *ChecksummedData {
+ if x, ok := x.GetData().(*WriteObjectRequest_ChecksummedData); ok {
+ return x.ChecksummedData
}
- return 0
+ return nil
}
-func (x *ReadObjectRequest) GetIfGenerationNotMatch() int64 {
- if x != nil && x.IfGenerationNotMatch != nil {
- return *x.IfGenerationNotMatch
+func (x *WriteObjectRequest) GetObjectChecksums() *ObjectChecksums {
+ if x != nil {
+ return x.ObjectChecksums
}
- return 0
+ return nil
}
-func (x *ReadObjectRequest) GetIfMetagenerationMatch() int64 {
- if x != nil && x.IfMetagenerationMatch != nil {
- return *x.IfMetagenerationMatch
- }
- return 0
-}
-
-func (x *ReadObjectRequest) GetIfMetagenerationNotMatch() int64 {
- if x != nil && x.IfMetagenerationNotMatch != nil {
- return *x.IfMetagenerationNotMatch
- }
- return 0
-}
-
-func (x *ReadObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
+func (x *WriteObjectRequest) GetFinishWrite() bool {
if x != nil {
- return x.CommonObjectRequestParams
+ return x.FinishWrite
}
- return nil
+ return false
}
-func (x *ReadObjectRequest) GetReadMask() *fieldmaskpb.FieldMask {
+func (x *WriteObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
if x != nil {
- return x.ReadMask
+ return x.CommonObjectRequestParams
}
return nil
}
-// Request message for GetObject.
-type GetObjectRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. Name of the bucket in which the object resides.
- Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"`
- // Required. Name of the object.
- Object string `protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"`
- // If present, selects a specific revision of this object (as opposed to the
- // latest version, the default).
- Generation int64 `protobuf:"varint,3,opt,name=generation,proto3" json:"generation,omitempty"`
- // If true, return the soft-deleted version of this object.
- SoftDeleted *bool `protobuf:"varint,11,opt,name=soft_deleted,json=softDeleted,proto3,oneof" json:"soft_deleted,omitempty"`
- // Makes the operation conditional on whether the object's current generation
- // matches the given value. Setting to 0 makes the operation succeed only if
- // there are no live versions of the object.
- IfGenerationMatch *int64 `protobuf:"varint,4,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"`
- // Makes the operation conditional on whether the object's live generation
- // does not match the given value. If no live object exists, the precondition
- // fails. Setting to 0 makes the operation succeed only if there is a live
- // version of the object.
- IfGenerationNotMatch *int64 `protobuf:"varint,5,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"`
- // Makes the operation conditional on whether the object's current
- // metageneration matches the given value.
- IfMetagenerationMatch *int64 `protobuf:"varint,6,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"`
- // Makes the operation conditional on whether the object's current
- // metageneration does not match the given value.
- IfMetagenerationNotMatch *int64 `protobuf:"varint,7,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"`
- // A set of parameters common to Storage API requests concerning an object.
- CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,8,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
- // Mask specifying which fields to read.
- // If no mask is specified, will default to all fields except metadata.acl and
- // metadata.owner.
- // * may be used to mean "all fields".
- ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,10,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"`
-}
-
-func (x *GetObjectRequest) Reset() {
- *x = GetObjectRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[18]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetObjectRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetObjectRequest) ProtoMessage() {}
-
-func (x *GetObjectRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[18]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetObjectRequest.ProtoReflect.Descriptor instead.
-func (*GetObjectRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{18}
-}
-
-func (x *GetObjectRequest) GetBucket() string {
- if x != nil {
- return x.Bucket
- }
- return ""
-}
-
-func (x *GetObjectRequest) GetObject() string {
- if x != nil {
- return x.Object
- }
- return ""
-}
-
-func (x *GetObjectRequest) GetGeneration() int64 {
- if x != nil {
- return x.Generation
- }
- return 0
+type isWriteObjectRequest_FirstMessage interface {
+ isWriteObjectRequest_FirstMessage()
}
-func (x *GetObjectRequest) GetSoftDeleted() bool {
- if x != nil && x.SoftDeleted != nil {
- return *x.SoftDeleted
- }
- return false
+type WriteObjectRequest_UploadId struct {
+ // For resumable uploads. This should be the `upload_id` returned from a
+	// call to `StartResumableWrite`.
+ UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3,oneof"`
}
-func (x *GetObjectRequest) GetIfGenerationMatch() int64 {
- if x != nil && x.IfGenerationMatch != nil {
- return *x.IfGenerationMatch
- }
- return 0
+type WriteObjectRequest_WriteObjectSpec struct {
+ // For non-resumable uploads. Describes the overall upload, including the
+ // destination bucket and object name, preconditions, etc.
+ WriteObjectSpec *WriteObjectSpec `protobuf:"bytes,2,opt,name=write_object_spec,json=writeObjectSpec,proto3,oneof"`
}
-func (x *GetObjectRequest) GetIfGenerationNotMatch() int64 {
- if x != nil && x.IfGenerationNotMatch != nil {
- return *x.IfGenerationNotMatch
- }
- return 0
-}
+func (*WriteObjectRequest_UploadId) isWriteObjectRequest_FirstMessage() {}
-func (x *GetObjectRequest) GetIfMetagenerationMatch() int64 {
- if x != nil && x.IfMetagenerationMatch != nil {
- return *x.IfMetagenerationMatch
- }
- return 0
-}
+func (*WriteObjectRequest_WriteObjectSpec) isWriteObjectRequest_FirstMessage() {}
-func (x *GetObjectRequest) GetIfMetagenerationNotMatch() int64 {
- if x != nil && x.IfMetagenerationNotMatch != nil {
- return *x.IfMetagenerationNotMatch
- }
- return 0
+type isWriteObjectRequest_Data interface {
+ isWriteObjectRequest_Data()
}
-func (x *GetObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
- if x != nil {
- return x.CommonObjectRequestParams
- }
- return nil
+type WriteObjectRequest_ChecksummedData struct {
+ // The data to insert. If a crc32c checksum is provided that doesn't match
+ // the checksum computed by the service, the request will fail.
+ ChecksummedData *ChecksummedData `protobuf:"bytes,4,opt,name=checksummed_data,json=checksummedData,proto3,oneof"`
}
-func (x *GetObjectRequest) GetReadMask() *fieldmaskpb.FieldMask {
- if x != nil {
- return x.ReadMask
- }
- return nil
-}
+func (*WriteObjectRequest_ChecksummedData) isWriteObjectRequest_Data() {}
-// Response message for ReadObject.
-type ReadObjectResponse struct {
+// Response message for WriteObject.
+type WriteObjectResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // A portion of the data for the object. The service **may** leave `data`
- // empty for any given `ReadResponse`. This enables the service to inform the
- // client that the request is still live while it is running an operation to
- // generate more data.
- ChecksummedData *ChecksummedData `protobuf:"bytes,1,opt,name=checksummed_data,json=checksummedData,proto3" json:"checksummed_data,omitempty"`
- // The checksums of the complete object. If the object is downloaded in full,
- // the client should compute one of these checksums over the downloaded object
- // and compare it against the value provided here.
- ObjectChecksums *ObjectChecksums `protobuf:"bytes,2,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"`
- // If read_offset and or read_limit was specified on the
- // ReadObjectRequest, ContentRange will be populated on the first
- // ReadObjectResponse message of the read stream.
- ContentRange *ContentRange `protobuf:"bytes,3,opt,name=content_range,json=contentRange,proto3" json:"content_range,omitempty"`
- // Metadata of the object whose media is being returned.
- // Only populated in the first response in the stream.
- Metadata *Object `protobuf:"bytes,4,opt,name=metadata,proto3" json:"metadata,omitempty"`
+ // The response will set one of the following.
+ //
+ // Types that are assignable to WriteStatus:
+ //
+ // *WriteObjectResponse_PersistedSize
+ // *WriteObjectResponse_Resource
+ WriteStatus isWriteObjectResponse_WriteStatus `protobuf_oneof:"write_status"`
}
-func (x *ReadObjectResponse) Reset() {
- *x = ReadObjectResponse{}
+func (x *WriteObjectResponse) Reset() {
+ *x = WriteObjectResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[19]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[17]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *ReadObjectResponse) String() string {
+func (x *WriteObjectResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*ReadObjectResponse) ProtoMessage() {}
+func (*WriteObjectResponse) ProtoMessage() {}
-func (x *ReadObjectResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[19]
+func (x *WriteObjectResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[17]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1856,159 +1935,54 @@ func (x *ReadObjectResponse) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use ReadObjectResponse.ProtoReflect.Descriptor instead.
-func (*ReadObjectResponse) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{19}
+// Deprecated: Use WriteObjectResponse.ProtoReflect.Descriptor instead.
+func (*WriteObjectResponse) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{17}
}
-func (x *ReadObjectResponse) GetChecksummedData() *ChecksummedData {
- if x != nil {
- return x.ChecksummedData
+func (m *WriteObjectResponse) GetWriteStatus() isWriteObjectResponse_WriteStatus {
+ if m != nil {
+ return m.WriteStatus
}
return nil
}
-func (x *ReadObjectResponse) GetObjectChecksums() *ObjectChecksums {
- if x != nil {
- return x.ObjectChecksums
+func (x *WriteObjectResponse) GetPersistedSize() int64 {
+ if x, ok := x.GetWriteStatus().(*WriteObjectResponse_PersistedSize); ok {
+ return x.PersistedSize
}
- return nil
+ return 0
}
-func (x *ReadObjectResponse) GetContentRange() *ContentRange {
- if x != nil {
- return x.ContentRange
+func (x *WriteObjectResponse) GetResource() *Object {
+ if x, ok := x.GetWriteStatus().(*WriteObjectResponse_Resource); ok {
+ return x.Resource
}
return nil
}
-func (x *ReadObjectResponse) GetMetadata() *Object {
- if x != nil {
- return x.Metadata
- }
- return nil
+type isWriteObjectResponse_WriteStatus interface {
+ isWriteObjectResponse_WriteStatus()
}
-// Describes an attempt to insert an object, possibly over multiple requests.
-type WriteObjectSpec struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
+type WriteObjectResponse_PersistedSize struct {
+ // The total number of bytes that have been processed for the given object
+ // from all `WriteObject` calls. Only set if the upload has not finalized.
+ PersistedSize int64 `protobuf:"varint,1,opt,name=persisted_size,json=persistedSize,proto3,oneof"`
+}
- // Required. Destination object, including its name and its metadata.
- Resource *Object `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"`
- // Apply a predefined set of access controls to this object.
- // Valid values are "authenticatedRead", "bucketOwnerFullControl",
- // "bucketOwnerRead", "private", "projectPrivate", or "publicRead".
- PredefinedAcl string `protobuf:"bytes,7,opt,name=predefined_acl,json=predefinedAcl,proto3" json:"predefined_acl,omitempty"`
- // Makes the operation conditional on whether the object's current
- // generation matches the given value. Setting to 0 makes the operation
- // succeed only if there are no live versions of the object.
- IfGenerationMatch *int64 `protobuf:"varint,3,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"`
- // Makes the operation conditional on whether the object's live
- // generation does not match the given value. If no live object exists, the
- // precondition fails. Setting to 0 makes the operation succeed only if
- // there is a live version of the object.
- IfGenerationNotMatch *int64 `protobuf:"varint,4,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"`
- // Makes the operation conditional on whether the object's current
- // metageneration matches the given value.
- IfMetagenerationMatch *int64 `protobuf:"varint,5,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"`
- // Makes the operation conditional on whether the object's current
- // metageneration does not match the given value.
- IfMetagenerationNotMatch *int64 `protobuf:"varint,6,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"`
- // The expected final object size being uploaded.
- // If this value is set, closing the stream after writing fewer or more than
- // `object_size` bytes will result in an OUT_OF_RANGE error.
- //
- // This situation is considered a client error, and if such an error occurs
- // you must start the upload over from scratch, this time sending the correct
- // number of bytes.
- ObjectSize *int64 `protobuf:"varint,8,opt,name=object_size,json=objectSize,proto3,oneof" json:"object_size,omitempty"`
-}
-
-func (x *WriteObjectSpec) Reset() {
- *x = WriteObjectSpec{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[20]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *WriteObjectSpec) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*WriteObjectSpec) ProtoMessage() {}
-
-func (x *WriteObjectSpec) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[20]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use WriteObjectSpec.ProtoReflect.Descriptor instead.
-func (*WriteObjectSpec) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{20}
-}
-
-func (x *WriteObjectSpec) GetResource() *Object {
- if x != nil {
- return x.Resource
- }
- return nil
-}
-
-func (x *WriteObjectSpec) GetPredefinedAcl() string {
- if x != nil {
- return x.PredefinedAcl
- }
- return ""
-}
-
-func (x *WriteObjectSpec) GetIfGenerationMatch() int64 {
- if x != nil && x.IfGenerationMatch != nil {
- return *x.IfGenerationMatch
- }
- return 0
-}
-
-func (x *WriteObjectSpec) GetIfGenerationNotMatch() int64 {
- if x != nil && x.IfGenerationNotMatch != nil {
- return *x.IfGenerationNotMatch
- }
- return 0
-}
-
-func (x *WriteObjectSpec) GetIfMetagenerationMatch() int64 {
- if x != nil && x.IfMetagenerationMatch != nil {
- return *x.IfMetagenerationMatch
- }
- return 0
+type WriteObjectResponse_Resource struct {
+ // A resource containing the metadata for the uploaded object. Only set if
+ // the upload has finalized.
+ Resource *Object `protobuf:"bytes,2,opt,name=resource,proto3,oneof"`
}
-func (x *WriteObjectSpec) GetIfMetagenerationNotMatch() int64 {
- if x != nil && x.IfMetagenerationNotMatch != nil {
- return *x.IfMetagenerationNotMatch
- }
- return 0
-}
+func (*WriteObjectResponse_PersistedSize) isWriteObjectResponse_WriteStatus() {}
-func (x *WriteObjectSpec) GetObjectSize() int64 {
- if x != nil && x.ObjectSize != nil {
- return *x.ObjectSize
- }
- return 0
-}
+func (*WriteObjectResponse_Resource) isWriteObjectResponse_WriteStatus() {}
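A sketch, not part of the vendored diff: the `write_status` oneof above means a caller must branch on which variant the server set. Minimal illustration, assuming the generated types import as `storagepb` (the exact vendor path depends on module layout); `handleWriteResponse` is a hypothetical name:

```go
package main

import (
	"log"

	// Assumed import path; the generated storage v2 types live in one package.
	storagepb "google.golang.org/genproto/googleapis/storage/v2"
)

// handleWriteResponse branches on the write_status oneof of WriteObjectResponse.
func handleWriteResponse(resp *storagepb.WriteObjectResponse) {
	switch ws := resp.GetWriteStatus().(type) {
	case *storagepb.WriteObjectResponse_PersistedSize:
		// Upload not yet finalized: this many bytes are durably persisted.
		log.Printf("persisted %d bytes so far", ws.PersistedSize)
	case *storagepb.WriteObjectResponse_Resource:
		// Upload finalized: the full object metadata is returned.
		log.Printf("upload finalized: %v", ws.Resource)
	}
}

func main() {
	handleWriteResponse(&storagepb.WriteObjectResponse{
		WriteStatus: &storagepb.WriteObjectResponse_PersistedSize{PersistedSize: 1024},
	})
}
```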
-// Request message for WriteObject.
-type WriteObjectRequest struct {
+// Request message for BidiWriteObject.
+type BidiWriteObjectRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
@@ -2017,9 +1991,9 @@ type WriteObjectRequest struct {
//
// Types that are assignable to FirstMessage:
//
- // *WriteObjectRequest_UploadId
- // *WriteObjectRequest_WriteObjectSpec
- FirstMessage isWriteObjectRequest_FirstMessage `protobuf_oneof:"first_message"`
+ // *BidiWriteObjectRequest_UploadId
+ // *BidiWriteObjectRequest_WriteObjectSpec
+ FirstMessage isBidiWriteObjectRequest_FirstMessage `protobuf_oneof:"first_message"`
// Required. The offset from the beginning of the object at which the data
// should be written.
//
@@ -2032,47 +2006,62 @@ type WriteObjectRequest struct {
// first `write_offset` and the sizes of all `data` chunks sent previously on
// this stream.
//
- // An incorrect value will cause an error.
+ // An invalid value will cause an error.
WriteOffset int64 `protobuf:"varint,3,opt,name=write_offset,json=writeOffset,proto3" json:"write_offset,omitempty"`
// A portion of the data for the object.
//
// Types that are assignable to Data:
//
- // *WriteObjectRequest_ChecksummedData
- Data isWriteObjectRequest_Data `protobuf_oneof:"data"`
+ // *BidiWriteObjectRequest_ChecksummedData
+ Data isBidiWriteObjectRequest_Data `protobuf_oneof:"data"`
// Checksums for the complete object. If the checksums computed by the service
// don't match the specified checksums the call will fail. May only be
- // provided in the first or last request (either with first_message, or
- // finish_write set).
+	// provided in the last request (with finish_write set).
ObjectChecksums *ObjectChecksums `protobuf:"bytes,6,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"`
+ // For each BidiWriteObjectRequest where state_lookup is `true` or the client
+ // closes the stream, the service will send a BidiWriteObjectResponse
+ // containing the current persisted size. The persisted size sent in responses
+ // covers all the bytes the server has persisted thus far and can be used to
+ // decide what data is safe for the client to drop. Note that the object's
+ // current size reported by the BidiWriteObjectResponse may lag behind the
+ // number of bytes written by the client. This field is ignored if
+ // `finish_write` is set to true.
+ StateLookup bool `protobuf:"varint,7,opt,name=state_lookup,json=stateLookup,proto3" json:"state_lookup,omitempty"`
+ // Persists data written on the stream, up to and including the current
+ // message, to permanent storage. This option should be used sparingly as it
+ // may reduce performance. Ongoing writes will periodically be persisted on
+ // the server even when `flush` is not set. This field is ignored if
+ // `finish_write` is set to true since there's no need to checkpoint or flush
+ // if this message completes the write.
+ Flush bool `protobuf:"varint,8,opt,name=flush,proto3" json:"flush,omitempty"`
// If `true`, this indicates that the write is complete. Sending any
// `WriteObjectRequest`s subsequent to one in which `finish_write` is `true`
// will cause an error.
// For a non-resumable write (where the upload_id was not set in the first
// message), it is an error not to set this field in the final message of the
// stream.
- FinishWrite bool `protobuf:"varint,7,opt,name=finish_write,json=finishWrite,proto3" json:"finish_write,omitempty"`
+ FinishWrite bool `protobuf:"varint,9,opt,name=finish_write,json=finishWrite,proto3" json:"finish_write,omitempty"`
// A set of parameters common to Storage API requests concerning an object.
- CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,8,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
+ CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,10,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
}
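The field comments above pin down the bidi write protocol: `write_offset` must equal the server's `persisted_size`, and `state_lookup` asks the server to echo progress back in a `BidiWriteObjectResponse`. A minimal sketch of the first message of a resumed stream, under the same assumed `storagepb` import, also assuming `ChecksummedData` carries a `Content` byte field (not shown in this hunk):

```go
package main

import (
	"fmt"

	// Assumed import path for the generated storage v2 types.
	storagepb "google.golang.org/genproto/googleapis/storage/v2"
)

func main() {
	uploadID := "example-upload-id" // hypothetical; from StartResumableWriteResponse
	var persisted int64 = 4096      // last persisted_size acknowledged by the server
	chunk := []byte("next chunk of the object")

	// First message of a resumed BidiWriteObject stream: pick up exactly at
	// the acknowledged offset and ask the server to report progress back.
	req := &storagepb.BidiWriteObjectRequest{
		FirstMessage: &storagepb.BidiWriteObjectRequest_UploadId{UploadId: uploadID},
		WriteOffset:  persisted, // must equal the server-side persisted_size
		Data: &storagepb.BidiWriteObjectRequest_ChecksummedData{
			// Content field assumed from the wider generated package.
			ChecksummedData: &storagepb.ChecksummedData{Content: chunk},
		},
		StateLookup: true, // triggers a BidiWriteObjectResponse with persisted_size
	}
	fmt.Println(req.GetWriteOffset(), req.GetStateLookup())
}
```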
-func (x *WriteObjectRequest) Reset() {
- *x = WriteObjectRequest{}
+func (x *BidiWriteObjectRequest) Reset() {
+ *x = BidiWriteObjectRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[21]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[18]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *WriteObjectRequest) String() string {
+func (x *BidiWriteObjectRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*WriteObjectRequest) ProtoMessage() {}
+func (*BidiWriteObjectRequest) ProtoMessage() {}
-func (x *WriteObjectRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[21]
+func (x *BidiWriteObjectRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[18]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2083,108 +2072,122 @@ func (x *WriteObjectRequest) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use WriteObjectRequest.ProtoReflect.Descriptor instead.
-func (*WriteObjectRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{21}
+// Deprecated: Use BidiWriteObjectRequest.ProtoReflect.Descriptor instead.
+func (*BidiWriteObjectRequest) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{18}
}
-func (m *WriteObjectRequest) GetFirstMessage() isWriteObjectRequest_FirstMessage {
+func (m *BidiWriteObjectRequest) GetFirstMessage() isBidiWriteObjectRequest_FirstMessage {
if m != nil {
return m.FirstMessage
}
return nil
}
-func (x *WriteObjectRequest) GetUploadId() string {
- if x, ok := x.GetFirstMessage().(*WriteObjectRequest_UploadId); ok {
+func (x *BidiWriteObjectRequest) GetUploadId() string {
+ if x, ok := x.GetFirstMessage().(*BidiWriteObjectRequest_UploadId); ok {
return x.UploadId
}
return ""
}
-func (x *WriteObjectRequest) GetWriteObjectSpec() *WriteObjectSpec {
- if x, ok := x.GetFirstMessage().(*WriteObjectRequest_WriteObjectSpec); ok {
+func (x *BidiWriteObjectRequest) GetWriteObjectSpec() *WriteObjectSpec {
+ if x, ok := x.GetFirstMessage().(*BidiWriteObjectRequest_WriteObjectSpec); ok {
return x.WriteObjectSpec
}
return nil
}
-func (x *WriteObjectRequest) GetWriteOffset() int64 {
+func (x *BidiWriteObjectRequest) GetWriteOffset() int64 {
if x != nil {
return x.WriteOffset
}
return 0
}
-func (m *WriteObjectRequest) GetData() isWriteObjectRequest_Data {
+func (m *BidiWriteObjectRequest) GetData() isBidiWriteObjectRequest_Data {
if m != nil {
return m.Data
}
return nil
}
-func (x *WriteObjectRequest) GetChecksummedData() *ChecksummedData {
- if x, ok := x.GetData().(*WriteObjectRequest_ChecksummedData); ok {
+func (x *BidiWriteObjectRequest) GetChecksummedData() *ChecksummedData {
+ if x, ok := x.GetData().(*BidiWriteObjectRequest_ChecksummedData); ok {
return x.ChecksummedData
}
return nil
}
-func (x *WriteObjectRequest) GetObjectChecksums() *ObjectChecksums {
+func (x *BidiWriteObjectRequest) GetObjectChecksums() *ObjectChecksums {
if x != nil {
return x.ObjectChecksums
}
return nil
}
-func (x *WriteObjectRequest) GetFinishWrite() bool {
+func (x *BidiWriteObjectRequest) GetStateLookup() bool {
+ if x != nil {
+ return x.StateLookup
+ }
+ return false
+}
+
+func (x *BidiWriteObjectRequest) GetFlush() bool {
+ if x != nil {
+ return x.Flush
+ }
+ return false
+}
+
+func (x *BidiWriteObjectRequest) GetFinishWrite() bool {
if x != nil {
return x.FinishWrite
}
return false
}
-func (x *WriteObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
+func (x *BidiWriteObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
if x != nil {
return x.CommonObjectRequestParams
}
return nil
}
-type isWriteObjectRequest_FirstMessage interface {
- isWriteObjectRequest_FirstMessage()
+type isBidiWriteObjectRequest_FirstMessage interface {
+ isBidiWriteObjectRequest_FirstMessage()
}
-type WriteObjectRequest_UploadId struct {
+type BidiWriteObjectRequest_UploadId struct {
// For resumable uploads. This should be the `upload_id` returned from a
// call to `StartResumableWriteResponse`.
UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3,oneof"`
}
-type WriteObjectRequest_WriteObjectSpec struct {
+type BidiWriteObjectRequest_WriteObjectSpec struct {
// For non-resumable uploads. Describes the overall upload, including the
// destination bucket and object name, preconditions, etc.
WriteObjectSpec *WriteObjectSpec `protobuf:"bytes,2,opt,name=write_object_spec,json=writeObjectSpec,proto3,oneof"`
}
-func (*WriteObjectRequest_UploadId) isWriteObjectRequest_FirstMessage() {}
+func (*BidiWriteObjectRequest_UploadId) isBidiWriteObjectRequest_FirstMessage() {}
-func (*WriteObjectRequest_WriteObjectSpec) isWriteObjectRequest_FirstMessage() {}
+func (*BidiWriteObjectRequest_WriteObjectSpec) isBidiWriteObjectRequest_FirstMessage() {}
-type isWriteObjectRequest_Data interface {
- isWriteObjectRequest_Data()
+type isBidiWriteObjectRequest_Data interface {
+ isBidiWriteObjectRequest_Data()
}
-type WriteObjectRequest_ChecksummedData struct {
+type BidiWriteObjectRequest_ChecksummedData struct {
// The data to insert. If a crc32c checksum is provided that doesn't match
// the checksum computed by the service, the request will fail.
ChecksummedData *ChecksummedData `protobuf:"bytes,4,opt,name=checksummed_data,json=checksummedData,proto3,oneof"`
}
-func (*WriteObjectRequest_ChecksummedData) isWriteObjectRequest_Data() {}
+func (*BidiWriteObjectRequest_ChecksummedData) isBidiWriteObjectRequest_Data() {}
-// Response message for WriteObject.
-type WriteObjectResponse struct {
+// Response message for BidiWriteObject.
+type BidiWriteObjectResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
@@ -2193,28 +2196,28 @@ type WriteObjectResponse struct {
//
// Types that are assignable to WriteStatus:
//
- // *WriteObjectResponse_PersistedSize
- // *WriteObjectResponse_Resource
- WriteStatus isWriteObjectResponse_WriteStatus `protobuf_oneof:"write_status"`
+ // *BidiWriteObjectResponse_PersistedSize
+ // *BidiWriteObjectResponse_Resource
+ WriteStatus isBidiWriteObjectResponse_WriteStatus `protobuf_oneof:"write_status"`
}
-func (x *WriteObjectResponse) Reset() {
- *x = WriteObjectResponse{}
+func (x *BidiWriteObjectResponse) Reset() {
+ *x = BidiWriteObjectResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[22]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[19]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *WriteObjectResponse) String() string {
+func (x *BidiWriteObjectResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*WriteObjectResponse) ProtoMessage() {}
+func (*BidiWriteObjectResponse) ProtoMessage() {}
-func (x *WriteObjectResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[22]
+func (x *BidiWriteObjectResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[19]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2225,134 +2228,131 @@ func (x *WriteObjectResponse) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use WriteObjectResponse.ProtoReflect.Descriptor instead.
-func (*WriteObjectResponse) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{22}
+// Deprecated: Use BidiWriteObjectResponse.ProtoReflect.Descriptor instead.
+func (*BidiWriteObjectResponse) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{19}
}
-func (m *WriteObjectResponse) GetWriteStatus() isWriteObjectResponse_WriteStatus {
+func (m *BidiWriteObjectResponse) GetWriteStatus() isBidiWriteObjectResponse_WriteStatus {
if m != nil {
return m.WriteStatus
}
return nil
}
-func (x *WriteObjectResponse) GetPersistedSize() int64 {
- if x, ok := x.GetWriteStatus().(*WriteObjectResponse_PersistedSize); ok {
+func (x *BidiWriteObjectResponse) GetPersistedSize() int64 {
+ if x, ok := x.GetWriteStatus().(*BidiWriteObjectResponse_PersistedSize); ok {
return x.PersistedSize
}
return 0
}
-func (x *WriteObjectResponse) GetResource() *Object {
- if x, ok := x.GetWriteStatus().(*WriteObjectResponse_Resource); ok {
+func (x *BidiWriteObjectResponse) GetResource() *Object {
+ if x, ok := x.GetWriteStatus().(*BidiWriteObjectResponse_Resource); ok {
return x.Resource
}
return nil
}
-type isWriteObjectResponse_WriteStatus interface {
- isWriteObjectResponse_WriteStatus()
+type isBidiWriteObjectResponse_WriteStatus interface {
+ isBidiWriteObjectResponse_WriteStatus()
}
-type WriteObjectResponse_PersistedSize struct {
+type BidiWriteObjectResponse_PersistedSize struct {
// The total number of bytes that have been processed for the given object
// from all `WriteObject` calls. Only set if the upload has not finalized.
PersistedSize int64 `protobuf:"varint,1,opt,name=persisted_size,json=persistedSize,proto3,oneof"`
}
-type WriteObjectResponse_Resource struct {
+type BidiWriteObjectResponse_Resource struct {
// A resource containing the metadata for the uploaded object. Only set if
// the upload has finalized.
Resource *Object `protobuf:"bytes,2,opt,name=resource,proto3,oneof"`
}
-func (*WriteObjectResponse_PersistedSize) isWriteObjectResponse_WriteStatus() {}
+func (*BidiWriteObjectResponse_PersistedSize) isBidiWriteObjectResponse_WriteStatus() {}
-func (*WriteObjectResponse_Resource) isWriteObjectResponse_WriteStatus() {}
+func (*BidiWriteObjectResponse_Resource) isBidiWriteObjectResponse_WriteStatus() {}
-// Request message for BidiWriteObject.
-type BidiWriteObjectRequest struct {
+// Request message for ListObjects.
+type ListObjectsRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // The first message of each stream should set one of the following.
- //
- // Types that are assignable to FirstMessage:
- //
- // *BidiWriteObjectRequest_UploadId
- // *BidiWriteObjectRequest_WriteObjectSpec
- FirstMessage isBidiWriteObjectRequest_FirstMessage `protobuf_oneof:"first_message"`
- // Required. The offset from the beginning of the object at which the data
- // should be written.
- //
- // In the first `WriteObjectRequest` of a `WriteObject()` action, it
- // indicates the initial offset for the `Write()` call. The value **must** be
- // equal to the `persisted_size` that a call to `QueryWriteStatus()` would
- // return (0 if this is the first write to the object).
- //
- // On subsequent calls, this value **must** be no larger than the sum of the
- // first `write_offset` and the sizes of all `data` chunks sent previously on
- // this stream.
- //
- // An invalid value will cause an error.
- WriteOffset int64 `protobuf:"varint,3,opt,name=write_offset,json=writeOffset,proto3" json:"write_offset,omitempty"`
- // A portion of the data for the object.
- //
- // Types that are assignable to Data:
- //
- // *BidiWriteObjectRequest_ChecksummedData
- Data isBidiWriteObjectRequest_Data `protobuf_oneof:"data"`
- // Checksums for the complete object. If the checksums computed by the service
- // don't match the specified checksums the call will fail. May only be
- // provided in the first or last request (either with first_message, or
- // finish_write set).
- ObjectChecksums *ObjectChecksums `protobuf:"bytes,6,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"`
- // For each BidiWriteObjectRequest where state_lookup is `true` or the client
- // closes the stream, the service will send a BidiWriteObjectResponse
- // containing the current persisted size. The persisted size sent in responses
- // covers all the bytes the server has persisted thus far and can be used to
- // decide what data is safe for the client to drop. Note that the object's
- // current size reported by the BidiWriteObjectResponse may lag behind the
- // number of bytes written by the client. This field is ignored if
- // `finish_write` is set to true.
- StateLookup bool `protobuf:"varint,7,opt,name=state_lookup,json=stateLookup,proto3" json:"state_lookup,omitempty"`
- // Persists data written on the stream, up to and including the current
- // message, to permanent storage. This option should be used sparingly as it
- // may reduce performance. Ongoing writes will periodically be persisted on
- // the server even when `flush` is not set. This field is ignored if
- // `finish_write` is set to true since there's no need to checkpoint or flush
- // if this message completes the write.
- Flush bool `protobuf:"varint,8,opt,name=flush,proto3" json:"flush,omitempty"`
- // If `true`, this indicates that the write is complete. Sending any
- // `WriteObjectRequest`s subsequent to one in which `finish_write` is `true`
- // will cause an error.
- // For a non-resumable write (where the upload_id was not set in the first
- // message), it is an error not to set this field in the final message of the
- // stream.
- FinishWrite bool `protobuf:"varint,9,opt,name=finish_write,json=finishWrite,proto3" json:"finish_write,omitempty"`
- // A set of parameters common to Storage API requests concerning an object.
- CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,10,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
+ // Required. Name of the bucket in which to look for objects.
+ Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
+ // Maximum number of `items` plus `prefixes` to return
+ // in a single page of responses. As duplicate `prefixes` are
+ // omitted, fewer total results may be returned than requested. The service
+ // will use this parameter or 1,000 items, whichever is smaller.
+ PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // A previously-returned page token representing part of the larger set of
+ // results to view.
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+ // If set, returns results in a directory-like mode. `items` will contain
+ // only objects whose names, aside from the `prefix`, do not
+ // contain `delimiter`. Objects whose names, aside from the
+ // `prefix`, contain `delimiter` will have their name,
+ // truncated after the `delimiter`, returned in
+ // `prefixes`. Duplicate `prefixes` are omitted.
+ Delimiter string `protobuf:"bytes,4,opt,name=delimiter,proto3" json:"delimiter,omitempty"`
+ // If true, objects that end in exactly one instance of `delimiter`
+ // will have their metadata included in `items` in addition to
+ // `prefixes`.
+ IncludeTrailingDelimiter bool `protobuf:"varint,5,opt,name=include_trailing_delimiter,json=includeTrailingDelimiter,proto3" json:"include_trailing_delimiter,omitempty"`
+ // Filter results to objects whose names begin with this prefix.
+ Prefix string `protobuf:"bytes,6,opt,name=prefix,proto3" json:"prefix,omitempty"`
+ // If `true`, lists all versions of an object as distinct results.
+ // For more information, see
+ // [Object
+ // Versioning](https://cloud.google.com/storage/docs/object-versioning).
+ Versions bool `protobuf:"varint,7,opt,name=versions,proto3" json:"versions,omitempty"`
+ // Mask specifying which fields to read from each result.
+ // If no mask is specified, will default to all fields except items.acl and
+ // items.owner.
+ // * may be used to mean "all fields".
+ ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,8,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"`
+ // Optional. Filter results to objects whose names are lexicographically equal
+ // to or after lexicographic_start. If lexicographic_end is also set, the
+ // objects listed have names between lexicographic_start (inclusive) and
+ // lexicographic_end (exclusive).
+ LexicographicStart string `protobuf:"bytes,10,opt,name=lexicographic_start,json=lexicographicStart,proto3" json:"lexicographic_start,omitempty"`
+ // Optional. Filter results to objects whose names are lexicographically
+ // before lexicographic_end. If lexicographic_start is also set, the objects
+ // listed have names between lexicographic_start (inclusive) and
+ // lexicographic_end (exclusive).
+ LexicographicEnd string `protobuf:"bytes,11,opt,name=lexicographic_end,json=lexicographicEnd,proto3" json:"lexicographic_end,omitempty"`
+ // Optional. If true, only list all soft-deleted versions of the object.
+ // Soft delete policy is required to set this option.
+ SoftDeleted bool `protobuf:"varint,12,opt,name=soft_deleted,json=softDeleted,proto3" json:"soft_deleted,omitempty"`
+ // Optional. If true, will also include folders and managed folders (besides
+ // objects) in the returned `prefixes`. Requires `delimiter` to be set to '/'.
+ IncludeFoldersAsPrefixes bool `protobuf:"varint,13,opt,name=include_folders_as_prefixes,json=includeFoldersAsPrefixes,proto3" json:"include_folders_as_prefixes,omitempty"`
+ // Optional. Filter results to objects and prefixes that match this glob
+ // pattern. See [List Objects Using
+ // Glob](https://cloud.google.com/storage/docs/json_api/v1/objects/list#list-objects-and-prefixes-using-glob)
+ // for the full syntax.
+ MatchGlob string `protobuf:"bytes,14,opt,name=match_glob,json=matchGlob,proto3" json:"match_glob,omitempty"`
}
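The `prefix`/`delimiter` pair above yields directory-style listing: direct children come back as `items`, deeper names collapse into `prefixes`. A minimal request sketch under the same assumed `storagepb` import; the bucket resource-name form is an assumption:

```go
package main

import (
	"fmt"

	// Assumed import path for the generated storage v2 types.
	storagepb "google.golang.org/genproto/googleapis/storage/v2"
)

func main() {
	// Directory-style listing: objects directly under logs/2024/ come back in
	// items; deeper "subdirectories" are collapsed into prefixes.
	req := &storagepb.ListObjectsRequest{
		Parent:    "projects/_/buckets/my-bucket", // assumed v2 bucket resource-name form
		Prefix:    "logs/2024/",
		Delimiter: "/",
		PageSize:  100, // the service caps any page at 1,000 entries
	}
	fmt.Printf("listing %s under prefix %s\n", req.GetParent(), req.GetPrefix())
}
```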
-func (x *BidiWriteObjectRequest) Reset() {
- *x = BidiWriteObjectRequest{}
+func (x *ListObjectsRequest) Reset() {
+ *x = ListObjectsRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[23]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[20]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *BidiWriteObjectRequest) String() string {
+func (x *ListObjectsRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*BidiWriteObjectRequest) ProtoMessage() {}
+func (*ListObjectsRequest) ProtoMessage() {}
-func (x *BidiWriteObjectRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[23]
+func (x *ListObjectsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[20]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2363,152 +2363,132 @@ func (x *BidiWriteObjectRequest) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use BidiWriteObjectRequest.ProtoReflect.Descriptor instead.
-func (*BidiWriteObjectRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{23}
+// Deprecated: Use ListObjectsRequest.ProtoReflect.Descriptor instead.
+func (*ListObjectsRequest) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{20}
}
-func (m *BidiWriteObjectRequest) GetFirstMessage() isBidiWriteObjectRequest_FirstMessage {
- if m != nil {
- return m.FirstMessage
+func (x *ListObjectsRequest) GetParent() string {
+ if x != nil {
+ return x.Parent
}
- return nil
+ return ""
}
-func (x *BidiWriteObjectRequest) GetUploadId() string {
- if x, ok := x.GetFirstMessage().(*BidiWriteObjectRequest_UploadId); ok {
- return x.UploadId
+func (x *ListObjectsRequest) GetPageSize() int32 {
+ if x != nil {
+ return x.PageSize
+ }
+ return 0
+}
+
+func (x *ListObjectsRequest) GetPageToken() string {
+ if x != nil {
+ return x.PageToken
}
return ""
}
-func (x *BidiWriteObjectRequest) GetWriteObjectSpec() *WriteObjectSpec {
- if x, ok := x.GetFirstMessage().(*BidiWriteObjectRequest_WriteObjectSpec); ok {
- return x.WriteObjectSpec
+func (x *ListObjectsRequest) GetDelimiter() string {
+ if x != nil {
+ return x.Delimiter
}
- return nil
+ return ""
}
-func (x *BidiWriteObjectRequest) GetWriteOffset() int64 {
+func (x *ListObjectsRequest) GetIncludeTrailingDelimiter() bool {
if x != nil {
- return x.WriteOffset
+ return x.IncludeTrailingDelimiter
}
- return 0
+ return false
}
-func (m *BidiWriteObjectRequest) GetData() isBidiWriteObjectRequest_Data {
- if m != nil {
- return m.Data
+func (x *ListObjectsRequest) GetPrefix() string {
+ if x != nil {
+ return x.Prefix
}
- return nil
+ return ""
}
-func (x *BidiWriteObjectRequest) GetChecksummedData() *ChecksummedData {
- if x, ok := x.GetData().(*BidiWriteObjectRequest_ChecksummedData); ok {
- return x.ChecksummedData
+func (x *ListObjectsRequest) GetVersions() bool {
+ if x != nil {
+ return x.Versions
}
- return nil
+ return false
}
-func (x *BidiWriteObjectRequest) GetObjectChecksums() *ObjectChecksums {
+func (x *ListObjectsRequest) GetReadMask() *fieldmaskpb.FieldMask {
if x != nil {
- return x.ObjectChecksums
+ return x.ReadMask
}
return nil
}
-func (x *BidiWriteObjectRequest) GetStateLookup() bool {
+func (x *ListObjectsRequest) GetLexicographicStart() string {
if x != nil {
- return x.StateLookup
+ return x.LexicographicStart
}
- return false
+ return ""
}
-func (x *BidiWriteObjectRequest) GetFlush() bool {
+func (x *ListObjectsRequest) GetLexicographicEnd() string {
if x != nil {
- return x.Flush
+ return x.LexicographicEnd
}
- return false
+ return ""
}
-func (x *BidiWriteObjectRequest) GetFinishWrite() bool {
+func (x *ListObjectsRequest) GetSoftDeleted() bool {
if x != nil {
- return x.FinishWrite
+ return x.SoftDeleted
}
return false
}
-func (x *BidiWriteObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
+func (x *ListObjectsRequest) GetIncludeFoldersAsPrefixes() bool {
if x != nil {
- return x.CommonObjectRequestParams
+ return x.IncludeFoldersAsPrefixes
}
- return nil
+ return false
}
-type isBidiWriteObjectRequest_FirstMessage interface {
- isBidiWriteObjectRequest_FirstMessage()
+func (x *ListObjectsRequest) GetMatchGlob() string {
+ if x != nil {
+ return x.MatchGlob
+ }
+ return ""
}
-type BidiWriteObjectRequest_UploadId struct {
- // For resumable uploads. This should be the `upload_id` returned from a
- // call to `StartResumableWriteResponse`.
- UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3,oneof"`
-}
+// Request object for `QueryWriteStatus`.
+type QueryWriteStatusRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-type BidiWriteObjectRequest_WriteObjectSpec struct {
- // For non-resumable uploads. Describes the overall upload, including the
- // destination bucket and object name, preconditions, etc.
- WriteObjectSpec *WriteObjectSpec `protobuf:"bytes,2,opt,name=write_object_spec,json=writeObjectSpec,proto3,oneof"`
-}
-
-func (*BidiWriteObjectRequest_UploadId) isBidiWriteObjectRequest_FirstMessage() {}
-
-func (*BidiWriteObjectRequest_WriteObjectSpec) isBidiWriteObjectRequest_FirstMessage() {}
-
-type isBidiWriteObjectRequest_Data interface {
- isBidiWriteObjectRequest_Data()
-}
-
-type BidiWriteObjectRequest_ChecksummedData struct {
- // The data to insert. If a crc32c checksum is provided that doesn't match
- // the checksum computed by the service, the request will fail.
- ChecksummedData *ChecksummedData `protobuf:"bytes,4,opt,name=checksummed_data,json=checksummedData,proto3,oneof"`
-}
-
-func (*BidiWriteObjectRequest_ChecksummedData) isBidiWriteObjectRequest_Data() {}
-
-// Response message for BidiWriteObject.
-type BidiWriteObjectResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The response will set one of the following.
- //
- // Types that are assignable to WriteStatus:
- //
- // *BidiWriteObjectResponse_PersistedSize
- // *BidiWriteObjectResponse_Resource
- WriteStatus isBidiWriteObjectResponse_WriteStatus `protobuf_oneof:"write_status"`
+ // Required. The name of the resume token for the object whose write status is
+ // being requested.
+ UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3" json:"upload_id,omitempty"`
+ // A set of parameters common to Storage API requests concerning an object.
+ CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,2,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
}
-func (x *BidiWriteObjectResponse) Reset() {
- *x = BidiWriteObjectResponse{}
+func (x *QueryWriteStatusRequest) Reset() {
+ *x = QueryWriteStatusRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[24]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[21]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *BidiWriteObjectResponse) String() string {
+func (x *QueryWriteStatusRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*BidiWriteObjectResponse) ProtoMessage() {}
+func (*QueryWriteStatusRequest) ProtoMessage() {}
-func (x *BidiWriteObjectResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[24]
+func (x *QueryWriteStatusRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[21]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2519,131 +2499,57 @@ func (x *BidiWriteObjectResponse) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use BidiWriteObjectResponse.ProtoReflect.Descriptor instead.
-func (*BidiWriteObjectResponse) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{24}
-}
-
-func (m *BidiWriteObjectResponse) GetWriteStatus() isBidiWriteObjectResponse_WriteStatus {
- if m != nil {
- return m.WriteStatus
- }
- return nil
+// Deprecated: Use QueryWriteStatusRequest.ProtoReflect.Descriptor instead.
+func (*QueryWriteStatusRequest) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{21}
}
-func (x *BidiWriteObjectResponse) GetPersistedSize() int64 {
- if x, ok := x.GetWriteStatus().(*BidiWriteObjectResponse_PersistedSize); ok {
- return x.PersistedSize
+func (x *QueryWriteStatusRequest) GetUploadId() string {
+ if x != nil {
+ return x.UploadId
}
- return 0
+ return ""
}
-func (x *BidiWriteObjectResponse) GetResource() *Object {
- if x, ok := x.GetWriteStatus().(*BidiWriteObjectResponse_Resource); ok {
- return x.Resource
+func (x *QueryWriteStatusRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
+ if x != nil {
+ return x.CommonObjectRequestParams
}
return nil
}
-type isBidiWriteObjectResponse_WriteStatus interface {
- isBidiWriteObjectResponse_WriteStatus()
-}
-
-type BidiWriteObjectResponse_PersistedSize struct {
- // The total number of bytes that have been processed for the given object
- // from all `WriteObject` calls. Only set if the upload has not finalized.
- PersistedSize int64 `protobuf:"varint,1,opt,name=persisted_size,json=persistedSize,proto3,oneof"`
-}
-
-type BidiWriteObjectResponse_Resource struct {
- // A resource containing the metadata for the uploaded object. Only set if
- // the upload has finalized.
- Resource *Object `protobuf:"bytes,2,opt,name=resource,proto3,oneof"`
-}
-
-func (*BidiWriteObjectResponse_PersistedSize) isBidiWriteObjectResponse_WriteStatus() {}
-
-func (*BidiWriteObjectResponse_Resource) isBidiWriteObjectResponse_WriteStatus() {}
-
-// Request message for ListObjects.
-type ListObjectsRequest struct {
+// Response object for `QueryWriteStatus`.
+type QueryWriteStatusResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // Required. Name of the bucket in which to look for objects.
- Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
- // Maximum number of `items` plus `prefixes` to return
- // in a single page of responses. As duplicate `prefixes` are
- // omitted, fewer total results may be returned than requested. The service
- // will use this parameter or 1,000 items, whichever is smaller.
- PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
- // A previously-returned page token representing part of the larger set of
- // results to view.
- PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
- // If set, returns results in a directory-like mode. `items` will contain
- // only objects whose names, aside from the `prefix`, do not
- // contain `delimiter`. Objects whose names, aside from the
- // `prefix`, contain `delimiter` will have their name,
- // truncated after the `delimiter`, returned in
- // `prefixes`. Duplicate `prefixes` are omitted.
- Delimiter string `protobuf:"bytes,4,opt,name=delimiter,proto3" json:"delimiter,omitempty"`
- // If true, objects that end in exactly one instance of `delimiter`
- // will have their metadata included in `items` in addition to
- // `prefixes`.
- IncludeTrailingDelimiter bool `protobuf:"varint,5,opt,name=include_trailing_delimiter,json=includeTrailingDelimiter,proto3" json:"include_trailing_delimiter,omitempty"`
- // Filter results to objects whose names begin with this prefix.
- Prefix string `protobuf:"bytes,6,opt,name=prefix,proto3" json:"prefix,omitempty"`
- // If `true`, lists all versions of an object as distinct results.
- // For more information, see
- // [Object
- // Versioning](https://cloud.google.com/storage/docs/object-versioning).
- Versions bool `protobuf:"varint,7,opt,name=versions,proto3" json:"versions,omitempty"`
- // Mask specifying which fields to read from each result.
- // If no mask is specified, will default to all fields except items.acl and
- // items.owner.
- // * may be used to mean "all fields".
- ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,8,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"`
- // Optional. Filter results to objects whose names are lexicographically equal
- // to or after lexicographic_start. If lexicographic_end is also set, the
- // objects listed have names between lexicographic_start (inclusive) and
- // lexicographic_end (exclusive).
- LexicographicStart string `protobuf:"bytes,10,opt,name=lexicographic_start,json=lexicographicStart,proto3" json:"lexicographic_start,omitempty"`
- // Optional. Filter results to objects whose names are lexicographically
- // before lexicographic_end. If lexicographic_start is also set, the objects
- // listed have names between lexicographic_start (inclusive) and
- // lexicographic_end (exclusive).
- LexicographicEnd string `protobuf:"bytes,11,opt,name=lexicographic_end,json=lexicographicEnd,proto3" json:"lexicographic_end,omitempty"`
- // Optional. If true, only list all soft-deleted versions of the object.
- // Soft delete policy is required to set this option.
- SoftDeleted bool `protobuf:"varint,12,opt,name=soft_deleted,json=softDeleted,proto3" json:"soft_deleted,omitempty"`
- // Optional. If true, will also include folders and managed folders (besides
- // objects) in the returned `prefixes`. Requires `delimiter` to be set to '/'.
- IncludeFoldersAsPrefixes bool `protobuf:"varint,13,opt,name=include_folders_as_prefixes,json=includeFoldersAsPrefixes,proto3" json:"include_folders_as_prefixes,omitempty"`
- // Optional. Filter results to objects and prefixes that match this glob
- // pattern. See [List Objects Using
- // Glob](https://cloud.google.com/storage/docs/json_api/v1/objects/list#list-objects-and-prefixes-using-glob)
- // for the full syntax.
- MatchGlob string `protobuf:"bytes,14,opt,name=match_glob,json=matchGlob,proto3" json:"match_glob,omitempty"`
+ // The response will set one of the following.
+ //
+ // Types that are assignable to WriteStatus:
+ //
+ // *QueryWriteStatusResponse_PersistedSize
+ // *QueryWriteStatusResponse_Resource
+ WriteStatus isQueryWriteStatusResponse_WriteStatus `protobuf_oneof:"write_status"`
}
-func (x *ListObjectsRequest) Reset() {
- *x = ListObjectsRequest{}
+func (x *QueryWriteStatusResponse) Reset() {
+ *x = QueryWriteStatusResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[25]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[22]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *ListObjectsRequest) String() string {
+func (x *QueryWriteStatusResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*ListObjectsRequest) ProtoMessage() {}
+func (*QueryWriteStatusResponse) ProtoMessage() {}
-func (x *ListObjectsRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[25]
+func (x *QueryWriteStatusResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[22]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2654,250 +2560,53 @@ func (x *ListObjectsRequest) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use ListObjectsRequest.ProtoReflect.Descriptor instead.
-func (*ListObjectsRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{25}
+// Deprecated: Use QueryWriteStatusResponse.ProtoReflect.Descriptor instead.
+func (*QueryWriteStatusResponse) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{22}
}
-func (x *ListObjectsRequest) GetParent() string {
- if x != nil {
- return x.Parent
+func (m *QueryWriteStatusResponse) GetWriteStatus() isQueryWriteStatusResponse_WriteStatus {
+ if m != nil {
+ return m.WriteStatus
}
- return ""
+ return nil
}
-func (x *ListObjectsRequest) GetPageSize() int32 {
- if x != nil {
- return x.PageSize
+func (x *QueryWriteStatusResponse) GetPersistedSize() int64 {
+ if x, ok := x.GetWriteStatus().(*QueryWriteStatusResponse_PersistedSize); ok {
+ return x.PersistedSize
}
return 0
}
-func (x *ListObjectsRequest) GetPageToken() string {
- if x != nil {
- return x.PageToken
- }
- return ""
-}
-
-func (x *ListObjectsRequest) GetDelimiter() string {
- if x != nil {
- return x.Delimiter
+func (x *QueryWriteStatusResponse) GetResource() *Object {
+ if x, ok := x.GetWriteStatus().(*QueryWriteStatusResponse_Resource); ok {
+ return x.Resource
}
- return ""
+ return nil
}
-func (x *ListObjectsRequest) GetIncludeTrailingDelimiter() bool {
- if x != nil {
- return x.IncludeTrailingDelimiter
- }
- return false
+type isQueryWriteStatusResponse_WriteStatus interface {
+ isQueryWriteStatusResponse_WriteStatus()
}
-func (x *ListObjectsRequest) GetPrefix() string {
- if x != nil {
- return x.Prefix
- }
- return ""
+type QueryWriteStatusResponse_PersistedSize struct {
+ // The total number of bytes that have been processed for the given object
+ // from all `WriteObject` calls. This is the correct value for the
+ // 'write_offset' field to use when resuming the `WriteObject` operation.
+ // Only set if the upload has not finalized.
+ PersistedSize int64 `protobuf:"varint,1,opt,name=persisted_size,json=persistedSize,proto3,oneof"`
}
-func (x *ListObjectsRequest) GetVersions() bool {
- if x != nil {
- return x.Versions
- }
- return false
+type QueryWriteStatusResponse_Resource struct {
+ // A resource containing the metadata for the uploaded object. Only set if
+ // the upload has finalized.
+ Resource *Object `protobuf:"bytes,2,opt,name=resource,proto3,oneof"`
}
-func (x *ListObjectsRequest) GetReadMask() *fieldmaskpb.FieldMask {
- if x != nil {
- return x.ReadMask
- }
- return nil
-}
+func (*QueryWriteStatusResponse_PersistedSize) isQueryWriteStatusResponse_WriteStatus() {}
-func (x *ListObjectsRequest) GetLexicographicStart() string {
- if x != nil {
- return x.LexicographicStart
- }
- return ""
-}
-
-func (x *ListObjectsRequest) GetLexicographicEnd() string {
- if x != nil {
- return x.LexicographicEnd
- }
- return ""
-}
-
-func (x *ListObjectsRequest) GetSoftDeleted() bool {
- if x != nil {
- return x.SoftDeleted
- }
- return false
-}
-
-func (x *ListObjectsRequest) GetIncludeFoldersAsPrefixes() bool {
- if x != nil {
- return x.IncludeFoldersAsPrefixes
- }
- return false
-}
-
-func (x *ListObjectsRequest) GetMatchGlob() string {
- if x != nil {
- return x.MatchGlob
- }
- return ""
-}
-
-// Request object for `QueryWriteStatus`.
-type QueryWriteStatusRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The name of the resume token for the object whose write status is
- // being requested.
- UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3" json:"upload_id,omitempty"`
- // A set of parameters common to Storage API requests concerning an object.
- CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,2,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
-}
-
-func (x *QueryWriteStatusRequest) Reset() {
- *x = QueryWriteStatusRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[26]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *QueryWriteStatusRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*QueryWriteStatusRequest) ProtoMessage() {}
-
-func (x *QueryWriteStatusRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[26]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use QueryWriteStatusRequest.ProtoReflect.Descriptor instead.
-func (*QueryWriteStatusRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{26}
-}
-
-func (x *QueryWriteStatusRequest) GetUploadId() string {
- if x != nil {
- return x.UploadId
- }
- return ""
-}
-
-func (x *QueryWriteStatusRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
- if x != nil {
- return x.CommonObjectRequestParams
- }
- return nil
-}
-
-// Response object for `QueryWriteStatus`.
-type QueryWriteStatusResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The response will set one of the following.
- //
- // Types that are assignable to WriteStatus:
- //
- // *QueryWriteStatusResponse_PersistedSize
- // *QueryWriteStatusResponse_Resource
- WriteStatus isQueryWriteStatusResponse_WriteStatus `protobuf_oneof:"write_status"`
-}
-
-func (x *QueryWriteStatusResponse) Reset() {
- *x = QueryWriteStatusResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[27]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *QueryWriteStatusResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*QueryWriteStatusResponse) ProtoMessage() {}
-
-func (x *QueryWriteStatusResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[27]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use QueryWriteStatusResponse.ProtoReflect.Descriptor instead.
-func (*QueryWriteStatusResponse) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{27}
-}
-
-func (m *QueryWriteStatusResponse) GetWriteStatus() isQueryWriteStatusResponse_WriteStatus {
- if m != nil {
- return m.WriteStatus
- }
- return nil
-}
-
-func (x *QueryWriteStatusResponse) GetPersistedSize() int64 {
- if x, ok := x.GetWriteStatus().(*QueryWriteStatusResponse_PersistedSize); ok {
- return x.PersistedSize
- }
- return 0
-}
-
-func (x *QueryWriteStatusResponse) GetResource() *Object {
- if x, ok := x.GetWriteStatus().(*QueryWriteStatusResponse_Resource); ok {
- return x.Resource
- }
- return nil
-}
-
-type isQueryWriteStatusResponse_WriteStatus interface {
- isQueryWriteStatusResponse_WriteStatus()
-}
-
-type QueryWriteStatusResponse_PersistedSize struct {
- // The total number of bytes that have been processed for the given object
- // from all `WriteObject` calls. This is the correct value for the
- // 'write_offset' field to use when resuming the `WriteObject` operation.
- // Only set if the upload has not finalized.
- PersistedSize int64 `protobuf:"varint,1,opt,name=persisted_size,json=persistedSize,proto3,oneof"`
-}
-
-type QueryWriteStatusResponse_Resource struct {
- // A resource containing the metadata for the uploaded object. Only set if
- // the upload has finalized.
- Resource *Object `protobuf:"bytes,2,opt,name=resource,proto3,oneof"`
-}
-
-func (*QueryWriteStatusResponse_PersistedSize) isQueryWriteStatusResponse_WriteStatus() {}
-
-func (*QueryWriteStatusResponse_Resource) isQueryWriteStatusResponse_WriteStatus() {}
+func (*QueryWriteStatusResponse_Resource) isQueryWriteStatusResponse_WriteStatus() {}
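`QueryWriteStatusResponse` ties the resume loop together: `persisted_size` is the correct `write_offset` to continue from, while `resource` means the upload already finalized. A minimal sketch, assuming the generated `StorageClient` interface lives in the same package; `resumeOffset` is a hypothetical helper:

```go
package storageexample

import (
	"context"

	// Assumed import path; StorageClient is generated alongside these messages.
	storagepb "google.golang.org/genproto/googleapis/storage/v2"
)

// resumeOffset asks the service where a resumable upload stands. It returns
// the offset to continue writing from, with done=true when the upload has
// already finalized.
func resumeOffset(ctx context.Context, client storagepb.StorageClient, uploadID string) (offset int64, done bool, err error) {
	resp, err := client.QueryWriteStatus(ctx, &storagepb.QueryWriteStatusRequest{
		UploadId: uploadID,
	})
	if err != nil {
		return 0, false, err
	}
	switch ws := resp.GetWriteStatus().(type) {
	case *storagepb.QueryWriteStatusResponse_PersistedSize:
		// Not finalized: persisted_size is the write_offset to resume at.
		return ws.PersistedSize, false, nil
	case *storagepb.QueryWriteStatusResponse_Resource:
		// Finalized: nothing left to write.
		return 0, true, nil
	}
	return 0, false, nil
}
```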
// Request message for RewriteObject.
// If the source object is encrypted using a Customer-Supplied Encryption Key
@@ -3011,7 +2720,7 @@ type RewriteObjectRequest struct {
func (x *RewriteObjectRequest) Reset() {
*x = RewriteObjectRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[28]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[23]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3024,7 +2733,7 @@ func (x *RewriteObjectRequest) String() string {
func (*RewriteObjectRequest) ProtoMessage() {}
func (x *RewriteObjectRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[28]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[23]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3037,7 +2746,7 @@ func (x *RewriteObjectRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use RewriteObjectRequest.ProtoReflect.Descriptor instead.
func (*RewriteObjectRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{28}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{23}
}
func (x *RewriteObjectRequest) GetDestinationName() string {
@@ -3227,7 +2936,7 @@ type RewriteResponse struct {
func (x *RewriteResponse) Reset() {
*x = RewriteResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[29]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[24]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3240,7 +2949,7 @@ func (x *RewriteResponse) String() string {
func (*RewriteResponse) ProtoMessage() {}
func (x *RewriteResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[29]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[24]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3253,7 +2962,7 @@ func (x *RewriteResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use RewriteResponse.ProtoReflect.Descriptor instead.
func (*RewriteResponse) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{29}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{24}
}
func (x *RewriteResponse) GetTotalBytesRewritten() int64 {
@@ -3312,7 +3021,7 @@ type StartResumableWriteRequest struct {
func (x *StartResumableWriteRequest) Reset() {
*x = StartResumableWriteRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[30]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[25]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3325,7 +3034,7 @@ func (x *StartResumableWriteRequest) String() string {
func (*StartResumableWriteRequest) ProtoMessage() {}
func (x *StartResumableWriteRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[30]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[25]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3338,7 +3047,7 @@ func (x *StartResumableWriteRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use StartResumableWriteRequest.ProtoReflect.Descriptor instead.
func (*StartResumableWriteRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{25}
}
func (x *StartResumableWriteRequest) GetWriteObjectSpec() *WriteObjectSpec {
@@ -3376,7 +3085,7 @@ type StartResumableWriteResponse struct {
func (x *StartResumableWriteResponse) Reset() {
*x = StartResumableWriteResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[31]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[26]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3389,7 +3098,7 @@ func (x *StartResumableWriteResponse) String() string {
func (*StartResumableWriteResponse) ProtoMessage() {}
func (x *StartResumableWriteResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[31]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[26]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3402,7 +3111,7 @@ func (x *StartResumableWriteResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use StartResumableWriteResponse.ProtoReflect.Descriptor instead.
func (*StartResumableWriteResponse) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{31}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{26}
}
func (x *StartResumableWriteResponse) GetUploadId() string {
@@ -3459,7 +3168,7 @@ type UpdateObjectRequest struct {
func (x *UpdateObjectRequest) Reset() {
*x = UpdateObjectRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[32]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[27]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3472,7 +3181,7 @@ func (x *UpdateObjectRequest) String() string {
func (*UpdateObjectRequest) ProtoMessage() {}
func (x *UpdateObjectRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[32]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[27]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3485,7 +3194,7 @@ func (x *UpdateObjectRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use UpdateObjectRequest.ProtoReflect.Descriptor instead.
func (*UpdateObjectRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{32}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{27}
}
func (x *UpdateObjectRequest) GetObject() *Object {
@@ -3558,7 +3267,7 @@ type GetServiceAccountRequest struct {
func (x *GetServiceAccountRequest) Reset() {
*x = GetServiceAccountRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[33]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[28]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3571,7 +3280,7 @@ func (x *GetServiceAccountRequest) String() string {
func (*GetServiceAccountRequest) ProtoMessage() {}
func (x *GetServiceAccountRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[33]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[28]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3584,7 +3293,7 @@ func (x *GetServiceAccountRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetServiceAccountRequest.ProtoReflect.Descriptor instead.
func (*GetServiceAccountRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{33}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{28}
}
func (x *GetServiceAccountRequest) GetProject() string {
@@ -3594,37 +3303,35 @@ func (x *GetServiceAccountRequest) GetProject() string {
return ""
}
-// Request message for CreateHmacKey.
-type CreateHmacKeyRequest struct {
+// A service account, owned by Cloud Storage, which may be used when taking
+// action on behalf of a given project, for example to publish Pub/Sub
+// notifications or to retrieve security keys.
+type ServiceAccount struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // Required. The project that the HMAC-owning service account lives in, in the
- // format of "projects/{projectIdentifier}". {projectIdentifier} can be the
- // project ID or project number.
- Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"`
- // Required. The service account to create the HMAC for.
- ServiceAccountEmail string `protobuf:"bytes,2,opt,name=service_account_email,json=serviceAccountEmail,proto3" json:"service_account_email,omitempty"`
+	// The email address of the service account.
+ EmailAddress string `protobuf:"bytes,1,opt,name=email_address,json=emailAddress,proto3" json:"email_address,omitempty"`
}
-func (x *CreateHmacKeyRequest) Reset() {
- *x = CreateHmacKeyRequest{}
+func (x *ServiceAccount) Reset() {
+ *x = ServiceAccount{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[34]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[29]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *CreateHmacKeyRequest) String() string {
+func (x *ServiceAccount) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*CreateHmacKeyRequest) ProtoMessage() {}
+func (*ServiceAccount) ProtoMessage() {}
-func (x *CreateHmacKeyRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[34]
+func (x *ServiceAccount) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[29]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3635,21 +3342,74 @@ func (x *CreateHmacKeyRequest) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use CreateHmacKeyRequest.ProtoReflect.Descriptor instead.
-func (*CreateHmacKeyRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{34}
-}
-
-func (x *CreateHmacKeyRequest) GetProject() string {
- if x != nil {
- return x.Project
- }
- return ""
+// Deprecated: Use ServiceAccount.ProtoReflect.Descriptor instead.
+func (*ServiceAccount) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{29}
}
-func (x *CreateHmacKeyRequest) GetServiceAccountEmail() string {
+func (x *ServiceAccount) GetEmailAddress() string {
if x != nil {
- return x.ServiceAccountEmail
+ return x.EmailAddress
+ }
+ return ""
+}
+
+// Request message for CreateHmacKey.
+type CreateHmacKeyRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The project that the HMAC-owning service account lives in, in the
+ // format of "projects/{projectIdentifier}". {projectIdentifier} can be the
+ // project ID or project number.
+ Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"`
+ // Required. The service account to create the HMAC for.
+ ServiceAccountEmail string `protobuf:"bytes,2,opt,name=service_account_email,json=serviceAccountEmail,proto3" json:"service_account_email,omitempty"`
+}
+
+func (x *CreateHmacKeyRequest) Reset() {
+ *x = CreateHmacKeyRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[30]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CreateHmacKeyRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CreateHmacKeyRequest) ProtoMessage() {}
+
+func (x *CreateHmacKeyRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[30]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CreateHmacKeyRequest.ProtoReflect.Descriptor instead.
+func (*CreateHmacKeyRequest) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30}
+}
+
+func (x *CreateHmacKeyRequest) GetProject() string {
+ if x != nil {
+ return x.Project
+ }
+ return ""
+}
+
+func (x *CreateHmacKeyRequest) GetServiceAccountEmail() string {
+ if x != nil {
+ return x.ServiceAccountEmail
}
return ""
}
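
// Illustrative sketch, hand-written rather than protoc output: populating a
// CreateHmacKeyRequest with the resource formats documented on its fields,
// where the project may be referenced by ID or by number (the values below
// are placeholders).
//
//	req := &CreateHmacKeyRequest{
//		Project:             "projects/my-project", // or "projects/123456789"
//		ServiceAccountEmail: "sa@my-project.iam.gserviceaccount.com",
//	}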
@@ -3670,7 +3430,7 @@ type CreateHmacKeyResponse struct {
func (x *CreateHmacKeyResponse) Reset() {
*x = CreateHmacKeyResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[35]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[31]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3683,7 +3443,7 @@ func (x *CreateHmacKeyResponse) String() string {
func (*CreateHmacKeyResponse) ProtoMessage() {}
func (x *CreateHmacKeyResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[35]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[31]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3696,7 +3456,7 @@ func (x *CreateHmacKeyResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use CreateHmacKeyResponse.ProtoReflect.Descriptor instead.
func (*CreateHmacKeyResponse) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{35}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{31}
}
func (x *CreateHmacKeyResponse) GetMetadata() *HmacKeyMetadata {
@@ -3730,7 +3490,7 @@ type DeleteHmacKeyRequest struct {
func (x *DeleteHmacKeyRequest) Reset() {
*x = DeleteHmacKeyRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[36]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[32]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3743,7 +3503,7 @@ func (x *DeleteHmacKeyRequest) String() string {
func (*DeleteHmacKeyRequest) ProtoMessage() {}
func (x *DeleteHmacKeyRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[36]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[32]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3756,7 +3516,7 @@ func (x *DeleteHmacKeyRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use DeleteHmacKeyRequest.ProtoReflect.Descriptor instead.
func (*DeleteHmacKeyRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{36}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{32}
}
func (x *DeleteHmacKeyRequest) GetAccessId() string {
@@ -3790,7 +3550,7 @@ type GetHmacKeyRequest struct {
func (x *GetHmacKeyRequest) Reset() {
*x = GetHmacKeyRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[37]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[33]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3803,7 +3563,7 @@ func (x *GetHmacKeyRequest) String() string {
func (*GetHmacKeyRequest) ProtoMessage() {}
func (x *GetHmacKeyRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[37]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[33]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3816,7 +3576,7 @@ func (x *GetHmacKeyRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetHmacKeyRequest.ProtoReflect.Descriptor instead.
func (*GetHmacKeyRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{37}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{33}
}
func (x *GetHmacKeyRequest) GetAccessId() string {
@@ -3856,7 +3616,7 @@ type ListHmacKeysRequest struct {
func (x *ListHmacKeysRequest) Reset() {
*x = ListHmacKeysRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[38]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[34]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3869,7 +3629,7 @@ func (x *ListHmacKeysRequest) String() string {
func (*ListHmacKeysRequest) ProtoMessage() {}
func (x *ListHmacKeysRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[38]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[34]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3882,7 +3642,7 @@ func (x *ListHmacKeysRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ListHmacKeysRequest.ProtoReflect.Descriptor instead.
func (*ListHmacKeysRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{38}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{34}
}
func (x *ListHmacKeysRequest) GetProject() string {
@@ -3936,7 +3696,7 @@ type ListHmacKeysResponse struct {
func (x *ListHmacKeysResponse) Reset() {
*x = ListHmacKeysResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[39]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[35]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3949,7 +3709,7 @@ func (x *ListHmacKeysResponse) String() string {
func (*ListHmacKeysResponse) ProtoMessage() {}
func (x *ListHmacKeysResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[39]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[35]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3962,7 +3722,7 @@ func (x *ListHmacKeysResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use ListHmacKeysResponse.ProtoReflect.Descriptor instead.
func (*ListHmacKeysResponse) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{39}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{35}
}
func (x *ListHmacKeysResponse) GetHmacKeys() []*HmacKeyMetadata {
@@ -4002,7 +3762,7 @@ type UpdateHmacKeyRequest struct {
func (x *UpdateHmacKeyRequest) Reset() {
*x = UpdateHmacKeyRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[40]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[36]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4015,7 +3775,7 @@ func (x *UpdateHmacKeyRequest) String() string {
func (*UpdateHmacKeyRequest) ProtoMessage() {}
func (x *UpdateHmacKeyRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[40]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[36]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4028,7 +3788,7 @@ func (x *UpdateHmacKeyRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use UpdateHmacKeyRequest.ProtoReflect.Descriptor instead.
func (*UpdateHmacKeyRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{36}
}
func (x *UpdateHmacKeyRequest) GetHmacKey() *HmacKeyMetadata {
@@ -4045,6 +3805,123 @@ func (x *UpdateHmacKeyRequest) GetUpdateMask() *fieldmaskpb.FieldMask {
return nil
}
+// Hmac Key Metadata, which includes all information other than the secret.
+type HmacKeyMetadata struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Immutable. Resource name ID of the key in the format
+ // {projectIdentifier}/{accessId}.
+ // {projectIdentifier} can be the project ID or project number.
+ Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+ // Immutable. Globally unique id for keys.
+ AccessId string `protobuf:"bytes,2,opt,name=access_id,json=accessId,proto3" json:"access_id,omitempty"`
+ // Immutable. Identifies the project that owns the service account of the
+ // specified HMAC key, in the format "projects/{projectIdentifier}".
+ // {projectIdentifier} can be the project ID or project number.
+ Project string `protobuf:"bytes,3,opt,name=project,proto3" json:"project,omitempty"`
+ // Output only. Email of the service account the key authenticates as.
+ ServiceAccountEmail string `protobuf:"bytes,4,opt,name=service_account_email,json=serviceAccountEmail,proto3" json:"service_account_email,omitempty"`
+ // Optional. State of the key. One of ACTIVE, INACTIVE, or DELETED.
+ // Writable, can be updated by UpdateHmacKey operation.
+ State string `protobuf:"bytes,5,opt,name=state,proto3" json:"state,omitempty"`
+ // Output only. The creation time of the HMAC key.
+ CreateTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
+ // Output only. The last modification time of the HMAC key metadata.
+ UpdateTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"`
+ // Optional. The etag of the HMAC key.
+ Etag string `protobuf:"bytes,8,opt,name=etag,proto3" json:"etag,omitempty"`
+}
+
+func (x *HmacKeyMetadata) Reset() {
+ *x = HmacKeyMetadata{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[37]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HmacKeyMetadata) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HmacKeyMetadata) ProtoMessage() {}
+
+func (x *HmacKeyMetadata) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[37]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HmacKeyMetadata.ProtoReflect.Descriptor instead.
+func (*HmacKeyMetadata) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{37}
+}
+
+func (x *HmacKeyMetadata) GetId() string {
+ if x != nil {
+ return x.Id
+ }
+ return ""
+}
+
+func (x *HmacKeyMetadata) GetAccessId() string {
+ if x != nil {
+ return x.AccessId
+ }
+ return ""
+}
+
+func (x *HmacKeyMetadata) GetProject() string {
+ if x != nil {
+ return x.Project
+ }
+ return ""
+}
+
+func (x *HmacKeyMetadata) GetServiceAccountEmail() string {
+ if x != nil {
+ return x.ServiceAccountEmail
+ }
+ return ""
+}
+
+func (x *HmacKeyMetadata) GetState() string {
+ if x != nil {
+ return x.State
+ }
+ return ""
+}
+
+func (x *HmacKeyMetadata) GetCreateTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.CreateTime
+ }
+ return nil
+}
+
+func (x *HmacKeyMetadata) GetUpdateTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.UpdateTime
+ }
+ return nil
+}
+
+func (x *HmacKeyMetadata) GetEtag() string {
+ if x != nil {
+ return x.Etag
+ }
+ return ""
+}
+
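// Illustrative sketch, hand-written rather than protoc output: the Id field
// documented above packs "{projectIdentifier}/{accessId}", so given some
// meta *HmacKeyMetadata the two parts can be recovered with strings.Cut
// (requires the "strings" import):
//
//	projectIdent, accessID, ok := strings.Cut(meta.GetId(), "/")
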
// Parameters that can be passed to any object request.
type CommonObjectRequestParams struct {
state protoimpl.MessageState
@@ -4065,7 +3942,7 @@ type CommonObjectRequestParams struct {
func (x *CommonObjectRequestParams) Reset() {
*x = CommonObjectRequestParams{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[41]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[38]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4078,7 +3955,7 @@ func (x *CommonObjectRequestParams) String() string {
func (*CommonObjectRequestParams) ProtoMessage() {}
func (x *CommonObjectRequestParams) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[41]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[38]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4091,7 +3968,7 @@ func (x *CommonObjectRequestParams) ProtoReflect() protoreflect.Message {
// Deprecated: Use CommonObjectRequestParams.ProtoReflect.Descriptor instead.
func (*CommonObjectRequestParams) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{41}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{38}
}
func (x *CommonObjectRequestParams) GetEncryptionAlgorithm() string {
@@ -4125,7 +4002,7 @@ type ServiceConstants struct {
func (x *ServiceConstants) Reset() {
*x = ServiceConstants{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[42]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[39]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4138,7 +4015,7 @@ func (x *ServiceConstants) String() string {
func (*ServiceConstants) ProtoMessage() {}
func (x *ServiceConstants) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[42]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[39]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4151,7 +4028,7 @@ func (x *ServiceConstants) ProtoReflect() protoreflect.Message {
// Deprecated: Use ServiceConstants.ProtoReflect.Descriptor instead.
func (*ServiceConstants) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{42}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{39}
}
// A bucket.
@@ -4268,7 +4145,8 @@ type Bucket struct {
// Reserved for future use.
SatisfiesPzs bool `protobuf:"varint,25,opt,name=satisfies_pzs,json=satisfiesPzs,proto3" json:"satisfies_pzs,omitempty"`
// Configuration that, if present, specifies the data placement for a
- // [https://cloud.google.com/storage/docs/use-dual-regions][Dual Region].
+ // [https://cloud.google.com/storage/docs/locations#location-dr][configurable
+ // dual-region].
CustomPlacementConfig *Bucket_CustomPlacementConfig `protobuf:"bytes,26,opt,name=custom_placement_config,json=customPlacementConfig,proto3" json:"custom_placement_config,omitempty"`
// The bucket's Autoclass configuration. If there is no configuration, the
// Autoclass feature will be disabled and have no effect on the bucket.
@@ -4285,7 +4163,7 @@ type Bucket struct {
func (x *Bucket) Reset() {
*x = Bucket{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[43]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[40]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4298,7 +4176,7 @@ func (x *Bucket) String() string {
func (*Bucket) ProtoMessage() {}
func (x *Bucket) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[43]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[40]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4311,7 +4189,7 @@ func (x *Bucket) ProtoReflect() protoreflect.Message {
// Deprecated: Use Bucket.ProtoReflect.Descriptor instead.
func (*Bucket) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40}
}
func (x *Bucket) GetName() string {
@@ -4574,7 +4452,7 @@ type BucketAccessControl struct {
func (x *BucketAccessControl) Reset() {
*x = BucketAccessControl{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[44]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[41]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4587,7 +4465,7 @@ func (x *BucketAccessControl) String() string {
func (*BucketAccessControl) ProtoMessage() {}
func (x *BucketAccessControl) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[44]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[41]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4600,7 +4478,7 @@ func (x *BucketAccessControl) ProtoReflect() protoreflect.Message {
// Deprecated: Use BucketAccessControl.ProtoReflect.Descriptor instead.
func (*BucketAccessControl) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{44}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{41}
}
func (x *BucketAccessControl) GetRole() string {
@@ -4682,7 +4560,7 @@ type ChecksummedData struct {
func (x *ChecksummedData) Reset() {
*x = ChecksummedData{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[45]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[42]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4695,7 +4573,7 @@ func (x *ChecksummedData) String() string {
func (*ChecksummedData) ProtoMessage() {}
func (x *ChecksummedData) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[45]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[42]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4708,7 +4586,7 @@ func (x *ChecksummedData) ProtoReflect() protoreflect.Message {
// Deprecated: Use ChecksummedData.ProtoReflect.Descriptor instead.
func (*ChecksummedData) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{45}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{42}
}
func (x *ChecksummedData) GetContent() []byte {
@@ -4741,15 +4619,15 @@ type ObjectChecksums struct {
// [https://cloud.google.com/storage/docs/hashes-etags#json-api][Hashes and
// ETags: Best Practices].
// Not all objects will provide an MD5 hash. For example, composite objects
- // provide only crc32c hashes.
- // This value is equivalent to running `cat object.txt | openssl md5 -binary`
+ // provide only crc32c hashes. This value is equivalent to running `cat
+ // object.txt | openssl md5 -binary`
Md5Hash []byte `protobuf:"bytes,2,opt,name=md5_hash,json=md5Hash,proto3" json:"md5_hash,omitempty"`
}
func (x *ObjectChecksums) Reset() {
*x = ObjectChecksums{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[46]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[43]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4762,7 +4640,7 @@ func (x *ObjectChecksums) String() string {
func (*ObjectChecksums) ProtoMessage() {}
func (x *ObjectChecksums) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[46]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[43]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4775,7 +4653,7 @@ func (x *ObjectChecksums) ProtoReflect() protoreflect.Message {
// Deprecated: Use ObjectChecksums.ProtoReflect.Descriptor instead.
func (*ObjectChecksums) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{46}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43}
}
func (x *ObjectChecksums) GetCrc32C() uint32 {
@@ -4792,239 +4670,9 @@ func (x *ObjectChecksums) GetMd5Hash() []byte {
return nil
}
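
// verifyMD5 is an illustrative sketch, hand-written rather than protoc
// output. Per the Md5Hash comment above, the hash covers the complete object
// content, so it is only meaningful after a full download, and composite
// objects may not carry one. Assumes "bytes" and "crypto/md5" are imported.
func verifyMD5(data []byte, c *ObjectChecksums) bool {
	want := c.GetMd5Hash() // generated getter is nil-safe
	if len(want) == 0 {
		return true // no MD5 to check against, e.g. a composite object
	}
	got := md5.Sum(data)
	return bytes.Equal(got[:], want)
}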
-// Hmac Key Metadata, which includes all information other than the secret.
-type HmacKeyMetadata struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Immutable. Resource name ID of the key in the format
- // {projectIdentifier}/{accessId}.
- // {projectIdentifier} can be the project ID or project number.
- Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
- // Immutable. Globally unique id for keys.
- AccessId string `protobuf:"bytes,2,opt,name=access_id,json=accessId,proto3" json:"access_id,omitempty"`
- // Immutable. Identifies the project that owns the service account of the
- // specified HMAC key, in the format "projects/{projectIdentifier}".
- // {projectIdentifier} can be the project ID or project number.
- Project string `protobuf:"bytes,3,opt,name=project,proto3" json:"project,omitempty"`
- // Output only. Email of the service account the key authenticates as.
- ServiceAccountEmail string `protobuf:"bytes,4,opt,name=service_account_email,json=serviceAccountEmail,proto3" json:"service_account_email,omitempty"`
- // State of the key. One of ACTIVE, INACTIVE, or DELETED.
- // Writable, can be updated by UpdateHmacKey operation.
- State string `protobuf:"bytes,5,opt,name=state,proto3" json:"state,omitempty"`
- // Output only. The creation time of the HMAC key.
- CreateTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
- // Output only. The last modification time of the HMAC key metadata.
- UpdateTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"`
- // The etag of the HMAC key.
- Etag string `protobuf:"bytes,8,opt,name=etag,proto3" json:"etag,omitempty"`
-}
-
-func (x *HmacKeyMetadata) Reset() {
- *x = HmacKeyMetadata{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[47]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *HmacKeyMetadata) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*HmacKeyMetadata) ProtoMessage() {}
-
-func (x *HmacKeyMetadata) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[47]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use HmacKeyMetadata.ProtoReflect.Descriptor instead.
-func (*HmacKeyMetadata) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{47}
-}
-
-func (x *HmacKeyMetadata) GetId() string {
- if x != nil {
- return x.Id
- }
- return ""
-}
-
-func (x *HmacKeyMetadata) GetAccessId() string {
- if x != nil {
- return x.AccessId
- }
- return ""
-}
-
-func (x *HmacKeyMetadata) GetProject() string {
- if x != nil {
- return x.Project
- }
- return ""
-}
-
-func (x *HmacKeyMetadata) GetServiceAccountEmail() string {
- if x != nil {
- return x.ServiceAccountEmail
- }
- return ""
-}
-
-func (x *HmacKeyMetadata) GetState() string {
- if x != nil {
- return x.State
- }
- return ""
-}
-
-func (x *HmacKeyMetadata) GetCreateTime() *timestamppb.Timestamp {
- if x != nil {
- return x.CreateTime
- }
- return nil
-}
-
-func (x *HmacKeyMetadata) GetUpdateTime() *timestamppb.Timestamp {
- if x != nil {
- return x.UpdateTime
- }
- return nil
-}
-
-func (x *HmacKeyMetadata) GetEtag() string {
- if x != nil {
- return x.Etag
- }
- return ""
-}
-
-// A directive to publish Pub/Sub notifications upon changes to a bucket.
-type NotificationConfig struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The resource name of this NotificationConfig.
- // Format:
- // `projects/{project}/buckets/{bucket}/notificationConfigs/{notificationConfig}`
- // The `{project}` portion may be `_` for globally unique buckets.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- // Required. The Pub/Sub topic to which this subscription publishes. Formatted
- // as:
- // '//pubsub.googleapis.com/projects/{project-identifier}/topics/{my-topic}'
- Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"`
- // The etag of the NotificationConfig.
- // If included in the metadata of GetNotificationConfigRequest, the operation
- // will only be performed if the etag matches that of the NotificationConfig.
- Etag string `protobuf:"bytes,7,opt,name=etag,proto3" json:"etag,omitempty"`
- // If present, only send notifications about listed event types. If
- // empty, sent notifications for all event types.
- EventTypes []string `protobuf:"bytes,3,rep,name=event_types,json=eventTypes,proto3" json:"event_types,omitempty"`
- // A list of additional attributes to attach to each Pub/Sub
- // message published for this NotificationConfig.
- CustomAttributes map[string]string `protobuf:"bytes,4,rep,name=custom_attributes,json=customAttributes,proto3" json:"custom_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
- // If present, only apply this NotificationConfig to object names that
- // begin with this prefix.
- ObjectNamePrefix string `protobuf:"bytes,5,opt,name=object_name_prefix,json=objectNamePrefix,proto3" json:"object_name_prefix,omitempty"`
- // Required. The desired content of the Payload.
- PayloadFormat string `protobuf:"bytes,6,opt,name=payload_format,json=payloadFormat,proto3" json:"payload_format,omitempty"`
-}
-
-func (x *NotificationConfig) Reset() {
- *x = NotificationConfig{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[48]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *NotificationConfig) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*NotificationConfig) ProtoMessage() {}
-
-func (x *NotificationConfig) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[48]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use NotificationConfig.ProtoReflect.Descriptor instead.
-func (*NotificationConfig) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{48}
-}
-
-func (x *NotificationConfig) GetName() string {
- if x != nil {
- return x.Name
- }
- return ""
-}
-
-func (x *NotificationConfig) GetTopic() string {
- if x != nil {
- return x.Topic
- }
- return ""
-}
-
-func (x *NotificationConfig) GetEtag() string {
- if x != nil {
- return x.Etag
- }
- return ""
-}
-
-func (x *NotificationConfig) GetEventTypes() []string {
- if x != nil {
- return x.EventTypes
- }
- return nil
-}
-
-func (x *NotificationConfig) GetCustomAttributes() map[string]string {
- if x != nil {
- return x.CustomAttributes
- }
- return nil
-}
-
-func (x *NotificationConfig) GetObjectNamePrefix() string {
- if x != nil {
- return x.ObjectNamePrefix
- }
- return ""
-}
-
-func (x *NotificationConfig) GetPayloadFormat() string {
- if x != nil {
- return x.PayloadFormat
- }
- return ""
-}
-
-// Describes the Customer-Supplied Encryption Key mechanism used to store an
-// Object's data at rest.
-type CustomerEncryption struct {
+// Describes the Customer-Supplied Encryption Key mechanism used to store an
+// Object's data at rest.
+type CustomerEncryption struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
@@ -5039,7 +4687,7 @@ type CustomerEncryption struct {
func (x *CustomerEncryption) Reset() {
*x = CustomerEncryption{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[49]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[44]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5052,7 +4700,7 @@ func (x *CustomerEncryption) String() string {
func (*CustomerEncryption) ProtoMessage() {}
func (x *CustomerEncryption) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[49]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[44]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5065,7 +4713,7 @@ func (x *CustomerEncryption) ProtoReflect() protoreflect.Message {
// Deprecated: Use CustomerEncryption.ProtoReflect.Descriptor instead.
func (*CustomerEncryption) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{49}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{44}
}
func (x *CustomerEncryption) GetEncryptionAlgorithm() string {
@@ -5148,7 +4796,10 @@ type Object struct {
// Components are accumulated by compose operations.
ComponentCount int32 `protobuf:"varint,15,opt,name=component_count,json=componentCount,proto3" json:"component_count,omitempty"`
// Output only. Hashes for the data part of this object. This field is used
- // for output only and will be silently ignored if provided in requests.
+ // for output only and will be silently ignored if provided in requests. The
+ // checksums of the complete object regardless of data range. If the object is
+ // downloaded in full, the client should compute one of these checksums over
+ // the downloaded object and compare it against the value provided here.
Checksums *ObjectChecksums `protobuf:"bytes,16,opt,name=checksums,proto3" json:"checksums,omitempty"`
// Output only. The modification time of the object metadata.
// Set initially to object creation time and then updated whenever any
@@ -5214,7 +4865,7 @@ type Object struct {
func (x *Object) Reset() {
*x = Object{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[50]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[45]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5227,7 +4878,7 @@ func (x *Object) String() string {
func (*Object) ProtoMessage() {}
func (x *Object) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[50]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[45]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5240,7 +4891,7 @@ func (x *Object) ProtoReflect() protoreflect.Message {
// Deprecated: Use Object.ProtoReflect.Descriptor instead.
func (*Object) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{50}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{45}
}
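
// Illustrative sketch, hand-written rather than protoc output: the Checksums
// field comment above says a client that downloads an object in full should
// recompute a checksum and compare it with the server-provided value. CRC32C
// uses the Castagnoli polynomial from "hash/crc32".
var castagnoli = crc32.MakeTable(crc32.Castagnoli) // cache the table; MakeTable is not free

func crc32cOfDownload(data []byte) uint32 {
	return crc32.Checksum(data, castagnoli)
}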
func (x *Object) GetName() string {
@@ -5452,7 +5103,10 @@ type ObjectAccessControl struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // The access permission for the entity.
+ // The access permission for the entity. One of the following values:
+ // * `READER`
+ // * `WRITER`
+ // * `OWNER`
Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"`
// The ID of the access-control entry.
Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
@@ -5496,7 +5150,7 @@ type ObjectAccessControl struct {
func (x *ObjectAccessControl) Reset() {
*x = ObjectAccessControl{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[51]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[46]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5509,7 +5163,7 @@ func (x *ObjectAccessControl) String() string {
func (*ObjectAccessControl) ProtoMessage() {}
func (x *ObjectAccessControl) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[51]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[46]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5522,7 +5176,7 @@ func (x *ObjectAccessControl) ProtoReflect() protoreflect.Message {
// Deprecated: Use ObjectAccessControl.ProtoReflect.Descriptor instead.
func (*ObjectAccessControl) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{51}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{46}
}
func (x *ObjectAccessControl) GetRole() string {
@@ -5607,7 +5261,7 @@ type ListObjectsResponse struct {
func (x *ListObjectsResponse) Reset() {
*x = ListObjectsResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[52]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[47]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5620,7 +5274,7 @@ func (x *ListObjectsResponse) String() string {
func (*ListObjectsResponse) ProtoMessage() {}
func (x *ListObjectsResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[52]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[47]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5633,7 +5287,7 @@ func (x *ListObjectsResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use ListObjectsResponse.ProtoReflect.Descriptor instead.
func (*ListObjectsResponse) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{52}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{47}
}
func (x *ListObjectsResponse) GetObjects() []*Object {
@@ -5672,7 +5326,7 @@ type ProjectTeam struct {
func (x *ProjectTeam) Reset() {
*x = ProjectTeam{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[53]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[48]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5685,7 +5339,7 @@ func (x *ProjectTeam) String() string {
func (*ProjectTeam) ProtoMessage() {}
func (x *ProjectTeam) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[53]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[48]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5698,7 +5352,7 @@ func (x *ProjectTeam) ProtoReflect() protoreflect.Message {
// Deprecated: Use ProjectTeam.ProtoReflect.Descriptor instead.
func (*ProjectTeam) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{53}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{48}
}
func (x *ProjectTeam) GetProjectNumber() string {
@@ -5715,57 +5369,6 @@ func (x *ProjectTeam) GetTeam() string {
return ""
}
-// A service account, owned by Cloud Storage, which may be used when taking
-// action on behalf of a given project, for example to publish Pub/Sub
-// notifications or to retrieve security keys.
-type ServiceAccount struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The ID of the notification.
- EmailAddress string `protobuf:"bytes,1,opt,name=email_address,json=emailAddress,proto3" json:"email_address,omitempty"`
-}
-
-func (x *ServiceAccount) Reset() {
- *x = ServiceAccount{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[54]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ServiceAccount) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ServiceAccount) ProtoMessage() {}
-
-func (x *ServiceAccount) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[54]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ServiceAccount.ProtoReflect.Descriptor instead.
-func (*ServiceAccount) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{54}
-}
-
-func (x *ServiceAccount) GetEmailAddress() string {
- if x != nil {
- return x.EmailAddress
- }
- return ""
-}
-
// The owner of a specific resource.
type Owner struct {
state protoimpl.MessageState
@@ -5781,7 +5384,7 @@ type Owner struct {
func (x *Owner) Reset() {
*x = Owner{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[55]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[49]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5794,7 +5397,7 @@ func (x *Owner) String() string {
func (*Owner) ProtoMessage() {}
func (x *Owner) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[55]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[49]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5807,7 +5410,7 @@ func (x *Owner) ProtoReflect() protoreflect.Message {
// Deprecated: Use Owner.ProtoReflect.Descriptor instead.
func (*Owner) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{55}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{49}
}
func (x *Owner) GetEntity() string {
@@ -5841,7 +5444,7 @@ type ContentRange struct {
func (x *ContentRange) Reset() {
*x = ContentRange{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[56]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[50]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -5854,7 +5457,7 @@ func (x *ContentRange) String() string {
func (*ContentRange) ProtoMessage() {}
func (x *ContentRange) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[56]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[50]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5867,7 +5470,7 @@ func (x *ContentRange) ProtoReflect() protoreflect.Message {
// Deprecated: Use ContentRange.ProtoReflect.Descriptor instead.
func (*ContentRange) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{56}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{50}
}
func (x *ContentRange) GetStart() int64 {
@@ -5891,38 +5494,33 @@ func (x *ContentRange) GetCompleteLength() int64 {
return 0
}
-// Description of a source object for a composition request.
-type ComposeObjectRequest_SourceObject struct {
+// Request message for DeleteNotificationConfig.
+type DeleteNotificationConfigRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // Required. The source object's name. All source objects must reside in the
- // same bucket.
+	// Required. The resource name of the NotificationConfig to delete.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- // The generation of this object to use as the source.
- Generation int64 `protobuf:"varint,2,opt,name=generation,proto3" json:"generation,omitempty"`
- // Conditions that must be met for this operation to execute.
- ObjectPreconditions *ComposeObjectRequest_SourceObject_ObjectPreconditions `protobuf:"bytes,3,opt,name=object_preconditions,json=objectPreconditions,proto3" json:"object_preconditions,omitempty"`
}
-func (x *ComposeObjectRequest_SourceObject) Reset() {
- *x = ComposeObjectRequest_SourceObject{}
+func (x *DeleteNotificationConfigRequest) Reset() {
+ *x = DeleteNotificationConfigRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[57]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[51]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *ComposeObjectRequest_SourceObject) String() string {
+func (x *DeleteNotificationConfigRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*ComposeObjectRequest_SourceObject) ProtoMessage() {}
+func (*DeleteNotificationConfigRequest) ProtoMessage() {}
-func (x *ComposeObjectRequest_SourceObject) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[57]
+func (x *DeleteNotificationConfigRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[51]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5933,61 +5531,47 @@ func (x *ComposeObjectRequest_SourceObject) ProtoReflect() protoreflect.Message
return mi.MessageOf(x)
}
-// Deprecated: Use ComposeObjectRequest_SourceObject.ProtoReflect.Descriptor instead.
-func (*ComposeObjectRequest_SourceObject) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{12, 0}
+// Deprecated: Use DeleteNotificationConfigRequest.ProtoReflect.Descriptor instead.
+func (*DeleteNotificationConfigRequest) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{51}
}
-func (x *ComposeObjectRequest_SourceObject) GetName() string {
+func (x *DeleteNotificationConfigRequest) GetName() string {
if x != nil {
return x.Name
}
return ""
}
-func (x *ComposeObjectRequest_SourceObject) GetGeneration() int64 {
- if x != nil {
- return x.Generation
- }
- return 0
-}
-
-func (x *ComposeObjectRequest_SourceObject) GetObjectPreconditions() *ComposeObjectRequest_SourceObject_ObjectPreconditions {
- if x != nil {
- return x.ObjectPreconditions
- }
- return nil
-}
-
-// Preconditions for a source object of a composition request.
-type ComposeObjectRequest_SourceObject_ObjectPreconditions struct {
+// Request message for GetNotificationConfig.
+type GetNotificationConfigRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // Only perform the composition if the generation of the source object
- // that would be used matches this value. If this value and a generation
- // are both specified, they must be the same value or the call will fail.
- IfGenerationMatch *int64 `protobuf:"varint,1,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"`
+	// Required. The resource name of the NotificationConfig to get.
+ // Format:
+ // `projects/{project}/buckets/{bucket}/notificationConfigs/{notificationConfig}`
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
}
-func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) Reset() {
- *x = ComposeObjectRequest_SourceObject_ObjectPreconditions{}
+func (x *GetNotificationConfigRequest) Reset() {
+ *x = GetNotificationConfigRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[58]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[52]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) String() string {
+func (x *GetNotificationConfigRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*ComposeObjectRequest_SourceObject_ObjectPreconditions) ProtoMessage() {}
+func (*GetNotificationConfigRequest) ProtoMessage() {}
-func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[58]
+func (x *GetNotificationConfigRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[52]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -5998,45 +5582,47 @@ func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) ProtoReflect() p
return mi.MessageOf(x)
}
-// Deprecated: Use ComposeObjectRequest_SourceObject_ObjectPreconditions.ProtoReflect.Descriptor instead.
-func (*ComposeObjectRequest_SourceObject_ObjectPreconditions) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{12, 0, 0}
+// Deprecated: Use GetNotificationConfigRequest.ProtoReflect.Descriptor instead.
+func (*GetNotificationConfigRequest) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{52}
}
-func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) GetIfGenerationMatch() int64 {
- if x != nil && x.IfGenerationMatch != nil {
- return *x.IfGenerationMatch
+func (x *GetNotificationConfigRequest) GetName() string {
+ if x != nil {
+ return x.Name
}
- return 0
+ return ""
}
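
// Illustrative sketch, hand-written rather than protoc output: building the
// Name documented above for Get/DeleteNotificationConfigRequest with "fmt";
// the bucket and config segments below are placeholders, and the {project}
// segment may be "_" when the bucket name is globally unique.
//
//	name := fmt.Sprintf("projects/%s/buckets/%s/notificationConfigs/%s",
//		"_", "my-bucket", "my-config")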
-// Billing properties of a bucket.
-type Bucket_Billing struct {
+// Request message for CreateNotificationConfig.
+type CreateNotificationConfigRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // When set to true, Requester Pays is enabled for this bucket.
- RequesterPays bool `protobuf:"varint,1,opt,name=requester_pays,json=requesterPays,proto3" json:"requester_pays,omitempty"`
+ // Required. The bucket to which this NotificationConfig belongs.
+ Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
+ // Required. Properties of the NotificationConfig to be inserted.
+ NotificationConfig *NotificationConfig `protobuf:"bytes,2,opt,name=notification_config,json=notificationConfig,proto3" json:"notification_config,omitempty"`
}
-func (x *Bucket_Billing) Reset() {
- *x = Bucket_Billing{}
+func (x *CreateNotificationConfigRequest) Reset() {
+ *x = CreateNotificationConfigRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[59]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[53]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *Bucket_Billing) String() string {
+func (x *CreateNotificationConfigRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*Bucket_Billing) ProtoMessage() {}
+func (*CreateNotificationConfigRequest) ProtoMessage() {}
-func (x *Bucket_Billing) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[59]
+func (x *CreateNotificationConfigRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[53]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6047,62 +5633,62 @@ func (x *Bucket_Billing) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use Bucket_Billing.ProtoReflect.Descriptor instead.
-func (*Bucket_Billing) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 0}
+// Deprecated: Use CreateNotificationConfigRequest.ProtoReflect.Descriptor instead.
+func (*CreateNotificationConfigRequest) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{53}
}
-func (x *Bucket_Billing) GetRequesterPays() bool {
+func (x *CreateNotificationConfigRequest) GetParent() string {
if x != nil {
- return x.RequesterPays
+ return x.Parent
}
- return false
+ return ""
}
-// Cross-Origin Response sharing (CORS) properties for a bucket.
-// For more on Cloud Storage and CORS, see
-// https://cloud.google.com/storage/docs/cross-origin.
-// For more on CORS in general, see https://tools.ietf.org/html/rfc6454.
-type Bucket_Cors struct {
+func (x *CreateNotificationConfigRequest) GetNotificationConfig() *NotificationConfig {
+ if x != nil {
+ return x.NotificationConfig
+ }
+ return nil
+}
+
+// Request message for ListNotificationConfigs.
+type ListNotificationConfigsRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // The list of Origins eligible to receive CORS response headers. See
- // [https://tools.ietf.org/html/rfc6454][RFC 6454] for more on origins.
- // Note: "*" is permitted in the list of origins, and means "any Origin".
- Origin []string `protobuf:"bytes,1,rep,name=origin,proto3" json:"origin,omitempty"`
- // The list of HTTP methods on which to include CORS response headers,
- // (`GET`, `OPTIONS`, `POST`, etc) Note: "*" is permitted in the list of
- // methods, and means "any method".
- Method []string `protobuf:"bytes,2,rep,name=method,proto3" json:"method,omitempty"`
- // The list of HTTP headers other than the
- // [https://www.w3.org/TR/cors/#simple-response-header][simple response
- // headers] to give permission for the user-agent to share across domains.
- ResponseHeader []string `protobuf:"bytes,3,rep,name=response_header,json=responseHeader,proto3" json:"response_header,omitempty"`
- // The value, in seconds, to return in the
- // [https://www.w3.org/TR/cors/#access-control-max-age-response-header][Access-Control-Max-Age
- // header] used in preflight responses.
- MaxAgeSeconds int32 `protobuf:"varint,4,opt,name=max_age_seconds,json=maxAgeSeconds,proto3" json:"max_age_seconds,omitempty"`
+ // Required. Name of a Google Cloud Storage bucket.
+ Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
+ // Optional. The maximum number of NotificationConfigs to return. The service
+ // may return fewer than this value. The default value is 100. Specifying a
+ // value above 100 will result in a page_size of 100.
+ PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // Optional. A page token, received from a previous `ListNotificationConfigs`
+ // call. Provide this to retrieve the subsequent page.
+ //
+ // When paginating, all other parameters provided to `ListNotificationConfigs`
+ // must match the call that provided the page token.
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
}
-func (x *Bucket_Cors) Reset() {
- *x = Bucket_Cors{}
+func (x *ListNotificationConfigsRequest) Reset() {
+ *x = ListNotificationConfigsRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[60]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[54]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *Bucket_Cors) String() string {
+func (x *ListNotificationConfigsRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*Bucket_Cors) ProtoMessage() {}
+func (*ListNotificationConfigsRequest) ProtoMessage() {}
-func (x *Bucket_Cors) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[60]
+func (x *ListNotificationConfigsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[54]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6113,67 +5699,62 @@ func (x *Bucket_Cors) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use Bucket_Cors.ProtoReflect.Descriptor instead.
-func (*Bucket_Cors) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 1}
+// Deprecated: Use ListNotificationConfigsRequest.ProtoReflect.Descriptor instead.
+func (*ListNotificationConfigsRequest) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{54}
}
-func (x *Bucket_Cors) GetOrigin() []string {
+func (x *ListNotificationConfigsRequest) GetParent() string {
if x != nil {
- return x.Origin
+ return x.Parent
}
- return nil
+ return ""
}
-func (x *Bucket_Cors) GetMethod() []string {
+func (x *ListNotificationConfigsRequest) GetPageSize() int32 {
if x != nil {
- return x.Method
+ return x.PageSize
}
- return nil
+ return 0
}
-func (x *Bucket_Cors) GetResponseHeader() []string {
+func (x *ListNotificationConfigsRequest) GetPageToken() string {
if x != nil {
- return x.ResponseHeader
+ return x.PageToken
}
- return nil
+ return ""
}
-func (x *Bucket_Cors) GetMaxAgeSeconds() int32 {
- if x != nil {
- return x.MaxAgeSeconds
- }
- return 0
-}
-
-// Encryption properties of a bucket.
-type Bucket_Encryption struct {
+// The result of a call to ListNotificationConfigs
+type ListNotificationConfigsResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // The name of the Cloud KMS key that will be used to encrypt objects
- // inserted into this bucket, if no encryption method is specified.
- DefaultKmsKey string `protobuf:"bytes,1,opt,name=default_kms_key,json=defaultKmsKey,proto3" json:"default_kms_key,omitempty"`
+ // The list of items.
+ NotificationConfigs []*NotificationConfig `protobuf:"bytes,1,rep,name=notification_configs,json=notificationConfigs,proto3" json:"notification_configs,omitempty"`
+ // A token, which can be sent as `page_token` to retrieve the next page.
+ // If this field is omitted, there are no subsequent pages.
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
}
-func (x *Bucket_Encryption) Reset() {
- *x = Bucket_Encryption{}
+func (x *ListNotificationConfigsResponse) Reset() {
+ *x = ListNotificationConfigsResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[61]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[55]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *Bucket_Encryption) String() string {
+func (x *ListNotificationConfigsResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*Bucket_Encryption) ProtoMessage() {}
+func (*ListNotificationConfigsResponse) ProtoMessage() {}
-func (x *Bucket_Encryption) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[61]
+func (x *ListNotificationConfigsResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[55]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6184,48 +5765,74 @@ func (x *Bucket_Encryption) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use Bucket_Encryption.ProtoReflect.Descriptor instead.
-func (*Bucket_Encryption) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 2}
+// Deprecated: Use ListNotificationConfigsResponse.ProtoReflect.Descriptor instead.
+func (*ListNotificationConfigsResponse) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{55}
}
-func (x *Bucket_Encryption) GetDefaultKmsKey() string {
+func (x *ListNotificationConfigsResponse) GetNotificationConfigs() []*NotificationConfig {
if x != nil {
- return x.DefaultKmsKey
+ return x.NotificationConfigs
+ }
+ return nil
+}
+
+func (x *ListNotificationConfigsResponse) GetNextPageToken() string {
+ if x != nil {
+ return x.NextPageToken
}
return ""
}
-// Bucket restriction options.
-type Bucket_IamConfig struct {
+// A directive to publish Pub/Sub notifications upon changes to a bucket.
+type NotificationConfig struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // Bucket restriction options currently enforced on the bucket.
- UniformBucketLevelAccess *Bucket_IamConfig_UniformBucketLevelAccess `protobuf:"bytes,1,opt,name=uniform_bucket_level_access,json=uniformBucketLevelAccess,proto3" json:"uniform_bucket_level_access,omitempty"`
- // Whether IAM will enforce public access prevention. Valid values are
- // "enforced" or "inherited".
- PublicAccessPrevention string `protobuf:"bytes,3,opt,name=public_access_prevention,json=publicAccessPrevention,proto3" json:"public_access_prevention,omitempty"`
+ // Required. The resource name of this NotificationConfig.
+ // Format:
+ // `projects/{project}/buckets/{bucket}/notificationConfigs/{notificationConfig}`
+ // The `{project}` portion may be `_` for globally unique buckets.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Required. The Pub/Sub topic to which this subscription publishes. Formatted
+ // as:
+ // '//pubsub.googleapis.com/projects/{project-identifier}/topics/{my-topic}'
+ Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"`
+ // Optional. The etag of the NotificationConfig.
+ // If included in the metadata of GetNotificationConfigRequest, the operation
+ // will only be performed if the etag matches that of the NotificationConfig.
+ Etag string `protobuf:"bytes,7,opt,name=etag,proto3" json:"etag,omitempty"`
+ // Optional. If present, only send notifications about listed event types. If
+ // empty, send notifications for all event types.
+ EventTypes []string `protobuf:"bytes,3,rep,name=event_types,json=eventTypes,proto3" json:"event_types,omitempty"`
+ // Optional. A list of additional attributes to attach to each Pub/Sub
+ // message published for this NotificationConfig.
+ CustomAttributes map[string]string `protobuf:"bytes,4,rep,name=custom_attributes,json=customAttributes,proto3" json:"custom_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // Optional. If present, only apply this NotificationConfig to object names
+ // that begin with this prefix.
+ ObjectNamePrefix string `protobuf:"bytes,5,opt,name=object_name_prefix,json=objectNamePrefix,proto3" json:"object_name_prefix,omitempty"`
+ // Required. The desired content of the Payload.
+ PayloadFormat string `protobuf:"bytes,6,opt,name=payload_format,json=payloadFormat,proto3" json:"payload_format,omitempty"`
}
-func (x *Bucket_IamConfig) Reset() {
- *x = Bucket_IamConfig{}
+func (x *NotificationConfig) Reset() {
+ *x = NotificationConfig{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[62]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[56]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *Bucket_IamConfig) String() string {
+func (x *NotificationConfig) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*Bucket_IamConfig) ProtoMessage() {}
+func (*NotificationConfig) ProtoMessage() {}
-func (x *Bucket_IamConfig) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[62]
+func (x *NotificationConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[56]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6236,106 +5843,92 @@ func (x *Bucket_IamConfig) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use Bucket_IamConfig.ProtoReflect.Descriptor instead.
-func (*Bucket_IamConfig) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 3}
+// Deprecated: Use NotificationConfig.ProtoReflect.Descriptor instead.
+func (*NotificationConfig) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{56}
}
-func (x *Bucket_IamConfig) GetUniformBucketLevelAccess() *Bucket_IamConfig_UniformBucketLevelAccess {
+func (x *NotificationConfig) GetName() string {
if x != nil {
- return x.UniformBucketLevelAccess
+ return x.Name
}
- return nil
+ return ""
}
-func (x *Bucket_IamConfig) GetPublicAccessPrevention() string {
+func (x *NotificationConfig) GetTopic() string {
if x != nil {
- return x.PublicAccessPrevention
+ return x.Topic
}
return ""
}
-// Lifecycle properties of a bucket.
-// For more information, see https://cloud.google.com/storage/docs/lifecycle.
-type Bucket_Lifecycle struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // A lifecycle management rule, which is made of an action to take and the
- // condition(s) under which the action will be taken.
- Rule []*Bucket_Lifecycle_Rule `protobuf:"bytes,1,rep,name=rule,proto3" json:"rule,omitempty"`
-}
-
-func (x *Bucket_Lifecycle) Reset() {
- *x = Bucket_Lifecycle{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[63]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
+func (x *NotificationConfig) GetEtag() string {
+ if x != nil {
+ return x.Etag
}
+ return ""
}
-func (x *Bucket_Lifecycle) String() string {
- return protoimpl.X.MessageStringOf(x)
+func (x *NotificationConfig) GetEventTypes() []string {
+ if x != nil {
+ return x.EventTypes
+ }
+ return nil
}
-func (*Bucket_Lifecycle) ProtoMessage() {}
-
-func (x *Bucket_Lifecycle) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[63]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
+func (x *NotificationConfig) GetCustomAttributes() map[string]string {
+ if x != nil {
+ return x.CustomAttributes
}
- return mi.MessageOf(x)
+ return nil
}
-// Deprecated: Use Bucket_Lifecycle.ProtoReflect.Descriptor instead.
-func (*Bucket_Lifecycle) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 4}
+func (x *NotificationConfig) GetObjectNamePrefix() string {
+ if x != nil {
+ return x.ObjectNamePrefix
+ }
+ return ""
}
-func (x *Bucket_Lifecycle) GetRule() []*Bucket_Lifecycle_Rule {
+func (x *NotificationConfig) GetPayloadFormat() string {
if x != nil {
- return x.Rule
+ return x.PayloadFormat
}
- return nil
+ return ""
}
-// Logging-related properties of a bucket.
-type Bucket_Logging struct {
+// Description of a source object for a composition request.
+type ComposeObjectRequest_SourceObject struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // The destination bucket where the current bucket's logs should be placed,
- // using path format (like `projects/123456/buckets/foo`).
- LogBucket string `protobuf:"bytes,1,opt,name=log_bucket,json=logBucket,proto3" json:"log_bucket,omitempty"`
- // A prefix for log object names.
- LogObjectPrefix string `protobuf:"bytes,2,opt,name=log_object_prefix,json=logObjectPrefix,proto3" json:"log_object_prefix,omitempty"`
+ // Required. The source object's name. All source objects must reside in the
+ // same bucket.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // The generation of this object to use as the source.
+ Generation int64 `protobuf:"varint,2,opt,name=generation,proto3" json:"generation,omitempty"`
+ // Conditions that must be met for this operation to execute.
+ ObjectPreconditions *ComposeObjectRequest_SourceObject_ObjectPreconditions `protobuf:"bytes,3,opt,name=object_preconditions,json=objectPreconditions,proto3" json:"object_preconditions,omitempty"`
}
-func (x *Bucket_Logging) Reset() {
- *x = Bucket_Logging{}
+func (x *ComposeObjectRequest_SourceObject) Reset() {
+ *x = ComposeObjectRequest_SourceObject{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[64]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[57]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *Bucket_Logging) String() string {
+func (x *ComposeObjectRequest_SourceObject) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*Bucket_Logging) ProtoMessage() {}
+func (*ComposeObjectRequest_SourceObject) ProtoMessage() {}
-func (x *Bucket_Logging) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[64]
+func (x *ComposeObjectRequest_SourceObject) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[57]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6346,61 +5939,61 @@ func (x *Bucket_Logging) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use Bucket_Logging.ProtoReflect.Descriptor instead.
-func (*Bucket_Logging) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 5}
+// Deprecated: Use ComposeObjectRequest_SourceObject.ProtoReflect.Descriptor instead.
+func (*ComposeObjectRequest_SourceObject) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{7, 0}
}
-func (x *Bucket_Logging) GetLogBucket() string {
+func (x *ComposeObjectRequest_SourceObject) GetName() string {
if x != nil {
- return x.LogBucket
+ return x.Name
}
return ""
}
-func (x *Bucket_Logging) GetLogObjectPrefix() string {
+func (x *ComposeObjectRequest_SourceObject) GetGeneration() int64 {
if x != nil {
- return x.LogObjectPrefix
+ return x.Generation
}
- return ""
+ return 0
}
-// Retention policy properties of a bucket.
-type Bucket_RetentionPolicy struct {
+func (x *ComposeObjectRequest_SourceObject) GetObjectPreconditions() *ComposeObjectRequest_SourceObject_ObjectPreconditions {
+ if x != nil {
+ return x.ObjectPreconditions
+ }
+ return nil
+}
+
+// Preconditions for a source object of a composition request.
+type ComposeObjectRequest_SourceObject_ObjectPreconditions struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // Server-determined value that indicates the time from which policy was
- // enforced and effective.
- EffectiveTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=effective_time,json=effectiveTime,proto3" json:"effective_time,omitempty"`
- // Once locked, an object retention policy cannot be modified.
- IsLocked bool `protobuf:"varint,2,opt,name=is_locked,json=isLocked,proto3" json:"is_locked,omitempty"`
- // The duration that objects need to be retained. Retention duration must be
- // greater than zero and less than 100 years. Note that enforcement of
- // retention periods less than a day is not guaranteed. Such periods should
- // only be used for testing purposes. Any `nanos` value specified will be
- // rounded down to the nearest second.
- RetentionDuration *durationpb.Duration `protobuf:"bytes,4,opt,name=retention_duration,json=retentionDuration,proto3" json:"retention_duration,omitempty"`
+ // Only perform the composition if the generation of the source object
+ // that would be used matches this value. If this value and a generation
+ // are both specified, they must be the same value or the call will fail.
+ IfGenerationMatch *int64 `protobuf:"varint,1,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"`
}
-func (x *Bucket_RetentionPolicy) Reset() {
- *x = Bucket_RetentionPolicy{}
+func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) Reset() {
+ *x = ComposeObjectRequest_SourceObject_ObjectPreconditions{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[65]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[58]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *Bucket_RetentionPolicy) String() string {
+func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*Bucket_RetentionPolicy) ProtoMessage() {}
+func (*ComposeObjectRequest_SourceObject_ObjectPreconditions) ProtoMessage() {}
-func (x *Bucket_RetentionPolicy) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[65]
+func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[58]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6411,63 +6004,45 @@ func (x *Bucket_RetentionPolicy) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use Bucket_RetentionPolicy.ProtoReflect.Descriptor instead.
-func (*Bucket_RetentionPolicy) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 6}
+// Deprecated: Use ComposeObjectRequest_SourceObject_ObjectPreconditions.ProtoReflect.Descriptor instead.
+func (*ComposeObjectRequest_SourceObject_ObjectPreconditions) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{7, 0, 0}
}
-func (x *Bucket_RetentionPolicy) GetEffectiveTime() *timestamppb.Timestamp {
- if x != nil {
- return x.EffectiveTime
+func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) GetIfGenerationMatch() int64 {
+ if x != nil && x.IfGenerationMatch != nil {
+ return *x.IfGenerationMatch
}
- return nil
+ return 0
}
-func (x *Bucket_RetentionPolicy) GetIsLocked() bool {
- if x != nil {
- return x.IsLocked
- }
- return false
-}
-
-func (x *Bucket_RetentionPolicy) GetRetentionDuration() *durationpb.Duration {
- if x != nil {
- return x.RetentionDuration
- }
- return nil
-}
-
-// Soft delete policy properties of a bucket.
-type Bucket_SoftDeletePolicy struct {
+// Billing properties of a bucket.
+type Bucket_Billing struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // The period of time that soft-deleted objects in the bucket must be
- // retained and cannot be permanently deleted. The duration must be greater
- // than or equal to 7 days and less than 1 year.
- RetentionDuration *durationpb.Duration `protobuf:"bytes,1,opt,name=retention_duration,json=retentionDuration,proto3,oneof" json:"retention_duration,omitempty"`
- // Time from which the policy was effective. This is service-provided.
- EffectiveTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=effective_time,json=effectiveTime,proto3,oneof" json:"effective_time,omitempty"`
+ // When set to true, Requester Pays is enabled for this bucket.
+ RequesterPays bool `protobuf:"varint,1,opt,name=requester_pays,json=requesterPays,proto3" json:"requester_pays,omitempty"`
}
-func (x *Bucket_SoftDeletePolicy) Reset() {
- *x = Bucket_SoftDeletePolicy{}
+func (x *Bucket_Billing) Reset() {
+ *x = Bucket_Billing{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[66]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[59]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *Bucket_SoftDeletePolicy) String() string {
+func (x *Bucket_Billing) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*Bucket_SoftDeletePolicy) ProtoMessage() {}
+func (*Bucket_Billing) ProtoMessage() {}
-func (x *Bucket_SoftDeletePolicy) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[66]
+func (x *Bucket_Billing) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[59]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6478,54 +6053,62 @@ func (x *Bucket_SoftDeletePolicy) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use Bucket_SoftDeletePolicy.ProtoReflect.Descriptor instead.
-func (*Bucket_SoftDeletePolicy) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 7}
-}
-
-func (x *Bucket_SoftDeletePolicy) GetRetentionDuration() *durationpb.Duration {
- if x != nil {
- return x.RetentionDuration
- }
- return nil
+// Deprecated: Use Bucket_Billing.ProtoReflect.Descriptor instead.
+func (*Bucket_Billing) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 0}
}
-func (x *Bucket_SoftDeletePolicy) GetEffectiveTime() *timestamppb.Timestamp {
+func (x *Bucket_Billing) GetRequesterPays() bool {
if x != nil {
- return x.EffectiveTime
+ return x.RequesterPays
}
- return nil
+ return false
}
-// Properties of a bucket related to versioning.
-// For more on Cloud Storage versioning, see
-// https://cloud.google.com/storage/docs/object-versioning.
-type Bucket_Versioning struct {
+// Cross-Origin Resource Sharing (CORS) properties for a bucket.
+// For more on Cloud Storage and CORS, see
+// https://cloud.google.com/storage/docs/cross-origin.
+// For more on CORS in general, see https://tools.ietf.org/html/rfc6454.
+type Bucket_Cors struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // While set to true, versioning is fully enabled for this bucket.
- Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
+ // The list of Origins eligible to receive CORS response headers. See
+ // [https://tools.ietf.org/html/rfc6454][RFC 6454] for more on origins.
+ // Note: "*" is permitted in the list of origins, and means "any Origin".
+ Origin []string `protobuf:"bytes,1,rep,name=origin,proto3" json:"origin,omitempty"`
+ // The list of HTTP methods on which to include CORS response headers,
+ // (`GET`, `OPTIONS`, `POST`, etc) Note: "*" is permitted in the list of
+ // methods, and means "any method".
+ Method []string `protobuf:"bytes,2,rep,name=method,proto3" json:"method,omitempty"`
+ // The list of HTTP headers other than the
+ // [https://www.w3.org/TR/cors/#simple-response-header][simple response
+ // headers] to give permission for the user-agent to share across domains.
+ ResponseHeader []string `protobuf:"bytes,3,rep,name=response_header,json=responseHeader,proto3" json:"response_header,omitempty"`
+ // The value, in seconds, to return in the
+ // [https://www.w3.org/TR/cors/#access-control-max-age-response-header][Access-Control-Max-Age
+ // header] used in preflight responses.
+ MaxAgeSeconds int32 `protobuf:"varint,4,opt,name=max_age_seconds,json=maxAgeSeconds,proto3" json:"max_age_seconds,omitempty"`
}
-func (x *Bucket_Versioning) Reset() {
- *x = Bucket_Versioning{}
+func (x *Bucket_Cors) Reset() {
+ *x = Bucket_Cors{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[67]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[60]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *Bucket_Versioning) String() string {
+func (x *Bucket_Cors) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*Bucket_Versioning) ProtoMessage() {}
+func (*Bucket_Cors) ProtoMessage() {}
-func (x *Bucket_Versioning) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[67]
+func (x *Bucket_Cors) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[60]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6536,56 +6119,67 @@ func (x *Bucket_Versioning) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use Bucket_Versioning.ProtoReflect.Descriptor instead.
-func (*Bucket_Versioning) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 8}
+// Deprecated: Use Bucket_Cors.ProtoReflect.Descriptor instead.
+func (*Bucket_Cors) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 1}
}
-func (x *Bucket_Versioning) GetEnabled() bool {
+func (x *Bucket_Cors) GetOrigin() []string {
if x != nil {
- return x.Enabled
+ return x.Origin
}
- return false
+ return nil
}
-// Properties of a bucket related to accessing the contents as a static
-// website. For more on hosting a static website via Cloud Storage, see
-// https://cloud.google.com/storage/docs/hosting-static-website.
-type Bucket_Website struct {
+func (x *Bucket_Cors) GetMethod() []string {
+ if x != nil {
+ return x.Method
+ }
+ return nil
+}
+
+func (x *Bucket_Cors) GetResponseHeader() []string {
+ if x != nil {
+ return x.ResponseHeader
+ }
+ return nil
+}
+
+func (x *Bucket_Cors) GetMaxAgeSeconds() int32 {
+ if x != nil {
+ return x.MaxAgeSeconds
+ }
+ return 0
+}
+
+// Encryption properties of a bucket.
+type Bucket_Encryption struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // If the requested object path is missing, the service will ensure the path
- // has a trailing '/', append this suffix, and attempt to retrieve the
- // resulting object. This allows the creation of `index.html`
- // objects to represent directory pages.
- MainPageSuffix string `protobuf:"bytes,1,opt,name=main_page_suffix,json=mainPageSuffix,proto3" json:"main_page_suffix,omitempty"`
- // If the requested object path is missing, and any
- // `mainPageSuffix` object is missing, if applicable, the service
- // will return the named object from this bucket as the content for a
- // [https://tools.ietf.org/html/rfc7231#section-6.5.4][404 Not Found]
- // result.
- NotFoundPage string `protobuf:"bytes,2,opt,name=not_found_page,json=notFoundPage,proto3" json:"not_found_page,omitempty"`
+ // The name of the Cloud KMS key that will be used to encrypt objects
+ // inserted into this bucket, if no encryption method is specified.
+ DefaultKmsKey string `protobuf:"bytes,1,opt,name=default_kms_key,json=defaultKmsKey,proto3" json:"default_kms_key,omitempty"`
}
-func (x *Bucket_Website) Reset() {
- *x = Bucket_Website{}
+func (x *Bucket_Encryption) Reset() {
+ *x = Bucket_Encryption{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[68]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[61]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *Bucket_Website) String() string {
+func (x *Bucket_Encryption) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*Bucket_Website) ProtoMessage() {}
+func (*Bucket_Encryption) ProtoMessage() {}
-func (x *Bucket_Website) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[68]
+func (x *Bucket_Encryption) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[61]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6596,54 +6190,48 @@ func (x *Bucket_Website) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use Bucket_Website.ProtoReflect.Descriptor instead.
-func (*Bucket_Website) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 9}
-}
-
-func (x *Bucket_Website) GetMainPageSuffix() string {
- if x != nil {
- return x.MainPageSuffix
- }
- return ""
+// Deprecated: Use Bucket_Encryption.ProtoReflect.Descriptor instead.
+func (*Bucket_Encryption) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 2}
}
-func (x *Bucket_Website) GetNotFoundPage() string {
+func (x *Bucket_Encryption) GetDefaultKmsKey() string {
if x != nil {
- return x.NotFoundPage
+ return x.DefaultKmsKey
}
return ""
}
-// Configuration for Custom Dual Regions. It should specify precisely two
-// eligible regions within the same Multiregion. More information on regions
-// may be found [https://cloud.google.com/storage/docs/locations][here].
-type Bucket_CustomPlacementConfig struct {
+// Bucket restriction options.
+type Bucket_IamConfig struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // List of locations to use for data placement.
- DataLocations []string `protobuf:"bytes,1,rep,name=data_locations,json=dataLocations,proto3" json:"data_locations,omitempty"`
+ // Bucket restriction options currently enforced on the bucket.
+ UniformBucketLevelAccess *Bucket_IamConfig_UniformBucketLevelAccess `protobuf:"bytes,1,opt,name=uniform_bucket_level_access,json=uniformBucketLevelAccess,proto3" json:"uniform_bucket_level_access,omitempty"`
+ // Whether IAM will enforce public access prevention. Valid values are
+ // "enforced" or "inherited".
+ PublicAccessPrevention string `protobuf:"bytes,3,opt,name=public_access_prevention,json=publicAccessPrevention,proto3" json:"public_access_prevention,omitempty"`
}
-func (x *Bucket_CustomPlacementConfig) Reset() {
- *x = Bucket_CustomPlacementConfig{}
+func (x *Bucket_IamConfig) Reset() {
+ *x = Bucket_IamConfig{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[69]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[62]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *Bucket_CustomPlacementConfig) String() string {
+func (x *Bucket_IamConfig) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*Bucket_CustomPlacementConfig) ProtoMessage() {}
+func (*Bucket_IamConfig) ProtoMessage() {}
-func (x *Bucket_CustomPlacementConfig) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[69]
+func (x *Bucket_IamConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[62]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6654,57 +6242,54 @@ func (x *Bucket_CustomPlacementConfig) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use Bucket_CustomPlacementConfig.ProtoReflect.Descriptor instead.
-func (*Bucket_CustomPlacementConfig) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 10}
+// Deprecated: Use Bucket_IamConfig.ProtoReflect.Descriptor instead.
+func (*Bucket_IamConfig) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 3}
}
-func (x *Bucket_CustomPlacementConfig) GetDataLocations() []string {
+func (x *Bucket_IamConfig) GetUniformBucketLevelAccess() *Bucket_IamConfig_UniformBucketLevelAccess {
if x != nil {
- return x.DataLocations
+ return x.UniformBucketLevelAccess
}
return nil
}
-// Configuration for a bucket's Autoclass feature.
-type Bucket_Autoclass struct {
+func (x *Bucket_IamConfig) GetPublicAccessPrevention() string {
+ if x != nil {
+ return x.PublicAccessPrevention
+ }
+ return ""
+}
+
+// Lifecycle properties of a bucket.
+// For more information, see https://cloud.google.com/storage/docs/lifecycle.
+type Bucket_Lifecycle struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // Enables Autoclass.
- Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
- // Output only. Latest instant at which the `enabled` field was set to true
- // after being disabled/unconfigured or set to false after being enabled. If
- // Autoclass is enabled when the bucket is created, the toggle_time is set
- // to the bucket creation time.
- ToggleTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=toggle_time,json=toggleTime,proto3" json:"toggle_time,omitempty"`
- // An object in an Autoclass bucket will eventually cool down to the
- // terminal storage class if there is no access to the object.
- // The only valid values are NEARLINE and ARCHIVE.
- TerminalStorageClass *string `protobuf:"bytes,3,opt,name=terminal_storage_class,json=terminalStorageClass,proto3,oneof" json:"terminal_storage_class,omitempty"`
- // Output only. Latest instant at which the autoclass terminal storage class
- // was updated.
- TerminalStorageClassUpdateTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=terminal_storage_class_update_time,json=terminalStorageClassUpdateTime,proto3,oneof" json:"terminal_storage_class_update_time,omitempty"`
+ // A lifecycle management rule, which is made of an action to take and the
+ // condition(s) under which the action will be taken.
+ Rule []*Bucket_Lifecycle_Rule `protobuf:"bytes,1,rep,name=rule,proto3" json:"rule,omitempty"`
}
-func (x *Bucket_Autoclass) Reset() {
- *x = Bucket_Autoclass{}
+func (x *Bucket_Lifecycle) Reset() {
+ *x = Bucket_Lifecycle{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[70]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[63]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *Bucket_Autoclass) String() string {
+func (x *Bucket_Lifecycle) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*Bucket_Autoclass) ProtoMessage() {}
+func (*Bucket_Lifecycle) ProtoMessage() {}
-func (x *Bucket_Autoclass) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[70]
+func (x *Bucket_Lifecycle) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[63]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6715,66 +6300,113 @@ func (x *Bucket_Autoclass) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use Bucket_Autoclass.ProtoReflect.Descriptor instead.
-func (*Bucket_Autoclass) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 11}
+// Deprecated: Use Bucket_Lifecycle.ProtoReflect.Descriptor instead.
+func (*Bucket_Lifecycle) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 4}
}
-func (x *Bucket_Autoclass) GetEnabled() bool {
+func (x *Bucket_Lifecycle) GetRule() []*Bucket_Lifecycle_Rule {
if x != nil {
- return x.Enabled
+ return x.Rule
}
- return false
+ return nil
}
-func (x *Bucket_Autoclass) GetToggleTime() *timestamppb.Timestamp {
- if x != nil {
- return x.ToggleTime
+// Logging-related properties of a bucket.
+type Bucket_Logging struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The destination bucket where the current bucket's logs should be placed,
+ // using path format (like `projects/123456/buckets/foo`).
+ LogBucket string `protobuf:"bytes,1,opt,name=log_bucket,json=logBucket,proto3" json:"log_bucket,omitempty"`
+ // A prefix for log object names.
+ LogObjectPrefix string `protobuf:"bytes,2,opt,name=log_object_prefix,json=logObjectPrefix,proto3" json:"log_object_prefix,omitempty"`
+}
+
+func (x *Bucket_Logging) Reset() {
+ *x = Bucket_Logging{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[64]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return nil
}
-func (x *Bucket_Autoclass) GetTerminalStorageClass() string {
- if x != nil && x.TerminalStorageClass != nil {
- return *x.TerminalStorageClass
+func (x *Bucket_Logging) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Bucket_Logging) ProtoMessage() {}
+
+func (x *Bucket_Logging) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[64]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Bucket_Logging.ProtoReflect.Descriptor instead.
+func (*Bucket_Logging) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 5}
+}
+
+func (x *Bucket_Logging) GetLogBucket() string {
+ if x != nil {
+ return x.LogBucket
}
return ""
}
-func (x *Bucket_Autoclass) GetTerminalStorageClassUpdateTime() *timestamppb.Timestamp {
+func (x *Bucket_Logging) GetLogObjectPrefix() string {
if x != nil {
- return x.TerminalStorageClassUpdateTime
+ return x.LogObjectPrefix
}
- return nil
+ return ""
}
-// Configuration for a bucket's hierarchical namespace feature.
-type Bucket_HierarchicalNamespace struct {
+// Retention policy properties of a bucket.
+type Bucket_RetentionPolicy struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // Optional. Enables the hierarchical namespace feature.
- Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
+ // Server-determined value that indicates the time from which policy was
+ // enforced and effective.
+ EffectiveTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=effective_time,json=effectiveTime,proto3" json:"effective_time,omitempty"`
+ // Once locked, an object retention policy cannot be modified.
+ IsLocked bool `protobuf:"varint,2,opt,name=is_locked,json=isLocked,proto3" json:"is_locked,omitempty"`
+ // The duration that objects need to be retained. Retention duration must be
+ // greater than zero and less than 100 years. Note that enforcement of
+ // retention periods less than a day is not guaranteed. Such periods should
+ // only be used for testing purposes. Any `nanos` value specified will be
+ // rounded down to the nearest second.
+ RetentionDuration *durationpb.Duration `protobuf:"bytes,4,opt,name=retention_duration,json=retentionDuration,proto3" json:"retention_duration,omitempty"`
}
-func (x *Bucket_HierarchicalNamespace) Reset() {
- *x = Bucket_HierarchicalNamespace{}
+func (x *Bucket_RetentionPolicy) Reset() {
+ *x = Bucket_RetentionPolicy{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[71]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[65]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *Bucket_HierarchicalNamespace) String() string {
+func (x *Bucket_RetentionPolicy) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*Bucket_HierarchicalNamespace) ProtoMessage() {}
+func (*Bucket_RetentionPolicy) ProtoMessage() {}
-func (x *Bucket_HierarchicalNamespace) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[71]
+func (x *Bucket_RetentionPolicy) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[65]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6785,51 +6417,63 @@ func (x *Bucket_HierarchicalNamespace) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use Bucket_HierarchicalNamespace.ProtoReflect.Descriptor instead.
-func (*Bucket_HierarchicalNamespace) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 12}
+// Deprecated: Use Bucket_RetentionPolicy.ProtoReflect.Descriptor instead.
+func (*Bucket_RetentionPolicy) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 6}
}
-func (x *Bucket_HierarchicalNamespace) GetEnabled() bool {
+func (x *Bucket_RetentionPolicy) GetEffectiveTime() *timestamppb.Timestamp {
if x != nil {
- return x.Enabled
+ return x.EffectiveTime
+ }
+ return nil
+}
+
+func (x *Bucket_RetentionPolicy) GetIsLocked() bool {
+ if x != nil {
+ return x.IsLocked
}
return false
}
-// Settings for Uniform Bucket level access.
-// See https://cloud.google.com/storage/docs/uniform-bucket-level-access.
-type Bucket_IamConfig_UniformBucketLevelAccess struct {
+func (x *Bucket_RetentionPolicy) GetRetentionDuration() *durationpb.Duration {
+ if x != nil {
+ return x.RetentionDuration
+ }
+ return nil
+}
+
+// Soft delete policy properties of a bucket.
+type Bucket_SoftDeletePolicy struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // If set, access checks only use bucket-level IAM policies or above.
- Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
- // The deadline time for changing
- // `iam_config.uniform_bucket_level_access.enabled` from `true` to
- // `false`. Mutable until the specified deadline is reached, but not
- // afterward.
- LockTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=lock_time,json=lockTime,proto3" json:"lock_time,omitempty"`
+ // The period of time that soft-deleted objects in the bucket must be
+ // retained and cannot be permanently deleted. The duration must be greater
+ // than or equal to 7 days and less than 1 year.
+ RetentionDuration *durationpb.Duration `protobuf:"bytes,1,opt,name=retention_duration,json=retentionDuration,proto3,oneof" json:"retention_duration,omitempty"`
+ // Time from which the policy was effective. This is service-provided.
+ EffectiveTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=effective_time,json=effectiveTime,proto3,oneof" json:"effective_time,omitempty"`
}
-func (x *Bucket_IamConfig_UniformBucketLevelAccess) Reset() {
- *x = Bucket_IamConfig_UniformBucketLevelAccess{}
+func (x *Bucket_SoftDeletePolicy) Reset() {
+ *x = Bucket_SoftDeletePolicy{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[73]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[66]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *Bucket_IamConfig_UniformBucketLevelAccess) String() string {
+func (x *Bucket_SoftDeletePolicy) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*Bucket_IamConfig_UniformBucketLevelAccess) ProtoMessage() {}
+func (*Bucket_SoftDeletePolicy) ProtoMessage() {}
-func (x *Bucket_IamConfig_UniformBucketLevelAccess) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[73]
+func (x *Bucket_SoftDeletePolicy) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[66]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6840,55 +6484,54 @@ func (x *Bucket_IamConfig_UniformBucketLevelAccess) ProtoReflect() protoreflect.
return mi.MessageOf(x)
}
-// Deprecated: Use Bucket_IamConfig_UniformBucketLevelAccess.ProtoReflect.Descriptor instead.
-func (*Bucket_IamConfig_UniformBucketLevelAccess) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 3, 0}
+// Deprecated: Use Bucket_SoftDeletePolicy.ProtoReflect.Descriptor instead.
+func (*Bucket_SoftDeletePolicy) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 7}
}
-func (x *Bucket_IamConfig_UniformBucketLevelAccess) GetEnabled() bool {
+func (x *Bucket_SoftDeletePolicy) GetRetentionDuration() *durationpb.Duration {
if x != nil {
- return x.Enabled
+ return x.RetentionDuration
}
- return false
+ return nil
}
-func (x *Bucket_IamConfig_UniformBucketLevelAccess) GetLockTime() *timestamppb.Timestamp {
+func (x *Bucket_SoftDeletePolicy) GetEffectiveTime() *timestamppb.Timestamp {
if x != nil {
- return x.LockTime
+ return x.EffectiveTime
}
return nil
}
-// A lifecycle Rule, combining an action to take on an object and a
-// condition which will trigger that action.
-type Bucket_Lifecycle_Rule struct {
+// Properties of a bucket related to versioning.
+// For more on Cloud Storage versioning, see
+// https://cloud.google.com/storage/docs/object-versioning.
+type Bucket_Versioning struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // The action to take.
- Action *Bucket_Lifecycle_Rule_Action `protobuf:"bytes,1,opt,name=action,proto3" json:"action,omitempty"`
- // The condition(s) under which the action will be taken.
- Condition *Bucket_Lifecycle_Rule_Condition `protobuf:"bytes,2,opt,name=condition,proto3" json:"condition,omitempty"`
+ // While set to true, versioning is fully enabled for this bucket.
+ Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
}
-func (x *Bucket_Lifecycle_Rule) Reset() {
- *x = Bucket_Lifecycle_Rule{}
+func (x *Bucket_Versioning) Reset() {
+ *x = Bucket_Versioning{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[74]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[67]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *Bucket_Lifecycle_Rule) String() string {
+func (x *Bucket_Versioning) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*Bucket_Lifecycle_Rule) ProtoMessage() {}
+func (*Bucket_Versioning) ProtoMessage() {}
-func (x *Bucket_Lifecycle_Rule) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[74]
+func (x *Bucket_Versioning) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[67]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6899,56 +6542,56 @@ func (x *Bucket_Lifecycle_Rule) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use Bucket_Lifecycle_Rule.ProtoReflect.Descriptor instead.
-func (*Bucket_Lifecycle_Rule) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 4, 0}
-}
-
-func (x *Bucket_Lifecycle_Rule) GetAction() *Bucket_Lifecycle_Rule_Action {
- if x != nil {
- return x.Action
- }
- return nil
+// Deprecated: Use Bucket_Versioning.ProtoReflect.Descriptor instead.
+func (*Bucket_Versioning) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 8}
}
-func (x *Bucket_Lifecycle_Rule) GetCondition() *Bucket_Lifecycle_Rule_Condition {
+func (x *Bucket_Versioning) GetEnabled() bool {
if x != nil {
- return x.Condition
+ return x.Enabled
}
- return nil
+ return false
}
-// An action to take on an object.
-type Bucket_Lifecycle_Rule_Action struct {
+// Properties of a bucket related to accessing the contents as a static
+// website. For more on hosting a static website via Cloud Storage, see
+// https://cloud.google.com/storage/docs/hosting-static-website.
+type Bucket_Website struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // Type of the action. Currently, only `Delete`, `SetStorageClass`, and
- // `AbortIncompleteMultipartUpload` are supported.
- Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
- // Target storage class. Required iff the type of the action is
- // SetStorageClass.
- StorageClass string `protobuf:"bytes,2,opt,name=storage_class,json=storageClass,proto3" json:"storage_class,omitempty"`
+ // If the requested object path is missing, the service will ensure the path
+ // has a trailing '/', append this suffix, and attempt to retrieve the
+ // resulting object. This allows the creation of `index.html`
+ // objects to represent directory pages.
+ MainPageSuffix string `protobuf:"bytes,1,opt,name=main_page_suffix,json=mainPageSuffix,proto3" json:"main_page_suffix,omitempty"`
+ // If the requested object path is missing, and any
+ // `mainPageSuffix` object is missing, if applicable, the service
+ // will return the named object from this bucket as the content for a
+ // [https://tools.ietf.org/html/rfc7231#section-6.5.4][404 Not Found]
+ // result.
+ NotFoundPage string `protobuf:"bytes,2,opt,name=not_found_page,json=notFoundPage,proto3" json:"not_found_page,omitempty"`
}
-func (x *Bucket_Lifecycle_Rule_Action) Reset() {
- *x = Bucket_Lifecycle_Rule_Action{}
+func (x *Bucket_Website) Reset() {
+ *x = Bucket_Website{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[75]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[68]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *Bucket_Lifecycle_Rule_Action) String() string {
+func (x *Bucket_Website) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*Bucket_Lifecycle_Rule_Action) ProtoMessage() {}
+func (*Bucket_Website) ProtoMessage() {}
-func (x *Bucket_Lifecycle_Rule_Action) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[75]
+func (x *Bucket_Website) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[68]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -6959,94 +6602,54 @@ func (x *Bucket_Lifecycle_Rule_Action) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use Bucket_Lifecycle_Rule_Action.ProtoReflect.Descriptor instead.
-func (*Bucket_Lifecycle_Rule_Action) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 4, 0, 0}
+// Deprecated: Use Bucket_Website.ProtoReflect.Descriptor instead.
+func (*Bucket_Website) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 9}
}
-func (x *Bucket_Lifecycle_Rule_Action) GetType() string {
+func (x *Bucket_Website) GetMainPageSuffix() string {
if x != nil {
- return x.Type
+ return x.MainPageSuffix
}
return ""
}
-func (x *Bucket_Lifecycle_Rule_Action) GetStorageClass() string {
+func (x *Bucket_Website) GetNotFoundPage() string {
if x != nil {
- return x.StorageClass
+ return x.NotFoundPage
}
return ""
}
-// A condition of an object which triggers some action.
-type Bucket_Lifecycle_Rule_Condition struct {
+// Configuration for Custom Dual Regions. It should specify precisely two
+// eligible regions within the same Multiregion. More information on regions
+// may be found [https://cloud.google.com/storage/docs/locations][here].
+type Bucket_CustomPlacementConfig struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // Age of an object (in days). This condition is satisfied when an
- // object reaches the specified age.
- // A value of 0 indicates that all objects immediately match this
- // condition.
- AgeDays *int32 `protobuf:"varint,1,opt,name=age_days,json=ageDays,proto3,oneof" json:"age_days,omitempty"`
- // This condition is satisfied when an object is created before midnight
- // of the specified date in UTC.
- CreatedBefore *date.Date `protobuf:"bytes,2,opt,name=created_before,json=createdBefore,proto3" json:"created_before,omitempty"`
- // Relevant only for versioned objects. If the value is
- // `true`, this condition matches live objects; if the value
- // is `false`, it matches archived objects.
- IsLive *bool `protobuf:"varint,3,opt,name=is_live,json=isLive,proto3,oneof" json:"is_live,omitempty"`
- // Relevant only for versioned objects. If the value is N, this
- // condition is satisfied when there are at least N versions (including
- // the live version) newer than this version of the object.
- NumNewerVersions *int32 `protobuf:"varint,4,opt,name=num_newer_versions,json=numNewerVersions,proto3,oneof" json:"num_newer_versions,omitempty"`
- // Objects having any of the storage classes specified by this condition
- // will be matched. Values include `MULTI_REGIONAL`, `REGIONAL`,
- // `NEARLINE`, `COLDLINE`, `STANDARD`, and
- // `DURABLE_REDUCED_AVAILABILITY`.
- MatchesStorageClass []string `protobuf:"bytes,5,rep,name=matches_storage_class,json=matchesStorageClass,proto3" json:"matches_storage_class,omitempty"`
- // Number of days that have elapsed since the custom timestamp set on an
- // object.
- // The value of the field must be a nonnegative integer.
- DaysSinceCustomTime *int32 `protobuf:"varint,7,opt,name=days_since_custom_time,json=daysSinceCustomTime,proto3,oneof" json:"days_since_custom_time,omitempty"`
- // An object matches this condition if the custom timestamp set on the
- // object is before the specified date in UTC.
- CustomTimeBefore *date.Date `protobuf:"bytes,8,opt,name=custom_time_before,json=customTimeBefore,proto3" json:"custom_time_before,omitempty"`
- // This condition is relevant only for versioned objects. An object
- // version satisfies this condition only if these many days have been
- // passed since it became noncurrent. The value of the field must be a
- // nonnegative integer. If it's zero, the object version will become
- // eligible for Lifecycle action as soon as it becomes noncurrent.
- DaysSinceNoncurrentTime *int32 `protobuf:"varint,9,opt,name=days_since_noncurrent_time,json=daysSinceNoncurrentTime,proto3,oneof" json:"days_since_noncurrent_time,omitempty"`
- // This condition is relevant only for versioned objects. An object
- // version satisfies this condition only if it became noncurrent before
- // the specified date in UTC.
- NoncurrentTimeBefore *date.Date `protobuf:"bytes,10,opt,name=noncurrent_time_before,json=noncurrentTimeBefore,proto3" json:"noncurrent_time_before,omitempty"`
- // List of object name prefixes. If any prefix exactly matches the
- // beginning of the object name, the condition evaluates to true.
- MatchesPrefix []string `protobuf:"bytes,11,rep,name=matches_prefix,json=matchesPrefix,proto3" json:"matches_prefix,omitempty"`
- // List of object name suffixes. If any suffix exactly matches the
- // end of the object name, the condition evaluates to true.
- MatchesSuffix []string `protobuf:"bytes,12,rep,name=matches_suffix,json=matchesSuffix,proto3" json:"matches_suffix,omitempty"`
+ // List of locations to use for data placement.
+ DataLocations []string `protobuf:"bytes,1,rep,name=data_locations,json=dataLocations,proto3" json:"data_locations,omitempty"`
}
-func (x *Bucket_Lifecycle_Rule_Condition) Reset() {
- *x = Bucket_Lifecycle_Rule_Condition{}
+func (x *Bucket_CustomPlacementConfig) Reset() {
+ *x = Bucket_CustomPlacementConfig{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[76]
+ mi := &file_google_storage_v2_storage_proto_msgTypes[69]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *Bucket_Lifecycle_Rule_Condition) String() string {
+func (x *Bucket_CustomPlacementConfig) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*Bucket_Lifecycle_Rule_Condition) ProtoMessage() {}
+func (*Bucket_CustomPlacementConfig) ProtoMessage() {}
-func (x *Bucket_Lifecycle_Rule_Condition) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[76]
+func (x *Bucket_CustomPlacementConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[69]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -7057,108 +6660,511 @@ func (x *Bucket_Lifecycle_Rule_Condition) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use Bucket_Lifecycle_Rule_Condition.ProtoReflect.Descriptor instead.
-func (*Bucket_Lifecycle_Rule_Condition) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 4, 0, 1}
+// Deprecated: Use Bucket_CustomPlacementConfig.ProtoReflect.Descriptor instead.
+func (*Bucket_CustomPlacementConfig) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 10}
}
-func (x *Bucket_Lifecycle_Rule_Condition) GetAgeDays() int32 {
- if x != nil && x.AgeDays != nil {
- return *x.AgeDays
+func (x *Bucket_CustomPlacementConfig) GetDataLocations() []string {
+ if x != nil {
+ return x.DataLocations
}
- return 0
+ return nil
}
-func (x *Bucket_Lifecycle_Rule_Condition) GetCreatedBefore() *date.Date {
- if x != nil {
- return x.CreatedBefore
+// Configuration for a bucket's Autoclass feature.
+type Bucket_Autoclass struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Enables Autoclass.
+ Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
+ // Output only. Latest instant at which the `enabled` field was set to true
+ // after being disabled/unconfigured or set to false after being enabled. If
+ // Autoclass is enabled when the bucket is created, the toggle_time is set
+ // to the bucket creation time.
+ ToggleTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=toggle_time,json=toggleTime,proto3" json:"toggle_time,omitempty"`
+ // An object in an Autoclass bucket will eventually cool down to the
+ // terminal storage class if there is no access to the object.
+ // The only valid values are NEARLINE and ARCHIVE.
+ TerminalStorageClass *string `protobuf:"bytes,3,opt,name=terminal_storage_class,json=terminalStorageClass,proto3,oneof" json:"terminal_storage_class,omitempty"`
+ // Output only. Latest instant at which the autoclass terminal storage class
+ // was updated.
+ TerminalStorageClassUpdateTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=terminal_storage_class_update_time,json=terminalStorageClassUpdateTime,proto3,oneof" json:"terminal_storage_class_update_time,omitempty"`
+}
+
+func (x *Bucket_Autoclass) Reset() {
+ *x = Bucket_Autoclass{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[70]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return nil
}
-func (x *Bucket_Lifecycle_Rule_Condition) GetIsLive() bool {
- if x != nil && x.IsLive != nil {
- return *x.IsLive
+func (x *Bucket_Autoclass) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Bucket_Autoclass) ProtoMessage() {}
+
+func (x *Bucket_Autoclass) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[70]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return false
+ return mi.MessageOf(x)
}
-func (x *Bucket_Lifecycle_Rule_Condition) GetNumNewerVersions() int32 {
- if x != nil && x.NumNewerVersions != nil {
- return *x.NumNewerVersions
+// Deprecated: Use Bucket_Autoclass.ProtoReflect.Descriptor instead.
+func (*Bucket_Autoclass) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 11}
+}
+
+func (x *Bucket_Autoclass) GetEnabled() bool {
+ if x != nil {
+ return x.Enabled
}
- return 0
+ return false
}
-func (x *Bucket_Lifecycle_Rule_Condition) GetMatchesStorageClass() []string {
+func (x *Bucket_Autoclass) GetToggleTime() *timestamppb.Timestamp {
if x != nil {
- return x.MatchesStorageClass
+ return x.ToggleTime
}
return nil
}
-func (x *Bucket_Lifecycle_Rule_Condition) GetDaysSinceCustomTime() int32 {
- if x != nil && x.DaysSinceCustomTime != nil {
- return *x.DaysSinceCustomTime
+func (x *Bucket_Autoclass) GetTerminalStorageClass() string {
+ if x != nil && x.TerminalStorageClass != nil {
+ return *x.TerminalStorageClass
}
- return 0
+ return ""
}
-func (x *Bucket_Lifecycle_Rule_Condition) GetCustomTimeBefore() *date.Date {
+func (x *Bucket_Autoclass) GetTerminalStorageClassUpdateTime() *timestamppb.Timestamp {
if x != nil {
- return x.CustomTimeBefore
+ return x.TerminalStorageClassUpdateTime
}
return nil
}
-func (x *Bucket_Lifecycle_Rule_Condition) GetDaysSinceNoncurrentTime() int32 {
- if x != nil && x.DaysSinceNoncurrentTime != nil {
- return *x.DaysSinceNoncurrentTime
+// Configuration for a bucket's hierarchical namespace feature.
+type Bucket_HierarchicalNamespace struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Optional. Enables the hierarchical namespace feature.
+ Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
+}
+
+func (x *Bucket_HierarchicalNamespace) Reset() {
+ *x = Bucket_HierarchicalNamespace{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[71]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return 0
}
-func (x *Bucket_Lifecycle_Rule_Condition) GetNoncurrentTimeBefore() *date.Date {
- if x != nil {
- return x.NoncurrentTimeBefore
+func (x *Bucket_HierarchicalNamespace) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Bucket_HierarchicalNamespace) ProtoMessage() {}
+
+func (x *Bucket_HierarchicalNamespace) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[71]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return nil
+ return mi.MessageOf(x)
}
-func (x *Bucket_Lifecycle_Rule_Condition) GetMatchesPrefix() []string {
+// Deprecated: Use Bucket_HierarchicalNamespace.ProtoReflect.Descriptor instead.
+func (*Bucket_HierarchicalNamespace) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 12}
+}
+
+func (x *Bucket_HierarchicalNamespace) GetEnabled() bool {
if x != nil {
- return x.MatchesPrefix
+ return x.Enabled
}
- return nil
+ return false
}
-func (x *Bucket_Lifecycle_Rule_Condition) GetMatchesSuffix() []string {
- if x != nil {
- return x.MatchesSuffix
+// Settings for Uniform Bucket level access.
+// See https://cloud.google.com/storage/docs/uniform-bucket-level-access.
+type Bucket_IamConfig_UniformBucketLevelAccess struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // If set, access checks only use bucket-level IAM policies or above.
+ Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
+ // The deadline time for changing
+ // `iam_config.uniform_bucket_level_access.enabled` from `true` to
+ // `false`. Mutable until the specified deadline is reached, but not
+ // afterward.
+ LockTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=lock_time,json=lockTime,proto3" json:"lock_time,omitempty"`
+}
+
+func (x *Bucket_IamConfig_UniformBucketLevelAccess) Reset() {
+ *x = Bucket_IamConfig_UniformBucketLevelAccess{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[73]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
- return nil
}
-var File_google_storage_v2_storage_proto protoreflect.FileDescriptor
+func (x *Bucket_IamConfig_UniformBucketLevelAccess) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
-var file_google_storage_v2_storage_proto_rawDesc = []byte{
- 0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
- 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x12, 0x11, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
- 0x65, 0x2e, 0x76, 0x32, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69,
- 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f,
- 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75,
- 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f,
- 0x76, 0x31, 0x2f, 0x69, 0x61, 0x6d, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x1a, 0x1a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f,
- 0x76, 0x31, 0x2f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
- 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
- 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+func (*Bucket_IamConfig_UniformBucketLevelAccess) ProtoMessage() {}
+
+func (x *Bucket_IamConfig_UniformBucketLevelAccess) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[73]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Bucket_IamConfig_UniformBucketLevelAccess.ProtoReflect.Descriptor instead.
+func (*Bucket_IamConfig_UniformBucketLevelAccess) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 3, 0}
+}
+
+func (x *Bucket_IamConfig_UniformBucketLevelAccess) GetEnabled() bool {
+ if x != nil {
+ return x.Enabled
+ }
+ return false
+}
+
+func (x *Bucket_IamConfig_UniformBucketLevelAccess) GetLockTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.LockTime
+ }
+ return nil
+}
+
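Every generated getter in this file follows the same nil-guard shape seen above, so chained access never panics even when intermediate messages are unset. A minimal sketch of that property (hypothetical variable name, and assuming the generated package is importable as storagepb):

	var cfg *storagepb.Bucket_IamConfig // typed nil is fine
	// Each getter checks its receiver, so this returns nil instead of panicking.
	lockTime := cfg.GetUniformBucketLevelAccess().GetLockTime()
	_ = lockTime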
+// A lifecycle Rule, combining an action to take on an object and a
+// condition which will trigger that action.
+type Bucket_Lifecycle_Rule struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The action to take.
+ Action *Bucket_Lifecycle_Rule_Action `protobuf:"bytes,1,opt,name=action,proto3" json:"action,omitempty"`
+ // The condition(s) under which the action will be taken.
+ Condition *Bucket_Lifecycle_Rule_Condition `protobuf:"bytes,2,opt,name=condition,proto3" json:"condition,omitempty"`
+}
+
+func (x *Bucket_Lifecycle_Rule) Reset() {
+ *x = Bucket_Lifecycle_Rule{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[74]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Bucket_Lifecycle_Rule) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Bucket_Lifecycle_Rule) ProtoMessage() {}
+
+func (x *Bucket_Lifecycle_Rule) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[74]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Bucket_Lifecycle_Rule.ProtoReflect.Descriptor instead.
+func (*Bucket_Lifecycle_Rule) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 4, 0}
+}
+
+func (x *Bucket_Lifecycle_Rule) GetAction() *Bucket_Lifecycle_Rule_Action {
+ if x != nil {
+ return x.Action
+ }
+ return nil
+}
+
+func (x *Bucket_Lifecycle_Rule) GetCondition() *Bucket_Lifecycle_Rule_Condition {
+ if x != nil {
+ return x.Condition
+ }
+ return nil
+}
+
+// An action to take on an object.
+type Bucket_Lifecycle_Rule_Action struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Type of the action. Currently, only `Delete`, `SetStorageClass`, and
+ // `AbortIncompleteMultipartUpload` are supported.
+ Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+ // Target storage class. Required iff the type of the action is
+ // SetStorageClass.
+ StorageClass string `protobuf:"bytes,2,opt,name=storage_class,json=storageClass,proto3" json:"storage_class,omitempty"`
+}
+
+func (x *Bucket_Lifecycle_Rule_Action) Reset() {
+ *x = Bucket_Lifecycle_Rule_Action{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[75]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Bucket_Lifecycle_Rule_Action) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Bucket_Lifecycle_Rule_Action) ProtoMessage() {}
+
+func (x *Bucket_Lifecycle_Rule_Action) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[75]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Bucket_Lifecycle_Rule_Action.ProtoReflect.Descriptor instead.
+func (*Bucket_Lifecycle_Rule_Action) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 4, 0, 0}
+}
+
+func (x *Bucket_Lifecycle_Rule_Action) GetType() string {
+ if x != nil {
+ return x.Type
+ }
+ return ""
+}
+
+func (x *Bucket_Lifecycle_Rule_Action) GetStorageClass() string {
+ if x != nil {
+ return x.StorageClass
+ }
+ return ""
+}
+
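Per the field comments above, storage_class is only meaningful when the action type is SetStorageClass. A hedged construction sketch (values taken from those comments):

	// Transition matching objects to the NEARLINE storage class.
	act := &storagepb.Bucket_Lifecycle_Rule_Action{
		Type:         "SetStorageClass",
		StorageClass: "NEARLINE",
	}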
+// A condition of an object which triggers some action.
+type Bucket_Lifecycle_Rule_Condition struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Age of an object (in days). This condition is satisfied when an
+ // object reaches the specified age.
+ // A value of 0 indicates that all objects immediately match this
+ // condition.
+ AgeDays *int32 `protobuf:"varint,1,opt,name=age_days,json=ageDays,proto3,oneof" json:"age_days,omitempty"`
+ // This condition is satisfied when an object is created before midnight
+ // of the specified date in UTC.
+ CreatedBefore *date.Date `protobuf:"bytes,2,opt,name=created_before,json=createdBefore,proto3" json:"created_before,omitempty"`
+ // Relevant only for versioned objects. If the value is
+ // `true`, this condition matches live objects; if the value
+ // is `false`, it matches archived objects.
+ IsLive *bool `protobuf:"varint,3,opt,name=is_live,json=isLive,proto3,oneof" json:"is_live,omitempty"`
+ // Relevant only for versioned objects. If the value is N, this
+ // condition is satisfied when there are at least N versions (including
+ // the live version) newer than this version of the object.
+ NumNewerVersions *int32 `protobuf:"varint,4,opt,name=num_newer_versions,json=numNewerVersions,proto3,oneof" json:"num_newer_versions,omitempty"`
+ // Objects having any of the storage classes specified by this condition
+ // will be matched. Values include `MULTI_REGIONAL`, `REGIONAL`,
+ // `NEARLINE`, `COLDLINE`, `STANDARD`, and
+ // `DURABLE_REDUCED_AVAILABILITY`.
+ MatchesStorageClass []string `protobuf:"bytes,5,rep,name=matches_storage_class,json=matchesStorageClass,proto3" json:"matches_storage_class,omitempty"`
+ // Number of days that have elapsed since the custom timestamp set on an
+ // object.
+ // The value of the field must be a nonnegative integer.
+ DaysSinceCustomTime *int32 `protobuf:"varint,7,opt,name=days_since_custom_time,json=daysSinceCustomTime,proto3,oneof" json:"days_since_custom_time,omitempty"`
+ // An object matches this condition if the custom timestamp set on the
+ // object is before the specified date in UTC.
+ CustomTimeBefore *date.Date `protobuf:"bytes,8,opt,name=custom_time_before,json=customTimeBefore,proto3" json:"custom_time_before,omitempty"`
+ // This condition is relevant only for versioned objects. An object
+ // version satisfies this condition only if these many days have been
+ // passed since it became noncurrent. The value of the field must be a
+ // nonnegative integer. If it's zero, the object version will become
+ // eligible for Lifecycle action as soon as it becomes noncurrent.
+ DaysSinceNoncurrentTime *int32 `protobuf:"varint,9,opt,name=days_since_noncurrent_time,json=daysSinceNoncurrentTime,proto3,oneof" json:"days_since_noncurrent_time,omitempty"`
+ // This condition is relevant only for versioned objects. An object
+ // version satisfies this condition only if it became noncurrent before
+ // the specified date in UTC.
+ NoncurrentTimeBefore *date.Date `protobuf:"bytes,10,opt,name=noncurrent_time_before,json=noncurrentTimeBefore,proto3" json:"noncurrent_time_before,omitempty"`
+ // List of object name prefixes. If any prefix exactly matches the
+ // beginning of the object name, the condition evaluates to true.
+ MatchesPrefix []string `protobuf:"bytes,11,rep,name=matches_prefix,json=matchesPrefix,proto3" json:"matches_prefix,omitempty"`
+ // List of object name suffixes. If any suffix exactly matches the
+ // end of the object name, the condition evaluates to true.
+ MatchesSuffix []string `protobuf:"bytes,12,rep,name=matches_suffix,json=matchesSuffix,proto3" json:"matches_suffix,omitempty"`
+}
+
+func (x *Bucket_Lifecycle_Rule_Condition) Reset() {
+ *x = Bucket_Lifecycle_Rule_Condition{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[76]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Bucket_Lifecycle_Rule_Condition) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Bucket_Lifecycle_Rule_Condition) ProtoMessage() {}
+
+func (x *Bucket_Lifecycle_Rule_Condition) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[76]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Bucket_Lifecycle_Rule_Condition.ProtoReflect.Descriptor instead.
+func (*Bucket_Lifecycle_Rule_Condition) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 4, 0, 1}
+}
+
+func (x *Bucket_Lifecycle_Rule_Condition) GetAgeDays() int32 {
+ if x != nil && x.AgeDays != nil {
+ return *x.AgeDays
+ }
+ return 0
+}
+
+func (x *Bucket_Lifecycle_Rule_Condition) GetCreatedBefore() *date.Date {
+ if x != nil {
+ return x.CreatedBefore
+ }
+ return nil
+}
+
+func (x *Bucket_Lifecycle_Rule_Condition) GetIsLive() bool {
+ if x != nil && x.IsLive != nil {
+ return *x.IsLive
+ }
+ return false
+}
+
+func (x *Bucket_Lifecycle_Rule_Condition) GetNumNewerVersions() int32 {
+ if x != nil && x.NumNewerVersions != nil {
+ return *x.NumNewerVersions
+ }
+ return 0
+}
+
+func (x *Bucket_Lifecycle_Rule_Condition) GetMatchesStorageClass() []string {
+ if x != nil {
+ return x.MatchesStorageClass
+ }
+ return nil
+}
+
+func (x *Bucket_Lifecycle_Rule_Condition) GetDaysSinceCustomTime() int32 {
+ if x != nil && x.DaysSinceCustomTime != nil {
+ return *x.DaysSinceCustomTime
+ }
+ return 0
+}
+
+func (x *Bucket_Lifecycle_Rule_Condition) GetCustomTimeBefore() *date.Date {
+ if x != nil {
+ return x.CustomTimeBefore
+ }
+ return nil
+}
+
+func (x *Bucket_Lifecycle_Rule_Condition) GetDaysSinceNoncurrentTime() int32 {
+ if x != nil && x.DaysSinceNoncurrentTime != nil {
+ return *x.DaysSinceNoncurrentTime
+ }
+ return 0
+}
+
+func (x *Bucket_Lifecycle_Rule_Condition) GetNoncurrentTimeBefore() *date.Date {
+ if x != nil {
+ return x.NoncurrentTimeBefore
+ }
+ return nil
+}
+
+func (x *Bucket_Lifecycle_Rule_Condition) GetMatchesPrefix() []string {
+ if x != nil {
+ return x.MatchesPrefix
+ }
+ return nil
+}
+
+func (x *Bucket_Lifecycle_Rule_Condition) GetMatchesSuffix() []string {
+ if x != nil {
+ return x.MatchesSuffix
+ }
+ return nil
+}
+
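Tying the nested types above together: optional scalar fields on the Condition are pointer-typed (e.g. *int32, per the `oneof` struct tags), so they are set with the proto helpers and read back through the nil-safe getters, which unwrap the pointer and return the zero value when unset. A self-contained sketch; the storagepb import path is an assumption about where this vendored package surfaces (the real path under cloud.google.com/go/storage is internal and not importable from outside that module):

	package main

	import (
		"fmt"

		storagepb "cloud.google.com/go/storage/internal/apiv2/storagepb" // assumed path
		"google.golang.org/protobuf/proto"
	)

	func main() {
		rule := &storagepb.Bucket_Lifecycle_Rule{
			Action: &storagepb.Bucket_Lifecycle_Rule_Action{Type: "Delete"},
			Condition: &storagepb.Bucket_Lifecycle_Rule_Condition{
				AgeDays:       proto.Int32(30), // optional field -> *int32
				MatchesPrefix: []string{"tmp/"},
			},
		}
		// Getters are nil-safe and unwrap the optional pointer (0 when unset).
		fmt.Println(rule.GetAction().GetType(), rule.GetCondition().GetAgeDays())
	}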
+var File_google_storage_v2_storage_proto protoreflect.FileDescriptor
+
+var file_google_storage_v2_storage_proto_rawDesc = []byte{
+ 0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
+ 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x12, 0x11, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
+ 0x65, 0x2e, 0x76, 0x32, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69,
+ 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f,
+ 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f,
+ 0x76, 0x31, 0x2f, 0x69, 0x61, 0x6d, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x1a, 0x1a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f,
+ 0x76, 0x31, 0x2f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69,
0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f,
@@ -7281,1677 +7287,1685 @@ var file_google_storage_v2_storage_proto_rawDesc = []byte{
0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69,
0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x68, 0x0a, 0x1f, 0x44, 0x65,
- 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x45, 0x0a,
- 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x31, 0xe0, 0x41, 0x02,
- 0xfa, 0x41, 0x2b, 0x0a, 0x29, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69,
- 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x04,
- 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x65, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66,
- 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x12, 0x45, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x42, 0x31, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2b, 0x0a, 0x29, 0x73, 0x74, 0x6f, 0x72,
- 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
- 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43,
- 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xc9, 0x01, 0x0a, 0x1f,
- 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
- 0x49, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42,
- 0x31, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2b, 0x12, 0x29, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
- 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66,
- 0x69, 0x67, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x5b, 0x0a, 0x13, 0x6e, 0x6f,
- 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69,
- 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69,
- 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03,
- 0xe0, 0x41, 0x02, 0x52, 0x12, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xa7, 0x01, 0x0a, 0x1e, 0x4c, 0x69, 0x73, 0x74,
- 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66,
- 0x69, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x06, 0x70, 0x61,
- 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x31, 0xe0, 0x41, 0x02, 0xfa,
- 0x41, 0x2b, 0x12, 0x29, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66,
- 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x70,
- 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69,
- 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69,
- 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e,
- 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65,
- 0x6e, 0x22, 0xa3, 0x01, 0x0a, 0x1f, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69,
- 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x01, 0x20,
- 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
- 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x13, 0x6e, 0x6f, 0x74, 0x69,
- 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12,
- 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b,
- 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61,
- 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xc3, 0x07, 0x0a, 0x14, 0x43, 0x6f, 0x6d, 0x70,
- 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x12, 0x40, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
- 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
- 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x12, 0x5b, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6f, 0x62, 0x6a,
- 0x65, 0x63, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43,
+ 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xc3, 0x07, 0x0a, 0x14, 0x43,
0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
- 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12,
- 0x3c, 0x0a, 0x1a, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70,
- 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x50, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, 0x0a,
- 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d,
- 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66,
- 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88,
- 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e,
- 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20,
- 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e,
- 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12,
- 0x3f, 0x0a, 0x07, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09,
- 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43,
- 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79,
- 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63,
- 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73,
- 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f,
- 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61,
- 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65,
- 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12,
- 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73,
- 0x75, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62,
- 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f,
- 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x1a, 0xa8,
- 0x02, 0x0a, 0x0c, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12,
- 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0,
- 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65,
- 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65,
- 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7b, 0x0a, 0x14, 0x6f, 0x62, 0x6a, 0x65,
- 0x63, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f,
- 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e,
- 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4f, 0x62, 0x6a,
- 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x52, 0x13, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x62, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50,
- 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x33, 0x0a, 0x13,
- 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61,
- 0x74, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47,
- 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01,
- 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66,
- 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63,
- 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e,
- 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xe2, 0x04,
- 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73,
- 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
- 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63,
- 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
- 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00,
- 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61,
- 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e,
- 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63,
- 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e,
- 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88,
- 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e,
- 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20,
- 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e,
- 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12,
- 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08,
- 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65,
- 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68,
- 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62,
+ 0x65, 0x73, 0x74, 0x12, 0x40, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5b, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f,
+ 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76,
+ 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x73, 0x12, 0x3c, 0x0a, 0x1a, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c,
+ 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c,
+ 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52,
+ 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74,
+ 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61,
+ 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61,
+ 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88,
+ 0x01, 0x01, 0x12, 0x3f, 0x0a, 0x07, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20,
+ 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b,
+ 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x6b, 0x6d, 0x73,
+ 0x4b, 0x65, 0x79, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62,
0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72,
- 0x61, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x61, 0x6d, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f,
0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f,
0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61,
- 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69,
- 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74,
- 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65,
- 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74,
- 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65,
- 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74,
- 0x63, 0x68, 0x22, 0xa9, 0x05, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62,
- 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62,
- 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02,
- 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b,
- 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62,
- 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52,
- 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x23, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02,
- 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, 0x13,
- 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61,
- 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47,
- 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01,
- 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01,
- 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a,
- 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02,
- 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66,
+ 0x6d, 0x73, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65,
+ 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
+ 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73,
+ 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d,
+ 0x73, 0x1a, 0xa8, 0x02, 0x0a, 0x0c, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x67,
+ 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52,
+ 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7b, 0x0a, 0x14, 0x6f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f,
+ 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e,
+ 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x52, 0x13, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f,
+ 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x62, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12,
+ 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11,
+ 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63,
+ 0x68, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x16, 0x0a, 0x14,
+ 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d,
+ 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61,
+ 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x22, 0xe2, 0x04, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b,
+ 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f,
+ 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52,
+ 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28,
+ 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f,
+ 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d,
+ 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66,
+ 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74,
+ 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61,
+ 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61,
+ 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88,
+ 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e,
+ 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63,
+ 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74,
+ 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61,
+ 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e,
+ 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f,
+ 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
+ 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d,
+ 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50,
+ 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e,
+ 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a,
+ 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
+ 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66,
0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
- 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48,
- 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x2b,
- 0x0a, 0x0f, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x61, 0x63,
- 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x48, 0x04, 0x52, 0x0d, 0x63, 0x6f, 0x70, 0x79, 0x53,
- 0x6f, 0x75, 0x72, 0x63, 0x65, 0x41, 0x63, 0x6c, 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63,
- 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
- 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65,
- 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52,
- 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74,
+ 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xa9, 0x05, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72,
+ 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d,
+ 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25,
+ 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42,
+ 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a,
+ 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0,
+ 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x23, 0x0a, 0x0a, 0x67, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03,
+ 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11,
+ 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63,
+ 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18,
+ 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01,
+ 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28,
+ 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a,
+ 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01,
+ 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01,
+ 0x01, 0x12, 0x2b, 0x0a, 0x0f, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x48, 0x04, 0x52, 0x0d, 0x63, 0x6f,
+ 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x41, 0x63, 0x6c, 0x88, 0x01, 0x01, 0x12, 0x6d,
+ 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f,
+ 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
+ 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61,
+ 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x16, 0x0a,
+ 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e,
+ 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63,
+ 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e,
+ 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a,
+ 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x12, 0x0a,
+ 0x10, 0x5f, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x61, 0x63,
+ 0x6c, 0x22, 0x3f, 0x0a, 0x1b, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d,
+ 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x20, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64,
+ 0x49, 0x64, 0x22, 0x1e, 0x0a, 0x1c, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75,
+ 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x22, 0xec, 0x05, 0x0a, 0x11, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b,
+ 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f,
+ 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52,
+ 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x66, 0x66,
+ 0x73, 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x4f,
+ 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6c, 0x69,
+ 0x6d, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x65, 0x61, 0x64, 0x4c,
+ 0x69, 0x6d, 0x69, 0x74, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28,
+ 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f,
+ 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d,
+ 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66,
+ 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74,
+ 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61,
+ 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61,
+ 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88,
+ 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e,
+ 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63,
+ 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74,
+ 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61,
+ 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e,
+ 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f,
+ 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
+ 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d,
+ 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50,
+ 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61,
+ 0x73, 0x6b, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64,
+ 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x04, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b,
+ 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f,
+ 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f,
+ 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d,
+ 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61,
+ 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67,
+ 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61,
+ 0x74, 0x63, 0x68, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73,
+ 0x6b, 0x22, 0xe4, 0x05, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d,
+ 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
+ 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62,
+ 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0c, 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74,
+ 0x65, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x6f, 0x66, 0x74,
+ 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66,
+ 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63,
+ 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e,
+ 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12,
+ 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03,
+ 0x48, 0x02, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69,
+ 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x15,
+ 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d,
+ 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f,
+ 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52,
+ 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c,
+ 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73,
+ 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72,
+ 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x05, 0x52, 0x08, 0x72, 0x65,
+ 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x73, 0x6f,
+ 0x66, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69,
0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74,
0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a,
0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69,
0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x12, 0x0a, 0x10, 0x5f, 0x63,
- 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x6c, 0x22, 0x3f,
- 0x0a, 0x1b, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c,
- 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a,
- 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x22,
- 0x1e, 0x0a, 0x1c, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62,
- 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
- 0xec, 0x05, 0x0a, 0x11, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73,
- 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
- 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63,
- 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74,
- 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x66, 0x66, 0x73,
- 0x65, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74,
- 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x65, 0x61, 0x64, 0x4c, 0x69, 0x6d, 0x69,
- 0x74, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00,
+ 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72,
+ 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xaf, 0x02, 0x0a, 0x12, 0x52, 0x65, 0x61,
+ 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
+ 0x4d, 0x0a, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64,
+ 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68,
+ 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x52, 0x0f, 0x63,
+ 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4d,
+ 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75,
+ 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x44, 0x0a,
+ 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
+ 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74,
+ 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61,
+ 0x6e, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18,
+ 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
+ 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
+ 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x8c, 0x04, 0x0a, 0x0f, 0x57,
+ 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x3a,
+ 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
+ 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02,
+ 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72,
+ 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x07, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63,
+ 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00,
0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61,
0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e,
0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63,
- 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e,
+ 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e,
0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88,
0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e,
- 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20,
+ 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20,
0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e,
0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12,
0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x09,
+ 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06,
0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65,
0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68,
- 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62,
- 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72,
- 0x61, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f,
- 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f,
+ 0x88, 0x01, 0x01, 0x12, 0x24, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69,
+ 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x53, 0x69, 0x7a, 0x65, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66,
+ 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63,
+ 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a,
+ 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66,
+ 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
+ 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x6f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x22, 0xf8, 0x03, 0x0a, 0x12, 0x57, 0x72,
+ 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x1d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12,
+ 0x50, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f,
+ 0x73, 0x70, 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57,
+ 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x48, 0x00,
+ 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65,
+ 0x63, 0x12, 0x26, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65,
+ 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x77, 0x72,
+ 0x69, 0x74, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x4f, 0x0a, 0x10, 0x63, 0x68, 0x65,
+ 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
+ 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d,
+ 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x48, 0x01, 0x52, 0x0f, 0x63, 0x68, 0x65, 0x63, 0x6b,
+ 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x06,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
+ 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43,
+ 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74,
+ 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x6e,
+ 0x69, 0x73, 0x68, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x0b, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x6d, 0x0a, 0x1c,
+ 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73,
+ 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x0f, 0x0a, 0x0d, 0x66,
+ 0x69, 0x72, 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x06, 0x0a, 0x04,
+ 0x64, 0x61, 0x74, 0x61, 0x22, 0x87, 0x01, 0x0a, 0x13, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0e,
+ 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65,
+ 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x0e,
+ 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xb5,
+ 0x04, 0x0a, 0x16, 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x09, 0x75, 0x70, 0x6c,
+ 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08,
+ 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x50, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74,
+ 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
+ 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x48, 0x00, 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74, 0x65,
+ 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x26, 0x0a, 0x0c, 0x77, 0x72,
+ 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03,
+ 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x66, 0x66, 0x73,
+ 0x65, 0x74, 0x12, 0x4f, 0x0a, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65,
+ 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
+ 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61,
+ 0x48, 0x01, 0x52, 0x0f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44,
+ 0x61, 0x74, 0x61, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68,
+ 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76,
+ 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d,
+ 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75,
+ 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x6f, 0x6f, 0x6b,
+ 0x75, 0x70, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x65, 0x4c,
+ 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x18, 0x08,
+ 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x12, 0x21, 0x0a, 0x0c, 0x66,
+ 0x69, 0x6e, 0x69, 0x73, 0x68, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x0b, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x6d,
+ 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f,
+ 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x0a,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
+ 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f,
0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61,
- 0x6d, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18,
- 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73,
- 0x6b, 0x48, 0x04, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01,
- 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f,
- 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d,
- 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61,
- 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68,
- 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65,
- 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68,
- 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xe4,
- 0x05, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f,
- 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
- 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b,
- 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12,
- 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20,
- 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
- 0x26, 0x0a, 0x0c, 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18,
- 0x0b, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c,
- 0x65, 0x74, 0x65, 0x64, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65,
- 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04,
- 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17,
- 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f,
- 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52,
- 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74,
- 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d,
- 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61,
- 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x15, 0x69, 0x66, 0x4d,
- 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74,
- 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61,
- 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d,
- 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x18, 0x69, 0x66,
- 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f,
- 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d,
- 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
- 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63,
+ 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x0f, 0x0a,
+ 0x0d, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x06,
+ 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x8b, 0x01, 0x0a, 0x17, 0x42, 0x69, 0x64, 0x69, 0x57,
+ 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f,
+ 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65,
+ 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72,
+ 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76,
+ 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74,
+ 0x61, 0x74, 0x75, 0x73, 0x22, 0xe3, 0x04, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x70,
+ 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02,
+ 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b,
+ 0x65, 0x74, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61,
+ 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70,
+ 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f,
+ 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67,
+ 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69,
+ 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d,
+ 0x69, 0x74, 0x65, 0x72, 0x12, 0x3c, 0x0a, 0x1a, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f,
+ 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74,
+ 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64,
+ 0x65, 0x54, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74,
+ 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x06, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x1a, 0x0a, 0x08, 0x76, 0x65,
+ 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x76, 0x65,
+ 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d,
+ 0x61, 0x73, 0x6b, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c,
+ 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73,
+ 0x6b, 0x88, 0x01, 0x01, 0x12, 0x34, 0x0a, 0x13, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72,
+ 0x61, 0x70, 0x68, 0x69, 0x63, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28,
+ 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x12, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72,
+ 0x61, 0x70, 0x68, 0x69, 0x63, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x30, 0x0a, 0x11, 0x6c, 0x65,
+ 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x5f, 0x65, 0x6e, 0x64, 0x18,
+ 0x0b, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x6c, 0x65, 0x78, 0x69,
+ 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x45, 0x6e, 0x64, 0x12, 0x26, 0x0a, 0x0c,
+ 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x0c, 0x20, 0x01,
+ 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0b, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c,
+ 0x65, 0x74, 0x65, 0x64, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f,
+ 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x61, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69,
+ 0x78, 0x65, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x18,
+ 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x46, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x41, 0x73,
+ 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x65, 0x73, 0x12, 0x22, 0x0a, 0x0a, 0x6d, 0x61, 0x74, 0x63,
+ 0x68, 0x5f, 0x67, 0x6c, 0x6f, 0x62, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41,
+ 0x01, 0x52, 0x09, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x47, 0x6c, 0x6f, 0x62, 0x42, 0x0c, 0x0a, 0x0a,
+ 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xaa, 0x01, 0x0a, 0x17, 0x51,
+ 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64,
+ 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08,
+ 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d,
+ 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
+ 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f,
+ 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x22, 0x8c, 0x01, 0x0a, 0x18, 0x51, 0x75, 0x65, 0x72,
+ 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65,
+ 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d,
+ 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a,
+ 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
+ 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f,
+ 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xb5, 0x0e, 0x0a, 0x14, 0x52, 0x65, 0x77, 0x72, 0x69,
+ 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x31, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e,
+ 0x61, 0x6d, 0x65, 0x18, 0x18, 0x20, 0x01, 0x28, 0x09, 0x42, 0x06, 0xe0, 0x41, 0x02, 0xe0, 0x41,
+ 0x05, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61,
+ 0x6d, 0x65, 0x12, 0x57, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x19, 0x20, 0x01, 0x28, 0x09, 0x42, 0x28,
+ 0xe0, 0x41, 0x02, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61,
+ 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x11, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x56, 0x0a, 0x13, 0x64,
+ 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b,
+ 0x65, 0x79, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63,
+ 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
+ 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79,
+ 0x52, 0x11, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x6d, 0x73,
+ 0x4b, 0x65, 0x79, 0x12, 0x3b, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x12, 0x4a, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65,
+ 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a,
+ 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
+ 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x0c,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x28, 0x0a, 0x0d,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x03, 0x52, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74,
+ 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x77, 0x72,
+ 0x69, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x3c, 0x0a, 0x1a, 0x64, 0x65, 0x73, 0x74,
+ 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e,
+ 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x65,
+ 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69,
+ 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e,
+ 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20,
+ 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69,
+ 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74,
+ 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14,
+ 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d,
+ 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65,
+ 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65,
+ 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63,
+ 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67,
+ 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61,
+ 0x74, 0x63, 0x68, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d,
+ 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74,
+ 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x40, 0x0a, 0x1a, 0x69, 0x66, 0x5f, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x17,
+ 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x47, 0x0a, 0x1e, 0x69, 0x66,
+ 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0c, 0x20, 0x01,
+ 0x28, 0x03, 0x48, 0x05, 0x52, 0x1a, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68,
+ 0x88, 0x01, 0x01, 0x12, 0x48, 0x0a, 0x1e, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x03, 0x48, 0x06, 0x52, 0x1b, 0x69,
+ 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x4f, 0x0a,
+ 0x22, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67,
+ 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61,
+ 0x74, 0x63, 0x68, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x48, 0x07, 0x52, 0x1e, 0x69, 0x66, 0x53,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3e,
+ 0x0a, 0x1c, 0x6d, 0x61, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x77, 0x72,
+ 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x18, 0x0f,
+ 0x20, 0x01, 0x28, 0x03, 0x52, 0x18, 0x6d, 0x61, 0x78, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65,
+ 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x50, 0x65, 0x72, 0x43, 0x61, 0x6c, 0x6c, 0x12, 0x47,
+ 0x0a, 0x20, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e,
+ 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74,
+ 0x68, 0x6d, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1d, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c,
+ 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x46, 0x0a, 0x20, 0x63, 0x6f, 0x70, 0x79, 0x5f,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28,
+ 0x0c, 0x52, 0x1c, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63,
+ 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12,
+ 0x53, 0x0a, 0x27, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65,
+ 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68,
+ 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0c,
+ 0x52, 0x22, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72,
+ 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42,
+ 0x79, 0x74, 0x65, 0x73, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61,
+ 0x72, 0x61, 0x6d, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43,
0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64,
- 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69,
- 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x05, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d,
- 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x73, 0x6f, 0x66, 0x74, 0x5f,
- 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67,
- 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42,
- 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f,
- 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d,
- 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f,
- 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64,
- 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xaf, 0x02, 0x0a, 0x12, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62,
- 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x10,
- 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b,
- 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x52, 0x0f, 0x63, 0x68, 0x65, 0x63,
- 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4d, 0x0a, 0x10, 0x6f,
- 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
- 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
- 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63,
- 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x44, 0x0a, 0x0d, 0x63, 0x6f,
- 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
- 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e,
- 0x67, 0x65, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65,
- 0x12, 0x35, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
- 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x08, 0x6d,
- 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x8c, 0x04, 0x0a, 0x0f, 0x57, 0x72, 0x69, 0x74,
- 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x3a, 0x0a, 0x08, 0x72,
- 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e,
+ 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e,
+ 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72,
+ 0x61, 0x6d, 0x73, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68,
+ 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76,
- 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x72,
- 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65,
- 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33,
- 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
- 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69,
- 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68,
- 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04,
- 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12,
- 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03,
- 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b,
- 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28,
- 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01,
- 0x12, 0x24, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18,
- 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53,
- 0x69, 0x7a, 0x65, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65,
- 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a,
- 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69,
- 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65,
- 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74,
- 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63,
- 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x22, 0xf8, 0x03, 0x0a, 0x12, 0x57, 0x72, 0x69, 0x74, 0x65,
- 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a,
- 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x48, 0x00, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x50, 0x0a, 0x11,
- 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65,
- 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74,
- 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x48, 0x00, 0x52, 0x0f, 0x77,
- 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x26,
- 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x77, 0x72, 0x69, 0x74, 0x65,
- 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x4f, 0x0a, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73,
- 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
- 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64,
- 0x44, 0x61, 0x74, 0x61, 0x48, 0x01, 0x52, 0x0f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d,
- 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63,
- 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28,
+ 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d,
+ 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75,
+ 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69,
+ 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74,
+ 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65,
+ 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x42, 0x1d, 0x0a, 0x1b, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63,
+ 0x68, 0x42, 0x21, 0x0a, 0x1f, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f,
+ 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d,
+ 0x61, 0x74, 0x63, 0x68, 0x42, 0x21, 0x0a, 0x1f, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x25, 0x0a, 0x23, 0x5f, 0x69, 0x66, 0x5f, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xd6,
+ 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x62, 0x79, 0x74, 0x65,
+ 0x73, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x03, 0x52, 0x13, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x77,
+ 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74,
+ 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x6f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72,
+ 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e,
+ 0x12, 0x35, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x08, 0x72,
+ 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0xaf, 0x02, 0x0a, 0x1a, 0x53, 0x74, 0x61, 0x72,
+ 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x53, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f,
+ 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
- 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63,
- 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65,
- 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68,
- 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x66, 0x69,
- 0x6e, 0x69, 0x73, 0x68, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d,
- 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
- 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63,
- 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x0f, 0x0a, 0x0d, 0x66, 0x69, 0x72, 0x73,
- 0x74, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x64, 0x61, 0x74,
- 0x61, 0x22, 0x87, 0x01, 0x0a, 0x13, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63,
- 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72,
- 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x53, 0x69,
- 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
- 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48,
- 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77,
- 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xb5, 0x04, 0x0a, 0x16,
- 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64,
- 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x75, 0x70, 0x6c,
- 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x50, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f,
- 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
- 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
- 0x53, 0x70, 0x65, 0x63, 0x48, 0x00, 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a,
- 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x26, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65,
- 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0,
- 0x41, 0x02, 0x52, 0x0b, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12,
- 0x4f, 0x0a, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64,
- 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68,
- 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x48, 0x01, 0x52,
- 0x0f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61,
- 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b,
- 0x73, 0x75, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f,
- 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f,
- 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12,
- 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x18,
- 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b,
- 0x75, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28,
- 0x08, 0x52, 0x05, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x6e, 0x69,
- 0x73, 0x68, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b,
- 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x6d, 0x0a, 0x1c, 0x63,
+ 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x53, 0x70, 0x65, 0x63, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74,
+ 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x6d, 0x0a, 0x1c, 0x63,
0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28,
+ 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65,
0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52,
0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x0f, 0x0a, 0x0d, 0x66, 0x69,
- 0x72, 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x64,
- 0x61, 0x74, 0x61, 0x22, 0x8b, 0x01, 0x0a, 0x17, 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74,
- 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
- 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a,
- 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69,
- 0x73, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f,
- 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f,
- 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
- 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75,
- 0x73, 0x22, 0xe3, 0x04, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
- 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65,
- 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f,
- 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52,
- 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f,
- 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65,
- 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b,
- 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f,
- 0x6b, 0x65, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72,
- 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65,
- 0x72, 0x12, 0x3c, 0x0a, 0x1a, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x72, 0x61,
- 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18,
- 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x72,
- 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12,
- 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x1a, 0x0a, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69,
- 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69,
- 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b,
- 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61,
- 0x73, 0x6b, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01,
- 0x01, 0x12, 0x34, 0x0a, 0x13, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68,
- 0x69, 0x63, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03,
- 0xe0, 0x41, 0x01, 0x52, 0x12, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68,
- 0x69, 0x63, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x30, 0x0a, 0x11, 0x6c, 0x65, 0x78, 0x69, 0x63,
- 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x5f, 0x65, 0x6e, 0x64, 0x18, 0x0b, 0x20, 0x01,
- 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67,
- 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x45, 0x6e, 0x64, 0x12, 0x26, 0x0a, 0x0c, 0x73, 0x6f, 0x66,
- 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x42,
- 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0b, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65,
- 0x64, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x66, 0x6f, 0x6c,
- 0x64, 0x65, 0x72, 0x73, 0x5f, 0x61, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x65, 0x73,
- 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x18, 0x69, 0x6e, 0x63,
- 0x6c, 0x75, 0x64, 0x65, 0x46, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x41, 0x73, 0x50, 0x72, 0x65,
- 0x66, 0x69, 0x78, 0x65, 0x73, 0x12, 0x22, 0x0a, 0x0a, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x67,
- 0x6c, 0x6f, 0x62, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09,
- 0x6d, 0x61, 0x74, 0x63, 0x68, 0x47, 0x6c, 0x6f, 0x62, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65,
- 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xaa, 0x01, 0x0a, 0x17, 0x51, 0x75, 0x65, 0x72,
- 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x75, 0x70, 0x6c,
- 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f,
- 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70,
- 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e,
- 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f,
+ 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
+ 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43,
+ 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74,
+ 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x22, 0x3a, 0x0a, 0x1b, 0x53, 0x74, 0x61,
+ 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f,
+ 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x70, 0x6c,
+ 0x6f, 0x61, 0x64, 0x49, 0x64, 0x22, 0x87, 0x05, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65,
+ 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a,
+ 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76,
+ 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66,
+ 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69,
+ 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61,
+ 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74,
+ 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63,
+ 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74,
+ 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68,
+ 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65,
+ 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d,
+ 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65,
+ 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x40,
+ 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x07, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42,
+ 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b,
+ 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73,
+ 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f,
0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61,
- 0x72, 0x61, 0x6d, 0x73, 0x22, 0x8c, 0x01, 0x0a, 0x18, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72,
- 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x12, 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73,
- 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72,
- 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65,
- 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
- 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75,
- 0x72, 0x63, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61,
- 0x74, 0x75, 0x73, 0x22, 0xb5, 0x0e, 0x0a, 0x14, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f,
- 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x10,
- 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
- 0x18, 0x18, 0x20, 0x01, 0x28, 0x09, 0x42, 0x06, 0xe0, 0x41, 0x02, 0xe0, 0x41, 0x05, 0x52, 0x0f,
- 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12,
- 0x57, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62,
- 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x19, 0x20, 0x01, 0x28, 0x09, 0x42, 0x28, 0xe0, 0x41, 0x02,
- 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42,
- 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x11, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x56, 0x0a, 0x13, 0x64, 0x65, 0x73, 0x74,
- 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18,
- 0x1b, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75,
- 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
- 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x11, 0x64,
- 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x6d, 0x73, 0x4b, 0x65, 0x79,
- 0x12, 0x3b, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
- 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
- 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4a, 0x0a,
- 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74,
- 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
- 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x0c, 0x73, 0x6f, 0x75,
- 0x72, 0x63, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x28, 0x0a, 0x0d, 0x73, 0x6f, 0x75,
- 0x72, 0x63, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
- 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a,
- 0x65, 0x63, 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65,
- 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10,
- 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65,
- 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65,
- 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x3c, 0x0a, 0x1a, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f,
- 0x61, 0x63, 0x6c, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x65, 0x73, 0x74, 0x69,
- 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64,
- 0x41, 0x63, 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03,
- 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67,
- 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61,
- 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47,
- 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63,
- 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67,
- 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18,
- 0x09, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67,
- 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01,
- 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65,
- 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68,
- 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61,
- 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74,
- 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x40, 0x0a, 0x1a, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72,
- 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61,
- 0x74, 0x63, 0x68, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x17, 0x69, 0x66, 0x53,
- 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d,
- 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x47, 0x0a, 0x1e, 0x69, 0x66, 0x5f, 0x73, 0x6f,
- 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
- 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x03, 0x48,
- 0x05, 0x52, 0x1a, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01,
- 0x12, 0x48, 0x0a, 0x1e, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65,
- 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74,
- 0x63, 0x68, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x03, 0x48, 0x06, 0x52, 0x1b, 0x69, 0x66, 0x53, 0x6f,
- 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x4f, 0x0a, 0x22, 0x69, 0x66,
- 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65,
- 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68,
- 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x48, 0x07, 0x52, 0x1e, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72,
- 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3e, 0x0a, 0x1c, 0x6d,
- 0x61, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74,
- 0x65, 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x18, 0x0f, 0x20, 0x01, 0x28,
- 0x03, 0x52, 0x18, 0x6d, 0x61, 0x78, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x77, 0x72, 0x69,
- 0x74, 0x74, 0x65, 0x6e, 0x50, 0x65, 0x72, 0x43, 0x61, 0x6c, 0x6c, 0x12, 0x47, 0x0a, 0x20, 0x63,
- 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18,
- 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1d, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63,
- 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72,
- 0x69, 0x74, 0x68, 0x6d, 0x12, 0x46, 0x0a, 0x20, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75,
- 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b,
- 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x1c,
- 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x53, 0x0a, 0x27,
- 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72,
- 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35,
- 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x22, 0x63,
- 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65,
- 0x73, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65,
- 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d,
- 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d,
- 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50,
- 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a,
- 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73,
- 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b,
- 0x73, 0x75, 0x6d, 0x73, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f,
- 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f,
- 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x42,
+ 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42,
0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67,
0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61,
0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67,
0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42,
0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42,
- 0x1d, 0x0a, 0x1b, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65,
- 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x21,
- 0x0a, 0x1f, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e,
- 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63,
- 0x68, 0x42, 0x21, 0x0a, 0x1f, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f,
- 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d,
- 0x61, 0x74, 0x63, 0x68, 0x42, 0x25, 0x0a, 0x23, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72,
- 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xd6, 0x01, 0x0a, 0x0f,
- 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
- 0x32, 0x0a, 0x15, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72,
- 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x13,
- 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74,
- 0x74, 0x65, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69,
- 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74,
- 0x53, 0x69, 0x7a, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01,
- 0x28, 0x08, 0x52, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x77, 0x72,
- 0x69, 0x74, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x0c, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x35, 0x0a,
- 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
- 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f,
- 0x75, 0x72, 0x63, 0x65, 0x22, 0xaf, 0x02, 0x0a, 0x1a, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65,
- 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x12, 0x53, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a,
- 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
- 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70,
- 0x65, 0x63, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62,
- 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d,
- 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22,
+ 0x69, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63,
+ 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70,
+ 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41,
+ 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63,
+ 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x35, 0x0a, 0x0e, 0x53, 0x65,
+ 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d,
+ 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73,
+ 0x73, 0x22, 0x9e, 0x01, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63,
+ 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72,
+ 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02,
+ 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
+ 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x37, 0x0a, 0x15, 0x73, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61,
+ 0x69, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x13, 0x73,
+ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61,
+ 0x69, 0x6c, 0x22, 0x81, 0x01, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61,
+ 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x08,
+ 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22,
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
- 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f,
- 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63,
- 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
- 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63,
- 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65,
- 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x22, 0x3a, 0x0a, 0x1b, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52,
- 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f,
- 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64,
- 0x49, 0x64, 0x22, 0x87, 0x05, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a,
- 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x06, 0x6f, 0x62,
- 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f,
- 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65,
- 0x63, 0x74, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48,
- 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d,
- 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65,
- 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74,
- 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65,
- 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68,
- 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65,
- 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04,
- 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65,
- 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01,
- 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18,
- 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67,
- 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63,
- 0x68, 0x88, 0x01, 0x01, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e,
- 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72,
- 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x40, 0x0a, 0x0b, 0x75,
- 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41,
- 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x6d, 0x0a,
- 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
- 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62,
- 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d,
- 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14,
- 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d,
- 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65,
- 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68,
- 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65,
- 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c,
- 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x69, 0x0a, 0x18,
- 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e,
- 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a,
- 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41,
+ 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61,
+ 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x28, 0x0a, 0x10,
+ 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65,
+ 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x87, 0x01, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74,
+ 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x20, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49,
+ 0x64, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75,
+ 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
+ 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
+ 0x22, 0x84, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73,
+ 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08,
+ 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a,
+ 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41,
0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07,
- 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x9e, 0x01, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61,
- 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64,
+ 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x80, 0x02, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74,
+ 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72,
+ 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72,
+ 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1b,
+ 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70,
+ 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x32, 0x0a, 0x15, 0x73, 0x65,
+ 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d,
+ 0x61, 0x69, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x2a,
+ 0x0a, 0x11, 0x73, 0x68, 0x6f, 0x77, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6b,
+ 0x65, 0x79, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x73, 0x68, 0x6f, 0x77, 0x44,
+ 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x22, 0x7f, 0x0a, 0x14, 0x4c, 0x69,
+ 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x09, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18,
+ 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
+ 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65,
+ 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x68, 0x6d, 0x61, 0x63, 0x4b,
+ 0x65, 0x79, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65,
+ 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65,
+ 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x97, 0x01, 0x0a, 0x14,
+ 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x08, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b,
+ 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52,
+ 0x07, 0x68, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61,
+ 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74,
+ 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0x88, 0x03, 0x0a, 0x0f, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65,
+ 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x13, 0x0a, 0x02, 0x69, 0x64, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x02, 0x69, 0x64, 0x12, 0x20,
+ 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64,
+ 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x09, 0x42, 0x33, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64,
0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50,
0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12,
0x37, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75,
- 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03,
- 0xe0, 0x41, 0x02, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f,
- 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x22, 0x81, 0x01, 0x0a, 0x15, 0x43, 0x72, 0x65,
- 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
- 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79,
- 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61,
- 0x74, 0x61, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79,
- 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x73, 0x65,
- 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x87, 0x01, 0x0a,
- 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f,
- 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x61,
- 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65,
- 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d,
- 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d,
- 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
- 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70,
- 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x84, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x48, 0x6d,
- 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09,
- 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42,
- 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, 0x12, 0x4d,
- 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42,
- 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65,
- 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f,
- 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x80, 0x02,
- 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b,
- 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e,
- 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
- 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f,
- 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a,
- 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a,
- 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e,
- 0x12, 0x32, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f,
- 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45,
- 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x68, 0x6f, 0x77, 0x5f, 0x64, 0x65, 0x6c,
- 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52,
- 0x0f, 0x73, 0x68, 0x6f, 0x77, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73,
- 0x22, 0x7f, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x09, 0x68, 0x6d, 0x61, 0x63,
- 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e,
- 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52,
- 0x08, 0x68, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78,
- 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65,
- 0x6e, 0x22, 0x97, 0x01, 0x0a, 0x14, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63,
- 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x08, 0x68, 0x6d,
- 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
- 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61,
- 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x07, 0x68, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x3b,
- 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x03, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52,
- 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0xbf, 0x01, 0x0a, 0x19,
- 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x31, 0x0a, 0x14, 0x65, 0x6e, 0x63,
- 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68,
- 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x30, 0x0a, 0x14,
- 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62,
- 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, 0x65, 0x6e, 0x63, 0x72,
- 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x3d,
- 0x0a, 0x1b, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79,
- 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20,
- 0x01, 0x28, 0x0c, 0x52, 0x18, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b,
- 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xca, 0x05,
- 0x0a, 0x10, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e,
- 0x74, 0x73, 0x22, 0xb5, 0x05, 0x0a, 0x06, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x16, 0x0a,
- 0x12, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46,
- 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x14, 0x4d, 0x41, 0x58, 0x5f, 0x52, 0x45, 0x41,
- 0x44, 0x5f, 0x43, 0x48, 0x55, 0x4e, 0x4b, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x80,
- 0x80, 0x01, 0x12, 0x1c, 0x0a, 0x15, 0x4d, 0x41, 0x58, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f,
- 0x43, 0x48, 0x55, 0x4e, 0x4b, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x80, 0x80, 0x01,
- 0x12, 0x19, 0x0a, 0x12, 0x4d, 0x41, 0x58, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x53,
- 0x49, 0x5a, 0x45, 0x5f, 0x4d, 0x42, 0x10, 0x80, 0x80, 0xc0, 0x02, 0x12, 0x29, 0x0a, 0x24, 0x4d,
+ 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03,
+ 0xe0, 0x41, 0x03, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f,
+ 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x19, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74,
+ 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x05, 0x73, 0x74,
+ 0x61, 0x74, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69,
+ 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73,
+ 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74,
+ 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f,
+ 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
+ 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64,
+ 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x17, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18,
+ 0x08, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67,
+ 0x22, 0xbf, 0x01, 0x0a, 0x19, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x31,
+ 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67,
+ 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x65, 0x6e,
+ 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68,
+ 0x6d, 0x12, 0x30, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
+ 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52,
+ 0x12, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x42, 0x79,
+ 0x74, 0x65, 0x73, 0x12, 0x3d, 0x0a, 0x1b, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74,
+ 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x18, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74,
+ 0x65, 0x73, 0x22, 0xca, 0x05, 0x0a, 0x10, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f,
+ 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x73, 0x22, 0xb5, 0x05, 0x0a, 0x06, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x73, 0x12, 0x16, 0x0a, 0x12, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x53, 0x5f, 0x55, 0x4e, 0x53,
+ 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x14, 0x4d, 0x41,
+ 0x58, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x5f, 0x43, 0x48, 0x55, 0x4e, 0x4b, 0x5f, 0x42, 0x59, 0x54,
+ 0x45, 0x53, 0x10, 0x80, 0x80, 0x80, 0x01, 0x12, 0x1c, 0x0a, 0x15, 0x4d, 0x41, 0x58, 0x5f, 0x57,
+ 0x52, 0x49, 0x54, 0x45, 0x5f, 0x43, 0x48, 0x55, 0x4e, 0x4b, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53,
+ 0x10, 0x80, 0x80, 0x80, 0x01, 0x12, 0x19, 0x0a, 0x12, 0x4d, 0x41, 0x58, 0x5f, 0x4f, 0x42, 0x4a,
+ 0x45, 0x43, 0x54, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x4d, 0x42, 0x10, 0x80, 0x80, 0xc0, 0x02,
+ 0x12, 0x29, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d,
+ 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x4e, 0x41,
+ 0x4d, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x08, 0x12, 0x2a, 0x0a, 0x25, 0x4d,
0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41,
- 0x54, 0x41, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x5f, 0x42, 0x59,
- 0x54, 0x45, 0x53, 0x10, 0x80, 0x08, 0x12, 0x2a, 0x0a, 0x25, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55,
- 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x46, 0x49,
- 0x45, 0x4c, 0x44, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10,
- 0x80, 0x20, 0x12, 0x29, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d,
+ 0x54, 0x41, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x42,
+ 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x20, 0x12, 0x29, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x43,
+ 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x54,
+ 0x4f, 0x54, 0x41, 0x4c, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10,
+ 0x80, 0x40, 0x12, 0x2a, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54,
0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x54, 0x4f, 0x54, 0x41, 0x4c, 0x5f,
- 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x40, 0x12, 0x2a, 0x0a,
- 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x5f, 0x4d, 0x45, 0x54, 0x41,
- 0x44, 0x41, 0x54, 0x41, 0x5f, 0x54, 0x4f, 0x54, 0x41, 0x4c, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f,
- 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0xa0, 0x01, 0x12, 0x27, 0x0a, 0x23, 0x4d, 0x41, 0x58,
- 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x4f,
- 0x4e, 0x46, 0x49, 0x47, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54,
- 0x10, 0x64, 0x12, 0x22, 0x0a, 0x1e, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x49, 0x46, 0x45, 0x43, 0x59,
- 0x43, 0x4c, 0x45, 0x5f, 0x52, 0x55, 0x4c, 0x45, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x42, 0x55,
- 0x43, 0x4b, 0x45, 0x54, 0x10, 0x64, 0x12, 0x26, 0x0a, 0x22, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f,
+ 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0xa0, 0x01, 0x12, 0x27,
+ 0x0a, 0x23, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49,
+ 0x4f, 0x4e, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x42,
+ 0x55, 0x43, 0x4b, 0x45, 0x54, 0x10, 0x64, 0x12, 0x22, 0x0a, 0x1e, 0x4d, 0x41, 0x58, 0x5f, 0x4c,
+ 0x49, 0x46, 0x45, 0x43, 0x59, 0x43, 0x4c, 0x45, 0x5f, 0x52, 0x55, 0x4c, 0x45, 0x53, 0x5f, 0x50,
+ 0x45, 0x52, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x10, 0x64, 0x12, 0x26, 0x0a, 0x22, 0x4d,
+ 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f,
+ 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45,
+ 0x53, 0x10, 0x05, 0x12, 0x31, 0x0a, 0x2c, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46,
+ 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41,
+ 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x4c, 0x45, 0x4e,
+ 0x47, 0x54, 0x48, 0x10, 0x80, 0x02, 0x12, 0x33, 0x0a, 0x2e, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f,
0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f,
- 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x53, 0x10, 0x05, 0x12, 0x31,
- 0x0a, 0x2c, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49,
- 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42,
- 0x55, 0x54, 0x45, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x80,
- 0x02, 0x12, 0x33, 0x0a, 0x2e, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43,
- 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54,
- 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x4c, 0x45, 0x4e,
- 0x47, 0x54, 0x48, 0x10, 0x80, 0x08, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41,
- 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x49, 0x45, 0x53, 0x5f, 0x43, 0x4f, 0x55,
- 0x4e, 0x54, 0x10, 0x40, 0x12, 0x1f, 0x0a, 0x1b, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45,
- 0x4c, 0x53, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x4c, 0x45, 0x4e,
- 0x47, 0x54, 0x48, 0x10, 0x3f, 0x12, 0x1f, 0x0a, 0x1a, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42,
- 0x45, 0x4c, 0x53, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x42, 0x59,
- 0x54, 0x45, 0x53, 0x10, 0x80, 0x01, 0x12, 0x2e, 0x0a, 0x29, 0x4d, 0x41, 0x58, 0x5f, 0x4f, 0x42,
- 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x49, 0x44, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x44, 0x45, 0x4c,
- 0x45, 0x54, 0x45, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x53, 0x5f, 0x52, 0x45, 0x51, 0x55,
- 0x45, 0x53, 0x54, 0x10, 0xe8, 0x07, 0x12, 0x1e, 0x0a, 0x1a, 0x53, 0x50, 0x4c, 0x49, 0x54, 0x5f,
- 0x54, 0x4f, 0x4b, 0x45, 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f,
- 0x44, 0x41, 0x59, 0x53, 0x10, 0x0e, 0x1a, 0x02, 0x10, 0x01, 0x22, 0xf5, 0x23, 0x0a, 0x06, 0x42,
- 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20,
- 0x0a, 0x09, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x08, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x49, 0x64,
- 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
- 0x65, 0x74, 0x61, 0x67, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63,
- 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61,
- 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
- 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a,
- 0x65, 0x63, 0x74, 0x12, 0x2b, 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03,
- 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x12, 0x1f, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01,
- 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x12, 0x28, 0x0a, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79,
- 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0c, 0x6c,
- 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73,
- 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x07, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73,
- 0x12, 0x10, 0x0a, 0x03, 0x72, 0x70, 0x6f, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72,
- 0x70, 0x6f, 0x12, 0x38, 0x0a, 0x03, 0x61, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32,
- 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
- 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73,
- 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x03, 0x61, 0x63, 0x6c, 0x12, 0x54, 0x0a, 0x12,
- 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61,
- 0x63, 0x6c, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a,
- 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
- 0x52, 0x10, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41,
- 0x63, 0x6c, 0x12, 0x41, 0x0a, 0x09, 0x6c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x18,
- 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
- 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74,
- 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x52, 0x09, 0x6c, 0x69, 0x66, 0x65,
- 0x63, 0x79, 0x63, 0x6c, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f,
- 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
- 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65,
- 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x32, 0x0a, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x18,
- 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
- 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74,
- 0x2e, 0x43, 0x6f, 0x72, 0x73, 0x52, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x75,
- 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41,
- 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x37, 0x0a,
- 0x18, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62,
- 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52,
- 0x15, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x73,
- 0x65, 0x64, 0x48, 0x6f, 0x6c, 0x64, 0x12, 0x3d, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73,
- 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x5f, 0x56, 0x41, 0x4c, 0x55,
+ 0x45, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x80, 0x08, 0x12, 0x1c, 0x0a, 0x18, 0x4d,
+ 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x49, 0x45,
+ 0x53, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x10, 0x40, 0x12, 0x1f, 0x0a, 0x1b, 0x4d, 0x41, 0x58,
+ 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x41, 0x4c, 0x55,
+ 0x45, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x3f, 0x12, 0x1f, 0x0a, 0x1a, 0x4d, 0x41,
+ 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x41, 0x4c,
+ 0x55, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x01, 0x12, 0x2e, 0x0a, 0x29, 0x4d,
+ 0x41, 0x58, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x49, 0x44, 0x53, 0x5f, 0x50, 0x45,
+ 0x52, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x53,
+ 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x10, 0xe8, 0x07, 0x12, 0x1e, 0x0a, 0x1a, 0x53,
+ 0x50, 0x4c, 0x49, 0x54, 0x5f, 0x54, 0x4f, 0x4b, 0x45, 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x5f, 0x56,
+ 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x44, 0x41, 0x59, 0x53, 0x10, 0x0e, 0x1a, 0x02, 0x10, 0x01, 0x22,
+ 0xf5, 0x23, 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x09, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x08, 0x62, 0x75, 0x63,
+ 0x6b, 0x65, 0x74, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x1d, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f,
+ 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x05, 0xfa,
+ 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
+ 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52,
+ 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x2b, 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61,
+ 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03,
+ 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x08, 0x6c, 0x6f,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0,
+ 0x41, 0x03, 0x52, 0x0c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65,
+ 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73,
+ 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
+ 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x70, 0x6f, 0x18, 0x1b, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x03, 0x72, 0x70, 0x6f, 0x12, 0x38, 0x0a, 0x03, 0x61, 0x63, 0x6c, 0x18, 0x08,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
+ 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x41,
+ 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x03, 0x61, 0x63,
+ 0x6c, 0x12, 0x54, 0x0a, 0x12, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76,
+ 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f,
+ 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x10, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x6c, 0x12, 0x41, 0x0a, 0x09, 0x6c, 0x69, 0x66, 0x65, 0x63,
+ 0x79, 0x63, 0x6c, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42,
+ 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x52,
+ 0x09, 0x6c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72,
+ 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03,
+ 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x32, 0x0a, 0x04,
+ 0x63, 0x6f, 0x72, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42,
+ 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x43, 0x6f, 0x72, 0x73, 0x52, 0x04, 0x63, 0x6f, 0x72, 0x73,
+ 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18,
+ 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
+ 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69,
+ 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x18, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x65, 0x76,
+ 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x0e,
+ 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x45, 0x76, 0x65,
+ 0x6e, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x48, 0x6f, 0x6c, 0x64, 0x12, 0x3d, 0x0a, 0x06, 0x6c,
+ 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e,
+ 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74,
+ 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x3b, 0x0a, 0x07, 0x77, 0x65,
+ 0x62, 0x73, 0x69, 0x74, 0x65, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e,
+ 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x57, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x52, 0x07,
+ 0x77, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69,
+ 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e,
+ 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e,
+ 0x67, 0x52, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x3b, 0x0a,
+ 0x07, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
+ 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e,
+ 0x67, 0x52, 0x07, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x33, 0x0a, 0x05, 0x6f, 0x77,
+ 0x6e, 0x65, 0x72, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x77,
+ 0x6e, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12,
+ 0x44, 0x0a, 0x0a, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x14, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
+ 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x45,
+ 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x65, 0x6e, 0x63, 0x72, 0x79,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x07, 0x62, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67,
+ 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65,
- 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c,
- 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x3b, 0x0a, 0x07, 0x77, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65,
- 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x74, 0x2e, 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x62, 0x69, 0x6c, 0x6c, 0x69,
+ 0x6e, 0x67, 0x12, 0x54, 0x0a, 0x10, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
+ 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
+ 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f,
+ 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69,
+ 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x42, 0x0a, 0x0a, 0x69, 0x61, 0x6d, 0x5f,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
+ 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x49, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x52, 0x09, 0x69, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x23, 0x0a, 0x0d,
+ 0x73, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x73, 0x5f, 0x70, 0x7a, 0x73, 0x18, 0x19, 0x20,
+ 0x01, 0x28, 0x08, 0x52, 0x0c, 0x73, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x73, 0x50, 0x7a,
+ 0x73, 0x12, 0x67, 0x0a, 0x17, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x70, 0x6c, 0x61, 0x63,
+ 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x1a, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x43, 0x75,
+ 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x52, 0x15, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65,
+ 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x41, 0x0a, 0x09, 0x61, 0x75,
+ 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76,
+ 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61,
+ 0x73, 0x73, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x6b, 0x0a,
+ 0x16, 0x68, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x6e, 0x61,
+ 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x20, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76,
+ 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x48, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63,
+ 0x68, 0x69, 0x63, 0x61, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x42, 0x03,
+ 0xe0, 0x41, 0x01, 0x52, 0x15, 0x68, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61,
+ 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x5d, 0x0a, 0x12, 0x73, 0x6f,
+ 0x66, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79,
+ 0x18, 0x1f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65,
- 0x74, 0x2e, 0x57, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x52, 0x07, 0x77, 0x65, 0x62, 0x73, 0x69,
- 0x74, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67,
- 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x74, 0x2e, 0x53, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69,
+ 0x63, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c,
+ 0x65, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x30, 0x0a, 0x07, 0x42, 0x69, 0x6c,
+ 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65,
+ 0x72, 0x5f, 0x70, 0x61, 0x79, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x72, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x72, 0x50, 0x61, 0x79, 0x73, 0x1a, 0x87, 0x01, 0x0a, 0x04,
+ 0x43, 0x6f, 0x72, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x18, 0x01,
+ 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x12, 0x16, 0x0a, 0x06,
+ 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65,
+ 0x74, 0x68, 0x6f, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x72,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x26, 0x0a,
+ 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x41, 0x67, 0x65, 0x53, 0x65,
+ 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x1a, 0x5c, 0x0a, 0x0a, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x0f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6b,
+ 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41,
+ 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74,
+ 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4b, 0x6d, 0x73,
+ 0x4b, 0x65, 0x79, 0x1a, 0xb1, 0x02, 0x0a, 0x09, 0x49, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x12, 0x7b, 0x0a, 0x1b, 0x75, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x5f, 0x62, 0x75, 0x63,
+ 0x6b, 0x65, 0x74, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65,
- 0x74, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0a, 0x76, 0x65,
- 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x3b, 0x0a, 0x07, 0x6c, 0x6f, 0x67, 0x67,
- 0x69, 0x6e, 0x67, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x6c, 0x6f,
- 0x67, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x33, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x13,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
- 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x03,
- 0xe0, 0x41, 0x03, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x44, 0x0a, 0x0a, 0x65, 0x6e,
- 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
- 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x12, 0x3b, 0x0a, 0x07, 0x62, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x15, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
- 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x42, 0x69, 0x6c,
- 0x6c, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x62, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x54, 0x0a,
- 0x10, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63,
- 0x79, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b,
- 0x65, 0x74, 0x2e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69,
- 0x63, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c,
- 0x69, 0x63, 0x79, 0x12, 0x42, 0x0a, 0x0a, 0x69, 0x61, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69,
- 0x67, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b,
- 0x65, 0x74, 0x2e, 0x49, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, 0x69, 0x61,
- 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x61, 0x74, 0x69, 0x73,
- 0x66, 0x69, 0x65, 0x73, 0x5f, 0x70, 0x7a, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c,
- 0x73, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x73, 0x50, 0x7a, 0x73, 0x12, 0x67, 0x0a, 0x17,
- 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74,
- 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76,
- 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50,
- 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15,
- 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43,
- 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x41, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61,
- 0x73, 0x73, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63,
- 0x6b, 0x65, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x52, 0x09, 0x61,
- 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x6b, 0x0a, 0x16, 0x68, 0x69, 0x65, 0x72,
- 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61,
- 0x63, 0x65, 0x18, 0x20, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63,
- 0x6b, 0x65, 0x74, 0x2e, 0x48, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c,
- 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x15,
- 0x68, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, 0x4e, 0x61, 0x6d, 0x65,
- 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x5d, 0x0a, 0x12, 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64, 0x65,
- 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x1f, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
- 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x53, 0x6f, 0x66,
- 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0,
- 0x41, 0x01, 0x52, 0x10, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x6f,
- 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x30, 0x0a, 0x07, 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x12,
- 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x61, 0x79,
- 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x65, 0x72, 0x50, 0x61, 0x79, 0x73, 0x1a, 0x87, 0x01, 0x0a, 0x04, 0x43, 0x6f, 0x72, 0x73, 0x12,
- 0x16, 0x0a, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52,
- 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f,
- 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12,
- 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64,
- 0x65, 0x72, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x26, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f,
- 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28,
- 0x05, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x41, 0x67, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73,
- 0x1a, 0x5c, 0x0a, 0x0a, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e,
- 0x0a, 0x0f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65,
- 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c,
- 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
- 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52,
- 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x1a, 0xb1,
- 0x02, 0x0a, 0x09, 0x49, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x7b, 0x0a, 0x1b,
- 0x75, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6c,
- 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
- 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x49, 0x61, 0x6d,
- 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x55, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52,
- 0x18, 0x75, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65,
- 0x76, 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x70, 0x75, 0x62,
- 0x6c, 0x69, 0x63, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x76, 0x65,
- 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x70, 0x75, 0x62,
- 0x6c, 0x69, 0x63, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x50, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74,
- 0x69, 0x6f, 0x6e, 0x1a, 0x6d, 0x0a, 0x18, 0x55, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12,
- 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08,
- 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x09, 0x6c, 0x6f, 0x63,
- 0x6b, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54,
- 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x69,
- 0x6d, 0x65, 0x1a, 0xdb, 0x07, 0x0a, 0x09, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65,
- 0x12, 0x3c, 0x0a, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
- 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79,
- 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x1a, 0x8f,
- 0x07, 0x0a, 0x04, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x47, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f,
- 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b,
- 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c,
- 0x65, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e,
- 0x12, 0x50, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
+ 0x74, 0x2e, 0x49, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x55, 0x6e, 0x69, 0x66,
+ 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x41, 0x63,
+ 0x63, 0x65, 0x73, 0x73, 0x52, 0x18, 0x75, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63,
+ 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x38,
+ 0x0a, 0x18, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f,
+ 0x70, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x16, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x50, 0x72,
+ 0x65, 0x76, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x6d, 0x0a, 0x18, 0x55, 0x6e, 0x69, 0x66,
+ 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x41, 0x63,
+ 0x63, 0x65, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x37,
+ 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x6c,
+ 0x6f, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x1a, 0xdb, 0x07, 0x0a, 0x09, 0x4c, 0x69, 0x66, 0x65,
+ 0x63, 0x79, 0x63, 0x6c, 0x65, 0x12, 0x3c, 0x0a, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x18, 0x01, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c,
- 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x43, 0x6f,
- 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69,
- 0x6f, 0x6e, 0x1a, 0x41, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04,
- 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65,
- 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73,
- 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
- 0x43, 0x6c, 0x61, 0x73, 0x73, 0x1a, 0xa8, 0x05, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74,
- 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x08, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x07, 0x61, 0x67, 0x65, 0x44, 0x61, 0x79, 0x73,
- 0x88, 0x01, 0x01, 0x12, 0x38, 0x0a, 0x0e, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x62,
- 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, 0x52, 0x0d,
- 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x1c, 0x0a,
- 0x07, 0x69, 0x73, 0x5f, 0x6c, 0x69, 0x76, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x48, 0x01,
- 0x52, 0x06, 0x69, 0x73, 0x4c, 0x69, 0x76, 0x65, 0x88, 0x01, 0x01, 0x12, 0x31, 0x0a, 0x12, 0x6e,
- 0x75, 0x6d, 0x5f, 0x6e, 0x65, 0x77, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
- 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x48, 0x02, 0x52, 0x10, 0x6e, 0x75, 0x6d, 0x4e, 0x65,
- 0x77, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x88, 0x01, 0x01, 0x12, 0x32,
- 0x0a, 0x15, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
- 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x6d,
- 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61,
- 0x73, 0x73, 0x12, 0x38, 0x0a, 0x16, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65,
- 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01,
- 0x28, 0x05, 0x48, 0x03, 0x52, 0x13, 0x64, 0x61, 0x79, 0x73, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x43,
- 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x3f, 0x0a, 0x12,
- 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x62, 0x65, 0x66, 0x6f,
- 0x72, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, 0x52, 0x10, 0x63, 0x75, 0x73,
- 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x40, 0x0a,
- 0x1a, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x6f, 0x6e, 0x63,
- 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28,
- 0x05, 0x48, 0x04, 0x52, 0x17, 0x64, 0x61, 0x79, 0x73, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x6f,
- 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12,
- 0x47, 0x0a, 0x16, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69,
- 0x6d, 0x65, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61,
- 0x74, 0x65, 0x52, 0x14, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x69,
- 0x6d, 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63,
- 0x68, 0x65, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x09,
- 0x52, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12,
- 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69,
- 0x78, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73,
- 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x61, 0x67, 0x65, 0x5f, 0x64,
- 0x61, 0x79, 0x73, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x69, 0x73, 0x5f, 0x6c, 0x69, 0x76, 0x65, 0x42,
- 0x15, 0x0a, 0x13, 0x5f, 0x6e, 0x75, 0x6d, 0x5f, 0x6e, 0x65, 0x77, 0x65, 0x72, 0x5f, 0x76, 0x65,
- 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x19, 0x0a, 0x17, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x5f,
+ 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x04, 0x72,
+ 0x75, 0x6c, 0x65, 0x1a, 0x8f, 0x07, 0x0a, 0x04, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x47, 0x0a, 0x06,
+ 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
+ 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c,
+ 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x50, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69,
+ 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63,
+ 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75,
+ 0x6c, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x63, 0x6f,
+ 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x41, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
+ 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74,
+ 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x1a, 0xa8, 0x05, 0x0a, 0x09, 0x43,
+ 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x08, 0x61, 0x67, 0x65, 0x5f,
+ 0x64, 0x61, 0x79, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x07, 0x61, 0x67,
+ 0x65, 0x44, 0x61, 0x79, 0x73, 0x88, 0x01, 0x01, 0x12, 0x38, 0x0a, 0x0e, 0x63, 0x72, 0x65, 0x61,
+ 0x74, 0x65, 0x64, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44,
+ 0x61, 0x74, 0x65, 0x52, 0x0d, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x42, 0x65, 0x66, 0x6f,
+ 0x72, 0x65, 0x12, 0x1c, 0x0a, 0x07, 0x69, 0x73, 0x5f, 0x6c, 0x69, 0x76, 0x65, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x08, 0x48, 0x01, 0x52, 0x06, 0x69, 0x73, 0x4c, 0x69, 0x76, 0x65, 0x88, 0x01, 0x01,
+ 0x12, 0x31, 0x0a, 0x12, 0x6e, 0x75, 0x6d, 0x5f, 0x6e, 0x65, 0x77, 0x65, 0x72, 0x5f, 0x76, 0x65,
+ 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x48, 0x02, 0x52, 0x10,
+ 0x6e, 0x75, 0x6d, 0x4e, 0x65, 0x77, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73,
+ 0x88, 0x01, 0x01, 0x12, 0x32, 0x0a, 0x15, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x5f, 0x73,
+ 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x05, 0x20, 0x03,
+ 0x28, 0x09, 0x52, 0x13, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x53, 0x74, 0x6f, 0x72, 0x61,
+ 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x38, 0x0a, 0x16, 0x64, 0x61, 0x79, 0x73, 0x5f,
0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d,
- 0x65, 0x42, 0x1d, 0x0a, 0x1b, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65,
+ 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x48, 0x03, 0x52, 0x13, 0x64, 0x61, 0x79, 0x73, 0x53,
+ 0x69, 0x6e, 0x63, 0x65, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01,
+ 0x01, 0x12, 0x3f, 0x0a, 0x12, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65,
+ 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65,
+ 0x52, 0x10, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x65, 0x66, 0x6f,
+ 0x72, 0x65, 0x12, 0x40, 0x0a, 0x1a, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65,
0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65,
- 0x1a, 0x54, 0x0a, 0x07, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x6c,
- 0x6f, 0x67, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x09, 0x6c, 0x6f, 0x67, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x2a, 0x0a, 0x11, 0x6c, 0x6f,
- 0x67, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x6f, 0x67, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
- 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x1a, 0xbb, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x74, 0x65, 0x6e,
- 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x41, 0x0a, 0x0e, 0x65, 0x66,
- 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d,
- 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a,
- 0x09, 0x69, 0x73, 0x5f, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08,
- 0x52, 0x08, 0x69, 0x73, 0x4c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x48, 0x0a, 0x12, 0x72, 0x65,
- 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x52, 0x11, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xd3, 0x01, 0x0a, 0x10, 0x53, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c,
- 0x65, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4d, 0x0a, 0x12, 0x72, 0x65, 0x74,
- 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x48, 0x00, 0x52, 0x11, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x46, 0x0a, 0x0e, 0x65, 0x66, 0x66, 0x65,
- 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x48, 0x01, 0x52, 0x0d,
- 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01,
- 0x42, 0x15, 0x0a, 0x13, 0x5f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64,
- 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x65, 0x66, 0x66, 0x65,
- 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, 0x26, 0x0a, 0x0a, 0x56, 0x65,
- 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62,
- 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c,
- 0x65, 0x64, 0x1a, 0x59, 0x0a, 0x07, 0x57, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x12, 0x28, 0x0a,
- 0x10, 0x6d, 0x61, 0x69, 0x6e, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69,
- 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6d, 0x61, 0x69, 0x6e, 0x50, 0x61, 0x67,
- 0x65, 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x12, 0x24, 0x0a, 0x0e, 0x6e, 0x6f, 0x74, 0x5f, 0x66,
- 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x0c, 0x6e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x50, 0x61, 0x67, 0x65, 0x1a, 0x3e, 0x0a,
- 0x15, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74,
- 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6c,
- 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d,
- 0x64, 0x61, 0x74, 0x61, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xd6, 0x02,
- 0x0a, 0x09, 0x41, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65,
- 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e,
- 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x40, 0x0a, 0x0b, 0x74, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x5f,
- 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
- 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x74, 0x6f, 0x67,
- 0x67, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x16, 0x74, 0x65, 0x72, 0x6d, 0x69,
+ 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x48, 0x04, 0x52, 0x17, 0x64, 0x61, 0x79, 0x73, 0x53, 0x69,
+ 0x6e, 0x63, 0x65, 0x4e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d,
+ 0x65, 0x88, 0x01, 0x01, 0x12, 0x47, 0x0a, 0x16, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65,
+ 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x0a,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79,
+ 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, 0x52, 0x14, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72,
+ 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x25, 0x0a,
+ 0x0e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18,
+ 0x0b, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x50, 0x72,
+ 0x65, 0x66, 0x69, 0x78, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x5f,
+ 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x6d, 0x61,
+ 0x74, 0x63, 0x68, 0x65, 0x73, 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x42, 0x0b, 0x0a, 0x09, 0x5f,
+ 0x61, 0x67, 0x65, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x69, 0x73, 0x5f,
+ 0x6c, 0x69, 0x76, 0x65, 0x42, 0x15, 0x0a, 0x13, 0x5f, 0x6e, 0x75, 0x6d, 0x5f, 0x6e, 0x65, 0x77,
+ 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x19, 0x0a, 0x17, 0x5f,
+ 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f,
+ 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x42, 0x1d, 0x0a, 0x1b, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x5f,
+ 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74,
+ 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, 0x54, 0x0a, 0x07, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67,
+ 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x6f, 0x67, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6c, 0x6f, 0x67, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12,
+ 0x2a, 0x0a, 0x11, 0x6c, 0x6f, 0x67, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x70, 0x72,
+ 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x6f, 0x67, 0x4f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x1a, 0xbb, 0x01, 0x0a, 0x0f,
+ 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12,
+ 0x41, 0x0a, 0x0e, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74,
+ 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x54, 0x69,
+ 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x69, 0x73, 0x5f, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x69, 0x73, 0x4c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x12,
+ 0x48, 0x0a, 0x12, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f,
+ 0x6e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xd3, 0x01, 0x0a, 0x10, 0x53, 0x6f,
+ 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4d,
+ 0x0a, 0x12, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x11, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69,
+ 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x46, 0x0a,
+ 0x0e, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
+ 0x70, 0x48, 0x01, 0x52, 0x0d, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x54, 0x69,
+ 0x6d, 0x65, 0x88, 0x01, 0x01, 0x42, 0x15, 0x0a, 0x13, 0x5f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x11, 0x0a, 0x0f,
+ 0x5f, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a,
+ 0x26, 0x0a, 0x0a, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x18, 0x0a,
+ 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07,
+ 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x1a, 0x59, 0x0a, 0x07, 0x57, 0x65, 0x62, 0x73, 0x69,
+ 0x74, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x6d, 0x61, 0x69, 0x6e, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f,
+ 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6d, 0x61,
+ 0x69, 0x6e, 0x50, 0x61, 0x67, 0x65, 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x12, 0x24, 0x0a, 0x0e,
+ 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x50, 0x61,
+ 0x67, 0x65, 0x1a, 0x3e, 0x0a, 0x15, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63,
+ 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x25, 0x0a, 0x0e, 0x64,
+ 0x61, 0x74, 0x61, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20,
+ 0x03, 0x28, 0x09, 0x52, 0x0d, 0x64, 0x61, 0x74, 0x61, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x1a, 0xd6, 0x02, 0x0a, 0x09, 0x41, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73,
+ 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x40, 0x0a, 0x0b, 0x74, 0x6f,
+ 0x67, 0x67, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03,
+ 0x52, 0x0a, 0x74, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x16,
+ 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
+ 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x14,
+ 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43,
+ 0x6c, 0x61, 0x73, 0x73, 0x88, 0x01, 0x01, 0x12, 0x70, 0x0a, 0x22, 0x74, 0x65, 0x72, 0x6d, 0x69,
0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73,
- 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x14, 0x74, 0x65, 0x72, 0x6d, 0x69,
- 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x88,
- 0x01, 0x01, 0x12, 0x70, 0x0a, 0x22, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x73,
- 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x75, 0x70, 0x64,
- 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x48,
- 0x01, 0x52, 0x1e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61,
- 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d,
- 0x65, 0x88, 0x01, 0x01, 0x42, 0x19, 0x0a, 0x17, 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61,
- 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x42,
- 0x25, 0x0a, 0x23, 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f,
- 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74,
- 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, 0x36, 0x0a, 0x15, 0x48, 0x69, 0x65, 0x72, 0x61, 0x72,
- 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12,
- 0x1d, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08,
- 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x1a, 0x39,
- 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
- 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
- 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05,
- 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x47, 0xea, 0x41, 0x44, 0x0a, 0x1d,
- 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
- 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, 0x70,
- 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
- 0x7d, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65,
- 0x74, 0x7d, 0x22, 0x97, 0x02, 0x0a, 0x13, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x63, 0x63,
- 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f,
- 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x0e,
- 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16,
- 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06,
- 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x22, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79,
- 0x5f, 0x61, 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52,
- 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x41, 0x6c, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e,
- 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65,
- 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18,
- 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x65,
- 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69,
- 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x41, 0x0a, 0x0c, 0x70, 0x72, 0x6f,
- 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
- 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x52,
- 0x0b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x22, 0x5a, 0x0a, 0x0f,
- 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12,
- 0x1f, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c,
- 0x42, 0x05, 0xe0, 0x41, 0x01, 0x08, 0x01, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74,
- 0x12, 0x1b, 0x0a, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x07,
- 0x48, 0x00, 0x52, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a,
- 0x07, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0x54, 0x0a, 0x0f, 0x4f, 0x62, 0x6a, 0x65,
- 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x06, 0x63,
- 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x07, 0x48, 0x00, 0x52, 0x06, 0x63,
- 0x72, 0x63, 0x33, 0x32, 0x63, 0x88, 0x01, 0x01, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x64, 0x35, 0x5f,
- 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6d, 0x64, 0x35, 0x48,
- 0x61, 0x73, 0x68, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0xfe,
- 0x02, 0x0a, 0x0f, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61,
- 0x74, 0x61, 0x12, 0x13, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03,
- 0xe0, 0x41, 0x05, 0x52, 0x02, 0x69, 0x64, 0x12, 0x20, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73,
- 0x73, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52,
- 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f,
- 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x05, 0xfa,
- 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
- 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
- 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52,
- 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x37, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76,
- 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69,
- 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x13, 0x73, 0x65,
- 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69,
- 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74,
- 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54,
- 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63,
- 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64,
- 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52,
- 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x65,
- 0x74, 0x61, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x22,
- 0x85, 0x04, 0x0a, 0x12, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12,
- 0x19, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03,
- 0xe0, 0x41, 0x02, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74,
- 0x61, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x1f,
- 0x0a, 0x0b, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20,
- 0x03, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12,
- 0x68, 0x0a, 0x11, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62,
- 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e,
- 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69,
- 0x67, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74,
- 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41,
- 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6f, 0x62, 0x6a,
- 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18,
- 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x61, 0x6d,
- 0x65, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x2a, 0x0a, 0x0e, 0x70, 0x61, 0x79, 0x6c, 0x6f,
- 0x61, 0x64, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42,
- 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0d, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x46, 0x6f, 0x72,
- 0x6d, 0x61, 0x74, 0x1a, 0x43, 0x0a, 0x15, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, 0x74, 0x74,
- 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03,
- 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14,
- 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76,
- 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x7d, 0xea, 0x41, 0x7a, 0x0a, 0x29, 0x73,
- 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
- 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
- 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x62, 0x75, 0x63,
- 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x7d, 0x2f, 0x6e, 0x6f,
- 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
- 0x73, 0x2f, 0x7b, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
- 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x7d, 0x22, 0x71, 0x0a, 0x12, 0x43, 0x75, 0x73, 0x74, 0x6f,
- 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x31, 0x0a,
- 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f,
- 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x65, 0x6e, 0x63,
- 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d,
- 0x12, 0x28, 0x0a, 0x10, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62,
- 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x6b, 0x65, 0x79, 0x53,
- 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xb6, 0x0d, 0x0a, 0x06, 0x4f,
- 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3d,
- 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25,
- 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42,
- 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x12, 0x0a,
- 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61,
- 0x67, 0x12, 0x23, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65,
- 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65,
- 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03,
- 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63,
- 0x6c, 0x61, 0x73, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72,
- 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x17, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65,
- 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a,
- 0x65, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x65, 0x6e, 0x63,
- 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6e,
- 0x74, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x2f, 0x0a, 0x13,
- 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x64, 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74,
- 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x63, 0x6f, 0x6e, 0x74, 0x65,
- 0x6e, 0x74, 0x44, 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a,
- 0x0d, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x18, 0x09,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x61, 0x63, 0x68, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x72,
- 0x6f, 0x6c, 0x12, 0x38, 0x0a, 0x03, 0x61, 0x63, 0x6c, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32,
- 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
- 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73,
- 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x03, 0x61, 0x63, 0x6c, 0x12, 0x29, 0x0a, 0x10,
- 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65,
- 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4c,
- 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74,
- 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54,
- 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x64,
- 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e,
- 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x40, 0x0a, 0x0b,
- 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0,
- 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x2c,
- 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e,
- 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x63, 0x6f,
- 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x09,
- 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
- 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73,
- 0x75, 0x6d, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73,
- 0x75, 0x6d, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69,
- 0x6d, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73,
- 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74,
- 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x07, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79,
- 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f,
- 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
- 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06,
- 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x5a, 0x0a, 0x19, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65,
- 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x74,
- 0x69, 0x6d, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65,
- 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x16, 0x75, 0x70, 0x64, 0x61,
- 0x74, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x54, 0x69,
- 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x5f,
- 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x74, 0x65, 0x6d, 0x70,
- 0x6f, 0x72, 0x61, 0x72, 0x79, 0x48, 0x6f, 0x6c, 0x64, 0x12, 0x4e, 0x0a, 0x15, 0x72, 0x65, 0x74,
- 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69,
- 0x6d, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73,
- 0x74, 0x61, 0x6d, 0x70, 0x52, 0x13, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x45,
- 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x43, 0x0a, 0x08, 0x6d, 0x65, 0x74,
- 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x16, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f,
+ 0x73, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42,
+ 0x03, 0xe0, 0x41, 0x03, 0x48, 0x01, 0x52, 0x1e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c,
+ 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x55, 0x70, 0x64, 0x61,
+ 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x42, 0x19, 0x0a, 0x17, 0x5f, 0x74, 0x65,
+ 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63,
+ 0x6c, 0x61, 0x73, 0x73, 0x42, 0x25, 0x0a, 0x23, 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61,
+ 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f,
+ 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, 0x36, 0x0a, 0x15, 0x48,
+ 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x12, 0x1d, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62,
+ 0x6c, 0x65, 0x64, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74,
+ 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x47,
+ 0xea, 0x41, 0x44, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b,
+ 0x65, 0x74, 0x12, 0x23, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72,
+ 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x7b,
+ 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x7d, 0x22, 0x97, 0x02, 0x0a, 0x13, 0x42, 0x75, 0x63, 0x6b,
+ 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12,
+ 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72,
+ 0x6f, 0x6c, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x22, 0x0a, 0x0a, 0x65,
+ 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x61, 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42,
+ 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x41, 0x6c, 0x74, 0x12,
+ 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04,
+ 0x65, 0x74, 0x61, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67,
+ 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e,
+ 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x41,
+ 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x07,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
+ 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
+ 0x54, 0x65, 0x61, 0x6d, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61,
+ 0x6d, 0x22, 0x5a, 0x0a, 0x0f, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64,
+ 0x44, 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x05, 0xe0, 0x41, 0x01, 0x08, 0x01, 0x52, 0x07, 0x63, 0x6f,
+ 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x07, 0x48, 0x00, 0x52, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x88,
+ 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0x54, 0x0a,
+ 0x0f, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73,
+ 0x12, 0x1b, 0x0a, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x07,
+ 0x48, 0x00, 0x52, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x88, 0x01, 0x01, 0x12, 0x19, 0x0a,
+ 0x08, 0x6d, 0x64, 0x35, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52,
+ 0x07, 0x6d, 0x64, 0x35, 0x48, 0x61, 0x73, 0x68, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x63, 0x72, 0x63,
+ 0x33, 0x32, 0x63, 0x22, 0x71, 0x0a, 0x12, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45,
+ 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x31, 0x0a, 0x14, 0x65, 0x6e, 0x63,
+ 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68,
+ 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x28, 0x0a, 0x10,
+ 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x6b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35,
+ 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xb6, 0x0d, 0x0a, 0x06, 0x4f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42,
+ 0x03, 0xe0, 0x41, 0x05, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75,
+ 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x05, 0xfa,
+ 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65,
+ 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61,
+ 0x67, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x23, 0x0a,
+ 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x03, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52,
+ 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73,
+ 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43,
+ 0x6c, 0x61, 0x73, 0x73, 0x12, 0x17, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x06, 0x20, 0x01,
+ 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x29, 0x0a,
+ 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e,
+ 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74,
+ 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x2f, 0x0a, 0x13, 0x63, 0x6f, 0x6e, 0x74,
+ 0x65, 0x6e, 0x74, 0x5f, 0x64, 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18,
+ 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x44, 0x69,
+ 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x61, 0x63,
+ 0x68, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0c, 0x63, 0x61, 0x63, 0x68, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x38,
+ 0x0a, 0x03, 0x61, 0x63, 0x6c, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e,
- 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45,
- 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x2d,
- 0x0a, 0x10, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f,
- 0x6c, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0e, 0x65, 0x76, 0x65, 0x6e,
- 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x48, 0x6f, 0x6c, 0x64, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a,
- 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
- 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x6f, 0x77, 0x6e,
- 0x65, 0x72, 0x12, 0x56, 0x0a, 0x13, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x65,
- 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
- 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72,
- 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72,
- 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x75,
- 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x75, 0x73,
- 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x4e, 0x0a, 0x10, 0x73, 0x6f, 0x66, 0x74, 0x5f,
- 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x1c, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0,
- 0x41, 0x03, 0x48, 0x01, 0x52, 0x0e, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65,
- 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x4e, 0x0a, 0x10, 0x68, 0x61, 0x72, 0x64, 0x5f,
- 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x1d, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0,
- 0x41, 0x03, 0x48, 0x02, 0x52, 0x0e, 0x68, 0x61, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65,
- 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64,
- 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61,
- 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
- 0x3a, 0x02, 0x38, 0x01, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62,
- 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x73, 0x6f,
- 0x66, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x42, 0x13,
- 0x0a, 0x11, 0x5f, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74,
- 0x69, 0x6d, 0x65, 0x22, 0x97, 0x02, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63,
- 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72,
- 0x6f, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12,
- 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12,
- 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x22, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74,
- 0x79, 0x5f, 0x61, 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03,
- 0x52, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x41, 0x6c, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x65,
- 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
- 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67,
- 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x14, 0x0a, 0x05,
- 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61,
- 0x69, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x41, 0x0a, 0x0c, 0x70, 0x72,
- 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
- 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d,
- 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x22, 0x8e, 0x01,
- 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73,
- 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63,
- 0x74, 0x52, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72,
- 0x65, 0x66, 0x69, 0x78, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72,
- 0x65, 0x66, 0x69, 0x78, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70,
- 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x48,
- 0x0a, 0x0b, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x12, 0x25, 0x0a,
- 0x0e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x75,
- 0x6d, 0x62, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x04, 0x74, 0x65, 0x61, 0x6d, 0x22, 0x35, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76,
- 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x6d,
- 0x61, 0x69, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x0c, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22,
- 0x3c, 0x0a, 0x05, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69,
- 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79,
- 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x22, 0x5f, 0x0a,
- 0x0c, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a,
- 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x73, 0x74,
- 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03,
- 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74,
- 0x65, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e,
- 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x32, 0xaa,
- 0x27, 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x72, 0x0a, 0x0c, 0x44, 0x65,
- 0x6c, 0x65, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44,
- 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x22, 0xda, 0x41, 0x04, 0x6e,
- 0x61, 0x6d, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x12, 0x13, 0x0a, 0x04, 0x6e, 0x61, 0x6d,
- 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x6f,
- 0x0a, 0x09, 0x47, 0x65, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f,
+ 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74,
+ 0x72, 0x6f, 0x6c, 0x52, 0x03, 0x61, 0x63, 0x6c, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74,
+ 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4c, 0x61, 0x6e, 0x67, 0x75,
+ 0x61, 0x67, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69,
+ 0x6d, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73,
+ 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x64, 0x65, 0x6c, 0x65, 0x74,
+ 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74,
+ 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e,
+ 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61,
+ 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a,
+ 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x2c, 0x0a, 0x0f, 0x63, 0x6f,
+ 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0f, 0x20,
+ 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e,
+ 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x09, 0x63, 0x68, 0x65, 0x63,
+ 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e,
- 0x47, 0x65, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
- 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x22, 0xda, 0x41, 0x04,
- 0x6e, 0x61, 0x6d, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x12, 0x13, 0x0a, 0x04, 0x6e, 0x61,
- 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12,
- 0xab, 0x01, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74,
- 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
- 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65,
- 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63,
- 0x6b, 0x65, 0x74, 0x22, 0x58, 0xda, 0x41, 0x17, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x62,
- 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x8a,
- 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12,
- 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x1e, 0x0a,
- 0x0e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12,
- 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x85, 0x01,
- 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x25, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76,
- 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
- 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63,
- 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x27, 0xda, 0x41,
- 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x18, 0x12, 0x16, 0x0a,
- 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
- 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x93, 0x01, 0x0a, 0x19, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c,
- 0x69, 0x63, 0x79, 0x12, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
- 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b,
- 0x65, 0x74, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63,
- 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63,
- 0x6b, 0x65, 0x74, 0x22, 0x26, 0xda, 0x41, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x8a, 0xd3,
- 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b,
- 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x75, 0x0a, 0x0c, 0x47,
- 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49,
- 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
- 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e,
- 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x2a, 0xda, 0x41, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75,
- 0x72, 0x63, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x19, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73,
- 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a,
- 0x2a, 0x7d, 0x12, 0x7c, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69,
- 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e,
- 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x31, 0xda,
- 0x41, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x6f, 0x6c, 0x69, 0x63,
- 0x79, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x19, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75,
- 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d,
- 0x12, 0xd7, 0x01, 0x0a, 0x12, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d,
- 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50,
- 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76,
- 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73,
- 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x6c, 0xda, 0x41,
- 0x14, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73,
- 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, 0x0a, 0x08, 0x72,
- 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74,
- 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
- 0x12, 0x28, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
- 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f,
- 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0x12, 0x8a, 0x01, 0x0a, 0x0c, 0x55,
- 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f,
+ 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x42,
+ 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12,
+ 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x11,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
+ 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d,
+ 0x65, 0x12, 0x3f, 0x0a, 0x07, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x12, 0x20, 0x01,
+ 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d,
+ 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x6b, 0x6d, 0x73, 0x4b,
+ 0x65, 0x79, 0x12, 0x5a, 0x0a, 0x19, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x6f,
+ 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18,
+ 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
+ 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x16, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x74,
+ 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x25,
+ 0x0a, 0x0e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x68, 0x6f, 0x6c, 0x64,
+ 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72,
+ 0x79, 0x48, 0x6f, 0x6c, 0x64, 0x12, 0x4e, 0x0a, 0x15, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69,
+ 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x15,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
+ 0x52, 0x13, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x70, 0x69, 0x72,
+ 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x43, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
+ 0x61, 0x18, 0x16, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x2d, 0x0a, 0x10, 0x65, 0x76,
+ 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x17,
+ 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x73,
+ 0x65, 0x64, 0x48, 0x6f, 0x6c, 0x64, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x05, 0x6f, 0x77, 0x6e,
+ 0x65, 0x72, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x77, 0x6e,
+ 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x56,
+ 0x0a, 0x13, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e,
- 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
- 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x37,
- 0xda, 0x41, 0x12, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65,
- 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x1c, 0x12, 0x1a, 0x0a, 0x0b, 0x62,
- 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63,
- 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x9f, 0x01, 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x65,
- 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f,
- 0x6e, 0x66, 0x69, 0x67, 0x12, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
- 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e,
- 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69,
- 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79,
- 0x22, 0x37, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12,
- 0x28, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74,
- 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b,
- 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0xa8, 0x01, 0x0a, 0x15, 0x47, 0x65,
- 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e,
- 0x66, 0x69, 0x67, 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
- 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66,
- 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
- 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x37, 0xda, 0x41, 0x04,
- 0x6e, 0x61, 0x6d, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12, 0x28, 0x0a, 0x04, 0x6e, 0x61,
- 0x6d, 0x65, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a,
- 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a,
- 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0xb1, 0x01, 0x0a, 0x18, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e,
- 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69,
- 0x67, 0x12, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
- 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69,
- 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
- 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69,
- 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x3a, 0xda, 0x41,
- 0x1a, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x8a, 0xd3, 0xe4, 0x93, 0x02,
- 0x17, 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xa8, 0x01, 0x0a, 0x17, 0x4c, 0x69, 0x73,
- 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e,
- 0x66, 0x69, 0x67, 0x73, 0x12, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
- 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74,
- 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74,
- 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66,
- 0x69, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0xda, 0x41, 0x06,
- 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06,
- 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d,
- 0x2a, 0x2a, 0x7d, 0x12, 0x7e, 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62,
- 0x6a, 0x65, 0x63, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
- 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65,
- 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76,
- 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x29, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x23,
- 0x12, 0x21, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e,
- 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d,
- 0x2a, 0x2a, 0x7d, 0x12, 0x98, 0x01, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62,
- 0x6a, 0x65, 0x63, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
- 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f,
- 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45,
- 0x6d, 0x70, 0x74, 0x79, 0x22, 0x48, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c,
- 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c,
- 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65,
- 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x8d,
- 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
- 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
- 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, 0x6a, 0x65,
- 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62,
- 0x6a, 0x65, 0x63, 0x74, 0x22, 0x38, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c,
- 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65,
- 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xba,
- 0x01, 0x0a, 0x14, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62,
- 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63,
- 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63,
- 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0xda, 0x41, 0x09, 0x75, 0x70, 0x6c,
- 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x0a, 0x09,
- 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b,
- 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0x95, 0x01, 0x0a, 0x09,
- 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65,
- 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
- 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x48, 0xda, 0x41, 0x0d, 0x62, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65,
- 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06,
+ 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x52, 0x12, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72,
+ 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d,
+ 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54,
+ 0x69, 0x6d, 0x65, 0x12, 0x4e, 0x0a, 0x10, 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65,
+ 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x48, 0x01,
+ 0x52, 0x0e, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65,
+ 0x88, 0x01, 0x01, 0x12, 0x4e, 0x0a, 0x10, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x6c, 0x65,
+ 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x48, 0x02,
+ 0x52, 0x0e, 0x68, 0x61, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65,
+ 0x88, 0x01, 0x01, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45,
+ 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01,
+ 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64,
+ 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64,
+ 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x68,
+ 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x22,
+ 0x97, 0x02, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73,
+ 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69,
+ 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x65,
+ 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74,
+ 0x69, 0x74, 0x79, 0x12, 0x22, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x61, 0x6c,
+ 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x65, 0x6e,
+ 0x74, 0x69, 0x74, 0x79, 0x41, 0x6c, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74,
+ 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69,
+ 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x08, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69,
+ 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x16,
+ 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06,
+ 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x41, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
+ 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
+ 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x52, 0x0b, 0x70, 0x72,
+ 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x22, 0x8e, 0x01, 0x0a, 0x13, 0x4c, 0x69,
+ 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x12, 0x33, 0x0a, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x6f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78,
+ 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78,
+ 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f,
+ 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78,
+ 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x48, 0x0a, 0x0b, 0x50, 0x72,
+ 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x6f,
+ 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x0d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72,
+ 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
+ 0x74, 0x65, 0x61, 0x6d, 0x22, 0x3c, 0x0a, 0x05, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x16, 0x0a,
+ 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65,
+ 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f,
+ 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79,
+ 0x49, 0x64, 0x22, 0x5f, 0x0a, 0x0c, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e,
+ 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f,
+ 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x03, 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x4c, 0x65, 0x6e,
+ 0x67, 0x74, 0x68, 0x22, 0x68, 0x0a, 0x1f, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74,
+ 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x45, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x42, 0x31, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2b, 0x0a, 0x29, 0x73, 0x74,
+ 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x65, 0x0a,
+ 0x1c, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x45, 0x0a,
+ 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x31, 0xe0, 0x41, 0x02,
+ 0xfa, 0x41, 0x2b, 0x0a, 0x29, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69,
+ 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xc9, 0x01, 0x0a, 0x1f, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e,
+ 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65,
+ 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x31, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2b,
+ 0x12, 0x29, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x70, 0x61, 0x72,
+ 0x65, 0x6e, 0x74, 0x12, 0x5b, 0x0a, 0x13, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
+ 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x12, 0x6e, 0x6f,
+ 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x22, 0xb1, 0x01, 0x0a, 0x1e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x42, 0x31, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2b, 0x12, 0x29, 0x73, 0x74, 0x6f,
+ 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
+ 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x20,
+ 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65,
+ 0x12, 0x22, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54,
+ 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xa3, 0x01, 0x0a, 0x1f, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74,
+ 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x6e, 0x6f, 0x74, 0x69,
+ 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73,
+ 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66,
+ 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x13, 0x6e,
+ 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f,
+ 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78,
+ 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x99, 0x04, 0x0a, 0x12, 0x4e,
+ 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42,
+ 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x05, 0x74, 0x6f,
+ 0x70, 0x69, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05,
+ 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x17, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x07, 0x20,
+ 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x24,
+ 0x0a, 0x0b, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20,
+ 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54,
+ 0x79, 0x70, 0x65, 0x73, 0x12, 0x6d, 0x0a, 0x11, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x61,
+ 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
+ 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, 0x74, 0x74,
+ 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x03, 0xe0, 0x41,
+ 0x01, 0x52, 0x10, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75,
+ 0x74, 0x65, 0x73, 0x12, 0x31, 0x0a, 0x12, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61,
+ 0x6d, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42,
+ 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65,
+ 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x2a, 0x0a, 0x0e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61,
+ 0x64, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03,
+ 0xe0, 0x41, 0x02, 0x52, 0x0d, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x46, 0x6f, 0x72, 0x6d,
+ 0x61, 0x74, 0x1a, 0x43, 0x0a, 0x15, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, 0x74, 0x74, 0x72,
+ 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b,
+ 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x7d, 0xea, 0x41, 0x7a, 0x0a, 0x29, 0x73, 0x74,
+ 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
+ 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x62, 0x75, 0x63, 0x6b,
+ 0x65, 0x74, 0x73, 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x7d, 0x2f, 0x6e, 0x6f, 0x74,
+ 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73,
+ 0x2f, 0x7b, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x7d, 0x32, 0x88, 0x28, 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61,
+ 0x67, 0x65, 0x12, 0x72, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b,
+ 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x75, 0x63,
+ 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70,
+ 0x74, 0x79, 0x22, 0x22, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02,
+ 0x15, 0x12, 0x13, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b,
+ 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x6f, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x42, 0x75, 0x63,
+ 0x6b, 0x65, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
+ 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65,
+ 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63,
+ 0x6b, 0x65, 0x74, 0x22, 0x22, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x8a, 0xd3, 0xe4, 0x93,
+ 0x02, 0x15, 0x12, 0x13, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63,
+ 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xab, 0x01, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61,
+ 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65,
+ 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
+ 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x58, 0xda, 0x41, 0x17,
+ 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x62, 0x75,
+ 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x12, 0x16, 0x0a,
+ 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
+ 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x1e, 0x0a, 0x0e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e,
+ 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
+ 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x85, 0x01, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75,
+ 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
+ 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75,
+ 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
+ 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x27, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x8a,
+ 0xd3, 0xe4, 0x93, 0x02, 0x18, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12,
+ 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x93, 0x01,
+ 0x0a, 0x19, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x74, 0x65,
+ 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x33, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e,
+ 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74,
+ 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
+ 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x26, 0xda, 0x41, 0x06,
+ 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06,
0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d,
- 0x2a, 0x2a, 0x7d, 0x12, 0xa5, 0x01, 0x0a, 0x0a, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65,
- 0x63, 0x74, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
- 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63,
- 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61,
- 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
+ 0x2a, 0x2a, 0x7d, 0x12, 0x75, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c,
+ 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d,
+ 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x2a,
+ 0xda, 0x41, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02,
+ 0x19, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b,
+ 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x7c, 0x0a, 0x0c, 0x53, 0x65,
+ 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x49, 0x61,
+ 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50,
+ 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x31, 0xda, 0x41, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x2c, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x19, 0x12,
+ 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75,
+ 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x96, 0x02, 0x0a, 0x12, 0x54, 0x65, 0x73,
+ 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12,
+ 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e,
+ 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f,
+ 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61,
+ 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xaa, 0x01, 0xda, 0x41, 0x14, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x2c, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x8a, 0xd3,
+ 0xe4, 0x93, 0x02, 0x8c, 0x01, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x34,
+ 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x28, 0x7b, 0x62, 0x75, 0x63,
+ 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62,
+ 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74,
+ 0x73, 0x2f, 0x2a, 0x2a, 0x12, 0x3b, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x12, 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
+ 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f,
+ 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x64, 0x46, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x2a,
+ 0x2a, 0x12, 0x8a, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b,
+ 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63,
+ 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42,
+ 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x37, 0xda, 0x41, 0x12, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74,
+ 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x8a, 0xd3, 0xe4, 0x93,
+ 0x02, 0x1c, 0x12, 0x1a, 0x0a, 0x0b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x6e, 0x61, 0x6d,
+ 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x7e,
+ 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12,
+ 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
+ 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x22, 0x29, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x12, 0x21, 0x0a, 0x12, 0x64,
+ 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65,
+ 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x98,
+ 0x01, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12,
+ 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
+ 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22,
0x48, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63,
0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63,
0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93,
0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62,
- 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x30, 0x01, 0x12, 0x8c, 0x01, 0x0a, 0x0c,
- 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x26, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
- 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71,
+ 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x8d, 0x01, 0x0a, 0x0d, 0x52, 0x65,
+ 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e,
+ 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22,
- 0x39, 0xda, 0x41, 0x12, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74,
- 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x12, 0x1c, 0x0a, 0x0d,
- 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b,
- 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x60, 0x0a, 0x0b, 0x57, 0x72,
- 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72,
- 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
- 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x12, 0x6e, 0x0a, 0x0f,
- 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12,
- 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
- 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a,
- 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x38, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93,
+ 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62,
+ 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xba, 0x01, 0x0a, 0x14, 0x43, 0x61,
+ 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69,
+ 0x74, 0x65, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73,
+ 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73,
+ 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x22, 0x41, 0xda, 0x41, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69,
+ 0x64, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61,
+ 0x64, 0x5f, 0x69, 0x64, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72,
+ 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73,
+ 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0x95, 0x01, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x4f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
+ 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x22, 0x48, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c,
+ 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c,
+ 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65,
+ 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xa5,
+ 0x01, 0x0a, 0x0a, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x24, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76,
+ 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
+ 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x48, 0xda, 0x41, 0x0d, 0x62,
+ 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62,
+ 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e,
+ 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a,
+ 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74,
+ 0x3d, 0x2a, 0x2a, 0x7d, 0x30, 0x01, 0x12, 0x8c, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74,
+ 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61,
+ 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+ 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
+ 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x39, 0xda, 0x41, 0x12, 0x6f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73,
+ 0x6b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x12, 0x1c, 0x0a, 0x0d, 0x6f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65,
+ 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x60, 0x0a, 0x0b, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
+ 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e,
+ 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x12, 0x6e, 0x0a, 0x0f, 0x42, 0x69, 0x64, 0x69, 0x57,
+ 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x29, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42,
0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x84, 0x01, 0x0a,
- 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
- 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
- 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65,
- 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0xda, 0x41, 0x06,
- 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06,
- 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d,
- 0x2a, 0x2a, 0x7d, 0x12, 0x98, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f,
- 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
- 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74,
- 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
+ 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x69, 0x64, 0x69, 0x57, 0x72,
+ 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x84, 0x01, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74,
+ 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74,
+ 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26,
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
- 0x76, 0x32, 0x2e, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x22, 0x3a, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x0f, 0x0a, 0x0d, 0x73, 0x6f,
- 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x21, 0x0a, 0x12, 0x64,
- 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65,
- 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xae,
- 0x01, 0x0a, 0x13, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c,
- 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74,
- 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
- 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52,
- 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x32, 0x12, 0x30, 0x0a,
- 0x21, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70,
- 0x65, 0x63, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x62, 0x75, 0x63, 0x6b,
- 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12,
- 0xae, 0x01, 0x0a, 0x10, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74,
- 0x61, 0x74, 0x75, 0x73, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
- 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72,
- 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
- 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53,
- 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0xda,
- 0x41, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x8a, 0xd3, 0xe4, 0x93, 0x02,
- 0x2f, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x20,
- 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73,
- 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a,
- 0x12, 0x80, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41,
- 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x65,
- 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
- 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41,
- 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x1b, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65,
- 0x63, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a,
- 0x65, 0x63, 0x74, 0x12, 0x95, 0x01, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d,
- 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
- 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65,
- 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28,
+ 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e,
+ 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e,
+ 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x98,
+ 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
+ 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
+ 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65,
+ 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3a, 0x8a,
+ 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x0f, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f,
+ 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x21, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62,
+ 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xae, 0x01, 0x0a, 0x13, 0x53, 0x74,
+ 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74,
+ 0x65, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
+ 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d,
+ 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
+ 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61,
+ 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x22, 0x38, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x32, 0x12, 0x30, 0x0a, 0x21, 0x77, 0x72, 0x69, 0x74,
+ 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x72, 0x65,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b,
+ 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xae, 0x01, 0x0a, 0x10, 0x51,
+ 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12,
+ 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
+ 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74,
+ 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e,
+ 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0xda, 0x41, 0x09, 0x75, 0x70, 0x6c,
+ 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x0a, 0x09,
+ 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b,
+ 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75,
+ 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0x83, 0x01, 0x0a, 0x11,
+ 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e,
+ 0x74, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
+ 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21,
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
- 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x31, 0xda, 0x41, 0x1d, 0x70, 0x72, 0x6f,
- 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63,
- 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b,
- 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x77, 0x0a, 0x0d, 0x44,
- 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67,
+ 0x76, 0x32, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e,
+ 0x74, 0x22, 0x1e, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x8a, 0xd3, 0xe4,
+ 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x88, 0x02,
+ 0x01, 0x12, 0x98, 0x01, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63,
+ 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
+ 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d,
+ 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
- 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x25, 0xda,
- 0x41, 0x11, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, 0x6f, 0x6a,
- 0x65, 0x63, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f,
- 0x6a, 0x65, 0x63, 0x74, 0x12, 0x7d, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b,
- 0x65, 0x79, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
- 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65,
- 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61,
- 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x25, 0xda, 0x41,
- 0x11, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, 0x6f, 0x6a, 0x65,
- 0x63, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a,
- 0x65, 0x63, 0x74, 0x12, 0x7c, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b,
- 0x65, 0x79, 0x73, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
- 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63,
- 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f,
+ 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x34, 0xda, 0x41, 0x1d, 0x70, 0x72, 0x6f, 0x6a, 0x65,
+ 0x63, 0x74, 0x2c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75,
+ 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09,
+ 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x88, 0x02, 0x01, 0x12, 0x7a, 0x0a, 0x0d,
+ 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76,
+ 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x28,
+ 0xda, 0x41, 0x11, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, 0x6f,
+ 0x6a, 0x65, 0x63, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72,
+ 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x88, 0x02, 0x01, 0x12, 0x80, 0x01, 0x0a, 0x0a, 0x47, 0x65, 0x74,
+ 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x48,
+ 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76,
+ 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
+ 0x61, 0x22, 0x28, 0xda, 0x41, 0x11, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x2c,
+ 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a,
+ 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x88, 0x02, 0x01, 0x12, 0x7f, 0x0a, 0x0c, 0x4c,
+ 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x26, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e,
- 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1b, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
- 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
- 0x74, 0x12, 0x9d, 0x01, 0x0a, 0x0d, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63,
- 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
- 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d,
- 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67,
+ 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
+ 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63,
+ 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1e, 0xda, 0x41,
+ 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09,
+ 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x88, 0x02, 0x01, 0x12, 0xa0, 0x01, 0x0a,
+ 0x0d, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
+ 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63,
+ 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x42, 0xda, 0x41, 0x14,
+ 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f,
+ 0x6d, 0x61, 0x73, 0x6b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x22, 0x12, 0x20, 0x0a, 0x10, 0x68, 0x6d,
+ 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x0c,
+ 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x88, 0x02, 0x01, 0x12,
+ 0xa2, 0x01, 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x32, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
- 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61,
- 0x22, 0x3f, 0xda, 0x41, 0x14, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x2c, 0x75, 0x70,
- 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x22, 0x12,
- 0x20, 0x0a, 0x10, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x6a,
- 0x65, 0x63, 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a,
- 0x7d, 0x1a, 0xa7, 0x02, 0xca, 0x41, 0x16, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x8a,
- 0x02, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68,
- 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c,
- 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f,
- 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x72,
- 0x65, 0x61, 0x64, 0x2d, 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f,
- 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
- 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72,
- 0x61, 0x67, 0x65, 0x2e, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
- 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68,
- 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x72, 0x65, 0x61, 0x64,
- 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77,
+ 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x3a, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d,
+ 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12, 0x28, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12,
+ 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
+ 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a,
+ 0x2a, 0x88, 0x02, 0x01, 0x12, 0xab, 0x01, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69,
+ 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2f,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
+ 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+ 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
+ 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x3a, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x8a,
+ 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12, 0x28, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x7b,
+ 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f,
+ 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x88,
+ 0x02, 0x01, 0x12, 0xb4, 0x01, 0x0a, 0x18, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74,
+ 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12,
+ 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
+ 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
+ 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x3d, 0xda, 0x41, 0x1a, 0x70,
+ 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12,
+ 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b,
+ 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x88, 0x02, 0x01, 0x12, 0xab, 0x01, 0x0a, 0x17, 0x4c, 0x69,
+ 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
+ 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f,
+ 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73,
+ 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x29, 0xda, 0x41,
+ 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a,
+ 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74,
+ 0x3d, 0x2a, 0x2a, 0x7d, 0x88, 0x02, 0x01, 0x1a, 0xa7, 0x02, 0xca, 0x41, 0x16, 0x73, 0x74, 0x6f,
+ 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
+ 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x8a, 0x02, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77,
+ 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61,
+ 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77,
0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
- 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
- 0x2e, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x42, 0xe2, 0x01, 0xea, 0x41,
- 0x78, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74,
- 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x53, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b,
- 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65,
- 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67,
- 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72,
- 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x0a, 0x15, 0x63, 0x6f, 0x6d, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
- 0x42, 0x0c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
- 0x5a, 0x3e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63,
- 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2f, 0x69, 0x6e,
- 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x73, 0x74, 0x6f,
- 0x72, 0x61, 0x67, 0x65, 0x70, 0x62, 0x3b, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x70, 0x62,
- 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74,
+ 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x2d, 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68,
+ 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64,
+ 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x63,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77,
+ 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
+ 0x65, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70,
+ 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
+ 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73,
+ 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x77, 0x72, 0x69, 0x74,
+ 0x65, 0x42, 0xe2, 0x01, 0xea, 0x41, 0x78, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d,
+ 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x53, 0x70, 0x72, 0x6f, 0x6a,
+ 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c,
+ 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65,
+ 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65,
+ 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x0a,
+ 0x15, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x42, 0x0c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50,
+ 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x73, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69,
+ 0x76, 0x32, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x70, 0x62, 0x3b, 0x73, 0x74, 0x6f,
+ 0x72, 0x61, 0x67, 0x65, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -8977,56 +8991,56 @@ var file_google_storage_v2_storage_proto_goTypes = []any{
(*ListBucketsResponse)(nil), // 5: google.storage.v2.ListBucketsResponse
(*LockBucketRetentionPolicyRequest)(nil), // 6: google.storage.v2.LockBucketRetentionPolicyRequest
(*UpdateBucketRequest)(nil), // 7: google.storage.v2.UpdateBucketRequest
- (*DeleteNotificationConfigRequest)(nil), // 8: google.storage.v2.DeleteNotificationConfigRequest
- (*GetNotificationConfigRequest)(nil), // 9: google.storage.v2.GetNotificationConfigRequest
- (*CreateNotificationConfigRequest)(nil), // 10: google.storage.v2.CreateNotificationConfigRequest
- (*ListNotificationConfigsRequest)(nil), // 11: google.storage.v2.ListNotificationConfigsRequest
- (*ListNotificationConfigsResponse)(nil), // 12: google.storage.v2.ListNotificationConfigsResponse
- (*ComposeObjectRequest)(nil), // 13: google.storage.v2.ComposeObjectRequest
- (*DeleteObjectRequest)(nil), // 14: google.storage.v2.DeleteObjectRequest
- (*RestoreObjectRequest)(nil), // 15: google.storage.v2.RestoreObjectRequest
- (*CancelResumableWriteRequest)(nil), // 16: google.storage.v2.CancelResumableWriteRequest
- (*CancelResumableWriteResponse)(nil), // 17: google.storage.v2.CancelResumableWriteResponse
- (*ReadObjectRequest)(nil), // 18: google.storage.v2.ReadObjectRequest
- (*GetObjectRequest)(nil), // 19: google.storage.v2.GetObjectRequest
- (*ReadObjectResponse)(nil), // 20: google.storage.v2.ReadObjectResponse
- (*WriteObjectSpec)(nil), // 21: google.storage.v2.WriteObjectSpec
- (*WriteObjectRequest)(nil), // 22: google.storage.v2.WriteObjectRequest
- (*WriteObjectResponse)(nil), // 23: google.storage.v2.WriteObjectResponse
- (*BidiWriteObjectRequest)(nil), // 24: google.storage.v2.BidiWriteObjectRequest
- (*BidiWriteObjectResponse)(nil), // 25: google.storage.v2.BidiWriteObjectResponse
- (*ListObjectsRequest)(nil), // 26: google.storage.v2.ListObjectsRequest
- (*QueryWriteStatusRequest)(nil), // 27: google.storage.v2.QueryWriteStatusRequest
- (*QueryWriteStatusResponse)(nil), // 28: google.storage.v2.QueryWriteStatusResponse
- (*RewriteObjectRequest)(nil), // 29: google.storage.v2.RewriteObjectRequest
- (*RewriteResponse)(nil), // 30: google.storage.v2.RewriteResponse
- (*StartResumableWriteRequest)(nil), // 31: google.storage.v2.StartResumableWriteRequest
- (*StartResumableWriteResponse)(nil), // 32: google.storage.v2.StartResumableWriteResponse
- (*UpdateObjectRequest)(nil), // 33: google.storage.v2.UpdateObjectRequest
- (*GetServiceAccountRequest)(nil), // 34: google.storage.v2.GetServiceAccountRequest
- (*CreateHmacKeyRequest)(nil), // 35: google.storage.v2.CreateHmacKeyRequest
- (*CreateHmacKeyResponse)(nil), // 36: google.storage.v2.CreateHmacKeyResponse
- (*DeleteHmacKeyRequest)(nil), // 37: google.storage.v2.DeleteHmacKeyRequest
- (*GetHmacKeyRequest)(nil), // 38: google.storage.v2.GetHmacKeyRequest
- (*ListHmacKeysRequest)(nil), // 39: google.storage.v2.ListHmacKeysRequest
- (*ListHmacKeysResponse)(nil), // 40: google.storage.v2.ListHmacKeysResponse
- (*UpdateHmacKeyRequest)(nil), // 41: google.storage.v2.UpdateHmacKeyRequest
- (*CommonObjectRequestParams)(nil), // 42: google.storage.v2.CommonObjectRequestParams
- (*ServiceConstants)(nil), // 43: google.storage.v2.ServiceConstants
- (*Bucket)(nil), // 44: google.storage.v2.Bucket
- (*BucketAccessControl)(nil), // 45: google.storage.v2.BucketAccessControl
- (*ChecksummedData)(nil), // 46: google.storage.v2.ChecksummedData
- (*ObjectChecksums)(nil), // 47: google.storage.v2.ObjectChecksums
- (*HmacKeyMetadata)(nil), // 48: google.storage.v2.HmacKeyMetadata
- (*NotificationConfig)(nil), // 49: google.storage.v2.NotificationConfig
- (*CustomerEncryption)(nil), // 50: google.storage.v2.CustomerEncryption
- (*Object)(nil), // 51: google.storage.v2.Object
- (*ObjectAccessControl)(nil), // 52: google.storage.v2.ObjectAccessControl
- (*ListObjectsResponse)(nil), // 53: google.storage.v2.ListObjectsResponse
- (*ProjectTeam)(nil), // 54: google.storage.v2.ProjectTeam
- (*ServiceAccount)(nil), // 55: google.storage.v2.ServiceAccount
- (*Owner)(nil), // 56: google.storage.v2.Owner
- (*ContentRange)(nil), // 57: google.storage.v2.ContentRange
+ (*ComposeObjectRequest)(nil), // 8: google.storage.v2.ComposeObjectRequest
+ (*DeleteObjectRequest)(nil), // 9: google.storage.v2.DeleteObjectRequest
+ (*RestoreObjectRequest)(nil), // 10: google.storage.v2.RestoreObjectRequest
+ (*CancelResumableWriteRequest)(nil), // 11: google.storage.v2.CancelResumableWriteRequest
+ (*CancelResumableWriteResponse)(nil), // 12: google.storage.v2.CancelResumableWriteResponse
+ (*ReadObjectRequest)(nil), // 13: google.storage.v2.ReadObjectRequest
+ (*GetObjectRequest)(nil), // 14: google.storage.v2.GetObjectRequest
+ (*ReadObjectResponse)(nil), // 15: google.storage.v2.ReadObjectResponse
+ (*WriteObjectSpec)(nil), // 16: google.storage.v2.WriteObjectSpec
+ (*WriteObjectRequest)(nil), // 17: google.storage.v2.WriteObjectRequest
+ (*WriteObjectResponse)(nil), // 18: google.storage.v2.WriteObjectResponse
+ (*BidiWriteObjectRequest)(nil), // 19: google.storage.v2.BidiWriteObjectRequest
+ (*BidiWriteObjectResponse)(nil), // 20: google.storage.v2.BidiWriteObjectResponse
+ (*ListObjectsRequest)(nil), // 21: google.storage.v2.ListObjectsRequest
+ (*QueryWriteStatusRequest)(nil), // 22: google.storage.v2.QueryWriteStatusRequest
+ (*QueryWriteStatusResponse)(nil), // 23: google.storage.v2.QueryWriteStatusResponse
+ (*RewriteObjectRequest)(nil), // 24: google.storage.v2.RewriteObjectRequest
+ (*RewriteResponse)(nil), // 25: google.storage.v2.RewriteResponse
+ (*StartResumableWriteRequest)(nil), // 26: google.storage.v2.StartResumableWriteRequest
+ (*StartResumableWriteResponse)(nil), // 27: google.storage.v2.StartResumableWriteResponse
+ (*UpdateObjectRequest)(nil), // 28: google.storage.v2.UpdateObjectRequest
+ (*GetServiceAccountRequest)(nil), // 29: google.storage.v2.GetServiceAccountRequest
+ (*ServiceAccount)(nil), // 30: google.storage.v2.ServiceAccount
+ (*CreateHmacKeyRequest)(nil), // 31: google.storage.v2.CreateHmacKeyRequest
+ (*CreateHmacKeyResponse)(nil), // 32: google.storage.v2.CreateHmacKeyResponse
+ (*DeleteHmacKeyRequest)(nil), // 33: google.storage.v2.DeleteHmacKeyRequest
+ (*GetHmacKeyRequest)(nil), // 34: google.storage.v2.GetHmacKeyRequest
+ (*ListHmacKeysRequest)(nil), // 35: google.storage.v2.ListHmacKeysRequest
+ (*ListHmacKeysResponse)(nil), // 36: google.storage.v2.ListHmacKeysResponse
+ (*UpdateHmacKeyRequest)(nil), // 37: google.storage.v2.UpdateHmacKeyRequest
+ (*HmacKeyMetadata)(nil), // 38: google.storage.v2.HmacKeyMetadata
+ (*CommonObjectRequestParams)(nil), // 39: google.storage.v2.CommonObjectRequestParams
+ (*ServiceConstants)(nil), // 40: google.storage.v2.ServiceConstants
+ (*Bucket)(nil), // 41: google.storage.v2.Bucket
+ (*BucketAccessControl)(nil), // 42: google.storage.v2.BucketAccessControl
+ (*ChecksummedData)(nil), // 43: google.storage.v2.ChecksummedData
+ (*ObjectChecksums)(nil), // 44: google.storage.v2.ObjectChecksums
+ (*CustomerEncryption)(nil), // 45: google.storage.v2.CustomerEncryption
+ (*Object)(nil), // 46: google.storage.v2.Object
+ (*ObjectAccessControl)(nil), // 47: google.storage.v2.ObjectAccessControl
+ (*ListObjectsResponse)(nil), // 48: google.storage.v2.ListObjectsResponse
+ (*ProjectTeam)(nil), // 49: google.storage.v2.ProjectTeam
+ (*Owner)(nil), // 50: google.storage.v2.Owner
+ (*ContentRange)(nil), // 51: google.storage.v2.ContentRange
+ (*DeleteNotificationConfigRequest)(nil), // 52: google.storage.v2.DeleteNotificationConfigRequest
+ (*GetNotificationConfigRequest)(nil), // 53: google.storage.v2.GetNotificationConfigRequest
+ (*CreateNotificationConfigRequest)(nil), // 54: google.storage.v2.CreateNotificationConfigRequest
+ (*ListNotificationConfigsRequest)(nil), // 55: google.storage.v2.ListNotificationConfigsRequest
+ (*ListNotificationConfigsResponse)(nil), // 56: google.storage.v2.ListNotificationConfigsResponse
+ (*NotificationConfig)(nil), // 57: google.storage.v2.NotificationConfig
(*ComposeObjectRequest_SourceObject)(nil), // 58: google.storage.v2.ComposeObjectRequest.SourceObject
(*ComposeObjectRequest_SourceObject_ObjectPreconditions)(nil), // 59: google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions
(*Bucket_Billing)(nil), // 60: google.storage.v2.Bucket.Billing
@@ -9047,8 +9061,8 @@ var file_google_storage_v2_storage_proto_goTypes = []any{
(*Bucket_Lifecycle_Rule)(nil), // 75: google.storage.v2.Bucket.Lifecycle.Rule
(*Bucket_Lifecycle_Rule_Action)(nil), // 76: google.storage.v2.Bucket.Lifecycle.Rule.Action
(*Bucket_Lifecycle_Rule_Condition)(nil), // 77: google.storage.v2.Bucket.Lifecycle.Rule.Condition
- nil, // 78: google.storage.v2.NotificationConfig.CustomAttributesEntry
- nil, // 79: google.storage.v2.Object.MetadataEntry
+ nil, // 78: google.storage.v2.Object.MetadataEntry
+ nil, // 79: google.storage.v2.NotificationConfig.CustomAttributesEntry
(*fieldmaskpb.FieldMask)(nil), // 80: google.protobuf.FieldMask
(*timestamppb.Timestamp)(nil), // 81: google.protobuf.Timestamp
(*durationpb.Duration)(nil), // 82: google.protobuf.Duration
@@ -9062,57 +9076,57 @@ var file_google_storage_v2_storage_proto_goTypes = []any{
}
var file_google_storage_v2_storage_proto_depIdxs = []int32{
80, // 0: google.storage.v2.GetBucketRequest.read_mask:type_name -> google.protobuf.FieldMask
- 44, // 1: google.storage.v2.CreateBucketRequest.bucket:type_name -> google.storage.v2.Bucket
+ 41, // 1: google.storage.v2.CreateBucketRequest.bucket:type_name -> google.storage.v2.Bucket
80, // 2: google.storage.v2.ListBucketsRequest.read_mask:type_name -> google.protobuf.FieldMask
- 44, // 3: google.storage.v2.ListBucketsResponse.buckets:type_name -> google.storage.v2.Bucket
- 44, // 4: google.storage.v2.UpdateBucketRequest.bucket:type_name -> google.storage.v2.Bucket
+ 41, // 3: google.storage.v2.ListBucketsResponse.buckets:type_name -> google.storage.v2.Bucket
+ 41, // 4: google.storage.v2.UpdateBucketRequest.bucket:type_name -> google.storage.v2.Bucket
80, // 5: google.storage.v2.UpdateBucketRequest.update_mask:type_name -> google.protobuf.FieldMask
- 49, // 6: google.storage.v2.CreateNotificationConfigRequest.notification_config:type_name -> google.storage.v2.NotificationConfig
- 49, // 7: google.storage.v2.ListNotificationConfigsResponse.notification_configs:type_name -> google.storage.v2.NotificationConfig
- 51, // 8: google.storage.v2.ComposeObjectRequest.destination:type_name -> google.storage.v2.Object
- 58, // 9: google.storage.v2.ComposeObjectRequest.source_objects:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject
- 42, // 10: google.storage.v2.ComposeObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
- 47, // 11: google.storage.v2.ComposeObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums
- 42, // 12: google.storage.v2.DeleteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
- 42, // 13: google.storage.v2.RestoreObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
- 42, // 14: google.storage.v2.ReadObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
- 80, // 15: google.storage.v2.ReadObjectRequest.read_mask:type_name -> google.protobuf.FieldMask
- 42, // 16: google.storage.v2.GetObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
- 80, // 17: google.storage.v2.GetObjectRequest.read_mask:type_name -> google.protobuf.FieldMask
- 46, // 18: google.storage.v2.ReadObjectResponse.checksummed_data:type_name -> google.storage.v2.ChecksummedData
- 47, // 19: google.storage.v2.ReadObjectResponse.object_checksums:type_name -> google.storage.v2.ObjectChecksums
- 57, // 20: google.storage.v2.ReadObjectResponse.content_range:type_name -> google.storage.v2.ContentRange
- 51, // 21: google.storage.v2.ReadObjectResponse.metadata:type_name -> google.storage.v2.Object
- 51, // 22: google.storage.v2.WriteObjectSpec.resource:type_name -> google.storage.v2.Object
- 21, // 23: google.storage.v2.WriteObjectRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec
- 46, // 24: google.storage.v2.WriteObjectRequest.checksummed_data:type_name -> google.storage.v2.ChecksummedData
- 47, // 25: google.storage.v2.WriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums
- 42, // 26: google.storage.v2.WriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
- 51, // 27: google.storage.v2.WriteObjectResponse.resource:type_name -> google.storage.v2.Object
- 21, // 28: google.storage.v2.BidiWriteObjectRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec
- 46, // 29: google.storage.v2.BidiWriteObjectRequest.checksummed_data:type_name -> google.storage.v2.ChecksummedData
- 47, // 30: google.storage.v2.BidiWriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums
- 42, // 31: google.storage.v2.BidiWriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
- 51, // 32: google.storage.v2.BidiWriteObjectResponse.resource:type_name -> google.storage.v2.Object
- 80, // 33: google.storage.v2.ListObjectsRequest.read_mask:type_name -> google.protobuf.FieldMask
- 42, // 34: google.storage.v2.QueryWriteStatusRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
- 51, // 35: google.storage.v2.QueryWriteStatusResponse.resource:type_name -> google.storage.v2.Object
- 51, // 36: google.storage.v2.RewriteObjectRequest.destination:type_name -> google.storage.v2.Object
- 42, // 37: google.storage.v2.RewriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
- 47, // 38: google.storage.v2.RewriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums
- 51, // 39: google.storage.v2.RewriteResponse.resource:type_name -> google.storage.v2.Object
- 21, // 40: google.storage.v2.StartResumableWriteRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec
- 42, // 41: google.storage.v2.StartResumableWriteRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
- 47, // 42: google.storage.v2.StartResumableWriteRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums
- 51, // 43: google.storage.v2.UpdateObjectRequest.object:type_name -> google.storage.v2.Object
- 80, // 44: google.storage.v2.UpdateObjectRequest.update_mask:type_name -> google.protobuf.FieldMask
- 42, // 45: google.storage.v2.UpdateObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
- 48, // 46: google.storage.v2.CreateHmacKeyResponse.metadata:type_name -> google.storage.v2.HmacKeyMetadata
- 48, // 47: google.storage.v2.ListHmacKeysResponse.hmac_keys:type_name -> google.storage.v2.HmacKeyMetadata
- 48, // 48: google.storage.v2.UpdateHmacKeyRequest.hmac_key:type_name -> google.storage.v2.HmacKeyMetadata
- 80, // 49: google.storage.v2.UpdateHmacKeyRequest.update_mask:type_name -> google.protobuf.FieldMask
- 45, // 50: google.storage.v2.Bucket.acl:type_name -> google.storage.v2.BucketAccessControl
- 52, // 51: google.storage.v2.Bucket.default_object_acl:type_name -> google.storage.v2.ObjectAccessControl
+ 46, // 6: google.storage.v2.ComposeObjectRequest.destination:type_name -> google.storage.v2.Object
+ 58, // 7: google.storage.v2.ComposeObjectRequest.source_objects:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject
+ 39, // 8: google.storage.v2.ComposeObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
+ 44, // 9: google.storage.v2.ComposeObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums
+ 39, // 10: google.storage.v2.DeleteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
+ 39, // 11: google.storage.v2.RestoreObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
+ 39, // 12: google.storage.v2.ReadObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
+ 80, // 13: google.storage.v2.ReadObjectRequest.read_mask:type_name -> google.protobuf.FieldMask
+ 39, // 14: google.storage.v2.GetObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
+ 80, // 15: google.storage.v2.GetObjectRequest.read_mask:type_name -> google.protobuf.FieldMask
+ 43, // 16: google.storage.v2.ReadObjectResponse.checksummed_data:type_name -> google.storage.v2.ChecksummedData
+ 44, // 17: google.storage.v2.ReadObjectResponse.object_checksums:type_name -> google.storage.v2.ObjectChecksums
+ 51, // 18: google.storage.v2.ReadObjectResponse.content_range:type_name -> google.storage.v2.ContentRange
+ 46, // 19: google.storage.v2.ReadObjectResponse.metadata:type_name -> google.storage.v2.Object
+ 46, // 20: google.storage.v2.WriteObjectSpec.resource:type_name -> google.storage.v2.Object
+ 16, // 21: google.storage.v2.WriteObjectRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec
+ 43, // 22: google.storage.v2.WriteObjectRequest.checksummed_data:type_name -> google.storage.v2.ChecksummedData
+ 44, // 23: google.storage.v2.WriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums
+ 39, // 24: google.storage.v2.WriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
+ 46, // 25: google.storage.v2.WriteObjectResponse.resource:type_name -> google.storage.v2.Object
+ 16, // 26: google.storage.v2.BidiWriteObjectRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec
+ 43, // 27: google.storage.v2.BidiWriteObjectRequest.checksummed_data:type_name -> google.storage.v2.ChecksummedData
+ 44, // 28: google.storage.v2.BidiWriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums
+ 39, // 29: google.storage.v2.BidiWriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
+ 46, // 30: google.storage.v2.BidiWriteObjectResponse.resource:type_name -> google.storage.v2.Object
+ 80, // 31: google.storage.v2.ListObjectsRequest.read_mask:type_name -> google.protobuf.FieldMask
+ 39, // 32: google.storage.v2.QueryWriteStatusRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
+ 46, // 33: google.storage.v2.QueryWriteStatusResponse.resource:type_name -> google.storage.v2.Object
+ 46, // 34: google.storage.v2.RewriteObjectRequest.destination:type_name -> google.storage.v2.Object
+ 39, // 35: google.storage.v2.RewriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
+ 44, // 36: google.storage.v2.RewriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums
+ 46, // 37: google.storage.v2.RewriteResponse.resource:type_name -> google.storage.v2.Object
+ 16, // 38: google.storage.v2.StartResumableWriteRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec
+ 39, // 39: google.storage.v2.StartResumableWriteRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
+ 44, // 40: google.storage.v2.StartResumableWriteRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums
+ 46, // 41: google.storage.v2.UpdateObjectRequest.object:type_name -> google.storage.v2.Object
+ 80, // 42: google.storage.v2.UpdateObjectRequest.update_mask:type_name -> google.protobuf.FieldMask
+ 39, // 43: google.storage.v2.UpdateObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
+ 38, // 44: google.storage.v2.CreateHmacKeyResponse.metadata:type_name -> google.storage.v2.HmacKeyMetadata
+ 38, // 45: google.storage.v2.ListHmacKeysResponse.hmac_keys:type_name -> google.storage.v2.HmacKeyMetadata
+ 38, // 46: google.storage.v2.UpdateHmacKeyRequest.hmac_key:type_name -> google.storage.v2.HmacKeyMetadata
+ 80, // 47: google.storage.v2.UpdateHmacKeyRequest.update_mask:type_name -> google.protobuf.FieldMask
+ 81, // 48: google.storage.v2.HmacKeyMetadata.create_time:type_name -> google.protobuf.Timestamp
+ 81, // 49: google.storage.v2.HmacKeyMetadata.update_time:type_name -> google.protobuf.Timestamp
+ 42, // 50: google.storage.v2.Bucket.acl:type_name -> google.storage.v2.BucketAccessControl
+ 47, // 51: google.storage.v2.Bucket.default_object_acl:type_name -> google.storage.v2.ObjectAccessControl
64, // 52: google.storage.v2.Bucket.lifecycle:type_name -> google.storage.v2.Bucket.Lifecycle
81, // 53: google.storage.v2.Bucket.create_time:type_name -> google.protobuf.Timestamp
61, // 54: google.storage.v2.Bucket.cors:type_name -> google.storage.v2.Bucket.Cors
@@ -9121,7 +9135,7 @@ var file_google_storage_v2_storage_proto_depIdxs = []int32{
69, // 57: google.storage.v2.Bucket.website:type_name -> google.storage.v2.Bucket.Website
68, // 58: google.storage.v2.Bucket.versioning:type_name -> google.storage.v2.Bucket.Versioning
65, // 59: google.storage.v2.Bucket.logging:type_name -> google.storage.v2.Bucket.Logging
- 56, // 60: google.storage.v2.Bucket.owner:type_name -> google.storage.v2.Owner
+ 50, // 60: google.storage.v2.Bucket.owner:type_name -> google.storage.v2.Owner
62, // 61: google.storage.v2.Bucket.encryption:type_name -> google.storage.v2.Bucket.Encryption
60, // 62: google.storage.v2.Bucket.billing:type_name -> google.storage.v2.Bucket.Billing
66, // 63: google.storage.v2.Bucket.retention_policy:type_name -> google.storage.v2.Bucket.RetentionPolicy
@@ -9130,25 +9144,25 @@ var file_google_storage_v2_storage_proto_depIdxs = []int32{
71, // 66: google.storage.v2.Bucket.autoclass:type_name -> google.storage.v2.Bucket.Autoclass
72, // 67: google.storage.v2.Bucket.hierarchical_namespace:type_name -> google.storage.v2.Bucket.HierarchicalNamespace
67, // 68: google.storage.v2.Bucket.soft_delete_policy:type_name -> google.storage.v2.Bucket.SoftDeletePolicy
- 54, // 69: google.storage.v2.BucketAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam
- 81, // 70: google.storage.v2.HmacKeyMetadata.create_time:type_name -> google.protobuf.Timestamp
- 81, // 71: google.storage.v2.HmacKeyMetadata.update_time:type_name -> google.protobuf.Timestamp
- 78, // 72: google.storage.v2.NotificationConfig.custom_attributes:type_name -> google.storage.v2.NotificationConfig.CustomAttributesEntry
- 52, // 73: google.storage.v2.Object.acl:type_name -> google.storage.v2.ObjectAccessControl
- 81, // 74: google.storage.v2.Object.delete_time:type_name -> google.protobuf.Timestamp
- 81, // 75: google.storage.v2.Object.create_time:type_name -> google.protobuf.Timestamp
- 47, // 76: google.storage.v2.Object.checksums:type_name -> google.storage.v2.ObjectChecksums
- 81, // 77: google.storage.v2.Object.update_time:type_name -> google.protobuf.Timestamp
- 81, // 78: google.storage.v2.Object.update_storage_class_time:type_name -> google.protobuf.Timestamp
- 81, // 79: google.storage.v2.Object.retention_expire_time:type_name -> google.protobuf.Timestamp
- 79, // 80: google.storage.v2.Object.metadata:type_name -> google.storage.v2.Object.MetadataEntry
- 56, // 81: google.storage.v2.Object.owner:type_name -> google.storage.v2.Owner
- 50, // 82: google.storage.v2.Object.customer_encryption:type_name -> google.storage.v2.CustomerEncryption
- 81, // 83: google.storage.v2.Object.custom_time:type_name -> google.protobuf.Timestamp
- 81, // 84: google.storage.v2.Object.soft_delete_time:type_name -> google.protobuf.Timestamp
- 81, // 85: google.storage.v2.Object.hard_delete_time:type_name -> google.protobuf.Timestamp
- 54, // 86: google.storage.v2.ObjectAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam
- 51, // 87: google.storage.v2.ListObjectsResponse.objects:type_name -> google.storage.v2.Object
+ 49, // 69: google.storage.v2.BucketAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam
+ 47, // 70: google.storage.v2.Object.acl:type_name -> google.storage.v2.ObjectAccessControl
+ 81, // 71: google.storage.v2.Object.delete_time:type_name -> google.protobuf.Timestamp
+ 81, // 72: google.storage.v2.Object.create_time:type_name -> google.protobuf.Timestamp
+ 44, // 73: google.storage.v2.Object.checksums:type_name -> google.storage.v2.ObjectChecksums
+ 81, // 74: google.storage.v2.Object.update_time:type_name -> google.protobuf.Timestamp
+ 81, // 75: google.storage.v2.Object.update_storage_class_time:type_name -> google.protobuf.Timestamp
+ 81, // 76: google.storage.v2.Object.retention_expire_time:type_name -> google.protobuf.Timestamp
+ 78, // 77: google.storage.v2.Object.metadata:type_name -> google.storage.v2.Object.MetadataEntry
+ 50, // 78: google.storage.v2.Object.owner:type_name -> google.storage.v2.Owner
+ 45, // 79: google.storage.v2.Object.customer_encryption:type_name -> google.storage.v2.CustomerEncryption
+ 81, // 80: google.storage.v2.Object.custom_time:type_name -> google.protobuf.Timestamp
+ 81, // 81: google.storage.v2.Object.soft_delete_time:type_name -> google.protobuf.Timestamp
+ 81, // 82: google.storage.v2.Object.hard_delete_time:type_name -> google.protobuf.Timestamp
+ 49, // 83: google.storage.v2.ObjectAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam
+ 46, // 84: google.storage.v2.ListObjectsResponse.objects:type_name -> google.storage.v2.Object
+ 57, // 85: google.storage.v2.CreateNotificationConfigRequest.notification_config:type_name -> google.storage.v2.NotificationConfig
+ 57, // 86: google.storage.v2.ListNotificationConfigsResponse.notification_configs:type_name -> google.storage.v2.NotificationConfig
+ 79, // 87: google.storage.v2.NotificationConfig.custom_attributes:type_name -> google.storage.v2.NotificationConfig.CustomAttributesEntry
59, // 88: google.storage.v2.ComposeObjectRequest.SourceObject.object_preconditions:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions
74, // 89: google.storage.v2.Bucket.IamConfig.uniform_bucket_level_access:type_name -> google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess
75, // 90: google.storage.v2.Bucket.Lifecycle.rule:type_name -> google.storage.v2.Bucket.Lifecycle.Rule
@@ -9173,61 +9187,61 @@ var file_google_storage_v2_storage_proto_depIdxs = []int32{
85, // 109: google.storage.v2.Storage.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest
86, // 110: google.storage.v2.Storage.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest
7, // 111: google.storage.v2.Storage.UpdateBucket:input_type -> google.storage.v2.UpdateBucketRequest
- 8, // 112: google.storage.v2.Storage.DeleteNotificationConfig:input_type -> google.storage.v2.DeleteNotificationConfigRequest
- 9, // 113: google.storage.v2.Storage.GetNotificationConfig:input_type -> google.storage.v2.GetNotificationConfigRequest
- 10, // 114: google.storage.v2.Storage.CreateNotificationConfig:input_type -> google.storage.v2.CreateNotificationConfigRequest
- 11, // 115: google.storage.v2.Storage.ListNotificationConfigs:input_type -> google.storage.v2.ListNotificationConfigsRequest
- 13, // 116: google.storage.v2.Storage.ComposeObject:input_type -> google.storage.v2.ComposeObjectRequest
- 14, // 117: google.storage.v2.Storage.DeleteObject:input_type -> google.storage.v2.DeleteObjectRequest
- 15, // 118: google.storage.v2.Storage.RestoreObject:input_type -> google.storage.v2.RestoreObjectRequest
- 16, // 119: google.storage.v2.Storage.CancelResumableWrite:input_type -> google.storage.v2.CancelResumableWriteRequest
- 19, // 120: google.storage.v2.Storage.GetObject:input_type -> google.storage.v2.GetObjectRequest
- 18, // 121: google.storage.v2.Storage.ReadObject:input_type -> google.storage.v2.ReadObjectRequest
- 33, // 122: google.storage.v2.Storage.UpdateObject:input_type -> google.storage.v2.UpdateObjectRequest
- 22, // 123: google.storage.v2.Storage.WriteObject:input_type -> google.storage.v2.WriteObjectRequest
- 24, // 124: google.storage.v2.Storage.BidiWriteObject:input_type -> google.storage.v2.BidiWriteObjectRequest
- 26, // 125: google.storage.v2.Storage.ListObjects:input_type -> google.storage.v2.ListObjectsRequest
- 29, // 126: google.storage.v2.Storage.RewriteObject:input_type -> google.storage.v2.RewriteObjectRequest
- 31, // 127: google.storage.v2.Storage.StartResumableWrite:input_type -> google.storage.v2.StartResumableWriteRequest
- 27, // 128: google.storage.v2.Storage.QueryWriteStatus:input_type -> google.storage.v2.QueryWriteStatusRequest
- 34, // 129: google.storage.v2.Storage.GetServiceAccount:input_type -> google.storage.v2.GetServiceAccountRequest
- 35, // 130: google.storage.v2.Storage.CreateHmacKey:input_type -> google.storage.v2.CreateHmacKeyRequest
- 37, // 131: google.storage.v2.Storage.DeleteHmacKey:input_type -> google.storage.v2.DeleteHmacKeyRequest
- 38, // 132: google.storage.v2.Storage.GetHmacKey:input_type -> google.storage.v2.GetHmacKeyRequest
- 39, // 133: google.storage.v2.Storage.ListHmacKeys:input_type -> google.storage.v2.ListHmacKeysRequest
- 41, // 134: google.storage.v2.Storage.UpdateHmacKey:input_type -> google.storage.v2.UpdateHmacKeyRequest
+ 8, // 112: google.storage.v2.Storage.ComposeObject:input_type -> google.storage.v2.ComposeObjectRequest
+ 9, // 113: google.storage.v2.Storage.DeleteObject:input_type -> google.storage.v2.DeleteObjectRequest
+ 10, // 114: google.storage.v2.Storage.RestoreObject:input_type -> google.storage.v2.RestoreObjectRequest
+ 11, // 115: google.storage.v2.Storage.CancelResumableWrite:input_type -> google.storage.v2.CancelResumableWriteRequest
+ 14, // 116: google.storage.v2.Storage.GetObject:input_type -> google.storage.v2.GetObjectRequest
+ 13, // 117: google.storage.v2.Storage.ReadObject:input_type -> google.storage.v2.ReadObjectRequest
+ 28, // 118: google.storage.v2.Storage.UpdateObject:input_type -> google.storage.v2.UpdateObjectRequest
+ 17, // 119: google.storage.v2.Storage.WriteObject:input_type -> google.storage.v2.WriteObjectRequest
+ 19, // 120: google.storage.v2.Storage.BidiWriteObject:input_type -> google.storage.v2.BidiWriteObjectRequest
+ 21, // 121: google.storage.v2.Storage.ListObjects:input_type -> google.storage.v2.ListObjectsRequest
+ 24, // 122: google.storage.v2.Storage.RewriteObject:input_type -> google.storage.v2.RewriteObjectRequest
+ 26, // 123: google.storage.v2.Storage.StartResumableWrite:input_type -> google.storage.v2.StartResumableWriteRequest
+ 22, // 124: google.storage.v2.Storage.QueryWriteStatus:input_type -> google.storage.v2.QueryWriteStatusRequest
+ 29, // 125: google.storage.v2.Storage.GetServiceAccount:input_type -> google.storage.v2.GetServiceAccountRequest
+ 31, // 126: google.storage.v2.Storage.CreateHmacKey:input_type -> google.storage.v2.CreateHmacKeyRequest
+ 33, // 127: google.storage.v2.Storage.DeleteHmacKey:input_type -> google.storage.v2.DeleteHmacKeyRequest
+ 34, // 128: google.storage.v2.Storage.GetHmacKey:input_type -> google.storage.v2.GetHmacKeyRequest
+ 35, // 129: google.storage.v2.Storage.ListHmacKeys:input_type -> google.storage.v2.ListHmacKeysRequest
+ 37, // 130: google.storage.v2.Storage.UpdateHmacKey:input_type -> google.storage.v2.UpdateHmacKeyRequest
+ 52, // 131: google.storage.v2.Storage.DeleteNotificationConfig:input_type -> google.storage.v2.DeleteNotificationConfigRequest
+ 53, // 132: google.storage.v2.Storage.GetNotificationConfig:input_type -> google.storage.v2.GetNotificationConfigRequest
+ 54, // 133: google.storage.v2.Storage.CreateNotificationConfig:input_type -> google.storage.v2.CreateNotificationConfigRequest
+ 55, // 134: google.storage.v2.Storage.ListNotificationConfigs:input_type -> google.storage.v2.ListNotificationConfigsRequest
87, // 135: google.storage.v2.Storage.DeleteBucket:output_type -> google.protobuf.Empty
- 44, // 136: google.storage.v2.Storage.GetBucket:output_type -> google.storage.v2.Bucket
- 44, // 137: google.storage.v2.Storage.CreateBucket:output_type -> google.storage.v2.Bucket
+ 41, // 136: google.storage.v2.Storage.GetBucket:output_type -> google.storage.v2.Bucket
+ 41, // 137: google.storage.v2.Storage.CreateBucket:output_type -> google.storage.v2.Bucket
5, // 138: google.storage.v2.Storage.ListBuckets:output_type -> google.storage.v2.ListBucketsResponse
- 44, // 139: google.storage.v2.Storage.LockBucketRetentionPolicy:output_type -> google.storage.v2.Bucket
+ 41, // 139: google.storage.v2.Storage.LockBucketRetentionPolicy:output_type -> google.storage.v2.Bucket
88, // 140: google.storage.v2.Storage.GetIamPolicy:output_type -> google.iam.v1.Policy
88, // 141: google.storage.v2.Storage.SetIamPolicy:output_type -> google.iam.v1.Policy
89, // 142: google.storage.v2.Storage.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse
- 44, // 143: google.storage.v2.Storage.UpdateBucket:output_type -> google.storage.v2.Bucket
- 87, // 144: google.storage.v2.Storage.DeleteNotificationConfig:output_type -> google.protobuf.Empty
- 49, // 145: google.storage.v2.Storage.GetNotificationConfig:output_type -> google.storage.v2.NotificationConfig
- 49, // 146: google.storage.v2.Storage.CreateNotificationConfig:output_type -> google.storage.v2.NotificationConfig
- 12, // 147: google.storage.v2.Storage.ListNotificationConfigs:output_type -> google.storage.v2.ListNotificationConfigsResponse
- 51, // 148: google.storage.v2.Storage.ComposeObject:output_type -> google.storage.v2.Object
- 87, // 149: google.storage.v2.Storage.DeleteObject:output_type -> google.protobuf.Empty
- 51, // 150: google.storage.v2.Storage.RestoreObject:output_type -> google.storage.v2.Object
- 17, // 151: google.storage.v2.Storage.CancelResumableWrite:output_type -> google.storage.v2.CancelResumableWriteResponse
- 51, // 152: google.storage.v2.Storage.GetObject:output_type -> google.storage.v2.Object
- 20, // 153: google.storage.v2.Storage.ReadObject:output_type -> google.storage.v2.ReadObjectResponse
- 51, // 154: google.storage.v2.Storage.UpdateObject:output_type -> google.storage.v2.Object
- 23, // 155: google.storage.v2.Storage.WriteObject:output_type -> google.storage.v2.WriteObjectResponse
- 25, // 156: google.storage.v2.Storage.BidiWriteObject:output_type -> google.storage.v2.BidiWriteObjectResponse
- 53, // 157: google.storage.v2.Storage.ListObjects:output_type -> google.storage.v2.ListObjectsResponse
- 30, // 158: google.storage.v2.Storage.RewriteObject:output_type -> google.storage.v2.RewriteResponse
- 32, // 159: google.storage.v2.Storage.StartResumableWrite:output_type -> google.storage.v2.StartResumableWriteResponse
- 28, // 160: google.storage.v2.Storage.QueryWriteStatus:output_type -> google.storage.v2.QueryWriteStatusResponse
- 55, // 161: google.storage.v2.Storage.GetServiceAccount:output_type -> google.storage.v2.ServiceAccount
- 36, // 162: google.storage.v2.Storage.CreateHmacKey:output_type -> google.storage.v2.CreateHmacKeyResponse
- 87, // 163: google.storage.v2.Storage.DeleteHmacKey:output_type -> google.protobuf.Empty
- 48, // 164: google.storage.v2.Storage.GetHmacKey:output_type -> google.storage.v2.HmacKeyMetadata
- 40, // 165: google.storage.v2.Storage.ListHmacKeys:output_type -> google.storage.v2.ListHmacKeysResponse
- 48, // 166: google.storage.v2.Storage.UpdateHmacKey:output_type -> google.storage.v2.HmacKeyMetadata
+ 41, // 143: google.storage.v2.Storage.UpdateBucket:output_type -> google.storage.v2.Bucket
+ 46, // 144: google.storage.v2.Storage.ComposeObject:output_type -> google.storage.v2.Object
+ 87, // 145: google.storage.v2.Storage.DeleteObject:output_type -> google.protobuf.Empty
+ 46, // 146: google.storage.v2.Storage.RestoreObject:output_type -> google.storage.v2.Object
+ 12, // 147: google.storage.v2.Storage.CancelResumableWrite:output_type -> google.storage.v2.CancelResumableWriteResponse
+ 46, // 148: google.storage.v2.Storage.GetObject:output_type -> google.storage.v2.Object
+ 15, // 149: google.storage.v2.Storage.ReadObject:output_type -> google.storage.v2.ReadObjectResponse
+ 46, // 150: google.storage.v2.Storage.UpdateObject:output_type -> google.storage.v2.Object
+ 18, // 151: google.storage.v2.Storage.WriteObject:output_type -> google.storage.v2.WriteObjectResponse
+ 20, // 152: google.storage.v2.Storage.BidiWriteObject:output_type -> google.storage.v2.BidiWriteObjectResponse
+ 48, // 153: google.storage.v2.Storage.ListObjects:output_type -> google.storage.v2.ListObjectsResponse
+ 25, // 154: google.storage.v2.Storage.RewriteObject:output_type -> google.storage.v2.RewriteResponse
+ 27, // 155: google.storage.v2.Storage.StartResumableWrite:output_type -> google.storage.v2.StartResumableWriteResponse
+ 23, // 156: google.storage.v2.Storage.QueryWriteStatus:output_type -> google.storage.v2.QueryWriteStatusResponse
+ 30, // 157: google.storage.v2.Storage.GetServiceAccount:output_type -> google.storage.v2.ServiceAccount
+ 32, // 158: google.storage.v2.Storage.CreateHmacKey:output_type -> google.storage.v2.CreateHmacKeyResponse
+ 87, // 159: google.storage.v2.Storage.DeleteHmacKey:output_type -> google.protobuf.Empty
+ 38, // 160: google.storage.v2.Storage.GetHmacKey:output_type -> google.storage.v2.HmacKeyMetadata
+ 36, // 161: google.storage.v2.Storage.ListHmacKeys:output_type -> google.storage.v2.ListHmacKeysResponse
+ 38, // 162: google.storage.v2.Storage.UpdateHmacKey:output_type -> google.storage.v2.HmacKeyMetadata
+ 87, // 163: google.storage.v2.Storage.DeleteNotificationConfig:output_type -> google.protobuf.Empty
+ 57, // 164: google.storage.v2.Storage.GetNotificationConfig:output_type -> google.storage.v2.NotificationConfig
+ 57, // 165: google.storage.v2.Storage.CreateNotificationConfig:output_type -> google.storage.v2.NotificationConfig
+ 56, // 166: google.storage.v2.Storage.ListNotificationConfigs:output_type -> google.storage.v2.ListNotificationConfigsResponse
135, // [135:167] is the sub-list for method output_type
103, // [103:135] is the sub-list for method input_type
103, // [103:103] is the sub-list for extension type_name
@@ -9326,7 +9340,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[7].Exporter = func(v any, i int) any {
- switch v := v.(*DeleteNotificationConfigRequest); i {
+ switch v := v.(*ComposeObjectRequest); i {
case 0:
return &v.state
case 1:
@@ -9338,7 +9352,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[8].Exporter = func(v any, i int) any {
- switch v := v.(*GetNotificationConfigRequest); i {
+ switch v := v.(*DeleteObjectRequest); i {
case 0:
return &v.state
case 1:
@@ -9350,7 +9364,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[9].Exporter = func(v any, i int) any {
- switch v := v.(*CreateNotificationConfigRequest); i {
+ switch v := v.(*RestoreObjectRequest); i {
case 0:
return &v.state
case 1:
@@ -9362,7 +9376,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[10].Exporter = func(v any, i int) any {
- switch v := v.(*ListNotificationConfigsRequest); i {
+ switch v := v.(*CancelResumableWriteRequest); i {
case 0:
return &v.state
case 1:
@@ -9374,7 +9388,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[11].Exporter = func(v any, i int) any {
- switch v := v.(*ListNotificationConfigsResponse); i {
+ switch v := v.(*CancelResumableWriteResponse); i {
case 0:
return &v.state
case 1:
@@ -9386,7 +9400,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[12].Exporter = func(v any, i int) any {
- switch v := v.(*ComposeObjectRequest); i {
+ switch v := v.(*ReadObjectRequest); i {
case 0:
return &v.state
case 1:
@@ -9398,7 +9412,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[13].Exporter = func(v any, i int) any {
- switch v := v.(*DeleteObjectRequest); i {
+ switch v := v.(*GetObjectRequest); i {
case 0:
return &v.state
case 1:
@@ -9410,7 +9424,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[14].Exporter = func(v any, i int) any {
- switch v := v.(*RestoreObjectRequest); i {
+ switch v := v.(*ReadObjectResponse); i {
case 0:
return &v.state
case 1:
@@ -9422,7 +9436,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[15].Exporter = func(v any, i int) any {
- switch v := v.(*CancelResumableWriteRequest); i {
+ switch v := v.(*WriteObjectSpec); i {
case 0:
return &v.state
case 1:
@@ -9434,7 +9448,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[16].Exporter = func(v any, i int) any {
- switch v := v.(*CancelResumableWriteResponse); i {
+ switch v := v.(*WriteObjectRequest); i {
case 0:
return &v.state
case 1:
@@ -9446,7 +9460,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[17].Exporter = func(v any, i int) any {
- switch v := v.(*ReadObjectRequest); i {
+ switch v := v.(*WriteObjectResponse); i {
case 0:
return &v.state
case 1:
@@ -9458,7 +9472,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[18].Exporter = func(v any, i int) any {
- switch v := v.(*GetObjectRequest); i {
+ switch v := v.(*BidiWriteObjectRequest); i {
case 0:
return &v.state
case 1:
@@ -9470,7 +9484,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[19].Exporter = func(v any, i int) any {
- switch v := v.(*ReadObjectResponse); i {
+ switch v := v.(*BidiWriteObjectResponse); i {
case 0:
return &v.state
case 1:
@@ -9482,7 +9496,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[20].Exporter = func(v any, i int) any {
- switch v := v.(*WriteObjectSpec); i {
+ switch v := v.(*ListObjectsRequest); i {
case 0:
return &v.state
case 1:
@@ -9494,7 +9508,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[21].Exporter = func(v any, i int) any {
- switch v := v.(*WriteObjectRequest); i {
+ switch v := v.(*QueryWriteStatusRequest); i {
case 0:
return &v.state
case 1:
@@ -9506,7 +9520,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[22].Exporter = func(v any, i int) any {
- switch v := v.(*WriteObjectResponse); i {
+ switch v := v.(*QueryWriteStatusResponse); i {
case 0:
return &v.state
case 1:
@@ -9518,7 +9532,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[23].Exporter = func(v any, i int) any {
- switch v := v.(*BidiWriteObjectRequest); i {
+ switch v := v.(*RewriteObjectRequest); i {
case 0:
return &v.state
case 1:
@@ -9530,7 +9544,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[24].Exporter = func(v any, i int) any {
- switch v := v.(*BidiWriteObjectResponse); i {
+ switch v := v.(*RewriteResponse); i {
case 0:
return &v.state
case 1:
@@ -9542,7 +9556,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[25].Exporter = func(v any, i int) any {
- switch v := v.(*ListObjectsRequest); i {
+ switch v := v.(*StartResumableWriteRequest); i {
case 0:
return &v.state
case 1:
@@ -9554,7 +9568,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[26].Exporter = func(v any, i int) any {
- switch v := v.(*QueryWriteStatusRequest); i {
+ switch v := v.(*StartResumableWriteResponse); i {
case 0:
return &v.state
case 1:
@@ -9566,7 +9580,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[27].Exporter = func(v any, i int) any {
- switch v := v.(*QueryWriteStatusResponse); i {
+ switch v := v.(*UpdateObjectRequest); i {
case 0:
return &v.state
case 1:
@@ -9578,7 +9592,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[28].Exporter = func(v any, i int) any {
- switch v := v.(*RewriteObjectRequest); i {
+ switch v := v.(*GetServiceAccountRequest); i {
case 0:
return &v.state
case 1:
@@ -9590,7 +9604,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[29].Exporter = func(v any, i int) any {
- switch v := v.(*RewriteResponse); i {
+ switch v := v.(*ServiceAccount); i {
case 0:
return &v.state
case 1:
@@ -9602,7 +9616,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[30].Exporter = func(v any, i int) any {
- switch v := v.(*StartResumableWriteRequest); i {
+ switch v := v.(*CreateHmacKeyRequest); i {
case 0:
return &v.state
case 1:
@@ -9614,7 +9628,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[31].Exporter = func(v any, i int) any {
- switch v := v.(*StartResumableWriteResponse); i {
+ switch v := v.(*CreateHmacKeyResponse); i {
case 0:
return &v.state
case 1:
@@ -9626,7 +9640,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[32].Exporter = func(v any, i int) any {
- switch v := v.(*UpdateObjectRequest); i {
+ switch v := v.(*DeleteHmacKeyRequest); i {
case 0:
return &v.state
case 1:
@@ -9638,7 +9652,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[33].Exporter = func(v any, i int) any {
- switch v := v.(*GetServiceAccountRequest); i {
+ switch v := v.(*GetHmacKeyRequest); i {
case 0:
return &v.state
case 1:
@@ -9650,7 +9664,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[34].Exporter = func(v any, i int) any {
- switch v := v.(*CreateHmacKeyRequest); i {
+ switch v := v.(*ListHmacKeysRequest); i {
case 0:
return &v.state
case 1:
@@ -9662,7 +9676,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[35].Exporter = func(v any, i int) any {
- switch v := v.(*CreateHmacKeyResponse); i {
+ switch v := v.(*ListHmacKeysResponse); i {
case 0:
return &v.state
case 1:
@@ -9674,7 +9688,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[36].Exporter = func(v any, i int) any {
- switch v := v.(*DeleteHmacKeyRequest); i {
+ switch v := v.(*UpdateHmacKeyRequest); i {
case 0:
return &v.state
case 1:
@@ -9686,7 +9700,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[37].Exporter = func(v any, i int) any {
- switch v := v.(*GetHmacKeyRequest); i {
+ switch v := v.(*HmacKeyMetadata); i {
case 0:
return &v.state
case 1:
@@ -9698,7 +9712,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[38].Exporter = func(v any, i int) any {
- switch v := v.(*ListHmacKeysRequest); i {
+ switch v := v.(*CommonObjectRequestParams); i {
case 0:
return &v.state
case 1:
@@ -9710,7 +9724,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[39].Exporter = func(v any, i int) any {
- switch v := v.(*ListHmacKeysResponse); i {
+ switch v := v.(*ServiceConstants); i {
case 0:
return &v.state
case 1:
@@ -9722,7 +9736,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[40].Exporter = func(v any, i int) any {
- switch v := v.(*UpdateHmacKeyRequest); i {
+ switch v := v.(*Bucket); i {
case 0:
return &v.state
case 1:
@@ -9734,7 +9748,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[41].Exporter = func(v any, i int) any {
- switch v := v.(*CommonObjectRequestParams); i {
+ switch v := v.(*BucketAccessControl); i {
case 0:
return &v.state
case 1:
@@ -9746,7 +9760,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[42].Exporter = func(v any, i int) any {
- switch v := v.(*ServiceConstants); i {
+ switch v := v.(*ChecksummedData); i {
case 0:
return &v.state
case 1:
@@ -9758,7 +9772,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[43].Exporter = func(v any, i int) any {
- switch v := v.(*Bucket); i {
+ switch v := v.(*ObjectChecksums); i {
case 0:
return &v.state
case 1:
@@ -9770,7 +9784,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[44].Exporter = func(v any, i int) any {
- switch v := v.(*BucketAccessControl); i {
+ switch v := v.(*CustomerEncryption); i {
case 0:
return &v.state
case 1:
@@ -9782,7 +9796,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[45].Exporter = func(v any, i int) any {
- switch v := v.(*ChecksummedData); i {
+ switch v := v.(*Object); i {
case 0:
return &v.state
case 1:
@@ -9794,7 +9808,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[46].Exporter = func(v any, i int) any {
- switch v := v.(*ObjectChecksums); i {
+ switch v := v.(*ObjectAccessControl); i {
case 0:
return &v.state
case 1:
@@ -9806,7 +9820,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[47].Exporter = func(v any, i int) any {
- switch v := v.(*HmacKeyMetadata); i {
+ switch v := v.(*ListObjectsResponse); i {
case 0:
return &v.state
case 1:
@@ -9818,7 +9832,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[48].Exporter = func(v any, i int) any {
- switch v := v.(*NotificationConfig); i {
+ switch v := v.(*ProjectTeam); i {
case 0:
return &v.state
case 1:
@@ -9830,7 +9844,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[49].Exporter = func(v any, i int) any {
- switch v := v.(*CustomerEncryption); i {
+ switch v := v.(*Owner); i {
case 0:
return &v.state
case 1:
@@ -9842,7 +9856,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[50].Exporter = func(v any, i int) any {
- switch v := v.(*Object); i {
+ switch v := v.(*ContentRange); i {
case 0:
return &v.state
case 1:
@@ -9854,7 +9868,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[51].Exporter = func(v any, i int) any {
- switch v := v.(*ObjectAccessControl); i {
+ switch v := v.(*DeleteNotificationConfigRequest); i {
case 0:
return &v.state
case 1:
@@ -9866,7 +9880,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[52].Exporter = func(v any, i int) any {
- switch v := v.(*ListObjectsResponse); i {
+ switch v := v.(*GetNotificationConfigRequest); i {
case 0:
return &v.state
case 1:
@@ -9878,7 +9892,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[53].Exporter = func(v any, i int) any {
- switch v := v.(*ProjectTeam); i {
+ switch v := v.(*CreateNotificationConfigRequest); i {
case 0:
return &v.state
case 1:
@@ -9890,7 +9904,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[54].Exporter = func(v any, i int) any {
- switch v := v.(*ServiceAccount); i {
+ switch v := v.(*ListNotificationConfigsRequest); i {
case 0:
return &v.state
case 1:
@@ -9902,7 +9916,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[55].Exporter = func(v any, i int) any {
- switch v := v.(*Owner); i {
+ switch v := v.(*ListNotificationConfigsResponse); i {
case 0:
return &v.state
case 1:
@@ -9914,7 +9928,7 @@ func file_google_storage_v2_storage_proto_init() {
}
}
file_google_storage_v2_storage_proto_msgTypes[56].Exporter = func(v any, i int) any {
- switch v := v.(*ContentRange); i {
+ switch v := v.(*NotificationConfig); i {
case 0:
return &v.state
case 1:
@@ -10158,40 +10172,40 @@ func file_google_storage_v2_storage_proto_init() {
file_google_storage_v2_storage_proto_msgTypes[1].OneofWrappers = []any{}
file_google_storage_v2_storage_proto_msgTypes[3].OneofWrappers = []any{}
file_google_storage_v2_storage_proto_msgTypes[6].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[7].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[8].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[9].OneofWrappers = []any{}
file_google_storage_v2_storage_proto_msgTypes[12].OneofWrappers = []any{}
file_google_storage_v2_storage_proto_msgTypes[13].OneofWrappers = []any{}
- file_google_storage_v2_storage_proto_msgTypes[14].OneofWrappers = []any{}
- file_google_storage_v2_storage_proto_msgTypes[17].OneofWrappers = []any{}
- file_google_storage_v2_storage_proto_msgTypes[18].OneofWrappers = []any{}
- file_google_storage_v2_storage_proto_msgTypes[20].OneofWrappers = []any{}
- file_google_storage_v2_storage_proto_msgTypes[21].OneofWrappers = []any{
+ file_google_storage_v2_storage_proto_msgTypes[15].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[16].OneofWrappers = []any{
(*WriteObjectRequest_UploadId)(nil),
(*WriteObjectRequest_WriteObjectSpec)(nil),
(*WriteObjectRequest_ChecksummedData)(nil),
}
- file_google_storage_v2_storage_proto_msgTypes[22].OneofWrappers = []any{
+ file_google_storage_v2_storage_proto_msgTypes[17].OneofWrappers = []any{
(*WriteObjectResponse_PersistedSize)(nil),
(*WriteObjectResponse_Resource)(nil),
}
- file_google_storage_v2_storage_proto_msgTypes[23].OneofWrappers = []any{
+ file_google_storage_v2_storage_proto_msgTypes[18].OneofWrappers = []any{
(*BidiWriteObjectRequest_UploadId)(nil),
(*BidiWriteObjectRequest_WriteObjectSpec)(nil),
(*BidiWriteObjectRequest_ChecksummedData)(nil),
}
- file_google_storage_v2_storage_proto_msgTypes[24].OneofWrappers = []any{
+ file_google_storage_v2_storage_proto_msgTypes[19].OneofWrappers = []any{
(*BidiWriteObjectResponse_PersistedSize)(nil),
(*BidiWriteObjectResponse_Resource)(nil),
}
- file_google_storage_v2_storage_proto_msgTypes[25].OneofWrappers = []any{}
- file_google_storage_v2_storage_proto_msgTypes[27].OneofWrappers = []any{
+ file_google_storage_v2_storage_proto_msgTypes[20].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[22].OneofWrappers = []any{
(*QueryWriteStatusResponse_PersistedSize)(nil),
(*QueryWriteStatusResponse_Resource)(nil),
}
- file_google_storage_v2_storage_proto_msgTypes[28].OneofWrappers = []any{}
- file_google_storage_v2_storage_proto_msgTypes[32].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[23].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[27].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[42].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[43].OneofWrappers = []any{}
file_google_storage_v2_storage_proto_msgTypes[45].OneofWrappers = []any{}
- file_google_storage_v2_storage_proto_msgTypes[46].OneofWrappers = []any{}
- file_google_storage_v2_storage_proto_msgTypes[50].OneofWrappers = []any{}
file_google_storage_v2_storage_proto_msgTypes[58].OneofWrappers = []any{}
file_google_storage_v2_storage_proto_msgTypes[66].OneofWrappers = []any{}
file_google_storage_v2_storage_proto_msgTypes[70].OneofWrappers = []any{}
@@ -10247,25 +10261,16 @@ type StorageClient interface {
// The `resource` field in the request should be
// `projects/_/buckets/{bucket}`.
SetIamPolicy(ctx context.Context, in *iampb.SetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error)
- // Tests a set of permissions on the given bucket or object to see which, if
- // any, are held by the caller.
+ // Tests a set of permissions on the given bucket, object, or managed folder
+ // to see which, if any, are held by the caller.
// The `resource` field in the request should be
- // `projects/_/buckets/{bucket}` for a bucket or
- // `projects/_/buckets/{bucket}/objects/{object}` for an object.
+ // `projects/_/buckets/{bucket}` for a bucket,
+ // `projects/_/buckets/{bucket}/objects/{object}` for an object, or
+ // `projects/_/buckets/{bucket}/managedFolders/{managedFolder}`
+ // for a managed folder.
TestIamPermissions(ctx context.Context, in *iampb.TestIamPermissionsRequest, opts ...grpc.CallOption) (*iampb.TestIamPermissionsResponse, error)
// Updates a bucket. Equivalent to JSON API's storage.buckets.patch method.
UpdateBucket(ctx context.Context, in *UpdateBucketRequest, opts ...grpc.CallOption) (*Bucket, error)
- // Permanently deletes a NotificationConfig.
- DeleteNotificationConfig(ctx context.Context, in *DeleteNotificationConfigRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
- // View a NotificationConfig.
- GetNotificationConfig(ctx context.Context, in *GetNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error)
- // Creates a NotificationConfig for a given bucket.
- // These NotificationConfigs, when triggered, publish messages to the
- // specified Pub/Sub topics. See
- // https://cloud.google.com/storage/docs/pubsub-notifications.
- CreateNotificationConfig(ctx context.Context, in *CreateNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error)
- // Retrieves a list of NotificationConfigs for a given bucket.
- ListNotificationConfigs(ctx context.Context, in *ListNotificationConfigsRequest, opts ...grpc.CallOption) (*ListNotificationConfigsResponse, error)
// Concatenates a list of existing objects into a new object in the same
// bucket.
ComposeObject(ctx context.Context, in *ComposeObjectRequest, opts ...grpc.CallOption) (*Object, error)
@@ -10393,18 +10398,39 @@ type StorageClient interface {
// object name, the sequence of returned `persisted_size` values will be
// non-decreasing.
QueryWriteStatus(ctx context.Context, in *QueryWriteStatusRequest, opts ...grpc.CallOption) (*QueryWriteStatusResponse, error)
+ // Deprecated: Do not use.
// Retrieves the name of a project's Google Cloud Storage service account.
GetServiceAccount(ctx context.Context, in *GetServiceAccountRequest, opts ...grpc.CallOption) (*ServiceAccount, error)
+ // Deprecated: Do not use.
// Creates a new HMAC key for the given service account.
CreateHmacKey(ctx context.Context, in *CreateHmacKeyRequest, opts ...grpc.CallOption) (*CreateHmacKeyResponse, error)
+ // Deprecated: Do not use.
// Deletes a given HMAC key. Key must be in an INACTIVE state.
DeleteHmacKey(ctx context.Context, in *DeleteHmacKeyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ // Deprecated: Do not use.
// Gets an existing HMAC key metadata for the given id.
GetHmacKey(ctx context.Context, in *GetHmacKeyRequest, opts ...grpc.CallOption) (*HmacKeyMetadata, error)
+ // Deprecated: Do not use.
// Lists HMAC keys under a given project with the additional filters provided.
ListHmacKeys(ctx context.Context, in *ListHmacKeysRequest, opts ...grpc.CallOption) (*ListHmacKeysResponse, error)
+ // Deprecated: Do not use.
// Updates a given HMAC key state between ACTIVE and INACTIVE.
UpdateHmacKey(ctx context.Context, in *UpdateHmacKeyRequest, opts ...grpc.CallOption) (*HmacKeyMetadata, error)
+ // Deprecated: Do not use.
+ // Permanently deletes a NotificationConfig.
+ DeleteNotificationConfig(ctx context.Context, in *DeleteNotificationConfigRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ // Deprecated: Do not use.
+ // View a NotificationConfig.
+ GetNotificationConfig(ctx context.Context, in *GetNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error)
+ // Deprecated: Do not use.
+ // Creates a NotificationConfig for a given bucket.
+ // These NotificationConfigs, when triggered, publish messages to the
+ // specified Pub/Sub topics. See
+ // https://cloud.google.com/storage/docs/pubsub-notifications.
+ CreateNotificationConfig(ctx context.Context, in *CreateNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error)
+ // Deprecated: Do not use.
+ // Retrieves a list of NotificationConfigs for a given bucket.
+ ListNotificationConfigs(ctx context.Context, in *ListNotificationConfigsRequest, opts ...grpc.CallOption) (*ListNotificationConfigsResponse, error)
}
type storageClient struct {
@@ -10496,42 +10522,6 @@ func (c *storageClient) UpdateBucket(ctx context.Context, in *UpdateBucketReques
return out, nil
}
-func (c *storageClient) DeleteNotificationConfig(ctx context.Context, in *DeleteNotificationConfigRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
- out := new(emptypb.Empty)
- err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteNotificationConfig", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageClient) GetNotificationConfig(ctx context.Context, in *GetNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error) {
- out := new(NotificationConfig)
- err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetNotificationConfig", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageClient) CreateNotificationConfig(ctx context.Context, in *CreateNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error) {
- out := new(NotificationConfig)
- err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/CreateNotificationConfig", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageClient) ListNotificationConfigs(ctx context.Context, in *ListNotificationConfigsRequest, opts ...grpc.CallOption) (*ListNotificationConfigsResponse, error) {
- out := new(ListNotificationConfigsResponse)
- err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ListNotificationConfigs", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
func (c *storageClient) ComposeObject(ctx context.Context, in *ComposeObjectRequest, opts ...grpc.CallOption) (*Object, error) {
out := new(Object)
err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ComposeObject", in, out, opts...)
@@ -10719,54 +10709,100 @@ func (c *storageClient) QueryWriteStatus(ctx context.Context, in *QueryWriteStat
return out, nil
}
-func (c *storageClient) GetServiceAccount(ctx context.Context, in *GetServiceAccountRequest, opts ...grpc.CallOption) (*ServiceAccount, error) {
- out := new(ServiceAccount)
- err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetServiceAccount", in, out, opts...)
+// Deprecated: Do not use.
+func (c *storageClient) GetServiceAccount(ctx context.Context, in *GetServiceAccountRequest, opts ...grpc.CallOption) (*ServiceAccount, error) {
+ out := new(ServiceAccount)
+ err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetServiceAccount", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// Deprecated: Do not use.
+func (c *storageClient) CreateHmacKey(ctx context.Context, in *CreateHmacKeyRequest, opts ...grpc.CallOption) (*CreateHmacKeyResponse, error) {
+ out := new(CreateHmacKeyResponse)
+ err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/CreateHmacKey", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// Deprecated: Do not use.
+func (c *storageClient) DeleteHmacKey(ctx context.Context, in *DeleteHmacKeyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteHmacKey", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// Deprecated: Do not use.
+func (c *storageClient) GetHmacKey(ctx context.Context, in *GetHmacKeyRequest, opts ...grpc.CallOption) (*HmacKeyMetadata, error) {
+ out := new(HmacKeyMetadata)
+ err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetHmacKey", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// Deprecated: Do not use.
+func (c *storageClient) ListHmacKeys(ctx context.Context, in *ListHmacKeysRequest, opts ...grpc.CallOption) (*ListHmacKeysResponse, error) {
+ out := new(ListHmacKeysResponse)
+ err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ListHmacKeys", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
-func (c *storageClient) CreateHmacKey(ctx context.Context, in *CreateHmacKeyRequest, opts ...grpc.CallOption) (*CreateHmacKeyResponse, error) {
- out := new(CreateHmacKeyResponse)
- err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/CreateHmacKey", in, out, opts...)
+// Deprecated: Do not use.
+func (c *storageClient) UpdateHmacKey(ctx context.Context, in *UpdateHmacKeyRequest, opts ...grpc.CallOption) (*HmacKeyMetadata, error) {
+ out := new(HmacKeyMetadata)
+ err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/UpdateHmacKey", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
-func (c *storageClient) DeleteHmacKey(ctx context.Context, in *DeleteHmacKeyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+// Deprecated: Do not use.
+func (c *storageClient) DeleteNotificationConfig(ctx context.Context, in *DeleteNotificationConfigRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
out := new(emptypb.Empty)
- err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteHmacKey", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteNotificationConfig", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
-func (c *storageClient) GetHmacKey(ctx context.Context, in *GetHmacKeyRequest, opts ...grpc.CallOption) (*HmacKeyMetadata, error) {
- out := new(HmacKeyMetadata)
- err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetHmacKey", in, out, opts...)
+// Deprecated: Do not use.
+func (c *storageClient) GetNotificationConfig(ctx context.Context, in *GetNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error) {
+ out := new(NotificationConfig)
+ err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetNotificationConfig", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
-func (c *storageClient) ListHmacKeys(ctx context.Context, in *ListHmacKeysRequest, opts ...grpc.CallOption) (*ListHmacKeysResponse, error) {
- out := new(ListHmacKeysResponse)
- err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ListHmacKeys", in, out, opts...)
+// Deprecated: Do not use.
+func (c *storageClient) CreateNotificationConfig(ctx context.Context, in *CreateNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error) {
+ out := new(NotificationConfig)
+ err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/CreateNotificationConfig", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
-func (c *storageClient) UpdateHmacKey(ctx context.Context, in *UpdateHmacKeyRequest, opts ...grpc.CallOption) (*HmacKeyMetadata, error) {
- out := new(HmacKeyMetadata)
- err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/UpdateHmacKey", in, out, opts...)
+// Deprecated: Do not use.
+func (c *storageClient) ListNotificationConfigs(ctx context.Context, in *ListNotificationConfigsRequest, opts ...grpc.CallOption) (*ListNotificationConfigsResponse, error) {
+ out := new(ListNotificationConfigsResponse)
+ err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ListNotificationConfigs", in, out, opts...)
if err != nil {
return nil, err
}
@@ -10793,25 +10829,16 @@ type StorageServer interface {
// The `resource` field in the request should be
// `projects/_/buckets/{bucket}`.
SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest) (*iampb.Policy, error)
- // Tests a set of permissions on the given bucket or object to see which, if
- // any, are held by the caller.
+ // Tests a set of permissions on the given bucket, object, or managed folder
+ // to see which, if any, are held by the caller.
// The `resource` field in the request should be
- // `projects/_/buckets/{bucket}` for a bucket or
- // `projects/_/buckets/{bucket}/objects/{object}` for an object.
+ // `projects/_/buckets/{bucket}` for a bucket,
+ // `projects/_/buckets/{bucket}/objects/{object}` for an object, or
+ // `projects/_/buckets/{bucket}/managedFolders/{managedFolder}`
+ // for a managed folder.
TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error)
// Updates a bucket. Equivalent to JSON API's storage.buckets.patch method.
UpdateBucket(context.Context, *UpdateBucketRequest) (*Bucket, error)
- // Permanently deletes a NotificationConfig.
- DeleteNotificationConfig(context.Context, *DeleteNotificationConfigRequest) (*emptypb.Empty, error)
- // View a NotificationConfig.
- GetNotificationConfig(context.Context, *GetNotificationConfigRequest) (*NotificationConfig, error)
- // Creates a NotificationConfig for a given bucket.
- // These NotificationConfigs, when triggered, publish messages to the
- // specified Pub/Sub topics. See
- // https://cloud.google.com/storage/docs/pubsub-notifications.
- CreateNotificationConfig(context.Context, *CreateNotificationConfigRequest) (*NotificationConfig, error)
- // Retrieves a list of NotificationConfigs for a given bucket.
- ListNotificationConfigs(context.Context, *ListNotificationConfigsRequest) (*ListNotificationConfigsResponse, error)
// Concatenates a list of existing objects into a new object in the same
// bucket.
ComposeObject(context.Context, *ComposeObjectRequest) (*Object, error)
@@ -10939,18 +10966,39 @@ type StorageServer interface {
// object name, the sequence of returned `persisted_size` values will be
// non-decreasing.
QueryWriteStatus(context.Context, *QueryWriteStatusRequest) (*QueryWriteStatusResponse, error)
+ // Deprecated: Do not use.
// Retrieves the name of a project's Google Cloud Storage service account.
GetServiceAccount(context.Context, *GetServiceAccountRequest) (*ServiceAccount, error)
+ // Deprecated: Do not use.
// Creates a new HMAC key for the given service account.
CreateHmacKey(context.Context, *CreateHmacKeyRequest) (*CreateHmacKeyResponse, error)
+ // Deprecated: Do not use.
// Deletes a given HMAC key. Key must be in an INACTIVE state.
DeleteHmacKey(context.Context, *DeleteHmacKeyRequest) (*emptypb.Empty, error)
+ // Deprecated: Do not use.
// Gets an existing HMAC key metadata for the given id.
GetHmacKey(context.Context, *GetHmacKeyRequest) (*HmacKeyMetadata, error)
+ // Deprecated: Do not use.
// Lists HMAC keys under a given project with the additional filters provided.
ListHmacKeys(context.Context, *ListHmacKeysRequest) (*ListHmacKeysResponse, error)
+ // Deprecated: Do not use.
// Updates a given HMAC key state between ACTIVE and INACTIVE.
UpdateHmacKey(context.Context, *UpdateHmacKeyRequest) (*HmacKeyMetadata, error)
+ // Deprecated: Do not use.
+ // Permanently deletes a NotificationConfig.
+ DeleteNotificationConfig(context.Context, *DeleteNotificationConfigRequest) (*emptypb.Empty, error)
+ // Deprecated: Do not use.
+ // View a NotificationConfig.
+ GetNotificationConfig(context.Context, *GetNotificationConfigRequest) (*NotificationConfig, error)
+ // Deprecated: Do not use.
+ // Creates a NotificationConfig for a given bucket.
+ // These NotificationConfigs, when triggered, publish messages to the
+ // specified Pub/Sub topics. See
+ // https://cloud.google.com/storage/docs/pubsub-notifications.
+ CreateNotificationConfig(context.Context, *CreateNotificationConfigRequest) (*NotificationConfig, error)
+ // Deprecated: Do not use.
+ // Retrieves a list of NotificationConfigs for a given bucket.
+ ListNotificationConfigs(context.Context, *ListNotificationConfigsRequest) (*ListNotificationConfigsResponse, error)
}
// UnimplementedStorageServer can be embedded to have forward compatible implementations.
@@ -10984,18 +11032,6 @@ func (*UnimplementedStorageServer) TestIamPermissions(context.Context, *iampb.Te
func (*UnimplementedStorageServer) UpdateBucket(context.Context, *UpdateBucketRequest) (*Bucket, error) {
return nil, status.Errorf(codes.Unimplemented, "method UpdateBucket not implemented")
}
-func (*UnimplementedStorageServer) DeleteNotificationConfig(context.Context, *DeleteNotificationConfigRequest) (*emptypb.Empty, error) {
- return nil, status.Errorf(codes.Unimplemented, "method DeleteNotificationConfig not implemented")
-}
-func (*UnimplementedStorageServer) GetNotificationConfig(context.Context, *GetNotificationConfigRequest) (*NotificationConfig, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetNotificationConfig not implemented")
-}
-func (*UnimplementedStorageServer) CreateNotificationConfig(context.Context, *CreateNotificationConfigRequest) (*NotificationConfig, error) {
- return nil, status.Errorf(codes.Unimplemented, "method CreateNotificationConfig not implemented")
-}
-func (*UnimplementedStorageServer) ListNotificationConfigs(context.Context, *ListNotificationConfigsRequest) (*ListNotificationConfigsResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method ListNotificationConfigs not implemented")
-}
func (*UnimplementedStorageServer) ComposeObject(context.Context, *ComposeObjectRequest) (*Object, error) {
return nil, status.Errorf(codes.Unimplemented, "method ComposeObject not implemented")
}
@@ -11053,6 +11089,18 @@ func (*UnimplementedStorageServer) ListHmacKeys(context.Context, *ListHmacKeysRe
func (*UnimplementedStorageServer) UpdateHmacKey(context.Context, *UpdateHmacKeyRequest) (*HmacKeyMetadata, error) {
return nil, status.Errorf(codes.Unimplemented, "method UpdateHmacKey not implemented")
}
+func (*UnimplementedStorageServer) DeleteNotificationConfig(context.Context, *DeleteNotificationConfigRequest) (*emptypb.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method DeleteNotificationConfig not implemented")
+}
+func (*UnimplementedStorageServer) GetNotificationConfig(context.Context, *GetNotificationConfigRequest) (*NotificationConfig, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetNotificationConfig not implemented")
+}
+func (*UnimplementedStorageServer) CreateNotificationConfig(context.Context, *CreateNotificationConfigRequest) (*NotificationConfig, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method CreateNotificationConfig not implemented")
+}
+func (*UnimplementedStorageServer) ListNotificationConfigs(context.Context, *ListNotificationConfigsRequest) (*ListNotificationConfigsResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ListNotificationConfigs not implemented")
+}
func RegisterStorageServer(s *grpc.Server, srv StorageServer) {
s.RegisterService(&_Storage_serviceDesc, srv)
@@ -11220,78 +11268,6 @@ func _Storage_UpdateBucket_Handler(srv interface{}, ctx context.Context, dec fun
return interceptor(ctx, in, info, handler)
}
-func _Storage_DeleteNotificationConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(DeleteNotificationConfigRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageServer).DeleteNotificationConfig(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.storage.v2.Storage/DeleteNotificationConfig",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageServer).DeleteNotificationConfig(ctx, req.(*DeleteNotificationConfigRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Storage_GetNotificationConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(GetNotificationConfigRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageServer).GetNotificationConfig(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.storage.v2.Storage/GetNotificationConfig",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageServer).GetNotificationConfig(ctx, req.(*GetNotificationConfigRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Storage_CreateNotificationConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(CreateNotificationConfigRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageServer).CreateNotificationConfig(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.storage.v2.Storage/CreateNotificationConfig",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageServer).CreateNotificationConfig(ctx, req.(*CreateNotificationConfigRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Storage_ListNotificationConfigs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(ListNotificationConfigsRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageServer).ListNotificationConfigs(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.storage.v2.Storage/ListNotificationConfigs",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageServer).ListNotificationConfigs(ctx, req.(*ListNotificationConfigsRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
func _Storage_ComposeObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ComposeObjectRequest)
if err := dec(in); err != nil {
@@ -11653,6 +11629,78 @@ func _Storage_UpdateHmacKey_Handler(srv interface{}, ctx context.Context, dec fu
return interceptor(ctx, in, info, handler)
}
+func _Storage_DeleteNotificationConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DeleteNotificationConfigRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageServer).DeleteNotificationConfig(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.storage.v2.Storage/DeleteNotificationConfig",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageServer).DeleteNotificationConfig(ctx, req.(*DeleteNotificationConfigRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Storage_GetNotificationConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetNotificationConfigRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageServer).GetNotificationConfig(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.storage.v2.Storage/GetNotificationConfig",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageServer).GetNotificationConfig(ctx, req.(*GetNotificationConfigRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Storage_CreateNotificationConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CreateNotificationConfigRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageServer).CreateNotificationConfig(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.storage.v2.Storage/CreateNotificationConfig",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageServer).CreateNotificationConfig(ctx, req.(*CreateNotificationConfigRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Storage_ListNotificationConfigs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListNotificationConfigsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageServer).ListNotificationConfigs(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.storage.v2.Storage/ListNotificationConfigs",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageServer).ListNotificationConfigs(ctx, req.(*ListNotificationConfigsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
var _Storage_serviceDesc = grpc.ServiceDesc{
ServiceName: "google.storage.v2.Storage",
HandlerType: (*StorageServer)(nil),
@@ -11693,22 +11741,6 @@ var _Storage_serviceDesc = grpc.ServiceDesc{
MethodName: "UpdateBucket",
Handler: _Storage_UpdateBucket_Handler,
},
- {
- MethodName: "DeleteNotificationConfig",
- Handler: _Storage_DeleteNotificationConfig_Handler,
- },
- {
- MethodName: "GetNotificationConfig",
- Handler: _Storage_GetNotificationConfig_Handler,
- },
- {
- MethodName: "CreateNotificationConfig",
- Handler: _Storage_CreateNotificationConfig_Handler,
- },
- {
- MethodName: "ListNotificationConfigs",
- Handler: _Storage_ListNotificationConfigs_Handler,
- },
{
MethodName: "ComposeObject",
Handler: _Storage_ComposeObject_Handler,
@@ -11773,6 +11805,22 @@ var _Storage_serviceDesc = grpc.ServiceDesc{
MethodName: "UpdateHmacKey",
Handler: _Storage_UpdateHmacKey_Handler,
},
+ {
+ MethodName: "DeleteNotificationConfig",
+ Handler: _Storage_DeleteNotificationConfig_Handler,
+ },
+ {
+ MethodName: "GetNotificationConfig",
+ Handler: _Storage_GetNotificationConfig_Handler,
+ },
+ {
+ MethodName: "CreateNotificationConfig",
+ Handler: _Storage_CreateNotificationConfig_Handler,
+ },
+ {
+ MethodName: "ListNotificationConfigs",
+ Handler: _Storage_ListNotificationConfigs_Handler,
+ },
},
Streams: []grpc.StreamDesc{
{
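
The regenerated descriptor above only reorders the NotificationConfig handlers to the end of the method table and tags them (plus the HMAC and service-account RPCs) with "Deprecated: Do not use."; the wire-level method names are unchanged. A minimal sketch of a caller that still compiles against the deprecated RPC, assuming a pre-dialed *grpc.ClientConn (and noting storagepb is an internal package, importable only inside this module):

package sketch

import (
	"context"
	"fmt"

	storagepb "cloud.google.com/go/storage/internal/apiv2/storagepb"
	"google.golang.org/grpc"
)

// getNotificationConfig exercises the deprecated RPC; the full method name
// /google.storage.v2.Storage/GetNotificationConfig is unchanged by this diff.
func getNotificationConfig(ctx context.Context, conn *grpc.ClientConn, name string) (*storagepb.NotificationConfig, error) {
	client := storagepb.NewStorageClient(conn)
	nc, err := client.GetNotificationConfig(ctx, &storagepb.GetNotificationConfigRequest{Name: name})
	if err != nil {
		return nil, fmt.Errorf("GetNotificationConfig: %w", err)
	}
	return nc, nil
}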
diff --git a/vendor/cloud.google.com/go/storage/internal/version.go b/vendor/cloud.google.com/go/storage/internal/version.go
index e5b2de09172f5..c07789c9be782 100644
--- a/vendor/cloud.google.com/go/storage/internal/version.go
+++ b/vendor/cloud.google.com/go/storage/internal/version.go
@@ -15,4 +15,4 @@
package internal
// Version is the current tagged release of the library.
-const Version = "1.43.0"
+const Version = "1.44.0"
diff --git a/vendor/cloud.google.com/go/storage/invoke.go b/vendor/cloud.google.com/go/storage/invoke.go
index de57b4bbbbc72..99783f3df47b6 100644
--- a/vendor/cloud.google.com/go/storage/invoke.go
+++ b/vendor/cloud.google.com/go/storage/invoke.go
@@ -74,7 +74,15 @@ func run(ctx context.Context, call func(ctx context.Context) error, retry *retry
return true, fmt.Errorf("storage: retry failed after %v attempts; last error: %w", *retry.maxAttempts, err)
}
attempts++
- return !errorFunc(err), err
+ retryable := errorFunc(err)
+ // Explicitly check context cancellation so that we can distinguish between a
+ // DEADLINE_EXCEEDED error from the server and a user-set context deadline.
+ // Unfortunately gRPC will return codes.DeadlineExceeded (which may be retryable if it's
+ // sent by the server) in both cases.
+ if ctxErr := ctx.Err(); errors.Is(ctxErr, context.Canceled) || errors.Is(ctxErr, context.DeadlineExceeded) {
+ retryable = false
+ }
+ return !retryable, err
})
}
@@ -84,21 +92,7 @@ func setInvocationHeaders(ctx context.Context, invocationID string, attempts int
invocationHeader := fmt.Sprintf("gccl-invocation-id/%v gccl-attempt-count/%v", invocationID, attempts)
xGoogHeader := strings.Join([]string{invocationHeader, xGoogDefaultHeader}, " ")
- // TODO: remove this once the respective transport packages merge xGoogHeader.
- // Also remove gl-go at that time, as it will be repeated.
- hdrs := callctx.HeadersFromContext(ctx)
- for _, v := range hdrs[xGoogHeaderKey] {
- xGoogHeader = strings.Join([]string{xGoogHeader, v}, " ")
- }
-
- if hdrs[xGoogHeaderKey] != nil {
- // Replace the key instead of adding it, if there was anything to merge with.
- hdrs[xGoogHeaderKey] = []string{xGoogHeader}
- } else {
- // TODO: keep this line when removing the above code.
- ctx = callctx.SetHeaders(ctx, xGoogHeaderKey, xGoogHeader)
- }
-
+ ctx = callctx.SetHeaders(ctx, xGoogHeaderKey, xGoogHeader)
ctx = callctx.SetHeaders(ctx, idempotencyHeaderKey, invocationID)
return ctx
}
@@ -138,14 +132,18 @@ func ShouldRetry(err error) bool {
return true
}
}
+ case *net.DNSError:
+ if e.IsTemporary {
+ return true
+ }
case interface{ Temporary() bool }:
if e.Temporary() {
return true
}
}
- // UNAVAILABLE, RESOURCE_EXHAUSTED, and INTERNAL codes are all retryable for gRPC.
+ // UNAVAILABLE, RESOURCE_EXHAUSTED, INTERNAL, and DEADLINE_EXCEEDED codes are all retryable for gRPC.
if st, ok := status.FromError(err); ok {
- if code := st.Code(); code == codes.Unavailable || code == codes.ResourceExhausted || code == codes.Internal {
+ if code := st.Code(); code == codes.Unavailable || code == codes.ResourceExhausted || code == codes.Internal || code == codes.DeadlineExceeded {
return true
}
}
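
A minimal standalone sketch of the retry decision introduced above. gRPC reports codes.DeadlineExceeded both for a server-side timeout (now retryable via ShouldRetry) and for an expired caller context, so run() re-checks ctx.Err() to avoid retrying past a user-set deadline. The function name below is hypothetical:

package sketch

import (
	"context"
	"errors"
)

// shouldRetrySketch mirrors the check inside run(): an otherwise retryable
// error is overridden whenever the caller's own context is already done.
func shouldRetrySketch(ctx context.Context, err error, errorFunc func(error) bool) bool {
	retryable := errorFunc(err) // e.g. ShouldRetry from the hunk above
	if ctxErr := ctx.Err(); errors.Is(ctxErr, context.Canceled) || errors.Is(ctxErr, context.DeadlineExceeded) {
		retryable = false
	}
	return retryable
}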
diff --git a/vendor/cloud.google.com/go/storage/notifications.go b/vendor/cloud.google.com/go/storage/notifications.go
index 1d6cfdf5984d4..bc15900f01ead 100644
--- a/vendor/cloud.google.com/go/storage/notifications.go
+++ b/vendor/cloud.google.com/go/storage/notifications.go
@@ -21,7 +21,6 @@ import (
"regexp"
"cloud.google.com/go/internal/trace"
- "cloud.google.com/go/storage/internal/apiv2/storagepb"
raw "google.golang.org/api/storage/v1"
)
@@ -92,30 +91,6 @@ func toNotification(rn *raw.Notification) *Notification {
return n
}
-func toNotificationFromProto(pbn *storagepb.NotificationConfig) *Notification {
- n := &Notification{
- ID: pbn.GetName(),
- EventTypes: pbn.GetEventTypes(),
- ObjectNamePrefix: pbn.GetObjectNamePrefix(),
- CustomAttributes: pbn.GetCustomAttributes(),
- PayloadFormat: pbn.GetPayloadFormat(),
- }
- n.TopicProjectID, n.TopicID = parseNotificationTopic(pbn.Topic)
- return n
-}
-
-func toProtoNotification(n *Notification) *storagepb.NotificationConfig {
- return &storagepb.NotificationConfig{
- Name: n.ID,
- Topic: fmt.Sprintf("//pubsub.googleapis.com/projects/%s/topics/%s",
- n.TopicProjectID, n.TopicID),
- EventTypes: n.EventTypes,
- ObjectNamePrefix: n.ObjectNamePrefix,
- CustomAttributes: n.CustomAttributes,
- PayloadFormat: n.PayloadFormat,
- }
-}
-
var topicRE = regexp.MustCompile(`^//pubsub\.googleapis\.com/projects/([^/]+)/topics/([^/]+)`)
// parseNotificationTopic extracts the project and topic IDs from the full
@@ -144,6 +119,7 @@ func toRawNotification(n *Notification) *raw.Notification {
// AddNotification adds a notification to b. You must set n's TopicProjectID, TopicID
// and PayloadFormat, and must not set its ID. The other fields are all optional. The
// returned Notification's ID can be used to refer to it.
+// Note: gRPC is not supported.
func (b *BucketHandle) AddNotification(ctx context.Context, n *Notification) (ret *Notification, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.AddNotification")
defer func() { trace.EndSpan(ctx, err) }()
@@ -165,6 +141,7 @@ func (b *BucketHandle) AddNotification(ctx context.Context, n *Notification) (re
// Notifications returns all the Notifications configured for this bucket, as a map
// indexed by notification ID.
+// Note: gRPC is not supported.
func (b *BucketHandle) Notifications(ctx context.Context) (n map[string]*Notification, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Notifications")
defer func() { trace.EndSpan(ctx, err) }()
@@ -182,15 +159,8 @@ func notificationsToMap(rns []*raw.Notification) map[string]*Notification {
return m
}
-func notificationsToMapFromProto(ns []*storagepb.NotificationConfig) map[string]*Notification {
- m := map[string]*Notification{}
- for _, n := range ns {
- m[n.Name] = toNotificationFromProto(n)
- }
- return m
-}
-
// DeleteNotification deletes the notification with the given ID.
+// Note: gRPC is not supported.
func (b *BucketHandle) DeleteNotification(ctx context.Context, id string) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.DeleteNotification")
defer func() { trace.EndSpan(ctx, err) }()
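
With the proto conversion helpers removed, the notification methods only work over the HTTP/JSON transport, as the new "Note: gRPC is not supported." comments state. A usage sketch (the bucket name is a placeholder):

package sketch

import (
	"context"
	"fmt"

	"cloud.google.com/go/storage"
)

func listNotifications(ctx context.Context, bucket string) (map[string]*storage.Notification, error) {
	client, err := storage.NewClient(ctx) // HTTP/JSON client; a gRPC client cannot serve these calls
	if err != nil {
		return nil, err
	}
	defer client.Close()
	notifications, err := client.Bucket(bucket).Notifications(ctx)
	if err != nil {
		return nil, fmt.Errorf("Notifications: %w", err)
	}
	return notifications, nil
}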
diff --git a/vendor/cloud.google.com/go/storage/option.go b/vendor/cloud.google.com/go/storage/option.go
index debdb0f52d510..0fc82ed590642 100644
--- a/vendor/cloud.google.com/go/storage/option.go
+++ b/vendor/cloud.google.com/go/storage/option.go
@@ -22,8 +22,9 @@ import (
// storageConfig contains the Storage client option configuration that can be
// set through storageClientOptions.
type storageConfig struct {
- useJSONforReads bool
- readAPIWasSet bool
+ useJSONforReads bool
+ readAPIWasSet bool
+ disableClientMetrics bool
}
// newStorageConfig generates a new storageConfig with all the given
@@ -78,3 +79,32 @@ func (w *withReadAPI) ApplyStorageOpt(c *storageConfig) {
c.useJSONforReads = w.useJSON
c.readAPIWasSet = true
}
+
+type withDisabledClientMetrics struct {
+ internaloption.EmbeddableAdapter
+ disabledClientMetrics bool
+}
+
+// WithDisabledClientMetrics is an option that may be passed to [NewClient].
+// gRPC metrics are enabled by default in the GCS client and will export the
+// gRPC telemetry discussed in [gRFC/66] and [gRFC/78] to
+// [Google Cloud Monitoring]. The option is used to disable metrics.
+// Google Cloud Support can use this information to more quickly diagnose
+// problems related to GCS and gRPC.
+// Sending this data does not incur any billing charges, and requires minimal
+// CPU (a single RPC every few minutes) or memory (a few KiB to batch the
+// telemetry).
+//
+// The default is to enable client metrics. To opt out of metrics collection,
+// use this option.
+//
+// [gRFC/66]: https://github.com/grpc/proposal/blob/master/A66-otel-stats.md
+// [gRFC/78]: https://github.com/grpc/proposal/blob/master/A78-grpc-metrics-wrr-pf-xds.md
+// [Google Cloud Monitoring]: https://cloud.google.com/monitoring/docs
+func WithDisabledClientMetrics() option.ClientOption {
+ return &withDisabledClientMetrics{disabledClientMetrics: true}
+}
+
+func (w *withDisabledClientMetrics) ApplyStorageOpt(c *storageConfig) {
+ c.disableClientMetrics = w.disabledClientMetrics
+}
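
A usage sketch of the new option, assuming it is passed to the existing gRPC constructor (it also satisfies option.ClientOption for NewClient, per the doc comment above):

package sketch

import (
	"context"

	"cloud.google.com/go/storage"
)

func newQuietClient(ctx context.Context) (*storage.Client, error) {
	// Metrics are on by default; this opts the gRPC client out of exporting
	// the gRFC A66/A78 telemetry to Google Cloud Monitoring.
	return storage.NewGRPCClient(ctx, storage.WithDisabledClientMetrics())
}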
diff --git a/vendor/cloud.google.com/go/storage/reader.go b/vendor/cloud.google.com/go/storage/reader.go
index 6da2432f004ca..e1d96659282b5 100644
--- a/vendor/cloud.google.com/go/storage/reader.go
+++ b/vendor/cloud.google.com/go/storage/reader.go
@@ -65,6 +65,19 @@ type ReaderObjectAttrs struct {
// meaningful in the context of a particular generation of a
// particular object.
Metageneration int64
+
+ // CRC32C is the CRC32 checksum of the entire object's content using the
+ // Castagnoli93 polynomial, if available.
+ CRC32C uint32
+
+ // Decompressed is true if the object is stored as a gzip file and was
+ // decompressed when read.
+ // Objects are automatically decompressed if the object's metadata property
+ // "Content-Encoding" is set to "gzip" or satisfies decompressive
+ // transcoding as per https://cloud.google.com/storage/docs/transcoding.
+ //
+ // To prevent decompression on reads, use [ObjectHandle.ReadCompressed].
+ Decompressed bool
}
// NewReader creates a new Reader to read the contents of the
@@ -91,7 +104,8 @@ func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) {
// If the object's metadata property "Content-Encoding" is set to "gzip" or satisfies
// decompressive transcoding per https://cloud.google.com/storage/docs/transcoding
// that file will be served back whole, regardless of the requested range as
-// Google Cloud Storage dictates.
+// Google Cloud Storage dictates. If decompressive transcoding occurs,
+// [Reader.Attrs.Decompressed] will be true.
//
// By default, reads are made using the Cloud Storage XML API. We recommend
// using the JSON API instead, which can be done by setting [WithJSONReads]
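
A sketch of inspecting the new reader attributes after a read; the bucket and object names are placeholders:

package sketch

import (
	"context"
	"fmt"
	"io"

	"cloud.google.com/go/storage"
)

func readAndInspect(ctx context.Context, client *storage.Client) error {
	r, err := client.Bucket("my-bucket").Object("logs.gz").NewReader(ctx)
	if err != nil {
		return err
	}
	defer r.Close()
	if r.Attrs.Decompressed {
		// The object was gunzipped in transit, so the reported CRC32C (when
		// present) covers the stored bytes rather than the bytes read here.
		fmt.Println("decompressive transcoding occurred; CRC32C:", r.Attrs.CRC32C)
	}
	_, err = io.Copy(io.Discard, r)
	return err
}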
diff --git a/vendor/cloud.google.com/go/storage/storage.go b/vendor/cloud.google.com/go/storage/storage.go
index b6316fa668f9a..0754dfef0bec8 100644
--- a/vendor/cloud.google.com/go/storage/storage.go
+++ b/vendor/cloud.google.com/go/storage/storage.go
@@ -1695,7 +1695,6 @@ type Query struct {
// IncludeFoldersAsPrefixes includes Folders and Managed Folders in the set of
// prefixes returned by the query. Only applicable if Delimiter is set to /.
- // IncludeFoldersAsPrefixes is not yet implemented in the gRPC API.
IncludeFoldersAsPrefixes bool
// SoftDeleted indicates whether to list soft-deleted objects.
@@ -2350,6 +2349,7 @@ func toProtoChecksums(sendCRC32C bool, attrs *ObjectAttrs) *storagepb.ObjectChec
}
// ServiceAccount fetches the email address of the given project's Google Cloud Storage service account.
+// Note: gRPC is not supported.
func (c *Client) ServiceAccount(ctx context.Context, projectID string) (string, error) {
o := makeStorageOpts(true, c.retry, "")
return c.tc.GetServiceAccount(ctx, projectID, o...)
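
Dropping the "not yet implemented in the gRPC API" caveat above implies IncludeFoldersAsPrefixes now behaves the same on both transports; a listing sketch (the bucket name is a placeholder):

package sketch

import (
	"context"

	"cloud.google.com/go/storage"
	"google.golang.org/api/iterator"
)

func listTopLevelPrefixes(ctx context.Context, client *storage.Client) ([]string, error) {
	// IncludeFoldersAsPrefixes only applies when Delimiter is "/".
	q := &storage.Query{Delimiter: "/", IncludeFoldersAsPrefixes: true}
	it := client.Bucket("my-bucket").Objects(ctx, q)
	var prefixes []string
	for {
		attrs, err := it.Next()
		if err == iterator.Done {
			return prefixes, nil
		}
		if err != nil {
			return nil, err
		}
		if attrs.Prefix != "" { // synthetic prefix entries carry only Prefix
			prefixes = append(prefixes, attrs.Prefix)
		}
	}
}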
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/LICENSE b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/LICENSE
new file mode 100644
index 0000000000000..d645695673349
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/README.md b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/README.md
new file mode 100644
index 0000000000000..9515ee52055c5
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/README.md
@@ -0,0 +1,3 @@
+# GCP Resource detection library
+
+This is a library intended to be used by upstream OpenTelemetry resource detectors. It exists within this repository to allow for integration testing of the detection functions in real GCP environments.
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/app_engine.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/app_engine.go
new file mode 100644
index 0000000000000..0a36807033432
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/app_engine.go
@@ -0,0 +1,76 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gcp
+
+const (
+ // See https://cloud.google.com/appengine/docs/flexible/python/migrating#modules
+ // for the environment variables available in GAE environments.
+ gaeServiceEnv = "GAE_SERVICE"
+ gaeVersionEnv = "GAE_VERSION"
+ gaeInstanceEnv = "GAE_INSTANCE"
+ gaeEnv = "GAE_ENV"
+ gaeStandard = "standard"
+)
+
+func (d *Detector) onAppEngineStandard() bool {
+ // See https://cloud.google.com/appengine/docs/standard/go111/runtime#environment_variables.
+ env, found := d.os.LookupEnv(gaeEnv)
+ return found && env == gaeStandard
+}
+
+func (d *Detector) onAppEngine() bool {
+ _, found := d.os.LookupEnv(gaeServiceEnv)
+ return found
+}
+
+// AppEngineServiceName returns the service name of the app engine service.
+func (d *Detector) AppEngineServiceName() (string, error) {
+ if name, found := d.os.LookupEnv(gaeServiceEnv); found {
+ return name, nil
+ }
+ return "", errEnvVarNotFound
+}
+
+// AppEngineServiceVersion returns the service version of the app engine service.
+func (d *Detector) AppEngineServiceVersion() (string, error) {
+ if version, found := d.os.LookupEnv(gaeVersionEnv); found {
+ return version, nil
+ }
+ return "", errEnvVarNotFound
+}
+
+// AppEngineServiceInstance returns the service instance of the app engine service.
+func (d *Detector) AppEngineServiceInstance() (string, error) {
+ if instanceID, found := d.os.LookupEnv(gaeInstanceEnv); found {
+ return instanceID, nil
+ }
+ return "", errEnvVarNotFound
+}
+
+// AppEngineFlexAvailabilityZoneAndRegion returns the zone and region in which this program is running.
+func (d *Detector) AppEngineFlexAvailabilityZoneAndRegion() (string, string, error) {
+ // The GCE metadata server is available on App Engine Flex.
+ return d.GCEAvailabilityZoneAndRegion()
+}
+
+// AppEngineStandardAvailabilityZone returns the zone the app engine service is running in.
+func (d *Detector) AppEngineStandardAvailabilityZone() (string, error) {
+ return d.metadata.Zone()
+}
+
+// AppEngineStandardCloudRegion returns the region the app engine service is running in.
+func (d *Detector) AppEngineStandardCloudRegion() (string, error) {
+ return d.FaaSCloudRegion()
+}
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/bms.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/bms.go
new file mode 100644
index 0000000000000..d3992a4f7e417
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/bms.go
@@ -0,0 +1,55 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gcp
+
+const (
+ bmsProjectIDEnv = "BMS_PROJECT_ID"
+ bmsRegionEnv = "BMS_REGION"
+ bmsInstanceIDEnv = "BMS_INSTANCE_ID"
+)
+
+// onBareMetalSolution checks if the code is running on a Google Cloud Bare Metal Solution (BMS) by verifying
+// the presence and non-empty values of BMS_PROJECT_ID, BMS_REGION, and BMS_INSTANCE_ID environment variables.
+// For more information on Google Cloud Bare Metal Solution, see: https://cloud.google.com/bare-metal/docs
+func (d *Detector) onBareMetalSolution() bool {
+ projectID, projectIDExists := d.os.LookupEnv(bmsProjectIDEnv)
+ region, regionExists := d.os.LookupEnv(bmsRegionEnv)
+ instanceID, instanceIDExists := d.os.LookupEnv(bmsInstanceIDEnv)
+ return projectIDExists && regionExists && instanceIDExists && projectID != "" && region != "" && instanceID != ""
+}
+
+// BareMetalSolutionInstanceID returns the instance ID from the BMS_INSTANCE_ID environment variable.
+func (d *Detector) BareMetalSolutionInstanceID() (string, error) {
+ if instanceID, found := d.os.LookupEnv(bmsInstanceIDEnv); found {
+ return instanceID, nil
+ }
+ return "", errEnvVarNotFound
+}
+
+// BareMetalSolutionCloudRegion returns the region from the BMS_REGION environment variable.
+func (d *Detector) BareMetalSolutionCloudRegion() (string, error) {
+ if region, found := d.os.LookupEnv(bmsRegionEnv); found {
+ return region, nil
+ }
+ return "", errEnvVarNotFound
+}
+
+// BareMetalSolutionProjectID returns the project ID from the BMS_PROJECT_ID environment variable.
+func (d *Detector) BareMetalSolutionProjectID() (string, error) {
+ if project, found := d.os.LookupEnv(bmsProjectIDEnv); found {
+ return project, nil
+ }
+ return "", errEnvVarNotFound
+}
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/detector.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/detector.go
new file mode 100644
index 0000000000000..2cc62de09766b
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/detector.go
@@ -0,0 +1,102 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gcp
+
+import (
+ "errors"
+ "os"
+
+ "cloud.google.com/go/compute/metadata"
+)
+
+var errEnvVarNotFound = errors.New("environment variable not found")
+
+// NewDetector returns a *Detector which can detect the platform,
+// and fetch attributes of the platform on which it is running.
+func NewDetector() *Detector {
+ return &Detector{metadata: metadata.NewClient(nil), os: realOSProvider{}}
+}
+
+type Platform int64
+
+const (
+ UnknownPlatform Platform = iota
+ GKE
+ GCE
+ CloudRun
+ CloudRunJob
+ CloudFunctions
+ AppEngineStandard
+ AppEngineFlex
+ BareMetalSolution
+)
+
+// CloudPlatform returns the platform on which this program is running.
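+// Platforms are probed roughly from most to least specific, so that, for
+// example, GKE is reported rather than the underlying GCE instance.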
+func (d *Detector) CloudPlatform() Platform {
+ switch {
+ case d.onBareMetalSolution():
+ return BareMetalSolution
+ case d.onGKE():
+ return GKE
+ case d.onCloudFunctions():
+ return CloudFunctions
+ case d.onCloudRun():
+ return CloudRun
+ case d.onCloudRunJob():
+ return CloudRunJob
+ case d.onAppEngineStandard():
+ return AppEngineStandard
+ case d.onAppEngine():
+ return AppEngineFlex
+ case d.onGCE():
+ return GCE
+ }
+ return UnknownPlatform
+}
+
+// ProjectID returns the ID of the project in which this program is running.
+func (d *Detector) ProjectID() (string, error) {
+ return d.metadata.ProjectID()
+}
+
+// Detector collects resource information for all GCP platforms.
+type Detector struct {
+ metadata metadataProvider
+ os osProvider
+}
+
+// metadataProvider contains the subset of the metadata.Client functions used
+// by this resource Detector to allow testing with a fake implementation.
+type metadataProvider interface {
+ ProjectID() (string, error)
+ InstanceID() (string, error)
+ Get(string) (string, error)
+ InstanceName() (string, error)
+ Hostname() (string, error)
+ Zone() (string, error)
+ InstanceAttributeValue(string) (string, error)
+}
+
+// osProvider contains the subset of the os package functions used by this
+// resource Detector to allow testing with a fake implementation.
+type osProvider interface {
+ LookupEnv(string) (string, bool)
+}
+
+// realOSProvider uses the os package to lookup env vars.
+type realOSProvider struct{}
+
+func (realOSProvider) LookupEnv(env string) (string, bool) {
+ return os.LookupEnv(env)
+}
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/faas.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/faas.go
new file mode 100644
index 0000000000000..9277608dd6fc5
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/faas.go
@@ -0,0 +1,105 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gcp
+
+import (
+ "strings"
+)
+
+const (
+ // Cloud Functions env vars:
+ // https://cloud.google.com/functions/docs/configuring/env-var#newer_runtimes
+ //
+ // Cloud Run env vars:
+ // https://cloud.google.com/run/docs/container-contract#services-env-vars
+ //
+ // Cloud Run jobs env vars:
+ // https://cloud.google.com/run/docs/container-contract#jobs-env-vars
+ cloudFunctionsTargetEnv = "FUNCTION_TARGET"
+ cloudRunConfigurationEnv = "K_CONFIGURATION"
+ cloudRunJobsEnv = "CLOUD_RUN_JOB"
+ faasServiceEnv = "K_SERVICE"
+ faasRevisionEnv = "K_REVISION"
+ cloudRunJobExecutionEnv = "CLOUD_RUN_EXECUTION"
+ cloudRunJobTaskIndexEnv = "CLOUD_RUN_TASK_INDEX"
+ regionMetadataAttr = "instance/region"
+)
+
+func (d *Detector) onCloudFunctions() bool {
+ _, found := d.os.LookupEnv(cloudFunctionsTargetEnv)
+ return found
+}
+
+func (d *Detector) onCloudRun() bool {
+ _, found := d.os.LookupEnv(cloudRunConfigurationEnv)
+ return found
+}
+
+func (d *Detector) onCloudRunJob() bool {
+ _, found := d.os.LookupEnv(cloudRunJobsEnv)
+ return found
+}
+
+// FaaSName returns the name of the Cloud Run, Cloud Run jobs or Cloud Functions service.
+func (d *Detector) FaaSName() (string, error) {
+ if name, found := d.os.LookupEnv(faasServiceEnv); found {
+ return name, nil
+ }
+ if name, found := d.os.LookupEnv(cloudRunJobsEnv); found {
+ return name, nil
+ }
+ return "", errEnvVarNotFound
+}
+
+// FaaSVersion returns the revision of the Cloud Run or Cloud Functions service.
+func (d *Detector) FaaSVersion() (string, error) {
+ if version, found := d.os.LookupEnv(faasRevisionEnv); found {
+ return version, nil
+ }
+ return "", errEnvVarNotFound
+}
+
+// CloudRunJobExecution returns the execution id of the Cloud Run jobs.
+func (d *Detector) CloudRunJobExecution() (string, error) {
+ if eid, found := d.os.LookupEnv(cloudRunJobExecutionEnv); found {
+ return eid, nil
+ }
+ return "", errEnvVarNotFound
+}
+
+// CloudRunJobTaskIndex returns the task index for the execution of the Cloud Run jobs.
+func (d *Detector) CloudRunJobTaskIndex() (string, error) {
+ if tidx, found := d.os.LookupEnv(cloudRunJobTaskIndexEnv); found {
+ return tidx, nil
+ }
+ return "", errEnvVarNotFound
+}
+
+// FaaSID returns the instance id of the Cloud Run or Cloud Function.
+func (d *Detector) FaaSID() (string, error) {
+ return d.metadata.InstanceID()
+}
+
+// FaaSCloudRegion detects region from the metadata server.
+// It is in the format /projects/<project_number>/regions/<region>.
+//
+// https://cloud.google.com/run/docs/reference/container-contract#metadata-server
+func (d *Detector) FaaSCloudRegion() (string, error) {
+ region, err := d.metadata.Get(regionMetadataAttr)
+ if err != nil {
+ return "", err
+ }
+ return region[strings.LastIndex(region, "/")+1:], nil
+}
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gce.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gce.go
new file mode 100644
index 0000000000000..37259fc451bdd
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gce.go
@@ -0,0 +1,75 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gcp
+
+import (
+ "fmt"
+ "strings"
+)
+
+// See the available GCE instance metadata:
+// https://cloud.google.com/compute/docs/metadata/default-metadata-values#vm_instance_metadata
+const machineTypeMetadataAttr = "instance/machine-type"
+
+func (d *Detector) onGCE() bool {
+ _, err := d.metadata.Get(machineTypeMetadataAttr)
+ return err == nil
+}
+
+// GCEHostType returns the machine type of the instance on which this program is running.
+func (d *Detector) GCEHostType() (string, error) {
+ return d.metadata.Get(machineTypeMetadataAttr)
+}
+
+// GCEHostID returns the instance ID of the instance on which this program is running.
+func (d *Detector) GCEHostID() (string, error) {
+ return d.metadata.InstanceID()
+}
+
+// GCEHostName returns the instance name of the instance on which this program is running.
+// It is recommended to use GCEInstanceName() or GCEInstanceHostname() instead,
+// to more accurately reflect which value is returned.
+func (d *Detector) GCEHostName() (string, error) {
+ return d.metadata.InstanceName()
+}
+
+// GCEInstanceName returns the instance name of the instance on which this program is running.
+// This is the value visible in the Cloud Console UI, and the prefix for the default hostname
+// of the instance as defined by the default internal DNS name (see https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names).
+func (d *Detector) GCEInstanceName() (string, error) {
+ return d.metadata.InstanceName()
+}
+
+// GCEInstanceHostname returns the full value of the default or custom hostname of the instance
+// on which this program is running. See https://cloud.google.com/compute/docs/instances/custom-hostname-vm.
+func (d *Detector) GCEInstanceHostname() (string, error) {
+ return d.metadata.Hostname()
+}
+
+// GCEAvailabilityZoneAndRegion returns the zone and region in which this program is running.
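+// For example, zone "us-central1-a" yields ("us-central1-a", "us-central1").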
+func (d *Detector) GCEAvailabilityZoneAndRegion() (string, string, error) {
+ zone, err := d.metadata.Zone()
+ if err != nil {
+ return "", "", err
+ }
+ if zone == "" {
+ return "", "", fmt.Errorf("no zone detected from GCE metadata server")
+ }
+ splitZone := strings.SplitN(zone, "-", 3)
+ if len(splitZone) != 3 {
+ return "", "", fmt.Errorf("zone was not in the expected format: country-region-zone. Got %v", zone)
+ }
+ return zone, strings.Join(splitZone[0:2], "-"), nil
+}
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gke.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gke.go
new file mode 100644
index 0000000000000..67ed972b2326d
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gke.go
@@ -0,0 +1,70 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gcp
+
+import (
+ "fmt"
+ "strings"
+)
+
+const (
+ // If the kubernetes.default.svc service exists in the cluster,
+ // then the KUBERNETES_SERVICE_HOST env var will be populated.
+ // Use this as an indication that we are running on kubernetes.
+ k8sServiceHostEnv = "KUBERNETES_SERVICE_HOST"
+ // See the available GKE metadata:
+ // https://cloud.google.com/kubernetes-engine/docs/concepts/workload-identity#instance_metadata
+ clusterNameMetadataAttr = "cluster-name"
+ clusterLocationMetadataAttr = "cluster-location"
+)
+
+func (d *Detector) onGKE() bool {
+ _, found := d.os.LookupEnv(k8sServiceHostEnv)
+ return found
+}
+
+// GKEHostID returns the instance ID of the instance on which this program is running.
+func (d *Detector) GKEHostID() (string, error) {
+ return d.GCEHostID()
+}
+
+// GKEClusterName returns the name of the GKE cluster in which this program is running.
+func (d *Detector) GKEClusterName() (string, error) {
+ return d.metadata.InstanceAttributeValue(clusterNameMetadataAttr)
+}
+
+type LocationType int64
+
+const (
+ UndefinedLocation LocationType = iota
+ Zone
+ Region
+)
+
+// GKEAvailabilityZoneOrRegion returns the location of the cluster and whether the cluster is zonal or regional.
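+// For example, "us-central1" denotes a regional cluster, while "us-central1-a" denotes a zonal one.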
+func (d *Detector) GKEAvailabilityZoneOrRegion() (string, LocationType, error) {
+ clusterLocation, err := d.metadata.InstanceAttributeValue(clusterLocationMetadataAttr)
+ if err != nil {
+ return "", UndefinedLocation, err
+ }
+ switch strings.Count(clusterLocation, "-") {
+ case 1:
+ return clusterLocation, Region, nil
+ case 2:
+ return clusterLocation, Zone, nil
+ default:
+ return "", UndefinedLocation, fmt.Errorf("unrecognized format for cluster location: %v", clusterLocation)
+ }
+}
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/LICENSE b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/LICENSE
new file mode 100644
index 0000000000000..d645695673349
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/README.md b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/README.md
new file mode 100644
index 0000000000000..c77d5eb154461
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/README.md
@@ -0,0 +1,37 @@
+# OpenTelemetry Google Cloud Monitoring Exporter
+
+[![Docs](https://pkg.go.dev/badge/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric.svg)](https://pkg.go.dev/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric)
+[![Apache License][license-image]][license-url]
+
+The OpenTelemetry Google Cloud Monitoring Exporter allows the user to send collected metrics to Google Cloud.
+
+[Google Cloud Monitoring](https://cloud.google.com/monitoring) provides visibility into the performance, uptime, and overall health of cloud-powered applications. It collects metrics, events, and metadata from Google Cloud, Amazon Web Services, hosted uptime probes, application instrumentation, and a variety of common application components including Cassandra, Nginx, Apache Web Server, Elasticsearch, and many others. Operations ingests that data and generates insights via dashboards, charts, and alerts. Cloud Monitoring alerting helps you collaborate by integrating with Slack, PagerDuty, and more.
+
+## Setup
+
+Google Cloud Monitoring is a managed service provided by Google Cloud Platform. It requires you to set up a "Workspace" in advance. The guide to creating a new Workspace is available in [the official documentation](https://cloud.google.com/monitoring/workspaces/create).
+
+## Authentication
+
+The Google Cloud Monitoring exporter depends upon [`google.FindDefaultCredentials`](https://pkg.go.dev/golang.org/x/oauth2/google?tab=doc#FindDefaultCredentials), so the service account is automatically detected by default, but a custom credentials file (the so-called `service_account_key.json`) can also be detected under specific conditions. Quoting from the documentation of `google.FindDefaultCredentials`:
+
+* A JSON file whose path is specified by the `GOOGLE_APPLICATION_CREDENTIALS` environment variable.
+* A JSON file in a location known to the gcloud command-line tool. On Windows, this is `%APPDATA%/gcloud/application_default_credentials.json`. On other systems, `$HOME/.config/gcloud/application_default_credentials.json`.
+
+When running code locally, you may need to specify a Google Project ID in addition to `GOOGLE_APPLICATION_CREDENTIALS`. This is best done using an environment variable (e.g. `GOOGLE_CLOUD_PROJECT`) and the `metric.WithProjectID` method, e.g.:
+
+```golang
+projectID := os.Getenv("GOOGLE_CLOUD_PROJECT")
+opts := []mexporter.Option{
+ mexporter.WithProjectID(projectID),
+}
+```
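+
+The options can then be passed to the exporter constructor. A minimal sketch, assuming the exporter package is imported as `mexporter` and the OpenTelemetry SDK metric package (`go.opentelemetry.io/otel/sdk/metric`) as `sdkmetric`:
+
+```golang
+exporter, err := mexporter.New(opts...)
+if err != nil {
+	log.Fatalf("failed to create metric exporter: %v", err)
+}
+// Attach the exporter to a periodic reader so collected metrics are
+// pushed to Cloud Monitoring on a fixed interval.
+provider := sdkmetric.NewMeterProvider(
+	sdkmetric.WithReader(sdkmetric.NewPeriodicReader(exporter)),
+)
+defer provider.Shutdown(context.Background())
+```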
+
+## Useful links
+
+* For more information on OpenTelemetry, visit: https://opentelemetry.io/
+* For more about OpenTelemetry Go, visit: https://github.com/open-telemetry/opentelemetry-go
+* Learn more about Google Cloud Monitoring at https://cloud.google.com/monitoring
+
+[license-url]: https://github.com/GoogleCloudPlatform/opentelemetry-operations-go/blob/main/LICENSE
+[license-image]: https://img.shields.io/badge/license-Apache_2.0-green.svg?style=flat
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/cloudmonitoring.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/cloudmonitoring.go
new file mode 100644
index 0000000000000..90dfcb344e329
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/cloudmonitoring.go
@@ -0,0 +1,49 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+
+ monitoring "cloud.google.com/go/monitoring/apiv3/v2"
+ "golang.org/x/oauth2/google"
+)
+
+// New creates a new Exporter that implements metric.Exporter.
+func New(opts ...Option) (sdkmetric.Exporter, error) {
+ o := options{
+ context: context.Background(),
+ resourceAttributeFilter: DefaultResourceAttributesFilter,
+ }
+ for _, opt := range opts {
+ opt(&o)
+ }
+
+ if o.projectID == "" {
+ creds, err := google.FindDefaultCredentials(o.context, monitoring.DefaultAuthScopes()...)
+ if err != nil {
+ return nil, fmt.Errorf("failed to find Google Cloud credentials: %v", err)
+ }
+ if creds.ProjectID == "" {
+ return nil, errors.New("google cloud monitoring: no project found with application default credentials")
+ }
+ o.projectID = creds.ProjectID
+ }
+ return newMetricExporter(&o)
+}
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/constants.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/constants.go
new file mode 100644
index 0000000000000..57329a4bdc30e
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/constants.go
@@ -0,0 +1,97 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric
+
+// TODO: remove this file when the constants are ready in the Go SDK
+
+// Mappings for the well-known OpenTelemetry resource label keys
+// to applicable Monitored Resource label keys.
+// A uniquely identifying name for the Kubernetes cluster. Kubernetes
+// does not have cluster names as an internal concept so this may be
+// set to any meaningful value within the environment. For example,
+// GKE clusters have a name which can be used for this label.
+const (
+ // Deprecated: use semconv.CloudProviderKey instead.
+ CloudKeyProvider = "cloud.provider"
+ // Deprecated: use semconv.CloudAccountIDKey instead.
+ CloudKeyAccountID = "cloud.account.id"
+ // Deprecated: use semconv.CloudRegionKey instead.
+ CloudKeyRegion = "cloud.region"
+ // Deprecated: use semconv.CloudAvailabilityZoneKey instead.
+ CloudKeyZone = "cloud.availability_zone"
+
+ // Deprecated: use semconv.ServiceNamespaceKey instead.
+ ServiceKeyNamespace = "service.namespace"
+ // Deprecated: use semconv.ServiceInstanceIDKey instead.
+ ServiceKeyInstanceID = "service.instance.id"
+ // Deprecated: use semconv.ServiceNameKey instead.
+ ServiceKeyName = "service.name"
+
+ // Deprecated: HostType is not needed.
+ HostType = "host"
+ // A uniquely identifying name for the host.
+ // Deprecated: use semconv.HostNameKey instead.
+ HostKeyName = "host.name"
+ // A hostname as returned by the 'hostname' command on host machine.
+ // Deprecated: HostKeyHostName is not needed.
+ HostKeyHostName = "host.hostname"
+ // Deprecated: use semconv.HostIDKey instead.
+ HostKeyID = "host.id"
+ // Deprecated: use semconv.HostTypeKey instead.
+ HostKeyType = "host.type"
+
+ // A uniquely identifying name for the Container.
+ // Deprecated: use semconv.ContainerNameKey instead.
+ ContainerKeyName = "container.name"
+ // Deprecated: use semconv.ContainerImageNameKey instead.
+ ContainerKeyImageName = "container.image.name"
+ // Deprecated: use semconv.ContainerImageTagKey instead.
+ ContainerKeyImageTag = "container.image.tag"
+
+ // Cloud Providers
+ // Deprecated: use semconv.CloudProviderAWS instead.
+ CloudProviderAWS = "aws"
+ // Deprecated: use semconv.CloudProviderGCP instead.
+ CloudProviderGCP = "gcp"
+ // Deprecated: use semconv.CloudProviderAzure instead.
+ CloudProviderAZURE = "azure"
+
+ // Deprecated: Use "k8s" instead. This should not be needed.
+ K8S = "k8s"
+ // Deprecated: use semconv.K8SClusterNameKey instead.
+ K8SKeyClusterName = "k8s.cluster.name"
+ // Deprecated: use semconv.K8SNamespaceNameKey instead.
+ K8SKeyNamespaceName = "k8s.namespace.name"
+ // Deprecated: use semconv.K8SPodNameKey instead.
+ K8SKeyPodName = "k8s.pod.name"
+ // Deprecated: use semconv.K8SDeploymentNameKey instead.
+ K8SKeyDeploymentName = "k8s.deployment.name"
+
+ // Monitored Resources types
+ // Deprecated: Use "k8s_container" instead.
+ K8SContainer = "k8s_container"
+ // Deprecated: Use "k8s_node" instead.
+ K8SNode = "k8s_node"
+ // Deprecated: Use "k8s_pod" instead.
+ K8SPod = "k8s_pod"
+ // Deprecated: Use "k8s_cluster" instead.
+ K8SCluster = "k8s_cluster"
+ // Deprecated: Use "gce_instance" instead.
+ GCEInstance = "gce_instance"
+ // Deprecated: Use "aws_ec2_instance" instead.
+ AWSEC2Instance = "aws_ec2_instance"
+ // Deprecated: Use "generic_task" instead.
+ GenericTask = "generic_task"
+)
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/error.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/error.go
new file mode 100644
index 0000000000000..974c0af950873
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/error.go
@@ -0,0 +1,32 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric
+
+import (
+ "errors"
+ "fmt"
+)
+
+var (
+ errBlankProjectID = errors.New("expecting a non-blank ProjectID")
+)
+
+type errUnexpectedAggregationKind struct {
+ kind string
+}
+
+func (e errUnexpectedAggregationKind) Error() string {
+ return fmt.Sprintf("the metric kind is unexpected: %v", e.kind)
+}
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/metric.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/metric.go
new file mode 100644
index 0000000000000..ba0012e25a916
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/metric.go
@@ -0,0 +1,890 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric
+
+import (
+ "bytes"
+ "context"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "math"
+ "net/url"
+ "reflect"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+ "unicode"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+ "go.opentelemetry.io/otel/sdk/metric"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+ "go.opentelemetry.io/otel/sdk/resource"
+ "go.opentelemetry.io/otel/trace"
+
+ monitoring "cloud.google.com/go/monitoring/apiv3/v2"
+ "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
+ "github.com/googleapis/gax-go/v2"
+ "google.golang.org/api/option"
+ "google.golang.org/genproto/googleapis/api/distribution"
+ "google.golang.org/genproto/googleapis/api/label"
+ googlemetricpb "google.golang.org/genproto/googleapis/api/metric"
+ monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/encoding/gzip"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/protobuf/types/known/anypb"
+ "google.golang.org/protobuf/types/known/timestamppb"
+
+ "github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping"
+)
+
+const (
+	// The maximum number of time series to send to GCM in a single request. This
+ // is a hard limit in the GCM API, so we never want to exceed 200.
+ sendBatchSize = 200
+
+ cloudMonitoringMetricDescriptorNameFormat = "workload.googleapis.com/%s"
+ platformMappingMonitoredResourceKey = "gcp.resource_type"
+)
+
+// key uniquely identifies a record descriptor by metric name and
+// instrumentation library name.
+type key struct {
+ name string
+ libraryname string
+}
+
+func keyOf(metrics metricdata.Metrics, library instrumentation.Library) key {
+ return key{
+ name: metrics.Name,
+ libraryname: library.Name,
+ }
+}
+
+// metricExporter is the implementation of OpenTelemetry metric exporter for
+// Google Cloud Monitoring.
+type metricExporter struct {
+ o *options
+ shutdown chan struct{}
+ // mdCache is the cache to hold MetricDescriptor to avoid creating duplicate MD.
+ mdCache map[key]*googlemetricpb.MetricDescriptor
+ client *monitoring.MetricClient
+ mdLock sync.RWMutex
+ shutdownOnce sync.Once
+}
+
+// ForceFlush does nothing; the exporter holds no state.
+func (e *metricExporter) ForceFlush(ctx context.Context) error { return ctx.Err() }
+
+// Shutdown shuts down the client connections.
+func (e *metricExporter) Shutdown(ctx context.Context) error {
+ err := errShutdown
+ e.shutdownOnce.Do(func() {
+ close(e.shutdown)
+ err = errors.Join(ctx.Err(), e.client.Close())
+ })
+ return err
+}
+
+// newMetricExporter returns an exporter that uploads OTel metric data to Google Cloud Monitoring.
+func newMetricExporter(o *options) (*metricExporter, error) {
+ if strings.TrimSpace(o.projectID) == "" {
+ return nil, errBlankProjectID
+ }
+
+ clientOpts := append([]option.ClientOption{option.WithGRPCDialOption(grpc.WithUserAgent(userAgent))}, o.monitoringClientOptions...)
+ ctx := o.context
+ if ctx == nil {
+ ctx = context.Background()
+ }
+ client, err := monitoring.NewMetricClient(ctx, clientOpts...)
+ if err != nil {
+ return nil, err
+ }
+
+ if o.compression == "gzip" {
+ client.CallOptions.GetMetricDescriptor = append(client.CallOptions.GetMetricDescriptor,
+ gax.WithGRPCOptions(grpc.UseCompressor(gzip.Name)))
+ client.CallOptions.CreateMetricDescriptor = append(client.CallOptions.CreateMetricDescriptor,
+ gax.WithGRPCOptions(grpc.UseCompressor(gzip.Name)))
+ client.CallOptions.CreateTimeSeries = append(client.CallOptions.CreateTimeSeries,
+ gax.WithGRPCOptions(grpc.UseCompressor(gzip.Name)))
+ client.CallOptions.CreateServiceTimeSeries = append(client.CallOptions.CreateServiceTimeSeries,
+ gax.WithGRPCOptions(grpc.UseCompressor(gzip.Name)))
+ }
+
+ cache := map[key]*googlemetricpb.MetricDescriptor{}
+ e := &metricExporter{
+ o: o,
+ mdCache: cache,
+ client: client,
+ shutdown: make(chan struct{}),
+ }
+ return e, nil
+}
+
+var errShutdown = fmt.Errorf("exporter is shutdown")
+
+// Export exports OpenTelemetry Metrics to Google Cloud Monitoring.
+func (me *metricExporter) Export(ctx context.Context, rm *metricdata.ResourceMetrics) error {
+ select {
+ case <-me.shutdown:
+ return errShutdown
+ default:
+ }
+
+ if me.o.destinationProjectQuota {
+ ctx = metadata.NewOutgoingContext(ctx, metadata.New(map[string]string{"x-goog-user-project": strings.TrimPrefix(me.o.projectID, "projects/")}))
+ }
+ return errors.Join(
+ me.exportMetricDescriptor(ctx, rm),
+ me.exportTimeSeries(ctx, rm),
+ )
+}
+
+// Temporality returns the Temporality to use for an instrument kind.
+func (me *metricExporter) Temporality(ik metric.InstrumentKind) metricdata.Temporality {
+ return metric.DefaultTemporalitySelector(ik)
+}
+
+// Aggregation returns the Aggregation to use for an instrument kind.
+func (me *metricExporter) Aggregation(ik metric.InstrumentKind) metric.Aggregation {
+ return metric.DefaultAggregationSelector(ik)
+}
+
+// exportMetricDescriptor creates a MetricDescriptor from the record
+// if the descriptor is not registered in Cloud Monitoring yet.
+func (me *metricExporter) exportMetricDescriptor(ctx context.Context, rm *metricdata.ResourceMetrics) error {
+ // We only send metric descriptors if we're configured *and* we're not sending service timeseries.
+ if me.o.disableCreateMetricDescriptors {
+ return nil
+ }
+
+ me.mdLock.Lock()
+ defer me.mdLock.Unlock()
+ mds := make(map[key]*googlemetricpb.MetricDescriptor)
+ extraLabels := me.extraLabelsFromResource(rm.Resource)
+ for _, scope := range rm.ScopeMetrics {
+ for _, metrics := range scope.Metrics {
+ k := keyOf(metrics, scope.Scope)
+
+ if _, ok := me.mdCache[k]; ok {
+ continue
+ }
+
+ if _, localok := mds[k]; !localok {
+ md := me.recordToMdpb(metrics, extraLabels)
+ mds[k] = md
+ }
+ }
+ }
+
+	// TODO: This process is synchronous and blocks for a long time if the records in cps
+ // have many different descriptors. In the cps.ForEach above, it should spawn
+ // goroutines to send CreateMetricDescriptorRequest asynchronously in the case
+ // the descriptor does not exist in global cache (me.mdCache).
+ // See details in #26.
+ var errs []error
+ for kmd, md := range mds {
+ err := me.createMetricDescriptorIfNeeded(ctx, md)
+ if err == nil {
+ me.mdCache[kmd] = md
+ }
+ errs = append(errs, err)
+ }
+ return errors.Join(errs...)
+}
+
+func (me *metricExporter) createMetricDescriptorIfNeeded(ctx context.Context, md *googlemetricpb.MetricDescriptor) error {
+ mdReq := &monitoringpb.GetMetricDescriptorRequest{
+ Name: fmt.Sprintf("projects/%s/metricDescriptors/%s", me.o.projectID, md.Type),
+ }
+ _, err := me.client.GetMetricDescriptor(ctx, mdReq)
+ if err == nil {
+ // If the metric descriptor already exists, skip the CreateMetricDescriptor call.
+ // Metric descriptors cannot be updated without deleting them first, so there
+ // isn't anything we can do here:
+ // https://cloud.google.com/monitoring/custom-metrics/creating-metrics#md-modify
+ return nil
+ }
+ req := &monitoringpb.CreateMetricDescriptorRequest{
+ Name: fmt.Sprintf("projects/%s", me.o.projectID),
+ MetricDescriptor: md,
+ }
+ _, err = me.client.CreateMetricDescriptor(ctx, req)
+ return err
+}
+
+// exportTimeSeries creates TimeSeries from the records in cps.
+// res should be the common resource among all TimeSeries, such as instance id, application name and so on.
+func (me *metricExporter) exportTimeSeries(ctx context.Context, rm *metricdata.ResourceMetrics) error {
+ tss, err := me.recordsToTspbs(rm)
+ if len(tss) == 0 {
+ return err
+ }
+
+ name := fmt.Sprintf("projects/%s", me.o.projectID)
+
+ errs := []error{err}
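+	// Upload at most sendBatchSize (200) time series per request, as required by the GCM API.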
+ for i := 0; i < len(tss); i += sendBatchSize {
+ j := i + sendBatchSize
+ if j >= len(tss) {
+ j = len(tss)
+ }
+
+ // TODO: When this exporter is rewritten, support writing to multiple
+ // projects based on the "gcp.project.id" resource.
+ req := &monitoringpb.CreateTimeSeriesRequest{
+ Name: name,
+ TimeSeries: tss[i:j],
+ }
+ if me.o.createServiceTimeSeries {
+ errs = append(errs, me.client.CreateServiceTimeSeries(ctx, req))
+ } else {
+ errs = append(errs, me.client.CreateTimeSeries(ctx, req))
+ }
+ }
+
+ return errors.Join(errs...)
+}
+
+func (me *metricExporter) extraLabelsFromResource(res *resource.Resource) *attribute.Set {
+ set, _ := attribute.NewSetWithFiltered(res.Attributes(), me.o.resourceAttributeFilter)
+ return &set
+}
+
+// descToMetricType converts a descriptor to the MetricType proto type.
+// Unless a custom formatter is configured, this returns the default value
+// ("workload.googleapis.com/[metric type]").
+func (me *metricExporter) descToMetricType(desc metricdata.Metrics) string {
+ if formatter := me.o.metricDescriptorTypeFormatter; formatter != nil {
+ return formatter(desc)
+ }
+ return fmt.Sprintf(cloudMonitoringMetricDescriptorNameFormat, desc.Name)
+}
+
+// metricTypeToDisplayName takes a GCM metric type, like (workload.googleapis.com/MyCoolMetric) and returns the display name.
+func metricTypeToDisplayName(mURL string) string {
+ // strip domain, keep path after domain.
+ u, err := url.Parse(fmt.Sprintf("metrics://%s", mURL))
+ if err != nil || u.Path == "" {
+ return mURL
+ }
+ return strings.TrimLeft(u.Path, "/")
+}
+
+// recordToMdpb extracts data and converts them to googlemetricpb.MetricDescriptor.
+func (me *metricExporter) recordToMdpb(metrics metricdata.Metrics, extraLabels *attribute.Set) *googlemetricpb.MetricDescriptor {
+ name := metrics.Name
+ typ := me.descToMetricType(metrics)
+ kind, valueType := recordToMdpbKindType(metrics.Data)
+
+ // Detailed explanations on MetricDescriptor proto is not documented on
+ // generated Go packages. Refer to the original proto file.
+ // https://github.com/googleapis/googleapis/blob/50af053/google/api/metric.proto#L33
+ return &googlemetricpb.MetricDescriptor{
+ Name: name,
+ DisplayName: metricTypeToDisplayName(typ),
+ Type: typ,
+ MetricKind: kind,
+ ValueType: valueType,
+ Unit: string(metrics.Unit),
+ Description: metrics.Description,
+ Labels: labelDescriptors(metrics, extraLabels),
+ }
+}
+
+func labelDescriptors(metrics metricdata.Metrics, extraLabels *attribute.Set) []*label.LabelDescriptor {
+ labels := []*label.LabelDescriptor{}
+ seenKeys := map[string]struct{}{}
+ addAttributes := func(attr *attribute.Set) {
+ iter := attr.Iter()
+ for iter.Next() {
+ kv := iter.Attribute()
+ // Skip keys that have already been set
+ if _, ok := seenKeys[normalizeLabelKey(string(kv.Key))]; ok {
+ continue
+ }
+ labels = append(labels, &label.LabelDescriptor{
+ Key: normalizeLabelKey(string(kv.Key)),
+ })
+ seenKeys[normalizeLabelKey(string(kv.Key))] = struct{}{}
+ }
+ }
+ addAttributes(extraLabels)
+ switch a := metrics.Data.(type) {
+ case metricdata.Gauge[int64]:
+ for _, pt := range a.DataPoints {
+ addAttributes(&pt.Attributes)
+ }
+ case metricdata.Gauge[float64]:
+ for _, pt := range a.DataPoints {
+ addAttributes(&pt.Attributes)
+ }
+ case metricdata.Sum[int64]:
+ for _, pt := range a.DataPoints {
+ addAttributes(&pt.Attributes)
+ }
+ case metricdata.Sum[float64]:
+ for _, pt := range a.DataPoints {
+ addAttributes(&pt.Attributes)
+ }
+ case metricdata.Histogram[float64]:
+ for _, pt := range a.DataPoints {
+ addAttributes(&pt.Attributes)
+ }
+ case metricdata.Histogram[int64]:
+ for _, pt := range a.DataPoints {
+ addAttributes(&pt.Attributes)
+ }
+ }
+ return labels
+}
+
+type attributes struct {
+ attrs attribute.Set
+}
+
+func (attrs *attributes) GetString(key string) (string, bool) {
+ value, ok := attrs.attrs.Value(attribute.Key(key))
+ return value.AsString(), ok
+}
+
+// resourceToMonitoredResourcepb converts resource in OTel to MonitoredResource
+// proto type for Cloud Monitoring.
+//
+// https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.monitoredResourceDescriptors
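+//
+// If the resource carries a "gcp.resource_type" attribute matching the
+// configured custom monitored-resource type, the MonitoredResource is built
+// directly from the resource attributes; otherwise the standard OTel-to-GCM
+// resource mapping is applied.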
+func (me *metricExporter) resourceToMonitoredResourcepb(res *resource.Resource) *monitoredrespb.MonitoredResource {
+ platformMrType, platformMappingRequested := res.Set().Value(platformMappingMonitoredResourceKey)
+
+ // check if platform mapping is requested and possible
+ if platformMappingRequested && platformMrType.AsString() == me.o.monitoredResourceDescription.mrType {
+ // assemble attributes required to construct this MR
+ attributeMap := make(map[string]string)
+ for expectedLabel := range me.o.monitoredResourceDescription.mrLabels {
+ value, found := res.Set().Value(attribute.Key(expectedLabel))
+ if found {
+ attributeMap[expectedLabel] = value.AsString()
+ }
+ }
+ return &monitoredrespb.MonitoredResource{
+ Type: platformMrType.AsString(),
+ Labels: attributeMap,
+ }
+ }
+
+ gmr := resourcemapping.ResourceAttributesToMonitoringMonitoredResource(&attributes{
+ attrs: attribute.NewSet(res.Attributes()...),
+ })
+ newLabels := make(map[string]string, len(gmr.Labels))
+ for k, v := range gmr.Labels {
+ newLabels[k] = sanitizeUTF8(v)
+ }
+ mr := &monitoredrespb.MonitoredResource{
+ Type: gmr.Type,
+ Labels: newLabels,
+ }
+ return mr
+}
+
+// recordToMdpbKindType returns the mapping from OTel's record descriptor to
+// Cloud Monitoring's MetricKind and ValueType.
+func recordToMdpbKindType(a metricdata.Aggregation) (googlemetricpb.MetricDescriptor_MetricKind, googlemetricpb.MetricDescriptor_ValueType) {
+ switch agg := a.(type) {
+ case metricdata.Gauge[int64]:
+ return googlemetricpb.MetricDescriptor_GAUGE, googlemetricpb.MetricDescriptor_INT64
+ case metricdata.Gauge[float64]:
+ return googlemetricpb.MetricDescriptor_GAUGE, googlemetricpb.MetricDescriptor_DOUBLE
+ case metricdata.Sum[int64]:
+ if agg.IsMonotonic {
+ return googlemetricpb.MetricDescriptor_CUMULATIVE, googlemetricpb.MetricDescriptor_INT64
+ }
+ return googlemetricpb.MetricDescriptor_GAUGE, googlemetricpb.MetricDescriptor_INT64
+ case metricdata.Sum[float64]:
+ if agg.IsMonotonic {
+ return googlemetricpb.MetricDescriptor_CUMULATIVE, googlemetricpb.MetricDescriptor_DOUBLE
+ }
+ return googlemetricpb.MetricDescriptor_GAUGE, googlemetricpb.MetricDescriptor_DOUBLE
+ case metricdata.Histogram[int64], metricdata.Histogram[float64]:
+ return googlemetricpb.MetricDescriptor_CUMULATIVE, googlemetricpb.MetricDescriptor_DISTRIBUTION
+ default:
+ return googlemetricpb.MetricDescriptor_METRIC_KIND_UNSPECIFIED, googlemetricpb.MetricDescriptor_VALUE_TYPE_UNSPECIFIED
+ }
+}
+
+// recordToMpb converts data from records to Metric proto type for Cloud Monitoring.
+func (me *metricExporter) recordToMpb(metrics metricdata.Metrics, attributes attribute.Set, library instrumentation.Library, extraLabels *attribute.Set) *googlemetricpb.Metric {
+ me.mdLock.RLock()
+ defer me.mdLock.RUnlock()
+ k := keyOf(metrics, library)
+ md, ok := me.mdCache[k]
+ if !ok {
+ md = me.recordToMdpb(metrics, extraLabels)
+ }
+
+ labels := make(map[string]string)
+ addAttributes := func(attr *attribute.Set) {
+ iter := attr.Iter()
+ for iter.Next() {
+ kv := iter.Attribute()
+ labels[normalizeLabelKey(string(kv.Key))] = sanitizeUTF8(kv.Value.Emit())
+ }
+ }
+ addAttributes(extraLabels)
+ addAttributes(&attributes)
+
+ return &googlemetricpb.Metric{
+ Type: md.Type,
+ Labels: labels,
+ }
+}
+
+// recordToTspb converts a record to the TimeSeries proto type with the common resource.
+// ref. https://cloud.google.com/monitoring/api/ref_v3/rest/v3/TimeSeries
+func (me *metricExporter) recordToTspb(m metricdata.Metrics, mr *monitoredrespb.MonitoredResource, library instrumentation.Scope, extraLabels *attribute.Set) ([]*monitoringpb.TimeSeries, error) {
+ var tss []*monitoringpb.TimeSeries
+ var errs []error
+ if m.Data == nil {
+ return nil, nil
+ }
+ switch a := m.Data.(type) {
+ case metricdata.Gauge[int64]:
+ for _, point := range a.DataPoints {
+ ts, err := gaugeToTimeSeries[int64](point, m, mr)
+ if err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ ts.Metric = me.recordToMpb(m, point.Attributes, library, extraLabels)
+ tss = append(tss, ts)
+ }
+ case metricdata.Gauge[float64]:
+ for _, point := range a.DataPoints {
+ ts, err := gaugeToTimeSeries[float64](point, m, mr)
+ if err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ ts.Metric = me.recordToMpb(m, point.Attributes, library, extraLabels)
+ tss = append(tss, ts)
+ }
+ case metricdata.Sum[int64]:
+ for _, point := range a.DataPoints {
+ var ts *monitoringpb.TimeSeries
+ var err error
+ if a.IsMonotonic {
+ ts, err = sumToTimeSeries[int64](point, m, mr)
+ } else {
+ // Send non-monotonic sums as gauges
+ ts, err = gaugeToTimeSeries[int64](point, m, mr)
+ }
+ if err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ ts.Metric = me.recordToMpb(m, point.Attributes, library, extraLabels)
+ tss = append(tss, ts)
+ }
+ case metricdata.Sum[float64]:
+ for _, point := range a.DataPoints {
+ var ts *monitoringpb.TimeSeries
+ var err error
+ if a.IsMonotonic {
+ ts, err = sumToTimeSeries[float64](point, m, mr)
+ } else {
+ // Send non-monotonic sums as gauges
+ ts, err = gaugeToTimeSeries[float64](point, m, mr)
+ }
+ if err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ ts.Metric = me.recordToMpb(m, point.Attributes, library, extraLabels)
+ tss = append(tss, ts)
+ }
+ case metricdata.Histogram[int64]:
+ for _, point := range a.DataPoints {
+ ts, err := histogramToTimeSeries(point, m, mr, me.o.enableSumOfSquaredDeviation, me.o.projectID)
+ if err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ ts.Metric = me.recordToMpb(m, point.Attributes, library, extraLabels)
+ tss = append(tss, ts)
+ }
+ case metricdata.Histogram[float64]:
+ for _, point := range a.DataPoints {
+ ts, err := histogramToTimeSeries(point, m, mr, me.o.enableSumOfSquaredDeviation, me.o.projectID)
+ if err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ ts.Metric = me.recordToMpb(m, point.Attributes, library, extraLabels)
+ tss = append(tss, ts)
+ }
+ case metricdata.ExponentialHistogram[int64]:
+ for _, point := range a.DataPoints {
+ ts, err := expHistogramToTimeSeries(point, m, mr, me.o.enableSumOfSquaredDeviation, me.o.projectID)
+ if err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ ts.Metric = me.recordToMpb(m, point.Attributes, library, extraLabels)
+ tss = append(tss, ts)
+ }
+ case metricdata.ExponentialHistogram[float64]:
+ for _, point := range a.DataPoints {
+ ts, err := expHistogramToTimeSeries(point, m, mr, me.o.enableSumOfSquaredDeviation, me.o.projectID)
+ if err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ ts.Metric = me.recordToMpb(m, point.Attributes, library, extraLabels)
+ tss = append(tss, ts)
+ }
+ default:
+ errs = append(errs, errUnexpectedAggregationKind{kind: reflect.TypeOf(m.Data).String()})
+ }
+ return tss, errors.Join(errs...)
+}
+
+func (me *metricExporter) recordsToTspbs(rm *metricdata.ResourceMetrics) ([]*monitoringpb.TimeSeries, error) {
+ mr := me.resourceToMonitoredResourcepb(rm.Resource)
+ extraLabels := me.extraLabelsFromResource(rm.Resource)
+
+ var (
+ tss []*monitoringpb.TimeSeries
+ errs []error
+ )
+ for _, scope := range rm.ScopeMetrics {
+ for _, metrics := range scope.Metrics {
+ ts, err := me.recordToTspb(metrics, mr, scope.Scope, extraLabels)
+ errs = append(errs, err)
+ tss = append(tss, ts...)
+ }
+ }
+
+ return tss, errors.Join(errs...)
+}
+
+func sanitizeUTF8(s string) string {
+ return strings.ToValidUTF8(s, "�")
+}
+
+func gaugeToTimeSeries[N int64 | float64](point metricdata.DataPoint[N], metrics metricdata.Metrics, mr *monitoredrespb.MonitoredResource) (*monitoringpb.TimeSeries, error) {
+ value, valueType := numberDataPointToValue(point)
+ timestamp := timestamppb.New(point.Time)
+ if err := timestamp.CheckValid(); err != nil {
+ return nil, err
+ }
+ return &monitoringpb.TimeSeries{
+ Resource: mr,
+ Unit: string(metrics.Unit),
+ MetricKind: googlemetricpb.MetricDescriptor_GAUGE,
+ ValueType: valueType,
+ Points: []*monitoringpb.Point{{
+ Interval: &monitoringpb.TimeInterval{
+ EndTime: timestamp,
+ },
+ Value: value,
+ }},
+ }, nil
+}
+
+func sumToTimeSeries[N int64 | float64](point metricdata.DataPoint[N], metrics metricdata.Metrics, mr *monitoredrespb.MonitoredResource) (*monitoringpb.TimeSeries, error) {
+ interval, err := toNonemptyTimeIntervalpb(point.StartTime, point.Time)
+ if err != nil {
+ return nil, err
+ }
+ value, valueType := numberDataPointToValue[N](point)
+ return &monitoringpb.TimeSeries{
+ Resource: mr,
+ Unit: string(metrics.Unit),
+ MetricKind: googlemetricpb.MetricDescriptor_CUMULATIVE,
+ ValueType: valueType,
+ Points: []*monitoringpb.Point{{
+ Interval: interval,
+ Value: value,
+ }},
+ }, nil
+}
+
+// TODO(@dashpole): Refactor to pass control-coupling lint check.
+//
+//nolint:revive
+func histogramToTimeSeries[N int64 | float64](point metricdata.HistogramDataPoint[N], metrics metricdata.Metrics, mr *monitoredrespb.MonitoredResource, enableSOSD bool, projectID string) (*monitoringpb.TimeSeries, error) {
+ interval, err := toNonemptyTimeIntervalpb(point.StartTime, point.Time)
+ if err != nil {
+ return nil, err
+ }
+ distributionValue := histToDistribution(point, projectID)
+ if enableSOSD {
+ setSumOfSquaredDeviation(point, distributionValue)
+ }
+ return &monitoringpb.TimeSeries{
+ Resource: mr,
+ Unit: string(metrics.Unit),
+ MetricKind: googlemetricpb.MetricDescriptor_CUMULATIVE,
+ ValueType: googlemetricpb.MetricDescriptor_DISTRIBUTION,
+ Points: []*monitoringpb.Point{{
+ Interval: interval,
+ Value: &monitoringpb.TypedValue{
+ Value: &monitoringpb.TypedValue_DistributionValue{
+ DistributionValue: distributionValue,
+ },
+ },
+ }},
+ }, nil
+}
+
+func expHistogramToTimeSeries[N int64 | float64](point metricdata.ExponentialHistogramDataPoint[N], metrics metricdata.Metrics, mr *monitoredrespb.MonitoredResource, enableSOSD bool, projectID string) (*monitoringpb.TimeSeries, error) {
+ interval, err := toNonemptyTimeIntervalpb(point.StartTime, point.Time)
+ if err != nil {
+ return nil, err
+ }
+ distributionValue := expHistToDistribution(point, projectID)
+ // TODO: Implement "setSumOfSquaredDeviationExpHist" for parameter "enableSOSD" functionality.
+ return &monitoringpb.TimeSeries{
+ Resource: mr,
+ Unit: string(metrics.Unit),
+ MetricKind: googlemetricpb.MetricDescriptor_CUMULATIVE,
+ ValueType: googlemetricpb.MetricDescriptor_DISTRIBUTION,
+ Points: []*monitoringpb.Point{{
+ Interval: interval,
+ Value: &monitoringpb.TypedValue{
+ Value: &monitoringpb.TypedValue_DistributionValue{
+ DistributionValue: distributionValue,
+ },
+ },
+ }},
+ }, nil
+}
+
+func toNonemptyTimeIntervalpb(start, end time.Time) (*monitoringpb.TimeInterval, error) {
+ // The end time of a new interval must be at least a millisecond after the end time of the
+ // previous interval, for all non-gauge types.
+ // https://cloud.google.com/monitoring/api/ref_v3/rpc/google.monitoring.v3#timeinterval
+ if end.Sub(start).Milliseconds() <= 1 {
+ end = start.Add(time.Millisecond)
+ }
+ startpb := timestamppb.New(start)
+ endpb := timestamppb.New(end)
+ err := errors.Join(
+ startpb.CheckValid(),
+ endpb.CheckValid(),
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ return &monitoringpb.TimeInterval{
+ StartTime: startpb,
+ EndTime: endpb,
+ }, nil
+}
+
+func histToDistribution[N int64 | float64](hist metricdata.HistogramDataPoint[N], projectID string) *distribution.Distribution {
+ counts := make([]int64, len(hist.BucketCounts))
+ for i, v := range hist.BucketCounts {
+ counts[i] = int64(v)
+ }
+ var mean float64
+ if !math.IsNaN(float64(hist.Sum)) && hist.Count > 0 { // Avoid divide-by-zero
+ mean = float64(hist.Sum) / float64(hist.Count)
+ }
+ return &distribution.Distribution{
+ Count: int64(hist.Count),
+ Mean: mean,
+ BucketCounts: counts,
+ BucketOptions: &distribution.Distribution_BucketOptions{
+ Options: &distribution.Distribution_BucketOptions_ExplicitBuckets{
+ ExplicitBuckets: &distribution.Distribution_BucketOptions_Explicit{
+ Bounds: hist.Bounds,
+ },
+ },
+ },
+ Exemplars: toDistributionExemplar[N](hist.Exemplars, projectID),
+ }
+}
+
+func expHistToDistribution[N int64 | float64](hist metricdata.ExponentialHistogramDataPoint[N], projectID string) *distribution.Distribution {
+ // First calculate underflow bucket with all negatives + zeros.
+ underflow := hist.ZeroCount
+ negativeBuckets := hist.NegativeBucket.Counts
+ for i := 0; i < len(negativeBuckets); i++ {
+ underflow += negativeBuckets[i]
+ }
+
+ // Next, pull in remaining buckets.
+ counts := make([]int64, len(hist.PositiveBucket.Counts)+2)
+ bucketOptions := &distribution.Distribution_BucketOptions{}
+ counts[0] = int64(underflow)
+ positiveBuckets := hist.PositiveBucket.Counts
+ for i := 0; i < len(positiveBuckets); i++ {
+ counts[i+1] = int64(positiveBuckets[i])
+ }
+ // Overflow bucket is always empty
+ counts[len(counts)-1] = 0
+
+ if len(hist.PositiveBucket.Counts) == 0 {
+ // We cannot send exponential distributions with no positive buckets,
+ // instead we send a simple overflow/underflow histogram.
+ bucketOptions.Options = &distribution.Distribution_BucketOptions_ExplicitBuckets{
+ ExplicitBuckets: &distribution.Distribution_BucketOptions_Explicit{
+ Bounds: []float64{0},
+ },
+ }
+ } else {
+ // Exponential histogram
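+		// The OTel exponential layout has bucket boundaries at
+		// base^(offset+i) with base = 2^(2^-hist.Scale), while GCM
+		// buckets span [Scale*GrowthFactor^(i-1), Scale*GrowthFactor^i),
+		// so GrowthFactor = base and Scale = base^offset line the two
+		// layouts up. E.g. hist.Scale = 1 gives growth = 2^0.5 ~ 1.414,
+		// and PositiveBucket.Offset = 2 gives scale = growth^2 = 2.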
+ growth := math.Exp2(math.Exp2(-float64(hist.Scale)))
+ scale := math.Pow(growth, float64(hist.PositiveBucket.Offset))
+ bucketOptions.Options = &distribution.Distribution_BucketOptions_ExponentialBuckets{
+ ExponentialBuckets: &distribution.Distribution_BucketOptions_Exponential{
+ GrowthFactor: growth,
+ Scale: scale,
+ NumFiniteBuckets: int32(len(counts) - 2),
+ },
+ }
+ }
+
+ var mean float64
+ if !math.IsNaN(float64(hist.Sum)) && hist.Count > 0 { // Avoid divide-by-zero
+ mean = float64(hist.Sum) / float64(hist.Count)
+ }
+
+ return &distribution.Distribution{
+ Count: int64(hist.Count),
+ Mean: mean,
+ BucketCounts: counts,
+ BucketOptions: bucketOptions,
+ Exemplars: toDistributionExemplar[N](hist.Exemplars, projectID),
+ }
+}
+
+func toDistributionExemplar[N int64 | float64](Exemplars []metricdata.Exemplar[N], projectID string) []*distribution.Distribution_Exemplar {
+ var exemplars []*distribution.Distribution_Exemplar
+ for _, e := range Exemplars {
+ attachments := []*anypb.Any{}
+ if hasValidSpanContext(e) {
+ sctx, err := anypb.New(&monitoringpb.SpanContext{
+ SpanName: fmt.Sprintf("projects/%s/traces/%s/spans/%s", projectID, hex.EncodeToString(e.TraceID[:]), hex.EncodeToString(e.SpanID[:])),
+ })
+ if err == nil {
+ attachments = append(attachments, sctx)
+ }
+ }
+ if len(e.FilteredAttributes) > 0 {
+ attr, err := anypb.New(&monitoringpb.DroppedLabels{
+ Label: attributesToLabels(e.FilteredAttributes),
+ })
+ if err == nil {
+ attachments = append(attachments, attr)
+ }
+ }
+ exemplars = append(exemplars, &distribution.Distribution_Exemplar{
+ Value: float64(e.Value),
+ Timestamp: timestamppb.New(e.Time),
+ Attachments: attachments,
+ })
+ }
+ sort.Slice(exemplars, func(i, j int) bool {
+ return exemplars[i].Value < exemplars[j].Value
+ })
+ return exemplars
+}
+
+func attributesToLabels(attrs []attribute.KeyValue) map[string]string {
+ labels := make(map[string]string, len(attrs))
+ for _, attr := range attrs {
+ labels[normalizeLabelKey(string(attr.Key))] = sanitizeUTF8(attr.Value.Emit())
+ }
+ return labels
+}
+
+var (
+ nilTraceID trace.TraceID
+ nilSpanID trace.SpanID
+)
+
+func hasValidSpanContext[N int64 | float64](e metricdata.Exemplar[N]) bool {
+ return !bytes.Equal(e.TraceID[:], nilTraceID[:]) && !bytes.Equal(e.SpanID[:], nilSpanID[:])
+}
+
+func setSumOfSquaredDeviation[N int64 | float64](hist metricdata.HistogramDataPoint[N], dist *distribution.Distribution) {
+ var prevBound float64
+ // Calculate the sum of squared deviation.
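+	// E.g. bounds [10] with bucket counts [4, 1] and mean 6 give
+	// 4*((0+10)/2-6)^2 + 1*(10-6)^2 = 4 + 16 = 20.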
+ for i := 0; i < len(hist.Bounds); i++ {
+ // Assume all points in the bucket occur at the middle of the bucket range
+ middleOfBucket := (prevBound + hist.Bounds[i]) / 2
+ dist.SumOfSquaredDeviation += float64(dist.BucketCounts[i]) * (middleOfBucket - dist.Mean) * (middleOfBucket - dist.Mean)
+ prevBound = hist.Bounds[i]
+ }
+ // The infinity bucket is an implicit +Inf bound after the list of explicit bounds.
+ // Assume points in the infinity bucket are at the top of the previous bucket
+ middleOfInfBucket := prevBound
+ if len(dist.BucketCounts) > 0 {
+ dist.SumOfSquaredDeviation += float64(dist.BucketCounts[len(dist.BucketCounts)-1]) * (middleOfInfBucket - dist.Mean) * (middleOfInfBucket - dist.Mean)
+ }
+}
+
+func numberDataPointToValue[N int64 | float64](
+ point metricdata.DataPoint[N],
+) (*monitoringpb.TypedValue, googlemetricpb.MetricDescriptor_ValueType) {
+ switch v := any(point.Value).(type) {
+ case int64:
+ return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{
+ Int64Value: v,
+ }},
+ googlemetricpb.MetricDescriptor_INT64
+ case float64:
+ return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{
+ DoubleValue: v,
+ }},
+ googlemetricpb.MetricDescriptor_DOUBLE
+ }
+ // It is impossible to reach this statement
+ return nil, googlemetricpb.MetricDescriptor_INT64
+}
+
+// https://github.com/googleapis/googleapis/blob/c4c562f89acce603fb189679836712d08c7f8584/google/api/metric.proto#L149
+//
+// > The label key name must follow:
+// >
+// > * Only upper and lower-case letters, digits and underscores (_) are
+// > allowed.
+// > * Label name must start with a letter or digit.
+// > * The maximum length of a label name is 100 characters.
+//
+// Note: this does not truncate if a label is too long.
+func normalizeLabelKey(s string) string {
+ if len(s) == 0 {
+ return s
+ }
+ s = strings.Map(sanitizeRune, s)
+ if unicode.IsDigit(rune(s[0])) {
+ s = "key_" + s
+ }
+ return s
+}
+
+// converts anything that is not a letter or digit to an underscore.
+func sanitizeRune(r rune) rune {
+ if unicode.IsLetter(r) || unicode.IsDigit(r) {
+ return r
+ }
+ // Everything else turns into an underscore
+ return '_'
+}
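
As a quick illustration of the two helpers above, a standalone sketch (the
functions are copied verbatim for demonstration; the package itself exports
neither of them):

package main

import (
	"fmt"
	"strings"
	"unicode"
)

// sanitizeRune and normalizeLabelKey mirror the vendored helpers above.
func sanitizeRune(r rune) rune {
	if unicode.IsLetter(r) || unicode.IsDigit(r) {
		return r
	}
	return '_'
}

func normalizeLabelKey(s string) string {
	if len(s) == 0 {
		return s
	}
	s = strings.Map(sanitizeRune, s)
	if unicode.IsDigit(rune(s[0])) {
		s = "key_" + s
	}
	return s
}

func main() {
	fmt.Println(normalizeLabelKey("k8s.pod/name")) // k8s_pod_name
	fmt.Println(normalizeLabelKey("9region"))      // key_9region
}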
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/option.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/option.go
new file mode 100644
index 0000000000000..11b96067d557d
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/option.go
@@ -0,0 +1,201 @@
+// Copyright 2020-2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric
+
+import (
+ "context"
+ "fmt"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+ semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
+
+ apioption "google.golang.org/api/option"
+)
+
+var userAgent = fmt.Sprintf("opentelemetry-go %s; google-cloud-metric-exporter %s", otel.Version(), Version())
+
+// MonitoredResourceDescription holds the information required to map an OTel resource to a
+// specific Google Cloud MonitoredResource.
+type MonitoredResourceDescription struct {
+ mrLabels map[string]struct{}
+ mrType string
+}
+
+// Option is a function type that is passed to the exporter initialization function.
+type Option func(*options)
+
+// options is the struct to hold options for metricExporter and its client instance.
+type options struct {
+ // context allows you to provide a custom context for API calls.
+ //
+ // This context will be used several times: first, to create Cloud Monitoring
+ // clients, and then every time a new batch of metrics needs to be uploaded.
+ //
+ // If unset, context.Background() will be used.
+ context context.Context
+	// metricDescriptorTypeFormatter is the custom formatter for the MetricDescriptor.Type.
+ // By default, the format string is "workload.googleapis.com/[metric name]".
+ metricDescriptorTypeFormatter func(metricdata.Metrics) string
+	// resourceAttributeFilter determines which resource attributes to
+ // add to metrics as metric labels. By default, it adds service.name,
+ // service.namespace, and service.instance.id.
+ resourceAttributeFilter attribute.Filter
+ // monitoredResourceDescription sets whether to attempt mapping the OTel Resource to a specific
+ // Google Cloud Monitored Resource. When provided, the exporter attempts to map only to the provided
+ // monitored resource type.
+ monitoredResourceDescription MonitoredResourceDescription
+ // projectID is the identifier of the Cloud Monitoring
+ // project the user is uploading the stats data to.
+ // If not set, this will default to your "Application Default Credentials".
+ // For details see: https://developers.google.com/accounts/docs/application-default-credentials.
+ //
+ // It will be used in the project_id label of a Google Cloud Monitoring monitored
+ // resource if the resource does not inherently belong to a specific
+ // project, e.g. on-premise resource like k8s_container or generic_task.
+ projectID string
+ // compression enables gzip compression on gRPC calls.
+ compression string
+ // monitoringClientOptions are additional options to be passed
+ // to the underlying Stackdriver Monitoring API client.
+ // Optional.
+ monitoringClientOptions []apioption.ClientOption
+ // destinationProjectQuota sets whether the request should use quota from
+ // the destination project for the request.
+ destinationProjectQuota bool
+
+ // disableCreateMetricDescriptors disables automatic MetricDescriptor creation
+ disableCreateMetricDescriptors bool
+
+ // enableSumOfSquaredDeviation enables calculation of an estimated sum of squared
+ // deviation. It isn't correct, so we don't send it by default.
+ enableSumOfSquaredDeviation bool
+
+ // createServiceTimeSeries sets whether to create timeseries using `CreateServiceTimeSeries`.
+ // Implicitly, this sets `disableCreateMetricDescriptors` to true.
+ createServiceTimeSeries bool
+}
+
+// WithProjectID sets Google Cloud Platform project as projectID.
+// Without using this option, it automatically detects the project ID
+// from the default credential detection process.
+// Please find the detailed order of the default credential detection process in the doc:
+// https://godoc.org/golang.org/x/oauth2/google#FindDefaultCredentials
+func WithProjectID(id string) func(o *options) {
+ return func(o *options) {
+ o.projectID = id
+ }
+}
+
+// WithDestinationProjectQuota enables per-request usage of the destination
+// project's quota. For example, when setting gcp.project.id on a metric.
+func WithDestinationProjectQuota() func(o *options) {
+ return func(o *options) {
+ o.destinationProjectQuota = true
+ }
+}
+
+// WithMonitoringClientOptions adds options for the Cloud Monitoring client instance.
+// Available options are defined in the google.golang.org/api/option package.
+func WithMonitoringClientOptions(opts ...apioption.ClientOption) func(o *options) {
+ return func(o *options) {
+ o.monitoringClientOptions = append(o.monitoringClientOptions, opts...)
+ }
+}
+
+// WithMetricDescriptorTypeFormatter sets the custom formatter for MetricDescriptor.
+// Note that the format has to follow the convention defined in the official document.
+// The default is "workload.googleapis.com/[metric name]".
+// ref. https://cloud.google.com/monitoring/custom-metrics/creating-metrics#custom_metric_names
+func WithMetricDescriptorTypeFormatter(f func(metricdata.Metrics) string) func(o *options) {
+ return func(o *options) {
+ o.metricDescriptorTypeFormatter = f
+ }
+}
+
+// WithFilteredResourceAttributes determines which resource attributes to
+// add to metrics as metric labels. By default, it adds service.name,
+// service.namespace, and service.instance.id. This is recommended to avoid
+// writing duplicate timeseries against the same monitored resource. Use
+// WithFilteredResourceAttributes(NoAttributes()) to disable the addition of
+// resource attributes to metric labels.
+func WithFilteredResourceAttributes(filter attribute.Filter) func(o *options) {
+ return func(o *options) {
+ o.resourceAttributeFilter = filter
+ }
+}
+
+// DefaultResourceAttributesFilter is the default filter applied to resource
+// attributes.
+func DefaultResourceAttributesFilter(kv attribute.KeyValue) bool {
+ return (kv.Key == semconv.ServiceNameKey ||
+ kv.Key == semconv.ServiceNamespaceKey ||
+ kv.Key == semconv.ServiceInstanceIDKey) && len(kv.Value.AsString()) > 0
+}
+
+// NoAttributes can be passed to WithFilteredResourceAttributes to disable
+// adding resource attributes as metric labels.
+func NoAttributes(attribute.KeyValue) bool {
+ return false
+}
+
+// WithDisableCreateMetricDescriptors will disable the automatic creation of
+// MetricDescriptors when an unknown metric is set to be exported.
+func WithDisableCreateMetricDescriptors() func(o *options) {
+ return func(o *options) {
+ o.disableCreateMetricDescriptors = true
+ }
+}
+
+// WithCompression sets the compression to use for gRPC requests.
+func WithCompression(c string) func(o *options) {
+ return func(o *options) {
+ o.compression = c
+ }
+}
+
+// WithSumOfSquaredDeviation sets the SumOfSquaredDeviation field on histograms.
+// It is an estimate, and is not the actual sum of squared deviations.
+func WithSumOfSquaredDeviation() func(o *options) {
+ return func(o *options) {
+ o.enableSumOfSquaredDeviation = true
+ }
+}
+
+// WithCreateServiceTimeSeries configures the exporter to use `CreateServiceTimeSeries` for creating timeseries.
+// If this is used, metric descriptors are not exported.
+func WithCreateServiceTimeSeries() func(o *options) {
+ return func(o *options) {
+ o.createServiceTimeSeries = true
+ o.disableCreateMetricDescriptors = true
+ }
+}
+
+// WithMonitoredResourceDescription configures the exporter to attempt to map the OpenTelemetry Resource to the provided
+// Google MonitoredResource. The provided mrLabels would be searched for in the OpenTelemetry Resource Attributes and if
+// found, would be included in the MonitoredResource labels.
+func WithMonitoredResourceDescription(mrType string, mrLabels []string) func(o *options) {
+ return func(o *options) {
+ mrLabelSet := make(map[string]struct{})
+ for _, label := range mrLabels {
+ mrLabelSet[label] = struct{}{}
+ }
+ o.monitoredResourceDescription = MonitoredResourceDescription{
+ mrType: mrType,
+ mrLabels: mrLabelSet,
+ }
+ }
+}
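
The options above are consumed by the package's exporter constructor. A minimal
usage sketch, assuming the package's New constructor (defined elsewhere in the
package, not in this hunk) and using a placeholder project ID:

package main

import (
	"context"
	"log"

	mexporter "github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	// "my-project" is a placeholder; mexporter.New is assumed from the package.
	exporter, err := mexporter.New(
		mexporter.WithProjectID("my-project"),
		mexporter.WithFilteredResourceAttributes(mexporter.NoAttributes),
		mexporter.WithDisableCreateMetricDescriptors(),
	)
	if err != nil {
		log.Fatal(err)
	}
	// Wire the exporter into the OTel SDK via a periodic reader.
	provider := sdkmetric.NewMeterProvider(
		sdkmetric.WithReader(sdkmetric.NewPeriodicReader(exporter)),
	)
	defer func() { _ = provider.Shutdown(context.Background()) }()
}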
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/version.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/version.go
new file mode 100644
index 0000000000000..e31119fc1293f
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/version.go
@@ -0,0 +1,21 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric
+
+// Version is the current release version of the OpenTelemetry
+// Operations Metric Exporter in use.
+func Version() string {
+ return "0.48.1"
+}
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping/LICENSE b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping/LICENSE
new file mode 100644
index 0000000000000..d645695673349
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping/resourcemapping.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping/resourcemapping.go
new file mode 100644
index 0000000000000..4b5af517fe62d
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping/resourcemapping.go
@@ -0,0 +1,286 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package resourcemapping
+
+import (
+ "strings"
+
+ semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
+ monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
+)
+
+const (
+ ProjectIDAttributeKey = "gcp.project.id"
+
+ awsAccount = "aws_account"
+ awsEc2Instance = "aws_ec2_instance"
+ clusterName = "cluster_name"
+ containerName = "container_name"
+ gceInstance = "gce_instance"
+ genericNode = "generic_node"
+ genericTask = "generic_task"
+ instanceID = "instance_id"
+ job = "job"
+ k8sCluster = "k8s_cluster"
+ k8sContainer = "k8s_container"
+ k8sNode = "k8s_node"
+ k8sPod = "k8s_pod"
+ location = "location"
+ namespace = "namespace"
+ namespaceName = "namespace_name"
+ nodeID = "node_id"
+ nodeName = "node_name"
+ podName = "pod_name"
+ region = "region"
+ taskID = "task_id"
+ zone = "zone"
+ gaeInstance = "gae_instance"
+ gaeApp = "gae_app"
+ gaeModuleID = "module_id"
+ gaeVersionID = "version_id"
+ cloudRunRevision = "cloud_run_revision"
+ cloudFunction = "cloud_function"
+ cloudFunctionName = "function_name"
+ serviceName = "service_name"
+ configurationName = "configuration_name"
+ revisionName = "revision_name"
+ bmsInstance = "baremetalsolution.googleapis.com/Instance"
+ unknownServicePrefix = "unknown_service"
+)
+
+var (
+	// monitoredResourceMappings maps each monitored resource type to the config
+	// used to populate its GCM resource labels from OTel resource attributes.
+ monitoredResourceMappings = map[string]map[string]struct {
+ // If none of the otelKeys are present in the Resource, fallback to this literal value
+ fallbackLiteral string
+ // OTel resource keys to try and populate the resource label from. For entries with
+ // multiple OTel resource keys, the keys' values will be coalesced in order until there
+ // is a non-empty value.
+ otelKeys []string
+ }{
+ gceInstance: {
+ zone: {otelKeys: []string{string(semconv.CloudAvailabilityZoneKey)}},
+ instanceID: {otelKeys: []string{string(semconv.HostIDKey)}},
+ },
+ k8sContainer: {
+ location: {otelKeys: []string{
+ string(semconv.CloudAvailabilityZoneKey),
+ string(semconv.CloudRegionKey),
+ }},
+ clusterName: {otelKeys: []string{string(semconv.K8SClusterNameKey)}},
+ namespaceName: {otelKeys: []string{string(semconv.K8SNamespaceNameKey)}},
+ podName: {otelKeys: []string{string(semconv.K8SPodNameKey)}},
+ containerName: {otelKeys: []string{string(semconv.K8SContainerNameKey)}},
+ },
+ k8sPod: {
+ location: {otelKeys: []string{
+ string(semconv.CloudAvailabilityZoneKey),
+ string(semconv.CloudRegionKey),
+ }},
+ clusterName: {otelKeys: []string{string(semconv.K8SClusterNameKey)}},
+ namespaceName: {otelKeys: []string{string(semconv.K8SNamespaceNameKey)}},
+ podName: {otelKeys: []string{string(semconv.K8SPodNameKey)}},
+ },
+ k8sNode: {
+ location: {otelKeys: []string{
+ string(semconv.CloudAvailabilityZoneKey),
+ string(semconv.CloudRegionKey),
+ }},
+ clusterName: {otelKeys: []string{string(semconv.K8SClusterNameKey)}},
+ nodeName: {otelKeys: []string{string(semconv.K8SNodeNameKey)}},
+ },
+ k8sCluster: {
+ location: {otelKeys: []string{
+ string(semconv.CloudAvailabilityZoneKey),
+ string(semconv.CloudRegionKey),
+ }},
+ clusterName: {otelKeys: []string{string(semconv.K8SClusterNameKey)}},
+ },
+ gaeInstance: {
+ location: {otelKeys: []string{
+ string(semconv.CloudAvailabilityZoneKey),
+ string(semconv.CloudRegionKey),
+ }},
+ gaeModuleID: {otelKeys: []string{string(semconv.FaaSNameKey)}},
+ gaeVersionID: {otelKeys: []string{string(semconv.FaaSVersionKey)}},
+ instanceID: {otelKeys: []string{string(semconv.FaaSInstanceKey)}},
+ },
+ gaeApp: {
+ location: {otelKeys: []string{
+ string(semconv.CloudAvailabilityZoneKey),
+ string(semconv.CloudRegionKey),
+ }},
+ gaeModuleID: {otelKeys: []string{string(semconv.FaaSNameKey)}},
+ gaeVersionID: {otelKeys: []string{string(semconv.FaaSVersionKey)}},
+ },
+ awsEc2Instance: {
+ instanceID: {otelKeys: []string{string(semconv.HostIDKey)}},
+ region: {
+ otelKeys: []string{
+ string(semconv.CloudAvailabilityZoneKey),
+ string(semconv.CloudRegionKey),
+ },
+ },
+ awsAccount: {otelKeys: []string{string(semconv.CloudAccountIDKey)}},
+ },
+ bmsInstance: {
+ location: {otelKeys: []string{string(semconv.CloudRegionKey)}},
+ instanceID: {otelKeys: []string{string(semconv.HostIDKey)}},
+ },
+ genericTask: {
+ location: {
+ otelKeys: []string{
+ string(semconv.CloudAvailabilityZoneKey),
+ string(semconv.CloudRegionKey),
+ },
+ fallbackLiteral: "global",
+ },
+ namespace: {otelKeys: []string{string(semconv.ServiceNamespaceKey)}},
+ job: {otelKeys: []string{string(semconv.ServiceNameKey), string(semconv.FaaSNameKey)}},
+ taskID: {otelKeys: []string{string(semconv.ServiceInstanceIDKey), string(semconv.FaaSInstanceKey)}},
+ },
+ genericNode: {
+ location: {
+ otelKeys: []string{
+ string(semconv.CloudAvailabilityZoneKey),
+ string(semconv.CloudRegionKey),
+ },
+ fallbackLiteral: "global",
+ },
+ namespace: {otelKeys: []string{string(semconv.ServiceNamespaceKey)}},
+ nodeID: {otelKeys: []string{string(semconv.HostIDKey), string(semconv.HostNameKey)}},
+ },
+ }
+)
+
+// ReadOnlyAttributes is an interface to abstract between pulling attributes from PData library or OTEL SDK.
+type ReadOnlyAttributes interface {
+ GetString(string) (string, bool)
+}
+
+// ResourceAttributesToLoggingMonitoredResource converts from a set of OTEL resource attributes into a
+// GCP monitored resource type and label set for Cloud Logging.
+// E.g.
+// This may output `gce_instance` type with appropriate labels.
+func ResourceAttributesToLoggingMonitoredResource(attrs ReadOnlyAttributes) *monitoredrespb.MonitoredResource {
+ cloudPlatform, _ := attrs.GetString(string(semconv.CloudPlatformKey))
+ switch cloudPlatform {
+ case semconv.CloudPlatformGCPAppEngine.Value.AsString():
+ return createMonitoredResource(gaeApp, attrs)
+ default:
+ return commonResourceAttributesToMonitoredResource(cloudPlatform, attrs)
+ }
+}
+
+// ResourceAttributesToMonitoringMonitoredResource converts from a set of OTEL resource attributes into a
+// GCP monitored resource type and label set for Cloud Monitoring
+// E.g.
+// This may output `gce_instance` type with appropriate labels.
+func ResourceAttributesToMonitoringMonitoredResource(attrs ReadOnlyAttributes) *monitoredrespb.MonitoredResource {
+ cloudPlatform, _ := attrs.GetString(string(semconv.CloudPlatformKey))
+ switch cloudPlatform {
+ case semconv.CloudPlatformGCPAppEngine.Value.AsString():
+ return createMonitoredResource(gaeInstance, attrs)
+ default:
+ return commonResourceAttributesToMonitoredResource(cloudPlatform, attrs)
+ }
+}
+
+func commonResourceAttributesToMonitoredResource(cloudPlatform string, attrs ReadOnlyAttributes) *monitoredrespb.MonitoredResource {
+ switch cloudPlatform {
+ case semconv.CloudPlatformGCPComputeEngine.Value.AsString():
+ return createMonitoredResource(gceInstance, attrs)
+ case semconv.CloudPlatformAWSEC2.Value.AsString():
+ return createMonitoredResource(awsEc2Instance, attrs)
+ // TODO(alex-basinov): replace this string literal with semconv.CloudPlatformGCPBareMetalSolution
+ // once https://github.com/open-telemetry/semantic-conventions/pull/64 makes its way
+ // into the semconv module.
+ case "gcp_bare_metal_solution":
+ return createMonitoredResource(bmsInstance, attrs)
+ default:
+ // if k8s.cluster.name is set, pattern match for various k8s resources.
+ // this will also match non-cloud k8s platforms like minikube.
+ if _, ok := attrs.GetString(string(semconv.K8SClusterNameKey)); ok {
+ // Try for most to least specific k8s_container, k8s_pod, etc
+ if _, ok := attrs.GetString(string(semconv.K8SContainerNameKey)); ok {
+ return createMonitoredResource(k8sContainer, attrs)
+ } else if _, ok := attrs.GetString(string(semconv.K8SPodNameKey)); ok {
+ return createMonitoredResource(k8sPod, attrs)
+ } else if _, ok := attrs.GetString(string(semconv.K8SNodeNameKey)); ok {
+ return createMonitoredResource(k8sNode, attrs)
+ } else {
+ return createMonitoredResource(k8sCluster, attrs)
+ }
+ }
+
+ // Fallback to generic_task
+ _, hasServiceName := attrs.GetString(string(semconv.ServiceNameKey))
+ _, hasFaaSName := attrs.GetString(string(semconv.FaaSNameKey))
+ _, hasServiceInstanceID := attrs.GetString(string(semconv.ServiceInstanceIDKey))
+ _, hasFaaSInstance := attrs.GetString(string(semconv.FaaSInstanceKey))
+ if (hasServiceName && hasServiceInstanceID) || (hasFaaSInstance && hasFaaSName) {
+ return createMonitoredResource(genericTask, attrs)
+ }
+
+ // Everything else fallback to generic_node
+ return createMonitoredResource(genericNode, attrs)
+ }
+}
+
+func createMonitoredResource(
+ monitoredResourceType string,
+ resourceAttrs ReadOnlyAttributes,
+) *monitoredrespb.MonitoredResource {
+ mappings := monitoredResourceMappings[monitoredResourceType]
+ mrLabels := make(map[string]string, len(mappings))
+
+ for mrKey, mappingConfig := range mappings {
+ mrValue := ""
+ ok := false
+ // Coalesce the possible keys in order
+ for _, otelKey := range mappingConfig.otelKeys {
+ mrValue, ok = resourceAttrs.GetString(otelKey)
+ if mrValue != "" && !strings.HasPrefix(mrValue, unknownServicePrefix) {
+ break
+ }
+ }
+ if mrValue == "" && contains(mappingConfig.otelKeys, string(semconv.ServiceNameKey)) {
+ // the service name started with unknown_service, and was ignored above
+ mrValue, ok = resourceAttrs.GetString(string(semconv.ServiceNameKey))
+ }
+ if !ok || mrValue == "" {
+ mrValue = mappingConfig.fallbackLiteral
+ }
+ mrLabels[mrKey] = sanitizeUTF8(mrValue)
+ }
+ return &monitoredrespb.MonitoredResource{
+ Type: monitoredResourceType,
+ Labels: mrLabels,
+ }
+}
+
+func contains(list []string, element string) bool {
+ for _, item := range list {
+ if item == element {
+ return true
+ }
+ }
+ return false
+}
+
+func sanitizeUTF8(s string) string {
+ return strings.ToValidUTF8(s, "�")
+}
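
A sketch of the mapping logic end to end, using a toy ReadOnlyAttributes
implementation. Note the resourcemapping package is internal, so the import
only works from within this module; the attribute values are made up:

package main

import (
	"fmt"

	"github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping"
)

// mapAttrs is a toy ReadOnlyAttributes implementation backed by a plain map.
type mapAttrs map[string]string

func (m mapAttrs) GetString(k string) (string, bool) {
	v, ok := m[k]
	return v, ok
}

func main() {
	// service.name + service.instance.id with no platform hints fall
	// through to the generic_task monitored resource.
	mr := resourcemapping.ResourceAttributesToMonitoringMonitoredResource(mapAttrs{
		"service.name":        "checkout",
		"service.instance.id": "pod-123",
		"cloud.region":        "us-central1",
	})
	fmt.Println(mr.Type)   // generic_task
	fmt.Println(mr.Labels) // map[job:checkout location:us-central1 namespace: task_id:pod-123]
}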
diff --git a/vendor/github.com/fsouza/fake-gcs-server/LICENSE b/vendor/github.com/fsouza/fake-gcs-server/LICENSE
index 529faa468606e..a619aaecef9d1 100644
--- a/vendor/github.com/fsouza/fake-gcs-server/LICENSE
+++ b/vendor/github.com/fsouza/fake-gcs-server/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2017-2019, Francisco Souza
+Copyright (c) Francisco Souza
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/bucket.go b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/bucket.go
index e2fa2ad3716ee..4026f1a4a0deb 100644
--- a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/bucket.go
+++ b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/bucket.go
@@ -6,49 +6,161 @@ package fakestorage
import (
"encoding/json"
+ "errors"
+ "fmt"
+ "io"
"net/http"
+ "regexp"
+ "github.com/fsouza/fake-gcs-server/internal/backend"
"github.com/gorilla/mux"
)
+var bucketRegexp = regexp.MustCompile(`^[a-zA-Z0-9][a-zA-Z0-9._-]*[a-zA-Z0-9]$`)
+
// CreateBucket creates a bucket inside the server, so any API calls that
// require the bucket name will recognize this bucket.
//
// If the bucket already exists, this method does nothing.
+//
+// Deprecated: use CreateBucketWithOpts.
func (s *Server) CreateBucket(name string) {
- s.mtx.Lock()
- defer s.mtx.Unlock()
- err := s.backend.CreateBucket(name)
+ err := s.backend.CreateBucket(name, backend.BucketAttrs{VersioningEnabled: false, DefaultEventBasedHold: false})
+ if err != nil {
+ panic(err)
+ }
+}
+
+func (s *Server) updateBucket(r *http.Request) jsonResponse {
+ bucketName := unescapeMuxVars(mux.Vars(r))["bucketName"]
+ attrsToUpdate := getBucketAttrsToUpdate(r.Body)
+ err := s.backend.UpdateBucket(bucketName, attrsToUpdate)
if err != nil {
panic(err)
}
+ return jsonResponse{}
+}
+
+func getBucketAttrsToUpdate(body io.ReadCloser) backend.BucketAttrs {
+ var data struct {
+ DefaultEventBasedHold bool `json:"defaultEventBasedHold,omitempty"`
+ Versioning bucketVersioning `json:"versioning,omitempty"`
+ }
+ err := json.NewDecoder(body).Decode(&data)
+ if err != nil {
+ panic(err)
+ }
+ attrsToUpdate := backend.BucketAttrs{
+ DefaultEventBasedHold: data.DefaultEventBasedHold,
+ VersioningEnabled: data.Versioning.Enabled,
+ }
+ return attrsToUpdate
+}
+
+// CreateBucketOpts defines the properties of a bucket you can create with
+// CreateBucketWithOpts.
+type CreateBucketOpts struct {
+ Name string
+ VersioningEnabled bool
+ DefaultEventBasedHold bool
+}
+
+// CreateBucketWithOpts creates a bucket inside the server, so any API calls that
+// require the bucket name will recognize this bucket. Use CreateBucketOpts to
+// customize the options for this bucket.
+//
+// If the underlying backend returns an error, this method panics.
+func (s *Server) CreateBucketWithOpts(opts CreateBucketOpts) {
+ err := s.backend.CreateBucket(opts.Name, backend.BucketAttrs{VersioningEnabled: opts.VersioningEnabled, DefaultEventBasedHold: opts.DefaultEventBasedHold})
+ if err != nil {
+ panic(err)
+ }
+}
+
+func (s *Server) createBucketByPost(r *http.Request) jsonResponse {
+ // Minimal version of Bucket from google.golang.org/api/storage/v1
+
+ var data struct {
+ Name string `json:"name,omitempty"`
+ Versioning *bucketVersioning `json:"versioning,omitempty"`
+ DefaultEventBasedHold bool `json:"defaultEventBasedHold,omitempty"`
+ }
+
+ // Read the bucket props from the request body JSON
+ decoder := json.NewDecoder(r.Body)
+ if err := decoder.Decode(&data); err != nil {
+ return jsonResponse{errorMessage: err.Error(), status: http.StatusBadRequest}
+ }
+ name := data.Name
+ versioning := false
+ if data.Versioning != nil {
+ versioning = data.Versioning.Enabled
+ }
+ defaultEventBasedHold := data.DefaultEventBasedHold
+ if err := validateBucketName(name); err != nil {
+ return jsonResponse{errorMessage: err.Error(), status: http.StatusBadRequest}
+ }
+
+ _, err := s.backend.GetBucket(name)
+ if err == nil {
+ return jsonResponse{
+ errorMessage: fmt.Sprintf(
+ "A Cloud Storage bucket named '%s' already exists. "+
+ "Try another name. Bucket names must be globally unique "+
+ "across all Google Cloud projects, including those "+
+ "outside of your organization.", name),
+ status: http.StatusConflict,
+ }
+ }
+
+ // Create the named bucket
+ if err := s.backend.CreateBucket(name, backend.BucketAttrs{VersioningEnabled: versioning, DefaultEventBasedHold: defaultEventBasedHold}); err != nil {
+ return jsonResponse{errorMessage: err.Error()}
+ }
+
+ // Return the created bucket:
+ bucket, err := s.backend.GetBucket(name)
+ if err != nil {
+ return jsonResponse{errorMessage: err.Error()}
+ }
+ return jsonResponse{data: newBucketResponse(bucket, s.options.BucketsLocation)}
}
-func (s *Server) listBuckets(w http.ResponseWriter, r *http.Request) {
- s.mtx.RLock()
- defer s.mtx.RUnlock()
+func (s *Server) listBuckets(r *http.Request) jsonResponse {
+ buckets, err := s.backend.ListBuckets()
+ if err != nil {
+ return jsonResponse{errorMessage: err.Error()}
+ }
+ return jsonResponse{data: newListBucketsResponse(buckets, s.options.BucketsLocation)}
+}
- bucketNames, err := s.backend.ListBuckets()
+func (s *Server) getBucket(r *http.Request) jsonResponse {
+ bucketName := unescapeMuxVars(mux.Vars(r))["bucketName"]
+ bucket, err := s.backend.GetBucket(bucketName)
+ if err != nil {
+ return jsonResponse{status: http.StatusNotFound}
+ }
+ return jsonResponse{data: newBucketResponse(bucket, s.options.BucketsLocation)}
+}
+
+func (s *Server) deleteBucket(r *http.Request) jsonResponse {
+ bucketName := unescapeMuxVars(mux.Vars(r))["bucketName"]
+ err := s.backend.DeleteBucket(bucketName)
+ if err == backend.BucketNotFound {
+ return jsonResponse{status: http.StatusNotFound}
+ }
+ if err == backend.BucketNotEmpty {
+ return jsonResponse{status: http.StatusPreconditionFailed, errorMessage: err.Error()}
+ }
if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
+ return jsonResponse{status: http.StatusInternalServerError, errorMessage: err.Error()}
}
- resp := newListBucketsResponse(bucketNames)
- json.NewEncoder(w).Encode(resp)
+ return jsonResponse{}
}
-func (s *Server) getBucket(w http.ResponseWriter, r *http.Request) {
- bucketName := mux.Vars(r)["bucketName"]
- s.mtx.RLock()
- defer s.mtx.RUnlock()
- encoder := json.NewEncoder(w)
- if err := s.backend.GetBucket(bucketName); err != nil {
- w.WriteHeader(http.StatusNotFound)
- err := newErrorResponse(http.StatusNotFound, "Not found", nil)
- encoder.Encode(err)
- return
+func validateBucketName(bucketName string) error {
+ if !bucketRegexp.MatchString(bucketName) {
+ return errors.New("invalid bucket name")
}
- resp := newBucketResponse(bucketName)
- w.WriteHeader(http.StatusOK)
- encoder.Encode(resp)
+ return nil
}
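
For context, a typical test-side use of the bucket API above; fakestorage.NewServer,
Server.Stop, and Server.Client are assumed from the package's public API (only
CreateBucketWithOpts appears in this hunk):

package main

import (
	"context"
	"fmt"

	"github.com/fsouza/fake-gcs-server/fakestorage"
)

func main() {
	server := fakestorage.NewServer(nil) // start with no preloaded objects
	defer server.Stop()

	server.CreateBucketWithOpts(fakestorage.CreateBucketOpts{
		Name:              "test-bucket",
		VersioningEnabled: true,
	})

	// Server.Client returns a *storage.Client wired to the fake backend.
	attrs, err := server.Client().Bucket("test-bucket").Attrs(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println(attrs.Name, attrs.VersioningEnabled) // test-bucket true
}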
diff --git a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/config.go b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/config.go
new file mode 100644
index 0000000000000..a57d154279a5e
--- /dev/null
+++ b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/config.go
@@ -0,0 +1,30 @@
+package fakestorage
+
+import (
+ "encoding/json"
+ "net/http"
+)
+
+func (s *Server) updateServerConfig(r *http.Request) jsonResponse {
+ var configOptions struct {
+ ExternalUrl string `json:"externalUrl,omitempty"`
+ PublicHost string `json:"publicHost,omitempty"`
+ }
+ err := json.NewDecoder(r.Body).Decode(&configOptions)
+ if err != nil {
+ return jsonResponse{
+ status: http.StatusBadRequest,
+ errorMessage: "Update server config payload can not be parsed.",
+ }
+ }
+
+ if configOptions.ExternalUrl != "" {
+ s.externalURL = configOptions.ExternalUrl
+ }
+
+ if configOptions.PublicHost != "" {
+ s.publicHost = configOptions.PublicHost
+ }
+
+ return jsonResponse{status: http.StatusOK}
+}
diff --git a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/json_response.go b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/json_response.go
new file mode 100644
index 0000000000000..99e8ce7d4ccab
--- /dev/null
+++ b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/json_response.go
@@ -0,0 +1,84 @@
+package fakestorage
+
+import (
+ "encoding/json"
+ "errors"
+ "net/http"
+ "os"
+ "syscall"
+
+ "github.com/fsouza/fake-gcs-server/internal/backend"
+)
+
+type jsonResponse struct {
+ status int
+ header http.Header
+ data any
+ errorMessage string
+}
+
+type jsonHandler = func(r *http.Request) jsonResponse
+
+func jsonToHTTPHandler(h jsonHandler) http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+ resp := h(r)
+ w.Header().Set("Content-Type", "application/json")
+ for name, values := range resp.header {
+ for _, value := range values {
+ w.Header().Add(name, value)
+ }
+ }
+
+ status := resp.getStatus()
+ var data any
+ if status > 399 {
+ data = newErrorResponse(status, resp.getErrorMessage(status), resp.getErrorList(status))
+ } else {
+ data = resp.data
+ }
+
+ w.WriteHeader(status)
+ json.NewEncoder(w).Encode(data)
+ }
+}
+
+func (r *jsonResponse) getStatus() int {
+ if r.status > 0 {
+ return r.status
+ }
+ if r.errorMessage != "" {
+ return http.StatusInternalServerError
+ }
+ return http.StatusOK
+}
+
+func (r *jsonResponse) getErrorMessage(status int) string {
+ if r.errorMessage != "" {
+ return r.errorMessage
+ }
+ return http.StatusText(status)
+}
+
+func (r *jsonResponse) getErrorList(status int) []apiError {
+ if status == http.StatusOK {
+ return nil
+ } else {
+ return []apiError{{
+ Domain: "global",
+ Reason: http.StatusText(status),
+ Message: r.getErrorMessage(status),
+ }}
+ }
+}
+
+func errToJsonResponse(err error) jsonResponse {
+ status := 0
+ var pathError *os.PathError
+ if errors.As(err, &pathError) && pathError.Err == syscall.ENAMETOOLONG {
+ status = http.StatusBadRequest
+ }
+ if err == backend.PreConditionFailed {
+ status = http.StatusPreconditionFailed
+ }
+ return jsonResponse{errorMessage: err.Error(), status: status}
+}
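
The jsonResponse/jsonToHTTPHandler pair above is a small adapter pattern:
handlers return a plain value, and a single function centralizes status
selection and JSON encoding. A condensed standalone sketch of the same idea
(simplified; these are not the vendored types):

package main

import (
	"encoding/json"
	"net/http"
)

type reply struct {
	status int
	data   any
	errMsg string
}

// toHTTP centralizes status selection and JSON encoding, like jsonToHTTPHandler.
func toHTTP(h func(*http.Request) reply) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		resp := h(r)
		w.Header().Set("Content-Type", "application/json")
		status := resp.status
		if status == 0 {
			status = http.StatusOK
		}
		w.WriteHeader(status)
		if resp.errMsg != "" {
			json.NewEncoder(w).Encode(map[string]string{"error": resp.errMsg})
			return
		}
		json.NewEncoder(w).Encode(resp.data)
	}
}

func main() {
	http.Handle("/ping", toHTTP(func(*http.Request) reply {
		return reply{data: map[string]string{"status": "ok"}}
	}))
	_ = http.ListenAndServe(":8080", nil)
}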
diff --git a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/mux_tranport.go b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/mux_tranport.go
index afaa2efeac76a..b228c787ae682 100644
--- a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/mux_tranport.go
+++ b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/mux_tranport.go
@@ -7,16 +7,14 @@ package fakestorage
import (
"net/http"
"net/http/httptest"
-
- "github.com/gorilla/mux"
)
type muxTransport struct {
- router *mux.Router
+ handler http.Handler
}
func (t *muxTransport) RoundTrip(r *http.Request) (*http.Response, error) {
w := httptest.NewRecorder()
- t.router.ServeHTTP(w, r)
+ t.handler.ServeHTTP(w, r)
return w.Result(), nil
}
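
muxTransport lets an *http.Client exercise the fake server entirely in-process:
RoundTrip records the handler's response instead of opening a socket. A
self-contained sketch of the same technique, with a placeholder host name:

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

// inProcessTransport serves each request through an http.Handler in-process.
type inProcessTransport struct{ handler http.Handler }

func (t inProcessTransport) RoundTrip(r *http.Request) (*http.Response, error) {
	w := httptest.NewRecorder()
	t.handler.ServeHTTP(w, r)
	return w.Result(), nil
}

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/hello", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, "hi")
	})
	client := &http.Client{Transport: inProcessTransport{handler: mux}}
	resp, err := client.Get("http://fake-host/hello") // host is ignored by the transport
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body)) // hi
}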
diff --git a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/object.go b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/object.go
index bc1d472f36e30..b229a452331e6 100644
--- a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/object.go
+++ b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/object.go
@@ -5,84 +5,357 @@
package fakestorage
import (
+ "bytes"
+ "compress/gzip"
"encoding/json"
+ "encoding/xml"
+ "errors"
"fmt"
+ "io"
"net/http"
+ "slices"
"sort"
"strconv"
"strings"
+ "time"
+ "cloud.google.com/go/storage"
"github.com/fsouza/fake-gcs-server/internal/backend"
+ "github.com/fsouza/fake-gcs-server/internal/notification"
"github.com/gorilla/mux"
)
-// Object represents the object that is stored within the fake server.
-type Object struct {
- BucketName string `json:"-"`
- Name string `json:"name"`
- Content []byte `json:"-"`
+var errInvalidGeneration = errors.New("invalid generation ID")
+
+// ObjectAttrs holds only the meta-data about an object, without its contents.
+type ObjectAttrs struct {
+ BucketName string
+ Name string
+ Size int64
+ ContentType string
+ ContentEncoding string
+ ContentDisposition string
+ CacheControl string
// Crc32c checksum of Content. calculated by server when it's upload methods are used.
- Crc32c string `json:"crc32c,omitempty"`
- Md5Hash string `json:"md5hash,omitempty"`
+ Crc32c string
+ Md5Hash string
+ Etag string
+ ACL []storage.ACLRule
+ // Dates and generation can be manually injected, so you can do assertions on them,
+ // or let us fill these fields for you
+ Created time.Time
+ Updated time.Time
+ Deleted time.Time
+ CustomTime time.Time
+ Generation int64
+ Metadata map[string]string
}
-func (o *Object) id() string {
+func (o *ObjectAttrs) id() string {
return o.BucketName + "/" + o.Name
}
-type objectList []Object
+type jsonObject struct {
+ BucketName string `json:"bucket"`
+ Name string `json:"name"`
+ Size int64 `json:"size,string"`
+ ContentType string `json:"contentType"`
+ ContentEncoding string `json:"contentEncoding"`
+ ContentDisposition string `json:"contentDisposition"`
+ Crc32c string `json:"crc32c,omitempty"`
+ Md5Hash string `json:"md5Hash,omitempty"`
+ Etag string `json:"etag,omitempty"`
+ ACL []aclRule `json:"acl,omitempty"`
+ Created time.Time `json:"created,omitempty"`
+ Updated time.Time `json:"updated,omitempty"`
+ Deleted time.Time `json:"deleted,omitempty"`
+ CustomTime time.Time `json:"customTime,omitempty"`
+ Generation int64 `json:"generation,omitempty,string"`
+ Metadata map[string]string `json:"metadata,omitempty"`
+}
+
+// MarshalJSON for ObjectAttrs to use ACLRule instead of storage.ACLRule
+func (o ObjectAttrs) MarshalJSON() ([]byte, error) {
+ temp := jsonObject{
+ BucketName: o.BucketName,
+ Name: o.Name,
+ ContentType: o.ContentType,
+ ContentEncoding: o.ContentEncoding,
+ ContentDisposition: o.ContentDisposition,
+ Size: o.Size,
+ Crc32c: o.Crc32c,
+ Md5Hash: o.Md5Hash,
+ Etag: o.Etag,
+ Created: o.Created,
+ Updated: o.Updated,
+ Deleted: o.Deleted,
+ CustomTime: o.CustomTime,
+ Generation: o.Generation,
+ Metadata: o.Metadata,
+ }
+ temp.ACL = make([]aclRule, len(o.ACL))
+ for i, ACL := range o.ACL {
+ temp.ACL[i] = aclRule(ACL)
+ }
+ return json.Marshal(temp)
+}
+
+// UnmarshalJSON for ObjectAttrs uses aclRule instead of storage.ACLRule
+func (o *ObjectAttrs) UnmarshalJSON(data []byte) error {
+ var temp jsonObject
+ if err := json.Unmarshal(data, &temp); err != nil {
+ return err
+ }
+ o.BucketName = temp.BucketName
+ o.Name = temp.Name
+ o.ContentType = temp.ContentType
+ o.ContentEncoding = temp.ContentEncoding
+ o.ContentDisposition = temp.ContentDisposition
+ o.Size = temp.Size
+ o.Crc32c = temp.Crc32c
+ o.Md5Hash = temp.Md5Hash
+ o.Etag = temp.Etag
+ o.Created = temp.Created
+ o.Updated = temp.Updated
+ o.Deleted = temp.Deleted
+ o.Generation = temp.Generation
+ o.Metadata = temp.Metadata
+ o.CustomTime = temp.CustomTime
+ o.ACL = make([]storage.ACLRule, len(temp.ACL))
+ for i, ACL := range temp.ACL {
+ o.ACL[i] = storage.ACLRule(ACL)
+ }
+
+ return nil
+}
+
+// Object represents an object that is stored within the fake server. The
+// content of this type is buffered, i.e. it is kept in memory.
+// Use StreamingObject to stream the content from a reader, e.g. a file.
+type Object struct {
+ ObjectAttrs
+ Content []byte `json:"-"`
+}
+
+type noopSeekCloser struct {
+ io.ReadSeeker
+}
+
+func (n noopSeekCloser) Close() error {
+ return nil
+}
+
+func (o Object) StreamingObject() StreamingObject {
+ return StreamingObject{
+ ObjectAttrs: o.ObjectAttrs,
+ Content: noopSeekCloser{bytes.NewReader(o.Content)},
+ }
+}
+
+// StreamingObject is the streaming version of Object.
+type StreamingObject struct {
+ ObjectAttrs
+ Content io.ReadSeekCloser `json:"-"`
+}
+
+func (o *StreamingObject) Close() error {
+ if o != nil && o.Content != nil {
+ return o.Content.Close()
+ }
+ return nil
+}
+
+func (o *StreamingObject) BufferedObject() (Object, error) {
+ data, err := io.ReadAll(o.Content)
+ return Object{
+ ObjectAttrs: o.ObjectAttrs,
+ Content: data,
+ }, err
+}
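
Object buffers its content in memory, while StreamingObject wraps an io.ReadSeekCloser; the two convert losslessly through StreamingObject() and BufferedObject(). A hedged usage sketch based on the API above (bucket and object names are made up):

```go
package main

import (
	"fmt"

	"github.com/fsouza/fake-gcs-server/fakestorage"
)

func main() {
	obj := fakestorage.Object{
		ObjectAttrs: fakestorage.ObjectAttrs{BucketName: "bkt", Name: "greeting.txt"},
		Content:     []byte("hello"),
	}
	streaming := obj.StreamingObject() // zero-copy wrapper around the buffer
	defer streaming.Close()            // a no-op here, via noopSeekCloser

	buffered, err := streaming.BufferedObject() // drains the stream back into memory
	if err != nil {
		panic(err)
	}
	fmt.Println(string(buffered.Content)) // hello
}
```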
+
+// aclRule is an alias of storage.ACLRule that allows custom JSON marshaling
+type aclRule storage.ACLRule
+
+// projectTeam is an alias of storage.ProjectTeam that allows custom JSON marshaling
+type projectTeam storage.ProjectTeam
+
+// MarshalJSON for aclRule to customize field names
+func (acl aclRule) MarshalJSON() ([]byte, error) {
+ temp := struct {
+ Entity storage.ACLEntity `json:"entity"`
+ EntityID string `json:"entityId"`
+ Role storage.ACLRole `json:"role"`
+ Domain string `json:"domain"`
+ Email string `json:"email"`
+ ProjectTeam *projectTeam `json:"projectTeam"`
+ }{
+ Entity: acl.Entity,
+ EntityID: acl.EntityID,
+ Role: acl.Role,
+ Domain: acl.Domain,
+ Email: acl.Email,
+ ProjectTeam: (*projectTeam)(acl.ProjectTeam),
+ }
+ return json.Marshal(temp)
+}
-func (o objectList) Len() int {
- return len(o)
+// UnmarshalJSON for aclRule to customize field names
+func (acl *aclRule) UnmarshalJSON(data []byte) error {
+ temp := struct {
+ Entity storage.ACLEntity `json:"entity"`
+ EntityID string `json:"entityId"`
+ Role storage.ACLRole `json:"role"`
+ Domain string `json:"domain"`
+ Email string `json:"email"`
+ ProjectTeam *projectTeam `json:"projectTeam"`
+ }{}
+ if err := json.Unmarshal(data, &temp); err != nil {
+ return err
+ }
+ acl.Entity = temp.Entity
+ acl.EntityID = temp.EntityID
+ acl.Role = temp.Role
+ acl.Domain = temp.Domain
+ acl.Email = temp.Email
+ acl.ProjectTeam = (*storage.ProjectTeam)(temp.ProjectTeam)
+ return nil
}
-func (o objectList) Less(i int, j int) bool {
- return o[i].Name < o[j].Name
+// MarshalJSON for projectTeam to customize field names
+func (team projectTeam) MarshalJSON() ([]byte, error) {
+ temp := struct {
+ ProjectNumber string `json:"projectNumber"`
+ Team string `json:"team"`
+ }{
+ ProjectNumber: team.ProjectNumber,
+ Team: team.Team,
+ }
+ return json.Marshal(temp)
}
-func (o *objectList) Swap(i int, j int) {
- d := *o
- d[i], d[j] = d[j], d[i]
+// UnmarshalJSON for projectTeam to customize field names
+func (team *projectTeam) UnmarshalJSON(data []byte) error {
+ temp := struct {
+ ProjectNumber string `json:"projectNumber"`
+ Team string `json:"team"`
+ }{}
+ if err := json.Unmarshal(data, &temp); err != nil {
+ return err
+ }
+ team.ProjectNumber = temp.ProjectNumber
+ team.Team = temp.Team
+ return nil
}
-// CreateObject stores the given object internally.
+// CreateObject is the non-streaming version of CreateObjectStreaming.
//
-// If the bucket within the object doesn't exist, it also creates it. If the
-// object already exists, it overrides the object.
+// In addition to streaming, CreateObjectStreaming returns an error instead of
+// panicking when an error occurs.
func (s *Server) CreateObject(obj Object) {
- s.mtx.Lock()
- defer s.mtx.Unlock()
- err := s.createObject(obj)
+ err := s.CreateObjectStreaming(obj.StreamingObject())
if err != nil {
panic(err)
}
}
-func (s *Server) createObject(obj Object) error {
- return s.backend.CreateObject(toBackendObjects([]Object{obj})[0])
+// CreateObjectStreaming stores the given object internally.
+//
+// If the bucket within the object doesn't exist, it also creates it. If the
+// object already exists, it overwrites the object.
+func (s *Server) CreateObjectStreaming(obj StreamingObject) error {
+ obj, err := s.createObject(obj, backend.NoConditions{})
+ if err != nil {
+ return err
+ }
+ obj.Close()
+ return nil
+}
+
+func (s *Server) createObject(obj StreamingObject, conditions backend.Conditions) (StreamingObject, error) {
+ oldBackendObj, err := s.backend.GetObject(obj.BucketName, obj.Name)
+ // Calling Close before checking err is okay on objects, and the object
+ // may need to be closed whether or not there's an error.
+ defer oldBackendObj.Close() //lint:ignore SA5001 // see above
+
+ prevVersionExisted := err == nil
+
+ // The caller is responsible for closing the created object.
+ newBackendObj, err := s.backend.CreateObject(toBackendObjects([]StreamingObject{obj})[0], conditions)
+ if err != nil {
+ return StreamingObject{}, err
+ }
+
+ var newObjEventAttr map[string]string
+ if prevVersionExisted {
+ newObjEventAttr = map[string]string{
+ "overwroteGeneration": strconv.FormatInt(oldBackendObj.Generation, 10),
+ }
+
+ oldObjEventAttr := map[string]string{
+ "overwrittenByGeneration": strconv.FormatInt(newBackendObj.Generation, 10),
+ }
+
+ bucket, _ := s.backend.GetBucket(obj.BucketName)
+ if bucket.VersioningEnabled {
+ s.eventManager.Trigger(&oldBackendObj, notification.EventArchive, oldObjEventAttr)
+ } else {
+ s.eventManager.Trigger(&oldBackendObj, notification.EventDelete, oldObjEventAttr)
+ }
+ }
+
+ newObj := fromBackendObjects([]backend.StreamingObject{newBackendObj})[0]
+ s.eventManager.Trigger(&newBackendObj, notification.EventFinalize, newObjEventAttr)
+ return newObj, nil
+}
+
+type ListOptions struct {
+ Prefix string
+ Delimiter string
+ Versions bool
+ StartOffset string
+ EndOffset string
+ IncludeTrailingDelimiter bool
}
// ListObjects returns a sorted list of objects that match the given criteria,
// or an error if the bucket doesn't exist.
-func (s *Server) ListObjects(bucketName, prefix, delimiter string) ([]Object, []string, error) {
- s.mtx.RLock()
- defer s.mtx.RUnlock()
- backendObjects, err := s.backend.ListObjects(bucketName)
+//
+// Deprecated: use ListObjectsWithOptions.
+func (s *Server) ListObjects(bucketName, prefix, delimiter string, versions bool) ([]ObjectAttrs, []string, error) {
+ return s.ListObjectsWithOptions(bucketName, ListOptions{
+ Prefix: prefix,
+ Delimiter: delimiter,
+ Versions: versions,
+ })
+}
+
+func (s *Server) ListObjectsWithOptions(bucketName string, options ListOptions) ([]ObjectAttrs, []string, error) {
+ backendObjects, err := s.backend.ListObjects(bucketName, options.Prefix, options.Versions)
if err != nil {
return nil, nil, err
}
- objects := fromBackendObjects(backendObjects)
- olist := objectList(objects)
- sort.Sort(&olist)
- var respObjects []Object
+ objects := fromBackendObjectsAttrs(backendObjects)
+ slices.SortFunc(objects, func(left, right ObjectAttrs) int {
+ return strings.Compare(left.Name, right.Name)
+ })
+ var respObjects []ObjectAttrs
prefixes := make(map[string]bool)
- for _, obj := range olist {
- if strings.HasPrefix(obj.Name, prefix) {
- objName := strings.Replace(obj.Name, prefix, "", 1)
- delimPos := strings.Index(objName, delimiter)
- if delimiter != "" && delimPos > -1 {
- prefixes[obj.Name[:len(prefix)+delimPos+1]] = true
- } else {
+ for _, obj := range objects {
+ if !strings.HasPrefix(obj.Name, options.Prefix) {
+ continue
+ }
+ objName := strings.Replace(obj.Name, options.Prefix, "", 1)
+ delimPos := strings.Index(objName, options.Delimiter)
+ if options.Delimiter != "" && delimPos > -1 {
+ prefix := obj.Name[:len(options.Prefix)+delimPos+1]
+ if isInOffset(prefix, options.StartOffset, options.EndOffset) {
+ prefixes[prefix] = true
+ }
+ if options.IncludeTrailingDelimiter && obj.Name == prefix {
+ respObjects = append(respObjects, obj)
+ }
+ } else {
+ if isInOffset(obj.Name, options.StartOffset, options.EndOffset) {
respObjects = append(respObjects, obj)
}
}
@@ -95,143 +368,818 @@ func (s *Server) ListObjects(bucketName, prefix, delimiter string) ([]Object, []
return respObjects, respPrefixes, nil
}
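
A hedged sketch of driving ListObjectsWithOptions from test code; the bucket name, object names, and offsets are illustrative, and it assumes NewServer accepts initial objects without content:

```go
package main

import (
	"fmt"

	"github.com/fsouza/fake-gcs-server/fakestorage"
)

func main() {
	server := fakestorage.NewServer([]fakestorage.Object{
		{ObjectAttrs: fakestorage.ObjectAttrs{BucketName: "bkt", Name: "logs/2023-06-01/app.log"}},
		{ObjectAttrs: fakestorage.ObjectAttrs{BucketName: "bkt", Name: "logs/2024-06-01/app.log"}},
	})
	defer server.Stop()

	objs, prefixes, err := server.ListObjectsWithOptions("bkt", fakestorage.ListOptions{
		Prefix:      "logs/",
		Delimiter:   "/",
		StartOffset: "logs/2023-01-01", // inclusive lower bound
		EndOffset:   "logs/2024-01-01", // exclusive upper bound
	})
	if err != nil {
		panic(err)
	}
	// With the delimiter set, the 2023 object collapses into the
	// "logs/2023-06-01/" prefix; the 2024 object falls outside EndOffset.
	fmt.Println(len(objs), prefixes)
}
```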
-func toBackendObjects(objects []Object) []backend.Object {
- backendObjects := []backend.Object{}
+func isInOffset(name, startOffset, endOffset string) bool {
+ if endOffset != "" && startOffset != "" {
+ return strings.Compare(name, endOffset) < 0 && strings.Compare(name, startOffset) >= 0
+ } else if endOffset != "" {
+ return strings.Compare(name, endOffset) < 0
+ } else if startOffset != "" {
+ return strings.Compare(name, startOffset) >= 0
+ } else {
+ return true
+ }
+}
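
isInOffset treats the offsets as a half-open interval [StartOffset, EndOffset), with an empty string meaning unbounded on that side. A self-contained restatement (lexicographic `<` on Go strings is equivalent to the strings.Compare calls above):

```go
package main

import "fmt"

// inOffset mirrors isInOffset: start is inclusive, end is exclusive,
// and an empty bound disables that side of the check.
func inOffset(name, start, end string) bool {
	if start != "" && name < start {
		return false
	}
	if end != "" && name >= end {
		return false
	}
	return true
}

func main() {
	fmt.Println(inOffset("b", "a", "c")) // true:  "a" <= "b" < "c"
	fmt.Println(inOffset("c", "a", "c")) // false: the end offset is exclusive
	fmt.Println(inOffset("b", "", ""))   // true:  no bounds configured
}
```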
+
+func getCurrentIfZero(date time.Time) time.Time {
+ if date.IsZero() {
+ return time.Now()
+ }
+ return date
+}
+
+func toBackendObjects(objects []StreamingObject) []backend.StreamingObject {
+ backendObjects := make([]backend.StreamingObject, 0, len(objects))
for _, o := range objects {
- backendObjects = append(backendObjects, backend.Object{
- BucketName: o.BucketName,
- Name: o.Name,
- Content: o.Content,
- Crc32c: o.Crc32c,
- Md5Hash: o.Md5Hash,
+ backendObjects = append(backendObjects, backend.StreamingObject{
+ ObjectAttrs: backend.ObjectAttrs{
+ BucketName: o.BucketName,
+ Name: o.Name,
+ ContentType: o.ContentType,
+ ContentEncoding: o.ContentEncoding,
+ ContentDisposition: o.ContentDisposition,
+ CacheControl: o.CacheControl,
+ ACL: o.ACL,
+ Created: getCurrentIfZero(o.Created).Format(timestampFormat),
+ Deleted: o.Deleted.Format(timestampFormat),
+ Updated: getCurrentIfZero(o.Updated).Format(timestampFormat),
+ CustomTime: o.CustomTime.Format(timestampFormat),
+ Generation: o.Generation,
+ Metadata: o.Metadata,
+ },
+ Content: o.Content,
})
}
return backendObjects
}
-func fromBackendObjects(objects []backend.Object) []Object {
- backendObjects := []Object{}
+func bufferedObjectsToBackendObjects(objects []Object) []backend.StreamingObject {
+ backendObjects := make([]backend.StreamingObject, 0, len(objects))
+ for _, bufferedObject := range objects {
+ o := bufferedObject.StreamingObject()
+ backendObjects = append(backendObjects, backend.StreamingObject{
+ ObjectAttrs: backend.ObjectAttrs{
+ BucketName: o.BucketName,
+ Name: o.Name,
+ ContentType: o.ContentType,
+ ContentEncoding: o.ContentEncoding,
+ ContentDisposition: o.ContentDisposition,
+ ACL: o.ACL,
+ Created: getCurrentIfZero(o.Created).Format(timestampFormat),
+ Deleted: o.Deleted.Format(timestampFormat),
+ Updated: getCurrentIfZero(o.Updated).Format(timestampFormat),
+ CustomTime: o.CustomTime.Format(timestampFormat),
+ Generation: o.Generation,
+ Metadata: o.Metadata,
+ Crc32c: o.Crc32c,
+ Md5Hash: o.Md5Hash,
+ Size: o.Size,
+ Etag: o.Etag,
+ },
+ Content: o.Content,
+ })
+ }
+ return backendObjects
+}
+
+func fromBackendObjects(objects []backend.StreamingObject) []StreamingObject {
+ backendObjects := make([]StreamingObject, 0, len(objects))
for _, o := range objects {
- backendObjects = append(backendObjects, Object{
- BucketName: o.BucketName,
- Name: o.Name,
- Content: o.Content,
- Crc32c: o.Crc32c,
- Md5Hash: o.Md5Hash,
+ backendObjects = append(backendObjects, StreamingObject{
+ ObjectAttrs: ObjectAttrs{
+ BucketName: o.BucketName,
+ Name: o.Name,
+ Size: o.Size,
+ ContentType: o.ContentType,
+ ContentEncoding: o.ContentEncoding,
+ ContentDisposition: o.ContentDisposition,
+ CacheControl: o.CacheControl,
+ Crc32c: o.Crc32c,
+ Md5Hash: o.Md5Hash,
+ Etag: o.Etag,
+ ACL: o.ACL,
+ Created: convertTimeWithoutError(o.Created),
+ Deleted: convertTimeWithoutError(o.Deleted),
+ Updated: convertTimeWithoutError(o.Updated),
+ CustomTime: convertTimeWithoutError(o.CustomTime),
+ Generation: o.Generation,
+ Metadata: o.Metadata,
+ },
+ Content: o.Content,
})
}
return backendObjects
}
-// GetObject returns the object with the given name in the given bucket, or an
-// error if the object doesn't exist.
+func fromBackendObjectsAttrs(objectAttrs []backend.ObjectAttrs) []ObjectAttrs {
+ oattrs := make([]ObjectAttrs, 0, len(objectAttrs))
+ for _, o := range objectAttrs {
+ oattrs = append(oattrs, ObjectAttrs{
+ BucketName: o.BucketName,
+ Name: o.Name,
+ Size: o.Size,
+ ContentType: o.ContentType,
+ ContentEncoding: o.ContentEncoding,
+ ContentDisposition: o.ContentDisposition,
+ CacheControl: o.CacheControl,
+ Crc32c: o.Crc32c,
+ Md5Hash: o.Md5Hash,
+ Etag: o.Etag,
+ ACL: o.ACL,
+ Created: convertTimeWithoutError(o.Created),
+ Deleted: convertTimeWithoutError(o.Deleted),
+ Updated: convertTimeWithoutError(o.Updated),
+ CustomTime: convertTimeWithoutError(o.CustomTime),
+ Generation: o.Generation,
+ Metadata: o.Metadata,
+ })
+ }
+ return oattrs
+}
+
+func convertTimeWithoutError(t string) time.Time {
+ r, _ := time.Parse(timestampFormat, t)
+ return r
+}
+
+// GetObject is the non-streaming version of GetObjectStreaming.
func (s *Server) GetObject(bucketName, objectName string) (Object, error) {
+ streamingObject, err := s.GetObjectStreaming(bucketName, objectName)
+ if err != nil {
+ return Object{}, err
+ }
+ return streamingObject.BufferedObject()
+}
+
+// GetObjectStreaming returns the object with the given name in the given
+// bucket, or an error if the object doesn't exist.
+func (s *Server) GetObjectStreaming(bucketName, objectName string) (StreamingObject, error) {
backendObj, err := s.backend.GetObject(bucketName, objectName)
+ if err != nil {
+ return StreamingObject{}, err
+ }
+ obj := fromBackendObjects([]backend.StreamingObject{backendObj})[0]
+ return obj, nil
+}
+
+// GetObjectWithGeneration is the non-streaming version of
+// GetObjectWithGenerationStreaming.
+func (s *Server) GetObjectWithGeneration(bucketName, objectName string, generation int64) (Object, error) {
+ streamingObject, err := s.GetObjectWithGenerationStreaming(bucketName, objectName, generation)
if err != nil {
return Object{}, err
}
- obj := fromBackendObjects([]backend.Object{backendObj})[0]
+ return streamingObject.BufferedObject()
+}
+
+// GetObjectWithGenerationStreaming returns the object with the given name and
+// given generation ID in the given bucket, or an error if the object doesn't
+// exist.
+//
+// If versioning is enabled, archived versions are considered.
+func (s *Server) GetObjectWithGenerationStreaming(bucketName, objectName string, generation int64) (StreamingObject, error) {
+ backendObj, err := s.backend.GetObjectWithGeneration(bucketName, objectName, generation)
+ if err != nil {
+ return StreamingObject{}, err
+ }
+ obj := fromBackendObjects([]backend.StreamingObject{backendObj})[0]
return obj, nil
}
-func (s *Server) listObjects(w http.ResponseWriter, r *http.Request) {
- bucketName := mux.Vars(r)["bucketName"]
- prefix := r.URL.Query().Get("prefix")
- delimiter := r.URL.Query().Get("delimiter")
- objs, prefixes, err := s.ListObjects(bucketName, prefix, delimiter)
- encoder := json.NewEncoder(w)
+func (s *Server) objectWithGenerationOnValidGeneration(bucketName, objectName, generationStr string) (StreamingObject, error) {
+ generation, err := strconv.ParseInt(generationStr, 10, 64)
+ if err != nil && generationStr != "" {
+ return StreamingObject{}, errInvalidGeneration
+ } else if generation > 0 {
+ return s.GetObjectWithGenerationStreaming(bucketName, objectName, generation)
+ }
+ return s.GetObjectStreaming(bucketName, objectName)
+}
+
+func (s *Server) listObjects(r *http.Request) jsonResponse {
+ bucketName := unescapeMuxVars(mux.Vars(r))["bucketName"]
+ objs, prefixes, err := s.ListObjectsWithOptions(bucketName, ListOptions{
+ Prefix: r.URL.Query().Get("prefix"),
+ Delimiter: r.URL.Query().Get("delimiter"),
+ Versions: r.URL.Query().Get("versions") == "true",
+ StartOffset: r.URL.Query().Get("startOffset"),
+ EndOffset: r.URL.Query().Get("endOffset"),
+ IncludeTrailingDelimiter: r.URL.Query().Get("includeTrailingDelimiter") == "true",
+ })
if err != nil {
- w.WriteHeader(http.StatusNotFound)
- errResp := newErrorResponse(http.StatusNotFound, "Not Found", nil)
- encoder.Encode(errResp)
- return
+ return jsonResponse{status: http.StatusNotFound}
}
- encoder.Encode(newListObjectsResponse(objs, prefixes))
+ return jsonResponse{data: newListObjectsResponse(objs, prefixes, s.externalURL)}
}
-func (s *Server) getObject(w http.ResponseWriter, r *http.Request) {
- vars := mux.Vars(r)
- encoder := json.NewEncoder(w)
- obj, err := s.GetObject(vars["bucketName"], vars["objectName"])
+func (s *Server) xmlListObjects(r *http.Request) xmlResponse {
+ bucketName := unescapeMuxVars(mux.Vars(r))["bucketName"]
+
+ opts := ListOptions{
+ Prefix: r.URL.Query().Get("prefix"),
+ Delimiter: r.URL.Query().Get("delimiter"),
+ Versions: r.URL.Query().Get("versions") == "true",
+ }
+
+ objs, prefixes, err := s.ListObjectsWithOptions(bucketName, opts)
if err != nil {
- errResp := newErrorResponse(http.StatusNotFound, "Not Found", nil)
- w.WriteHeader(http.StatusNotFound)
- encoder.Encode(errResp)
+ return xmlResponse{
+ status: http.StatusInternalServerError,
+ errorMessage: err.Error(),
+ }
+ }
+
+ result := ListBucketResult{
+ Name: bucketName,
+ Delimiter: opts.Delimiter,
+ Prefix: opts.Prefix,
+ KeyCount: len(objs),
+ }
+
+ if opts.Delimiter != "" {
+ for _, prefix := range prefixes {
+ result.CommonPrefixes = append(result.CommonPrefixes, CommonPrefix{Prefix: prefix})
+ }
+ }
+
+ for _, obj := range objs {
+ result.Contents = append(result.Contents, Contents{
+ Key: obj.Name,
+ Generation: obj.Generation,
+ Size: obj.Size,
+ LastModified: obj.Updated.Format(time.RFC3339),
+ ETag: ETag{Value: obj.Etag},
+ })
+ }
+
+ raw, err := xml.Marshal(result)
+ if err != nil {
+ return xmlResponse{
+ status: http.StatusInternalServerError,
+ errorMessage: err.Error(),
+ }
+ }
+
+ return xmlResponse{
+ status: http.StatusOK,
+ data: []byte(xml.Header + string(raw)),
+ }
+}
+
+func (s *Server) getObject(w http.ResponseWriter, r *http.Request) {
+ if alt := r.URL.Query().Get("alt"); alt == "media" || r.Method == http.MethodHead {
+ s.downloadObject(w, r)
return
}
- w.Header().Set("Accept-Ranges", "bytes")
- encoder.Encode(newObjectResponse(obj))
+
+ handler := jsonToHTTPHandler(func(r *http.Request) jsonResponse {
+ vars := unescapeMuxVars(mux.Vars(r))
+
+ projection := storage.ProjectionNoACL
+ if r.URL.Query().Has("projection") {
+ switch value := strings.ToLower(r.URL.Query().Get("projection")); value {
+ case "full":
+ projection = storage.ProjectionFull
+ case "noacl":
+ projection = storage.ProjectionNoACL
+ default:
+ return jsonResponse{
+ status: http.StatusBadRequest,
+ errorMessage: fmt.Sprintf("invalid projection: %q", value),
+ }
+ }
+ }
+
+ obj, err := s.objectWithGenerationOnValidGeneration(vars["bucketName"], vars["objectName"], r.FormValue("generation"))
+ // Calling Close before checking err is okay on objects, and the object
+ // may need to be closed whether or not there's an error.
+ defer obj.Close() //lint:ignore SA5001 // see above
+ if err != nil {
+ statusCode := http.StatusNotFound
+ var errMessage string
+ if errors.Is(err, errInvalidGeneration) {
+ statusCode = http.StatusBadRequest
+ errMessage = err.Error()
+ }
+ return jsonResponse{
+ status: statusCode,
+ errorMessage: errMessage,
+ }
+ }
+ header := make(http.Header)
+ header.Set("Accept-Ranges", "bytes")
+ return jsonResponse{
+ header: header,
+ data: newProjectedObjectResponse(obj.ObjectAttrs, s.externalURL, projection),
+ }
+ })
+
+ handler(w, r)
}
-func (s *Server) deleteObject(w http.ResponseWriter, r *http.Request) {
- s.mtx.Lock()
- defer s.mtx.Unlock()
- vars := mux.Vars(r)
- err := s.backend.DeleteObject(vars["bucketName"], vars["objectName"])
+func (s *Server) deleteObject(r *http.Request) jsonResponse {
+ vars := unescapeMuxVars(mux.Vars(r))
+ obj, err := s.GetObjectStreaming(vars["bucketName"], vars["objectName"])
+ // Calling Close before checking err is okay on objects, and the object
+ // may need to be closed whether or not there's an error.
+ defer obj.Close() //lint:ignore SA5001 // see above
+ if err == nil {
+ err = s.backend.DeleteObject(vars["bucketName"], vars["objectName"])
+ }
if err != nil {
- errResp := newErrorResponse(http.StatusNotFound, "Not Found", nil)
- w.WriteHeader(http.StatusNotFound)
- json.NewEncoder(w).Encode(errResp)
- return
+ return jsonResponse{status: http.StatusNotFound}
}
- w.WriteHeader(http.StatusOK)
+ bucket, _ := s.backend.GetBucket(obj.BucketName)
+ backendObj := toBackendObjects([]StreamingObject{obj})[0]
+ if bucket.VersioningEnabled {
+ s.eventManager.Trigger(&backendObj, notification.EventArchive, nil)
+ } else {
+ s.eventManager.Trigger(&backendObj, notification.EventDelete, nil)
+ }
+ return jsonResponse{}
}
-func (s *Server) rewriteObject(w http.ResponseWriter, r *http.Request) {
- vars := mux.Vars(r)
- obj, err := s.GetObject(vars["sourceBucket"], vars["sourceObject"])
+func (s *Server) listObjectACL(r *http.Request) jsonResponse {
+ vars := unescapeMuxVars(mux.Vars(r))
+
+ obj, err := s.GetObjectStreaming(vars["bucketName"], vars["objectName"])
if err != nil {
- http.Error(w, "not found", http.StatusNotFound)
- return
+ return jsonResponse{status: http.StatusNotFound}
+ }
+ defer obj.Close()
+
+ return jsonResponse{data: newACLListResponse(obj.ObjectAttrs)}
+}
+
+func (s *Server) setObjectACL(r *http.Request) jsonResponse {
+ vars := unescapeMuxVars(mux.Vars(r))
+
+ obj, err := s.GetObjectStreaming(vars["bucketName"], vars["objectName"])
+ if err != nil {
+ return jsonResponse{status: http.StatusNotFound}
+ }
+ defer obj.Close()
+
+ var data struct {
+ Entity string
+ Role string
+ }
+
+ decoder := json.NewDecoder(r.Body)
+ if err := decoder.Decode(&data); err != nil {
+ return jsonResponse{
+ status: http.StatusBadRequest,
+ errorMessage: err.Error(),
+ }
+ }
+
+ entity := storage.ACLEntity(data.Entity)
+ role := storage.ACLRole(data.Role)
+ obj.ACL = []storage.ACLRule{{
+ Entity: entity,
+ Role: role,
+ }}
+
+ obj, err = s.createObject(obj, backend.NoConditions{})
+ if err != nil {
+ return errToJsonResponse(err)
+ }
+ defer obj.Close()
+
+ return jsonResponse{data: newACLListResponse(obj.ObjectAttrs)}
+}
+
+func (s *Server) rewriteObject(r *http.Request) jsonResponse {
+ vars := unescapeMuxVars(mux.Vars(r))
+ obj, err := s.objectWithGenerationOnValidGeneration(vars["sourceBucket"], vars["sourceObject"], r.FormValue("sourceGeneration"))
+ // Calling Close before checking err is okay on objects, and the object
+ // may need to be closed whether or not there's an error.
+ defer obj.Close() //lint:ignore SA5001 // see above
+ if err != nil {
+ statusCode := http.StatusNotFound
+ var errMessage string
+ if errors.Is(err, errInvalidGeneration) {
+ statusCode = http.StatusBadRequest
+ errMessage = err.Error()
+ }
+ return jsonResponse{errorMessage: errMessage, status: statusCode}
+ }
+
+ var metadata multipartMetadata
+ err = json.NewDecoder(r.Body).Decode(&metadata)
+ if err != nil && err != io.EOF { // The body is optional
+ return jsonResponse{errorMessage: "Invalid metadata", status: http.StatusBadRequest}
+ }
+
+	// Only metadata supplied in the request overwrites the new object's metadata
+ if len(metadata.Metadata) == 0 {
+ metadata.Metadata = obj.Metadata
+ }
+ if metadata.ContentType == "" {
+ metadata.ContentType = obj.ContentType
+ }
+ if metadata.ContentEncoding == "" {
+ metadata.ContentEncoding = obj.ContentEncoding
+ }
+ if metadata.ContentDisposition == "" {
+ metadata.ContentDisposition = obj.ContentDisposition
}
+
dstBucket := vars["destinationBucket"]
- newObject := Object{
- BucketName: dstBucket,
- Name: vars["destinationObject"],
- Content: append([]byte(nil), obj.Content...),
- Crc32c: obj.Crc32c,
- Md5Hash: obj.Md5Hash,
+ newObject := StreamingObject{
+ ObjectAttrs: ObjectAttrs{
+ BucketName: dstBucket,
+ Name: vars["destinationObject"],
+ ACL: obj.ACL,
+ ContentType: metadata.ContentType,
+ ContentEncoding: metadata.ContentEncoding,
+ ContentDisposition: metadata.ContentDisposition,
+ Metadata: metadata.Metadata,
+ },
+ Content: obj.Content,
}
- s.CreateObject(newObject)
- w.Header().Set("Content-Type", "application/json")
- json.NewEncoder(w).Encode(newObjectRewriteResponse(newObject))
+
+ created, err := s.createObject(newObject, backend.NoConditions{})
+ if err != nil {
+ return errToJsonResponse(err)
+ }
+ defer created.Close()
+
+ if vars["copyType"] == "copyTo" {
+ return jsonResponse{data: newObjectResponse(created.ObjectAttrs, s.externalURL)}
+ }
+ return jsonResponse{data: newObjectRewriteResponse(created.ObjectAttrs, s.externalURL)}
}
func (s *Server) downloadObject(w http.ResponseWriter, r *http.Request) {
- vars := mux.Vars(r)
- obj, err := s.GetObject(vars["bucketName"], vars["objectName"])
+ vars := unescapeMuxVars(mux.Vars(r))
+ obj, err := s.objectWithGenerationOnValidGeneration(vars["bucketName"], vars["objectName"], r.FormValue("generation"))
+ // Calling Close before checking err is okay on objects, and the object
+ // may need to be closed whether or not there's an error.
+ defer obj.Close() //lint:ignore SA5001 // see above
if err != nil {
- http.Error(w, "not found", http.StatusNotFound)
+ statusCode := http.StatusNotFound
+ message := http.StatusText(statusCode)
+ if errors.Is(err, errInvalidGeneration) {
+ statusCode = http.StatusBadRequest
+ message = err.Error()
+ }
+ http.Error(w, message, statusCode)
return
}
+
+ var content io.Reader
+ content = obj.Content
status := http.StatusOK
- start, end, content := s.handleRange(obj, r)
- if len(content) != len(obj.Content) {
+
+ transcoded := false
+ ranged := false
+ start := int64(0)
+ lastByte := int64(0)
+ satisfiable := true
+ contentLength := int64(0)
+
+ handledTranscoding := func() bool {
+ // This should also be false if the Cache-Control metadata field == "no-transform",
+ // but we don't currently support that field.
+ // See https://cloud.google.com/storage/docs/transcoding
+
+ if obj.ContentEncoding == "gzip" && !strings.Contains(r.Header.Get("accept-encoding"), "gzip") {
+ // GCS will transparently decompress gzipped content, see
+ // https://cloud.google.com/storage/docs/transcoding
+ // In this case, any Range header is ignored and the full content is returned.
+
+ // If the content is not a valid gzip file, ignore errors and continue
+ // without transcoding. Otherwise, return decompressed content.
+ gzipReader, err := gzip.NewReader(content)
+ if err == nil {
+ rawContent, err := io.ReadAll(gzipReader)
+ if err == nil {
+ transcoded = true
+ content = bytes.NewReader(rawContent)
+ contentLength = int64(len(rawContent))
+ obj.Size = contentLength
+ return true
+ }
+ }
+ }
+ return false
+ }
+
+ if !handledTranscoding() {
+ ranged, start, lastByte, satisfiable = s.handleRange(obj, r)
+ contentLength = lastByte - start + 1
+ }
+
+ if ranged && satisfiable {
+ _, err = obj.Content.Seek(start, io.SeekStart)
+ if err != nil {
+ http.Error(w, "could not seek", http.StatusInternalServerError)
+ return
+ }
+ content = io.LimitReader(obj.Content, contentLength)
status = http.StatusPartialContent
- w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", start, end, len(obj.Content)))
+ w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", start, lastByte, obj.Size))
}
w.Header().Set("Accept-Ranges", "bytes")
- w.Header().Set("Content-Length", strconv.Itoa(len(content)))
+ w.Header().Set("Content-Length", strconv.FormatInt(contentLength, 10))
+ w.Header().Set("X-Goog-Generation", strconv.FormatInt(obj.Generation, 10))
+ w.Header().Set("X-Goog-Hash", fmt.Sprintf("crc32c=%s,md5=%s", obj.Crc32c, obj.Md5Hash))
+ w.Header().Set("Last-Modified", obj.Updated.Format(http.TimeFormat))
+ w.Header().Set("ETag", fmt.Sprintf("%q", obj.Etag))
+ for name, value := range obj.Metadata {
+ w.Header().Set("X-Goog-Meta-"+name, value)
+ }
+ w.Header().Set("Access-Control-Allow-Origin", "*")
+
+ if ranged && !satisfiable {
+ status = http.StatusRequestedRangeNotSatisfiable
+ content = bytes.NewReader([]byte(fmt.Sprintf(`<?xml version='1.0' encoding='UTF-8'?>`+
+ `<Error><Code>InvalidRange</Code>`+
+ `<Message>The requested range cannot be satisfied.</Message>`+
+ `<Details>%s</Details></Error>`, r.Header.Get("Range"))))
+ w.Header().Set(contentTypeHeader, "application/xml; charset=UTF-8")
+ } else {
+ if obj.ContentType != "" {
+ w.Header().Set(contentTypeHeader, obj.ContentType)
+ }
+ if obj.CacheControl != "" {
+ w.Header().Set(cacheControlHeader, obj.CacheControl)
+ }
+ // If content was transcoded, the underlying encoding was removed so we shouldn't report it.
+ if obj.ContentEncoding != "" && !transcoded {
+ w.Header().Set("Content-Encoding", obj.ContentEncoding)
+ }
+ if obj.ContentDisposition != "" {
+ w.Header().Set("Content-Disposition", obj.ContentDisposition)
+ }
+ // X-Goog-Stored-Content-Encoding must be set to the original encoding,
+ // defaulting to "identity" if no encoding was set.
+ storedContentEncoding := "identity"
+ if obj.ContentEncoding != "" {
+ storedContentEncoding = obj.ContentEncoding
+ }
+ w.Header().Set("X-Goog-Stored-Content-Encoding", storedContentEncoding)
+ }
+
w.WriteHeader(status)
if r.Method == http.MethodGet {
- w.Write(content)
+ io.Copy(w, content)
}
}
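
downloadObject now mimics GCS transcoding: a gzip-encoded object is decompressed on the fly when the client does not accept gzip, and any Range header is ignored in that case. A hedged client-side sketch, assuming a fake-gcs-server started with Scheme "http" on an illustrative port, with an object "bkt/obj" uploaded with ContentEncoding gzip:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	url := "http://127.0.0.1:4443/download/storage/v1/b/bkt/o/obj?alt=media"

	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		panic(err)
	}
	// Decline gzip explicitly: the server then transcodes, returning
	// decompressed bytes with no Content-Encoding header, while
	// X-Goog-Stored-Content-Encoding still reports "gzip".
	req.Header.Set("Accept-Encoding", "identity")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Header.Get("Content-Encoding"), len(body))
}
```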
-func (s *Server) handleRange(obj Object, r *http.Request) (start, end int, content []byte) {
- if reqRange := r.Header.Get("Range"); reqRange != "" {
- parts := strings.SplitN(reqRange, "=", 2)
- if len(parts) == 2 && parts[0] == "bytes" {
- rangeParts := strings.SplitN(parts[1], "-", 2)
- if len(rangeParts) == 2 {
- start, _ = strconv.Atoi(rangeParts[0])
- end, _ = strconv.Atoi(rangeParts[1])
- if end < 1 {
- end = len(obj.Content)
- }
- return start, end, obj.Content[start:end]
+func (s *Server) handleRange(obj StreamingObject, r *http.Request) (ranged bool, start int64, lastByte int64, satisfiable bool) {
+ start, end, err := parseRange(r.Header.Get("Range"), obj.Size)
+ if err != nil {
+ // If the range isn't valid, GCS returns all content.
+ return false, 0, obj.Size - 1, false
+ }
+ // GCS is pretty flexible when it comes to invalid ranges. A 416 http
+ // response is only returned when the range start is beyond the length of
+ // the content. Otherwise, the range is ignored.
+ switch {
+ // Invalid start. Return 416 and NO content.
+ // Examples:
+ // Length: 40, Range: bytes=50-60
+ // Length: 40, Range: bytes=50-
+ case start >= obj.Size:
+ // This IS a ranged request, but it ISN'T satisfiable.
+ return true, 0, 0, false
+ // Negative range, ignore range and return all content.
+ // Examples:
+ // Length: 40, Range: bytes=30-20
+ case end < start:
+ return false, 0, obj.Size - 1, false
+ // Return range. Clamp start and end.
+ // Examples:
+ // Length: 40, Range: bytes=-100
+ // Length: 40, Range: bytes=0-100
+ default:
+ if start < 0 {
+ start = 0
+ }
+ if end >= obj.Size {
+ end = obj.Size - 1
+ }
+ return true, start, end, true
+ }
+}
+
+// parseRange parses the range header and returns the corresponding start and
+// end indices in the content. The end index is inclusive. This function
+// doesn't validate that the start and end indices fall within the content
+// bounds. The content length is only used to handle "suffix length" and
+// range-to-end ranges.
+func parseRange(rangeHeaderValue string, contentLength int64) (start int64, end int64, err error) {
+ // For information about the range header, see:
+ // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Range
+ // https://httpwg.org/specs/rfc7233.html#header.range
+ // https://httpwg.org/specs/rfc7233.html#byte.ranges
+ // https://httpwg.org/specs/rfc7233.html#status.416
+ //
+ // <unit>=<range spec>
+ //
+ // The following ranges are parsed:
+ // "bytes=40-50" (range with given start and end)
+ // "bytes=40-" (range to end of content)
+ // "bytes=-40" (suffix length, offset from end of string)
+ //
+ // The unit MUST be "bytes".
+ parts := strings.SplitN(rangeHeaderValue, "=", 2)
+ if len(parts) != 2 {
+ return 0, 0, fmt.Errorf("expecting `=` in range header, got: %s", rangeHeaderValue)
+ }
+ if parts[0] != "bytes" {
+ return 0, 0, fmt.Errorf("invalid range unit, expecting `bytes`, got: %s", parts[0])
+ }
+ rangeSpec := parts[1]
+ if len(rangeSpec) == 0 {
+ return 0, 0, errors.New("empty range")
+ }
+ if rangeSpec[0] == '-' {
+ offsetFromEnd, err := strconv.ParseInt(rangeSpec, 10, 64)
+ if err != nil {
+ return 0, 0, fmt.Errorf("invalid suffix length, got: %s", rangeSpec)
+ }
+ start = contentLength + offsetFromEnd
+ end = contentLength - 1
+ } else {
+ rangeParts := strings.SplitN(rangeSpec, "-", 2)
+ if len(rangeParts) != 2 {
+ return 0, 0, fmt.Errorf("only one range supported, got: %s", rangeSpec)
+ }
+ start, err = strconv.ParseInt(rangeParts[0], 10, 64)
+ if err != nil {
+ return 0, 0, fmt.Errorf("invalid range start, got: %s", rangeParts[0])
+ }
+ if rangeParts[1] == "" {
+ end = contentLength - 1
+ } else {
+ end, err = strconv.ParseInt(rangeParts[1], 10, 64)
+ if err != nil {
+ return 0, 0, fmt.Errorf("invalid range end, got: %s", rangeParts[1])
}
}
}
- return 0, 0, obj.Content
+ return start, end, nil
+}
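
A few worked inputs for parseRange, written as a hypothetical table-driven test that would live in a _test.go file in the same package (it is not part of the vendored code); contentLength is fixed at 40 and the end index is inclusive:

```go
package fakestorage

import "testing"

func TestParseRangeExamples(t *testing.T) {
	cases := []struct {
		header     string
		start, end int64
	}{
		{"bytes=10-19", 10, 19}, // explicit start and end (end inclusive)
		{"bytes=10-", 10, 39},   // open-ended: runs to the last byte
		{"bytes=-5", 35, 39},    // suffix length: the final 5 bytes
	}
	for _, c := range cases {
		start, end, err := parseRange(c.header, 40)
		if err != nil || start != c.start || end != c.end {
			t.Errorf("%s: got (%d, %d, %v), want (%d, %d, nil)",
				c.header, start, end, err, c.start, c.end)
		}
	}
}
```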
+
+func (s *Server) patchObject(r *http.Request) jsonResponse {
+ vars := unescapeMuxVars(mux.Vars(r))
+ bucketName := vars["bucketName"]
+ objectName := vars["objectName"]
+
+ type acls struct {
+ Entity string
+ Role string
+ }
+
+ var payload struct {
+ ContentType string
+ ContentEncoding string
+ ContentDisposition string
+ Metadata map[string]string `json:"metadata"`
+ CustomTime string
+ Acl []acls
+ }
+ err := json.NewDecoder(r.Body).Decode(&payload)
+ if err != nil {
+ return jsonResponse{
+ status: http.StatusBadRequest,
+			errorMessage: "Metadata in the request couldn't be decoded",
+ }
+ }
+
+ var attrsToUpdate backend.ObjectAttrs
+
+ attrsToUpdate.ContentType = payload.ContentType
+ attrsToUpdate.ContentEncoding = payload.ContentEncoding
+ attrsToUpdate.ContentDisposition = payload.ContentDisposition
+ attrsToUpdate.Metadata = payload.Metadata
+ attrsToUpdate.CustomTime = payload.CustomTime
+
+ if len(payload.Acl) > 0 {
+ attrsToUpdate.ACL = []storage.ACLRule{}
+ for _, aclData := range payload.Acl {
+ newAcl := storage.ACLRule{Entity: storage.ACLEntity(aclData.Entity), Role: storage.ACLRole(aclData.Role)}
+ attrsToUpdate.ACL = append(attrsToUpdate.ACL, newAcl)
+ }
+ }
+
+ backendObj, err := s.backend.PatchObject(bucketName, objectName, attrsToUpdate)
+ if err != nil {
+ return jsonResponse{
+ status: http.StatusNotFound,
+ errorMessage: "Object not found to be PATCHed",
+ }
+ }
+ defer backendObj.Close()
+
+ s.eventManager.Trigger(&backendObj, notification.EventMetadata, nil)
+ return jsonResponse{data: fromBackendObjects([]backend.StreamingObject{backendObj})[0]}
+}
+
+func (s *Server) updateObject(r *http.Request) jsonResponse {
+ vars := unescapeMuxVars(mux.Vars(r))
+ bucketName := vars["bucketName"]
+ objectName := vars["objectName"]
+
+ type acls struct {
+ Entity string
+ Role string
+ }
+
+ var payload struct {
+ Metadata map[string]string `json:"metadata"`
+ ContentType string `json:"contentType"`
+ ContentDisposition string `json:"contentDisposition"`
+ CustomTime string
+ Acl []acls
+ }
+ err := json.NewDecoder(r.Body).Decode(&payload)
+ if err != nil {
+ return jsonResponse{
+ status: http.StatusBadRequest,
+			errorMessage: "Metadata in the request couldn't be decoded",
+ }
+ }
+
+ var attrsToUpdate backend.ObjectAttrs
+
+ attrsToUpdate.Metadata = payload.Metadata
+ attrsToUpdate.CustomTime = payload.CustomTime
+ attrsToUpdate.ContentType = payload.ContentType
+ attrsToUpdate.ContentDisposition = payload.ContentDisposition
+ if len(payload.Acl) > 0 {
+ attrsToUpdate.ACL = []storage.ACLRule{}
+ for _, aclData := range payload.Acl {
+ newAcl := storage.ACLRule{Entity: storage.ACLEntity(aclData.Entity), Role: storage.ACLRole(aclData.Role)}
+ attrsToUpdate.ACL = append(attrsToUpdate.ACL, newAcl)
+ }
+ }
+ backendObj, err := s.backend.UpdateObject(bucketName, objectName, attrsToUpdate)
+ if err != nil {
+ return jsonResponse{
+ status: http.StatusNotFound,
+ errorMessage: "Object not found to be updated",
+ }
+ }
+ defer backendObj.Close()
+
+ s.eventManager.Trigger(&backendObj, notification.EventMetadata, nil)
+ return jsonResponse{data: fromBackendObjects([]backend.StreamingObject{backendObj})[0]}
+}
+
+func (s *Server) composeObject(r *http.Request) jsonResponse {
+ vars := unescapeMuxVars(mux.Vars(r))
+ bucketName := vars["bucketName"]
+ destinationObject := vars["destinationObject"]
+
+ var composeRequest struct {
+ SourceObjects []struct {
+ Name string
+ }
+ Destination struct {
+ Bucket string
+ ContentType string
+ ContentDisposition string
+ Metadata map[string]string
+ }
+ }
+
+ decoder := json.NewDecoder(r.Body)
+ err := decoder.Decode(&composeRequest)
+ if err != nil {
+ return jsonResponse{
+ status: http.StatusBadRequest,
+ errorMessage: "Error parsing request body",
+ }
+ }
+
+ const maxComposeObjects = 32
+ if len(composeRequest.SourceObjects) > maxComposeObjects {
+ return jsonResponse{
+ status: http.StatusBadRequest,
+ errorMessage: fmt.Sprintf("The number of source components provided (%d) exceeds the maximum (%d)", len(composeRequest.SourceObjects), maxComposeObjects),
+ }
+ }
+
+ sourceNames := make([]string, 0, len(composeRequest.SourceObjects))
+ for _, n := range composeRequest.SourceObjects {
+ sourceNames = append(sourceNames, n.Name)
+ }
+
+ backendObj, err := s.backend.ComposeObject(bucketName, sourceNames, destinationObject, composeRequest.Destination.Metadata, composeRequest.Destination.ContentType)
+ if err != nil {
+ return jsonResponse{
+ status: http.StatusInternalServerError,
+ errorMessage: "Error running compose",
+ }
+ }
+ defer backendObj.Close()
+
+ obj := fromBackendObjects([]backend.StreamingObject{backendObj})[0]
+
+ s.eventManager.Trigger(&backendObj, notification.EventFinalize, nil)
+
+ return jsonResponse{data: newObjectResponse(obj.ObjectAttrs, s.externalURL)}
}
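
The compose endpoint enforces the same 32-source cap as real GCS. A hedged sketch of exercising it through the official cloud.google.com/go/storage client pointed at a fake server; the endpoint, bucket, and object names are assumptions:

```go
package main

import (
	"context"
	"fmt"

	"cloud.google.com/go/storage"
	"google.golang.org/api/option"
)

func main() {
	ctx := context.Background()
	// Assumes a fake-gcs-server listening in plain HTTP on this port.
	client, err := storage.NewClient(ctx,
		option.WithEndpoint("http://127.0.0.1:4443/storage/v1/"),
		option.WithoutAuthentication())
	if err != nil {
		panic(err)
	}
	defer client.Close()

	bkt := client.Bucket("bkt")
	// Compose concatenates up to 32 source objects into one destination.
	attrs, err := bkt.Object("merged").
		ComposerFrom(bkt.Object("part-1"), bkt.Object("part-2")).
		Run(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println(attrs.Name, attrs.Size)
}
```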
diff --git a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/response.go b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/response.go
index 92164cafb1057..f40dcd3fe9dd7 100644
--- a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/response.go
+++ b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/response.go
@@ -4,73 +4,200 @@
package fakestorage
-import "sort"
+import (
+ "fmt"
+ "net/url"
+ "time"
+
+ "cloud.google.com/go/storage"
+ "github.com/fsouza/fake-gcs-server/internal/backend"
+)
+
+const timestampFormat = "2006-01-02T15:04:05.999999Z07:00"
+
+func formatTime(t time.Time) string {
+ if t.IsZero() {
+ return ""
+ }
+ return t.Format(timestampFormat)
+}
type listResponse struct {
- Kind string `json:"kind"`
- Items []interface{} `json:"items"`
- Prefixes []string `json:"prefixes"`
+ Kind string `json:"kind"`
+ Items []any `json:"items,omitempty"`
+ Prefixes []string `json:"prefixes,omitempty"`
}
-func newListBucketsResponse(bucketNames []string) listResponse {
+func newListBucketsResponse(buckets []backend.Bucket, location string) listResponse {
resp := listResponse{
Kind: "storage#buckets",
- Items: make([]interface{}, len(bucketNames)),
+ Items: make([]any, len(buckets)),
}
- sort.Strings(bucketNames)
- for i, name := range bucketNames {
- resp.Items[i] = newBucketResponse(name)
+ for i, bucket := range buckets {
+ resp.Items[i] = newBucketResponse(bucket, location)
}
return resp
}
type bucketResponse struct {
- Kind string `json:"kind"`
- ID string `json:"id"`
- Name string `json:"name"`
+ Kind string `json:"kind"`
+ ID string `json:"id"`
+ DefaultEventBasedHold bool `json:"defaultEventBasedHold"`
+ Name string `json:"name"`
+ Versioning *bucketVersioning `json:"versioning,omitempty"`
+ TimeCreated string `json:"timeCreated,omitempty"`
+ Updated string `json:"updated,omitempty"`
+ Location string `json:"location,omitempty"`
+ StorageClass string `json:"storageClass,omitempty"`
+ ProjectNumber string `json:"projectNumber"`
+ Metageneration string `json:"metageneration"`
+ Etag string `json:"etag"`
+ LocationType string `json:"locationType"`
}
-func newBucketResponse(bucketName string) bucketResponse {
+type bucketVersioning struct {
+ Enabled bool `json:"enabled"`
+}
+
+func newBucketResponse(bucket backend.Bucket, location string) bucketResponse {
return bucketResponse{
- Kind: "storage#bucket",
- ID: bucketName,
- Name: bucketName,
+ Kind: "storage#bucket",
+ ID: bucket.Name,
+ Name: bucket.Name,
+ DefaultEventBasedHold: bucket.DefaultEventBasedHold,
+ Versioning: &bucketVersioning{bucket.VersioningEnabled},
+ TimeCreated: formatTime(bucket.TimeCreated),
+ Updated: formatTime(bucket.TimeCreated), // not tracking update times yet, reporting `updated` = `timeCreated`
+ Location: location,
+ StorageClass: "STANDARD",
+ ProjectNumber: "0",
+ Metageneration: "1",
+ Etag: "RVRhZw==",
+ LocationType: "region",
}
}
-func newListObjectsResponse(objs []Object, prefixes []string) listResponse {
+func newListObjectsResponse(objs []ObjectAttrs, prefixes []string, externalURL string) listResponse {
resp := listResponse{
Kind: "storage#objects",
- Items: make([]interface{}, len(objs)),
+ Items: make([]any, len(objs)),
Prefixes: prefixes,
}
for i, obj := range objs {
- resp.Items[i] = newObjectResponse(obj)
+ resp.Items[i] = newObjectResponse(obj, externalURL)
}
return resp
}
+// objectAccessControl is copied from the Google SDK to avoid direct
+// dependency.
+type objectAccessControl struct {
+ Bucket string `json:"bucket,omitempty"`
+ Domain string `json:"domain,omitempty"`
+ Email string `json:"email,omitempty"`
+ Entity string `json:"entity,omitempty"`
+ EntityID string `json:"entityId,omitempty"`
+ Etag string `json:"etag,omitempty"`
+ Generation int64 `json:"generation,omitempty,string"`
+ ID string `json:"id,omitempty"`
+ Kind string `json:"kind,omitempty"`
+ Object string `json:"object,omitempty"`
+ ProjectTeam struct {
+ ProjectNumber string `json:"projectNumber,omitempty"`
+ Team string `json:"team,omitempty"`
+ } `json:"projectTeam,omitempty"`
+ Role string `json:"role,omitempty"`
+ SelfLink string `json:"selfLink,omitempty"`
+}
+
type objectResponse struct {
- Kind string `json:"kind"`
- Name string `json:"name"`
- ID string `json:"id"`
- Bucket string `json:"bucket"`
- Size int64 `json:"size,string"`
- // Crc32c: CRC32c checksum, same as in google storage client code
- Crc32c string `json:"crc32c,omitempty"`
- Md5Hash string `json:"md5hash,omitempty"`
+ Kind string `json:"kind"`
+ Name string `json:"name"`
+ ID string `json:"id"`
+ Bucket string `json:"bucket"`
+ Size int64 `json:"size,string"`
+ ContentType string `json:"contentType,omitempty"`
+ ContentEncoding string `json:"contentEncoding,omitempty"`
+ ContentDisposition string `json:"contentDisposition,omitempty"`
+ Crc32c string `json:"crc32c,omitempty"`
+ ACL []*objectAccessControl `json:"acl,omitempty"`
+ Md5Hash string `json:"md5Hash,omitempty"`
+ Etag string `json:"etag,omitempty"`
+ StorageClass string `json:"storageClass"`
+ TimeCreated string `json:"timeCreated,omitempty"`
+ TimeDeleted string `json:"timeDeleted,omitempty"`
+ TimeStorageClassUpdated string `json:"timeStorageClassUpdated,omitempty"`
+ Updated string `json:"updated,omitempty"`
+ Generation int64 `json:"generation,string"`
+ CustomTime string `json:"customTime,omitempty"`
+ Metadata map[string]string `json:"metadata,omitempty"`
+ SelfLink string `json:"selfLink,omitempty"`
+ MediaLink string `json:"mediaLink,omitempty"`
+ Metageneration string `json:"metageneration,omitempty"`
}
-func newObjectResponse(obj Object) objectResponse {
+func newProjectedObjectResponse(obj ObjectAttrs, externalURL string, projection storage.Projection) objectResponse {
+ objResponse := newObjectResponse(obj, externalURL)
+ if projection == storage.ProjectionNoACL {
+ objResponse.ACL = nil
+ }
+ return objResponse
+}
+
+func newObjectResponse(obj ObjectAttrs, externalURL string) objectResponse {
+ acl := getAccessControlsListFromObject(obj)
+
return objectResponse{
- Kind: "storage#object",
- ID: obj.id(),
- Bucket: obj.BucketName,
- Name: obj.Name,
- Size: int64(len(obj.Content)),
- Crc32c: obj.Crc32c,
- Md5Hash: obj.Md5Hash,
+ Kind: "storage#object",
+ ID: obj.id(),
+ Bucket: obj.BucketName,
+ Name: obj.Name,
+ Size: obj.Size,
+ ContentType: obj.ContentType,
+ ContentEncoding: obj.ContentEncoding,
+ ContentDisposition: obj.ContentDisposition,
+ Crc32c: obj.Crc32c,
+ Md5Hash: obj.Md5Hash,
+ Etag: obj.Etag,
+ ACL: acl,
+ StorageClass: "STANDARD",
+ Metadata: obj.Metadata,
+ TimeCreated: formatTime(obj.Created),
+ TimeDeleted: formatTime(obj.Deleted),
+ TimeStorageClassUpdated: formatTime(obj.Updated),
+ Updated: formatTime(obj.Updated),
+ CustomTime: formatTime(obj.CustomTime),
+ Generation: obj.Generation,
+ SelfLink: fmt.Sprintf("%s/storage/v1/b/%s/o/%s", externalURL, url.PathEscape(obj.BucketName), url.PathEscape(obj.Name)),
+ MediaLink: fmt.Sprintf("%s/download/storage/v1/b/%s/o/%s?alt=media", externalURL, url.PathEscape(obj.BucketName), url.PathEscape(obj.Name)),
+ Metageneration: "1",
+ }
+}
+
+type aclListResponse struct {
+ Items []*objectAccessControl `json:"items"`
+}
+
+func newACLListResponse(obj ObjectAttrs) aclListResponse {
+ if len(obj.ACL) == 0 {
+ return aclListResponse{}
+ }
+ return aclListResponse{Items: getAccessControlsListFromObject(obj)}
+}
+
+func getAccessControlsListFromObject(obj ObjectAttrs) []*objectAccessControl {
+ aclItems := make([]*objectAccessControl, len(obj.ACL))
+ for idx, aclRule := range obj.ACL {
+ aclItems[idx] = &objectAccessControl{
+ Bucket: obj.BucketName,
+ Entity: string(aclRule.Entity),
+ Object: obj.Name,
+ Role: string(aclRule.Role),
+ Etag: "RVRhZw==",
+ Kind: "storage#objectAccessControl",
+ }
}
+ return aclItems
}
type rewriteResponse struct {
@@ -82,14 +209,14 @@ type rewriteResponse struct {
Resource objectResponse `json:"resource"`
}
-func newObjectRewriteResponse(obj Object) rewriteResponse {
+func newObjectRewriteResponse(obj ObjectAttrs, externalURL string) rewriteResponse {
return rewriteResponse{
Kind: "storage#rewriteResponse",
- TotalBytesRewritten: int64(len(obj.Content)),
- ObjectSize: int64(len(obj.Content)),
+ TotalBytesRewritten: obj.Size,
+ ObjectSize: obj.Size,
Done: true,
RewriteToken: "",
- Resource: newObjectResponse(obj),
+ Resource: newObjectResponse(obj, externalURL),
}
}
diff --git a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/server.go b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/server.go
index 165d9d7ec2ed4..4283ccf030dc0 100644
--- a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/server.go
+++ b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/server.go
@@ -5,30 +5,52 @@
package fakestorage
import (
+ "bufio"
+ "bytes"
+ "compress/gzip"
"context"
"crypto/tls"
+ "errors"
"fmt"
+ "io"
+ "mime"
+ "mime/multipart"
"net"
"net/http"
"net/http/httptest"
+ "net/http/httputil"
+ "net/textproto"
+ "net/url"
+ "os"
+ "path/filepath"
+ "strings"
"sync"
"cloud.google.com/go/storage"
"github.com/fsouza/fake-gcs-server/internal/backend"
+ "github.com/fsouza/fake-gcs-server/internal/checksum"
+ "github.com/fsouza/fake-gcs-server/internal/notification"
+ "github.com/gorilla/handlers"
"github.com/gorilla/mux"
+ "golang.org/x/oauth2/google"
"google.golang.org/api/option"
)
+const defaultPublicHost = "storage.googleapis.com"
+
// Server is the fake server.
//
// It provides a fake implementation of the Google Cloud Storage API.
type Server struct {
- backend backend.Storage
- uploads map[string]Object
- transport http.RoundTripper
- ts *httptest.Server
- mux *mux.Router
- mtx sync.RWMutex
+ backend backend.Storage
+ uploads sync.Map
+ transport http.RoundTripper
+ ts *httptest.Server
+ handler http.Handler
+ options Options
+ externalURL string
+ publicHost string
+ eventManager notification.EventManager
}
// NewServer creates a new instance of the server, pre-loaded with the given
@@ -41,6 +63,8 @@ func NewServer(objects []Object) *Server {
}
// NewServerWithHostPort creates a new server that listens on a custom host and port
+//
+// Deprecated: use NewServerWithOptions.
func NewServerWithHostPort(objects []Object, host string, port uint16) (*Server, error) {
return NewServerWithOptions(Options{
InitialObjects: objects,
@@ -49,30 +73,106 @@ func NewServerWithHostPort(objects []Object, host string, port uint16) (*Server,
})
}
-// Options are used to configure the server on creation
+type EventManagerOptions = notification.EventManagerOptions
+
+type EventNotificationOptions = notification.EventNotificationOptions
+
+// Options are used to configure the server on creation.
type Options struct {
InitialObjects []Object
StorageRoot string
+ Seed string
+ Scheme string
Host string
Port uint16
// when set to true, the server will not actually start a TCP listener,
// client requests will get processed by an internal mocked transport.
NoListener bool
+
+ // Optional external URL, such as https://gcs.127.0.0.1.nip.io:4443
+ // Returned in the Location header for resumable uploads
+ // The "real" value is https://www.googleapis.com, the JSON API
+ // The default is whatever the server is bound to, such as https://0.0.0.0:4443
+ ExternalURL string
+
+ // Optional URL for public access
+ // An example is "storage.gcs.127.0.0.1.nip.io:4443", which will configure
+ // the server to serve objects at:
+ // https://storage.gcs.127.0.0.1.nip.io:4443/<bucket>/<object>
+	// https://<bucket>.storage.gcs.127.0.0.1.nip.io:4443/<object>
+ // If unset, the default is "storage.googleapis.com", the XML API
+ PublicHost string
+
+ // Optional list of headers to add to the CORS header allowlist
+ // An example is "X-Goog-Meta-Uploader", which will allow a
+ // custom metadata header named "X-Goog-Meta-Uploader" to be
+ // sent through the browser
+ AllowedCORSHeaders []string
+
+	// Destination for writing log output.
+ Writer io.Writer
+
+ // EventOptions contains the events that should be published and the URL
+ // of the Google cloud function such events should be published to.
+ EventOptions EventManagerOptions
+
+ // Location used for buckets in the server.
+ BucketsLocation string
+
+ CertificateLocation string
+
+ PrivateKeyLocation string
}
-// NewServerWithOptions creates a new server with custom options
+// NewServerWithOptions creates a new server configured according to the
+// provided options.
func NewServerWithOptions(options Options) (*Server, error) {
- s, err := newServer(options.InitialObjects, options.StorageRoot)
+ s, err := newServer(options)
+ if err != nil {
+ return nil, err
+ }
+
+ allowedHeaders := []string{"Content-Type", "Content-Encoding", "Range", "Content-Range"}
+ allowedHeaders = append(allowedHeaders, options.AllowedCORSHeaders...)
+
+ cors := handlers.CORS(
+ handlers.AllowedMethods([]string{
+ http.MethodHead,
+ http.MethodGet,
+ http.MethodPost,
+ http.MethodPut,
+ http.MethodPatch,
+ http.MethodDelete,
+ }),
+ handlers.AllowedHeaders(allowedHeaders),
+ handlers.AllowedOrigins([]string{"*"}),
+ handlers.AllowCredentials(),
+ handlers.ExposedHeaders([]string{"Location"}),
+ )
+
+ s.handler = cors(s.handler)
+ if options.Writer != nil {
+ s.handler = handlers.LoggingHandler(options.Writer, s.handler)
+ }
+ s.handler = requestCompressHandler(s.handler)
+ s.transport = &muxTransport{handler: s.handler}
+
+ s.eventManager, err = notification.NewPubsubEventManager(options.EventOptions, options.Writer)
if err != nil {
return nil, err
}
+
if options.NoListener {
- s.setTransportToMux()
return s, nil
}
- s.ts = httptest.NewUnstartedServer(s.mux)
+ s.ts = httptest.NewUnstartedServer(s.handler)
+ startFunc := s.ts.StartTLS
+ if options.Scheme == "http" {
+ startFunc = s.ts.Start
+ }
+
if options.Port != 0 {
addr := fmt.Sprintf("%s:%d", options.Host, options.Port)
l, err := net.Listen("tcp", addr)
@@ -81,64 +181,255 @@ func NewServerWithOptions(options Options) (*Server, error) {
}
s.ts.Listener.Close()
s.ts.Listener = l
- s.ts.StartTLS()
- } else {
- s.ts.StartTLS()
}
- s.setTransportToAddr(s.ts.Listener.Addr().String())
+ if options.CertificateLocation != "" && options.PrivateKeyLocation != "" {
+ cert, err := tls.LoadX509KeyPair(options.CertificateLocation, options.PrivateKeyLocation)
+ if err != nil {
+ return nil, err
+ }
+ s.ts.TLS = &tls.Config{Certificates: []tls.Certificate{cert}}
+ }
+ startFunc()
+
return s, nil
}
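
A minimal sketch of starting a server through NewServerWithOptions; Scheme, Host, and Port are illustrative, and "http" sidesteps the self-signed TLS handshake:

```go
package main

import (
	"fmt"

	"github.com/fsouza/fake-gcs-server/fakestorage"
)

func main() {
	server, err := fakestorage.NewServerWithOptions(fakestorage.Options{
		Scheme: "http",
		Host:   "127.0.0.1",
		Port:   4443,
	})
	if err != nil {
		panic(err)
	}
	defer server.Stop()
	fmt.Println(server.URL()) // e.g. http://127.0.0.1:4443
}
```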
-func newServer(objects []Object, storageRoot string) (*Server, error) {
- backendObjects := toBackendObjects(objects)
+func newServer(options Options) (*Server, error) {
+ if len(options.InitialObjects) > 0 && options.Seed != "" {
+ return nil, errors.New("please provide either a seed directory or a list of initial objects")
+ }
+
+ var backendObjects []backend.StreamingObject
+ if len(options.InitialObjects) > 0 {
+ backendObjects = bufferedObjectsToBackendObjects(options.InitialObjects)
+ }
+
var backendStorage backend.Storage
var err error
- if storageRoot != "" {
- backendStorage, err = backend.NewStorageFS(backendObjects, storageRoot)
+ if options.StorageRoot != "" {
+ backendStorage, err = backend.NewStorageFS(backendObjects, options.StorageRoot)
} else {
- backendStorage = backend.NewStorageMemory(backendObjects)
+ backendStorage, err = backend.NewStorageMemory(backendObjects)
}
if err != nil {
return nil, err
}
+ publicHost := options.PublicHost
+ if publicHost == "" {
+ publicHost = defaultPublicHost
+ }
+
s := Server{
- backend: backendStorage,
- uploads: make(map[string]Object),
+ backend: backendStorage,
+ uploads: sync.Map{},
+ externalURL: options.ExternalURL,
+ publicHost: publicHost,
+ options: options,
+ eventManager: ¬ification.PubsubEventManager{},
}
s.buildMuxer()
+ _, err = s.seed()
+ if err != nil {
+ return nil, err
+ }
return &s, nil
}
-func (s *Server) setTransportToAddr(addr string) {
- // #nosec
- tlsConfig := tls.Config{InsecureSkipVerify: true}
- s.transport = &http.Transport{
- TLSClientConfig: &tlsConfig,
- DialTLS: func(string, string) (net.Conn, error) {
- return tls.Dial("tcp", addr, &tlsConfig)
- },
+func unescapeMuxVars(vars map[string]string) map[string]string {
+ m := make(map[string]string)
+ for k, v := range vars {
+ r, err := url.PathUnescape(v)
+ if err == nil {
+ m[k] = r
+ } else {
+ m[k] = v
+ }
}
+ return m
}
-func (s *Server) setTransportToMux() {
- s.transport = &muxTransport{router: s.mux}
+func (s *Server) buildMuxer() {
+ const apiPrefix = "/storage/v1"
+ handler := mux.NewRouter().SkipClean(true).UseEncodedPath()
+
+ // healthcheck
+ handler.Path("/_internal/healthcheck").Methods(http.MethodGet).HandlerFunc(s.healthcheck)
+
+ routers := []*mux.Router{
+ handler.PathPrefix(apiPrefix).Subrouter(),
+ handler.MatcherFunc(s.publicHostMatcher).PathPrefix(apiPrefix).Subrouter(),
+ }
+
+ for _, r := range routers {
+ r.Path("/b").Methods(http.MethodGet).HandlerFunc(jsonToHTTPHandler(s.listBuckets))
+ r.Path("/b/").Methods(http.MethodGet).HandlerFunc(jsonToHTTPHandler(s.listBuckets))
+ r.Path("/b").Methods(http.MethodPost).HandlerFunc(jsonToHTTPHandler(s.createBucketByPost))
+ r.Path("/b/").Methods(http.MethodPost).HandlerFunc(jsonToHTTPHandler(s.createBucketByPost))
+ r.Path("/b/{bucketName}").Methods(http.MethodGet).HandlerFunc(jsonToHTTPHandler(s.getBucket))
+ r.Path("/b/{bucketName}").Methods(http.MethodPatch).HandlerFunc(jsonToHTTPHandler(s.updateBucket))
+ r.Path("/b/{bucketName}").Methods(http.MethodDelete).HandlerFunc(jsonToHTTPHandler(s.deleteBucket))
+ r.Path("/b/{bucketName}/o").Methods(http.MethodGet).HandlerFunc(jsonToHTTPHandler(s.listObjects))
+ r.Path("/b/{bucketName}/o/").Methods(http.MethodGet).HandlerFunc(jsonToHTTPHandler(s.listObjects))
+ r.Path("/b/{bucketName}/o/{objectName:.+}").Methods(http.MethodPatch).HandlerFunc(jsonToHTTPHandler(s.patchObject))
+ r.Path("/b/{bucketName}/o/{objectName:.+}/acl").Methods(http.MethodGet).HandlerFunc(jsonToHTTPHandler(s.listObjectACL))
+ r.Path("/b/{bucketName}/o/{objectName:.+}/acl").Methods(http.MethodPost).HandlerFunc(jsonToHTTPHandler(s.setObjectACL))
+ r.Path("/b/{bucketName}/o/{objectName:.+}/acl/{entity}").Methods(http.MethodPut).HandlerFunc(jsonToHTTPHandler(s.setObjectACL))
+ r.Path("/b/{bucketName}/o/{objectName:.+}").Methods(http.MethodGet, http.MethodHead).HandlerFunc(s.getObject)
+ r.Path("/b/{bucketName}/o/{objectName:.+}").Methods(http.MethodDelete).HandlerFunc(jsonToHTTPHandler(s.deleteObject))
+ r.Path("/b/{sourceBucket}/o/{sourceObject:.+}/{copyType:rewriteTo|copyTo}/b/{destinationBucket}/o/{destinationObject:.+}").Methods(http.MethodPost).HandlerFunc(jsonToHTTPHandler(s.rewriteObject))
+ r.Path("/b/{bucketName}/o/{destinationObject:.+}/compose").Methods(http.MethodPost).HandlerFunc(jsonToHTTPHandler(s.composeObject))
+ r.Path("/b/{bucketName}/o/{objectName:.+}").Methods(http.MethodPut, http.MethodPost).HandlerFunc(jsonToHTTPHandler(s.updateObject))
+ }
+
+ // Internal / update server configuration
+ handler.Path("/_internal/config").Methods(http.MethodPut).HandlerFunc(jsonToHTTPHandler(s.updateServerConfig))
+ handler.MatcherFunc(s.publicHostMatcher).Path("/_internal/config").Methods(http.MethodPut).HandlerFunc(jsonToHTTPHandler(s.updateServerConfig))
+ handler.Path("/_internal/reseed").Methods(http.MethodPut, http.MethodPost).HandlerFunc(jsonToHTTPHandler(s.reseedServer))
+ // Internal - end
+
+ // XML API
+ xmlApiRouters := []*mux.Router{
+ handler.Host(fmt.Sprintf("{bucketName}.%s", s.publicHost)).Subrouter(),
+ handler.MatcherFunc(s.publicHostMatcher).PathPrefix(`/{bucketName}`).Subrouter(),
+ }
+ for _, r := range xmlApiRouters {
+ r.Path("/").Methods(http.MethodGet).HandlerFunc(xmlToHTTPHandler(s.xmlListObjects))
+ r.Path("").Methods(http.MethodGet).HandlerFunc(xmlToHTTPHandler(s.xmlListObjects))
+ }
+
+ bucketHost := fmt.Sprintf("{bucketName}.%s", s.publicHost)
+ handler.Host(bucketHost).Path("/{objectName:.+}").Methods(http.MethodGet, http.MethodHead).HandlerFunc(s.downloadObject)
+ handler.Path("/download/storage/v1/b/{bucketName}/o/{objectName:.+}").Methods(http.MethodGet, http.MethodHead).HandlerFunc(s.downloadObject)
+ handler.Path("/upload/storage/v1/b/{bucketName}/o").Methods(http.MethodPost).HandlerFunc(jsonToHTTPHandler(s.insertObject))
+ handler.Path("/upload/storage/v1/b/{bucketName}/o/").Methods(http.MethodPost).HandlerFunc(jsonToHTTPHandler(s.insertObject))
+ handler.Path("/upload/storage/v1/b/{bucketName}/o").Methods(http.MethodPut).HandlerFunc(jsonToHTTPHandler(s.uploadFileContent))
+ handler.Path("/upload/storage/v1/b/{bucketName}/o/").Methods(http.MethodPut).HandlerFunc(jsonToHTTPHandler(s.uploadFileContent))
+ handler.Path("/upload/resumable/{uploadId}").Methods(http.MethodPut, http.MethodPost).HandlerFunc(jsonToHTTPHandler(s.uploadFileContent))
+
+ // Batch endpoint
+ handler.MatcherFunc(s.publicHostMatcher).Path("/batch/storage/v1").Methods(http.MethodPost).HandlerFunc(s.handleBatchCall)
+ handler.Path("/batch/storage/v1").Methods(http.MethodPost).HandlerFunc(s.handleBatchCall)
+
+ handler.MatcherFunc(s.publicHostMatcher).Path("/{bucketName}/{objectName:.+}").Methods(http.MethodGet, http.MethodHead).HandlerFunc(s.downloadObject)
+ handler.Host("{bucketName:.+}").Path("/{objectName:.+}").Methods(http.MethodGet, http.MethodHead).HandlerFunc(s.downloadObject)
+
+ // Form Uploads
+ handler.Host(s.publicHost).Path("/{bucketName}").MatcherFunc(matchFormData).Methods(http.MethodPost, http.MethodPut).HandlerFunc(xmlToHTTPHandler(s.insertFormObject))
+ handler.Host(bucketHost).MatcherFunc(matchFormData).Methods(http.MethodPost, http.MethodPut).HandlerFunc(xmlToHTTPHandler(s.insertFormObject))
+
+ // Signed URLs (upload and download)
+ handler.MatcherFunc(s.publicHostMatcher).Path("/{bucketName}/{objectName:.+}").Methods(http.MethodPost, http.MethodPut).HandlerFunc(jsonToHTTPHandler(s.insertObject))
+ handler.MatcherFunc(s.publicHostMatcher).Path("/{bucketName}/{objectName:.+}").Methods(http.MethodGet, http.MethodHead).HandlerFunc(s.getObject)
+ handler.MatcherFunc(s.publicHostMatcher).Path("/{bucketName}/{objectName:.+}").Methods(http.MethodDelete).HandlerFunc(jsonToHTTPHandler(s.deleteObject))
+ handler.Host(bucketHost).Path("/{objectName:.+}").Methods(http.MethodPost, http.MethodPut).HandlerFunc(jsonToHTTPHandler(s.insertObject))
+ handler.Host("{bucketName:.+}").Path("/{objectName:.+}").Methods(http.MethodPost, http.MethodPut).HandlerFunc(jsonToHTTPHandler(s.insertObject))
+
+ s.handler = handler
}
-func (s *Server) buildMuxer() {
- s.mux = mux.NewRouter()
- s.mux.Host("storage.googleapis.com").Path("/{bucketName}/{objectName:.+}").Methods("GET", "HEAD").HandlerFunc(s.downloadObject)
- s.mux.Host("{bucketName}.storage.googleapis.com").Path("/{objectName:.+}").Methods("GET", "HEAD").HandlerFunc(s.downloadObject)
- r := s.mux.PathPrefix("/storage/v1").Subrouter()
- r.Path("/b").Methods("GET").HandlerFunc(s.listBuckets)
- r.Path("/b/{bucketName}").Methods("GET").HandlerFunc(s.getBucket)
- r.Path("/b/{bucketName}/o").Methods("GET").HandlerFunc(s.listObjects)
- r.Path("/b/{bucketName}/o").Methods("POST").HandlerFunc(s.insertObject)
- r.Path("/b/{bucketName}/o/{objectName:.+}").Methods("GET").HandlerFunc(s.getObject)
- r.Path("/b/{bucketName}/o/{objectName:.+}").Methods("DELETE").HandlerFunc(s.deleteObject)
- r.Path("/b/{sourceBucket}/o/{sourceObject:.+}/rewriteTo/b/{destinationBucket}/o/{destinationObject:.+}").HandlerFunc(s.rewriteObject)
- s.mux.Path("/download/storage/v1/b/{bucketName}/o/{objectName}").Methods("GET").HandlerFunc(s.downloadObject)
- s.mux.Path("/upload/storage/v1/b/{bucketName}/o").Methods("POST").HandlerFunc(s.insertObject)
- s.mux.Path("/upload/resumable/{uploadId}").Methods("PUT", "POST").HandlerFunc(s.uploadFileContent)
+func (s *Server) seed() ([]backend.StreamingObject, error) {
+ if s.options.Seed == "" {
+ return nil, nil
+ }
+
+ initialObjects, emptyBuckets := generateObjectsFromFiles(s.options.Seed)
+
+ backendObjects := bufferedObjectsToBackendObjects(initialObjects)
+
+ var err error
+ if s.options.StorageRoot != "" {
+ s.backend, err = backend.NewStorageFS(backendObjects, s.options.StorageRoot)
+ } else {
+ s.backend, err = backend.NewStorageMemory(backendObjects)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ for _, bucketName := range emptyBuckets {
+ s.CreateBucketWithOpts(CreateBucketOpts{Name: bucketName})
+ }
+ return backendObjects, nil
+}
+
+func (s *Server) reseedServer(r *http.Request) jsonResponse {
+ backendObjects, err := s.seed()
+ if err != nil {
+ return errToJsonResponse(err)
+ }
+
+ return jsonResponse{data: fromBackendObjects(backendObjects)}
+}
+
+func generateObjectsFromFiles(folder string) ([]Object, []string) {
+ var objects []Object
+ var emptyBuckets []string
+ if files, err := os.ReadDir(folder); err == nil {
+ for _, f := range files {
+ if !f.IsDir() {
+ continue
+ }
+ bucketName := f.Name()
+ localBucketPath := filepath.Join(folder, bucketName)
+
+ bucketObjects, err := objectsFromBucket(localBucketPath, bucketName)
+ if err != nil {
+ continue
+ }
+
+ if len(bucketObjects) < 1 {
+ emptyBuckets = append(emptyBuckets, bucketName)
+ }
+ objects = append(objects, bucketObjects...)
+ }
+ }
+ return objects, emptyBuckets
+}
+
+func objectsFromBucket(localBucketPath, bucketName string) ([]Object, error) {
+ var objects []Object
+ err := filepath.Walk(localBucketPath, func(path string, info os.FileInfo, _ error) error {
+ if info.Mode().IsRegular() {
+			// Rel() should never return an error since path always descends from localBucketPath
+ relPath, _ := filepath.Rel(localBucketPath, path)
+ objectKey := filepath.ToSlash(relPath)
+ fileContent, err := os.ReadFile(path)
+ if err != nil {
+ return fmt.Errorf("could not read file %q: %w", path, err)
+ }
+ objects = append(objects, Object{
+ ObjectAttrs: ObjectAttrs{
+ ACL: []storage.ACLRule{
+ {
+ Entity: "projectOwner-test-project",
+ Role: "OWNER",
+ },
+ },
+ BucketName: bucketName,
+ Name: objectKey,
+ ContentType: mime.TypeByExtension(filepath.Ext(path)),
+ Crc32c: checksum.EncodedCrc32cChecksum(fileContent),
+ Md5Hash: checksum.EncodedMd5Hash(fileContent),
+ },
+ Content: fileContent,
+ })
+ }
+ return nil
+ })
+ return objects, err
+}
+
+func (s *Server) healthcheck(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusOK)
+}
+
+// publicHostMatcher matches incoming requests against the currently specified server publicHost.
+func (s *Server) publicHostMatcher(r *http.Request, rm *mux.RouteMatch) bool {
+ if strings.Contains(s.publicHost, ":") || !strings.Contains(r.Host, ":") {
+ return r.Host == s.publicHost
+ }
+ idx := strings.IndexByte(r.Host, ':')
+ return r.Host[:idx] == s.publicHost
}
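+// Illustration (host values assumed): with s.publicHost == "storage.example.com",
+//
+//	r.Host == "storage.example.com"      // matches
+//	r.Host == "storage.example.com:4443" // matches: the request port is ignored
+//	r.Host == "other.example.com:4443"   // no match
+//
+// A publicHost that itself carries a port requires an exact Host match.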
// Stop stops the server, closing all connections.
@@ -153,20 +444,136 @@ func (s *Server) Stop() {
// URL returns the server URL.
func (s *Server) URL() string {
+ if s.externalURL != "" {
+ return s.externalURL
+ }
if s.ts != nil {
return s.ts.URL
}
return ""
}
+// PublicURL returns the server's public download URL.
+func (s *Server) PublicURL() string {
+ return fmt.Sprintf("%s://%s", s.scheme(), s.publicHost)
+}
+
+func (s *Server) Backend() backend.Storage {
+ return s.backend
+}
+
+func (s *Server) scheme() string {
+ if s.options.Scheme == "http" {
+ return "http"
+ }
+ return "https"
+}
+
// HTTPClient returns an HTTP client configured to talk to the server.
func (s *Server) HTTPClient() *http.Client {
return &http.Client{Transport: s.transport}
}
+// HTTPHandler returns an HTTP handler that behaves like GCS.
+func (s *Server) HTTPHandler() http.Handler {
+ return s.handler
+}
+
// Client returns a GCS client configured to talk to the server.
func (s *Server) Client() *storage.Client {
- opt := option.WithHTTPClient(s.HTTPClient())
- client, _ := storage.NewClient(context.Background(), opt)
+ client, err := storage.NewClient(context.Background(), option.WithHTTPClient(s.HTTPClient()), option.WithCredentials(&google.Credentials{}))
+ if err != nil {
+ panic(err)
+ }
return client
}
+
+func (s *Server) handleBatchCall(w http.ResponseWriter, r *http.Request) {
+ reader, err := r.MultipartReader()
+ if err != nil {
+ http.Error(w, "invalid Content-Type header", http.StatusBadRequest)
+ return
+ }
+
+ var b bytes.Buffer
+ mw := multipart.NewWriter(&b)
+ defer mw.Close()
+ w.Header().Set("Content-Type", "multipart/mixed; boundary="+mw.Boundary())
+
+ w.WriteHeader(http.StatusOK)
+ part, err := reader.NextPart()
+ for ; err == nil; part, err = reader.NextPart() {
+ contentID := part.Header.Get("Content-ID")
+ if contentID == "" {
+ // missing content ID, skip
+ continue
+ }
+
+ partHeaders := textproto.MIMEHeader{}
+ partHeaders.Set("Content-Type", "application/http")
+ partHeaders.Set("Content-ID", strings.Replace(contentID, "<", "<response-", 1))
+ partWriter, err := mw.CreatePart(partHeaders)
+ if err != nil {
+ continue
+ }
+
+ partResponseWriter := httptest.NewRecorder()
+ if part.Header.Get("Content-Type") != "application/http" {
+ http.Error(partResponseWriter, "invalid Content-Type header", http.StatusBadRequest)
+ writeMultipartResponse(partResponseWriter.Result(), partWriter, contentID)
+ continue
+ }
+
+ content, err := io.ReadAll(part)
+ part.Close()
+ if err != nil {
+ http.Error(partResponseWriter, "unable to process request", http.StatusBadRequest)
+ writeMultipartResponse(partResponseWriter.Result(), partWriter, contentID)
+ continue
+ }
+
+ partRequest, err := http.ReadRequest(bufio.NewReader(bytes.NewReader(content)))
+ if err != nil {
+ http.Error(partResponseWriter, "unable to process request", http.StatusBadRequest)
+ writeMultipartResponse(partResponseWriter.Result(), partWriter, contentID)
+ continue
+ }
+
+ s.handler.ServeHTTP(partResponseWriter, partRequest)
+ writeMultipartResponse(partResponseWriter.Result(), partWriter, contentID)
+ }
+ mw.Close()
+
+ _, err = b.WriteTo(w)
+ if err != nil {
+ http.Error(w, "unable to process request", http.StatusBadRequest)
+ }
+}
+
+func writeMultipartResponse(r *http.Response, w io.Writer, contentId string) {
+ dump, err := httputil.DumpResponse(r, true)
+ if err != nil {
+ fmt.Fprintf(w, "Content-Type: text/plain; charset=utf-8\r\nContent-ID: %s\r\nContent-Length: 0\r\n\r\nHTTP/1.1 500 Internal Server Error", contentId)
+ return
+ }
+ w.Write(dump)
+}
+
+func requestCompressHandler(h http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.Header.Get("content-encoding") == "gzip" {
+ gzipReader, err := gzip.NewReader(r.Body)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ r.Body = gzipReader
+ }
+ h.ServeHTTP(w, r)
+ })
+}
+
+func matchFormData(r *http.Request, _ *mux.RouteMatch) bool {
+ contentType, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type"))
+ return contentType == "multipart/form-data"
+}
diff --git a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/upload.go b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/upload.go
index 8f3c85a26a288..e9181df46f47a 100644
--- a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/upload.go
+++ b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/upload.go
@@ -5,245 +5,629 @@
package fakestorage
import (
- "crypto/md5" // #nosec G501
+ "bytes"
"crypto/rand"
"encoding/json"
+ "errors"
"fmt"
"io"
- "io/ioutil"
"mime"
"mime/multipart"
"net/http"
+ "net/url"
+ "regexp"
"strconv"
"strings"
+ "time"
- "encoding/base64"
- "hash/crc32"
-
+ "cloud.google.com/go/storage"
+ "github.com/fsouza/fake-gcs-server/internal/backend"
+ "github.com/fsouza/fake-gcs-server/internal/checksum"
"github.com/gorilla/mux"
)
+const (
+ contentTypeHeader = "Content-Type"
+ cacheControlHeader = "Cache-Control"
+)
+
+const (
+ uploadTypeMedia = "media"
+ uploadTypeMultipart = "multipart"
+ uploadTypeResumable = "resumable"
+)
+
+// per RFC 2045, double quotes should be used whenever parameters have a value
+// that includes some special character - anything in the set: ()<>@,;:\"/[]?=
+// (including space). gsutil likes to use `=` in the boundary, but incorrectly
+// quotes it using single quotes.
+//
+// We do exclude \ and " from the regexp because those are not supported by the
+// mime package.
+//
+// This has been reported to gsutil
+// (https://github.com/GoogleCloudPlatform/gsutil/issues/1466). If that issue
+// ever gets closed, we should be able to get rid of this hack.
+var gsutilBoundary = regexp.MustCompile(`boundary='([^']*[()<>@,;:"/\[\]?= ]+[^']*)'`)
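+// As an illustration (boundary value assumed), a header such as
+//
+//	Content-Type: multipart/related; boundary='==myboundary=='
+//
+// is rewritten by parseContentTypeParams (later in this file) to use double
+// quotes before it reaches mime.ParseMediaType:
+//
+//	Content-Type: multipart/related; boundary="==myboundary=="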
+
type multipartMetadata struct {
- Name string `json:"name"`
+ ContentType string `json:"contentType"`
+ ContentEncoding string `json:"contentEncoding"`
+ ContentDisposition string `json:"contentDisposition"`
+ CacheControl string `json:"cacheControl"`
+ CustomTime time.Time `json:"customTime,omitempty"`
+ Name string `json:"name"`
+ Metadata map[string]string `json:"metadata"`
+}
+
+type contentRange struct {
+ KnownRange bool // Is the range known, or "*"?
+ KnownTotal bool // Is the total known, or "*"?
+ Start int // Start of the range, -1 if unknown
+ End int // End of the range, -1 if unknown
+ Total int // Total bytes expected, -1 if unknown
}
-func (s *Server) insertObject(w http.ResponseWriter, r *http.Request) {
- s.mtx.Lock()
- defer s.mtx.Unlock()
- bucketName := mux.Vars(r)["bucketName"]
- if err := s.backend.GetBucket(bucketName); err != nil {
- w.WriteHeader(http.StatusNotFound)
- err := newErrorResponse(http.StatusNotFound, "Not found", nil)
- json.NewEncoder(w).Encode(err)
- return
+type generationCondition struct {
+ ifGenerationMatch *int64
+ ifGenerationNotMatch *int64
+}
+
+func (c generationCondition) ConditionsMet(activeGeneration int64) bool {
+ if c.ifGenerationMatch != nil && *c.ifGenerationMatch != activeGeneration {
+ return false
+ }
+ if c.ifGenerationNotMatch != nil && *c.ifGenerationNotMatch == activeGeneration {
+ return false
+ }
+ return true
+}
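+// For instance (a sketch): ifGenerationMatch = 0 only passes when there is no
+// live object (activeGeneration == 0), i.e. "create only if absent":
+//
+//	zero := int64(0)
+//	cond := generationCondition{ifGenerationMatch: &zero}
+//	cond.ConditionsMet(0) // true: nothing exists yet
+//	cond.ConditionsMet(7) // false: an object already exists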
+
+func (s *Server) insertObject(r *http.Request) jsonResponse {
+ bucketName := unescapeMuxVars(mux.Vars(r))["bucketName"]
+
+ if _, err := s.backend.GetBucket(bucketName); err != nil {
+ return jsonResponse{status: http.StatusNotFound}
}
uploadType := r.URL.Query().Get("uploadType")
+ if uploadType == "" && r.Header.Get("X-Goog-Upload-Protocol") == uploadTypeResumable {
+ uploadType = uploadTypeResumable
+ }
+
switch uploadType {
- case "media":
- s.simpleUpload(bucketName, w, r)
- case "multipart":
- s.multipartUpload(bucketName, w, r)
- case "resumable":
- s.resumableUpload(bucketName, w, r)
+ case uploadTypeMedia:
+ return s.simpleUpload(bucketName, r)
+ case uploadTypeMultipart:
+ return s.multipartUpload(bucketName, r)
+ case uploadTypeResumable:
+ return s.resumableUpload(bucketName, r)
default:
- http.Error(w, "invalid uploadType", http.StatusBadRequest)
+ // Support Signed URL Uploads
+ if r.URL.Query().Get("X-Goog-Algorithm") != "" {
+ switch r.Method {
+ case http.MethodPost:
+ return s.resumableUpload(bucketName, r)
+ case http.MethodPut:
+ return s.signedUpload(bucketName, r)
+ }
+ }
+ return jsonResponse{errorMessage: "invalid uploadType", status: http.StatusBadRequest}
}
}
-func (s *Server) simpleUpload(bucketName string, w http.ResponseWriter, r *http.Request) {
- defer r.Body.Close()
- name := r.URL.Query().Get("name")
+func (s *Server) insertFormObject(r *http.Request) xmlResponse {
+ bucketName := unescapeMuxVars(mux.Vars(r))["bucketName"]
+
+ if err := r.ParseMultipartForm(32 << 20); nil != err {
+ return xmlResponse{errorMessage: "invalid form", status: http.StatusBadRequest}
+ }
+
+ // Load metadata
+ var name string
+ if keys, ok := r.MultipartForm.Value["key"]; ok {
+ name = keys[0]
+ }
if name == "" {
- http.Error(w, "name is required for simple uploads", http.StatusBadRequest)
- return
+ return xmlResponse{errorMessage: "missing key", status: http.StatusBadRequest}
+ }
+ var predefinedACL string
+ if acls, ok := r.MultipartForm.Value["acl"]; ok {
+ predefinedACL = acls[0]
+ }
+ var contentEncoding string
+ if contentEncodings, ok := r.MultipartForm.Value["Content-Encoding"]; ok {
+ contentEncoding = contentEncodings[0]
+ }
+ var contentType string
+ if contentTypes, ok := r.MultipartForm.Value["Content-Type"]; ok {
+ contentType = contentTypes[0]
+ }
+ successActionStatus := http.StatusNoContent
+ if successActionStatuses, ok := r.MultipartForm.Value["success_action_status"]; ok {
+ successInt, err := strconv.Atoi(successActionStatuses[0])
+ if err != nil {
+ return xmlResponse{errorMessage: err.Error(), status: http.StatusBadRequest}
+ }
+ if successInt != http.StatusOK && successInt != http.StatusCreated && successInt != http.StatusNoContent {
+ return xmlResponse{errorMessage: "invalid success action status", status: http.StatusBadRequest}
+ }
+ successActionStatus = successInt
+ }
+ metaData := make(map[string]string)
+ for key := range r.MultipartForm.Value {
+ lowerKey := strings.ToLower(key)
+ if metaDataKey := strings.TrimPrefix(lowerKey, "x-goog-meta-"); metaDataKey != lowerKey {
+ metaData[metaDataKey] = r.MultipartForm.Value[key][0]
+ }
+ }
+
+ // Load file
+ var file *multipart.FileHeader
+ if files, ok := r.MultipartForm.File["file"]; ok {
+ file = files[0]
+ }
+ if file == nil {
+ return xmlResponse{errorMessage: "missing file", status: http.StatusBadRequest}
}
- data, err := ioutil.ReadAll(r.Body)
+ infile, err := file.Open()
if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
+ return xmlResponse{errorMessage: err.Error()}
}
- obj := Object{BucketName: bucketName, Name: name, Content: data, Crc32c: encodedCrc32cChecksum(data), Md5Hash: encodedMd5Hash(data)}
- err = s.createObject(obj)
+ obj := StreamingObject{
+ ObjectAttrs: ObjectAttrs{
+ BucketName: bucketName,
+ Name: name,
+ ContentType: contentType,
+ ContentEncoding: contentEncoding,
+ ACL: getObjectACL(predefinedACL),
+ Metadata: metaData,
+ },
+ Content: infile,
+ }
+ obj, err = s.createObject(obj, backend.NoConditions{})
if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
+ return xmlResponse{errorMessage: err.Error()}
}
- w.WriteHeader(http.StatusOK)
- json.NewEncoder(w).Encode(obj)
+ defer obj.Close()
+
+ if successActionStatus == 201 {
+ objectURI := fmt.Sprintf("%s/%s%s", s.URL(), bucketName, name)
+ xmlBody := createXmlResponseBody(bucketName, obj.Etag, strings.TrimPrefix(name, "/"), objectURI)
+ return xmlResponse{status: successActionStatus, data: xmlBody}
+ }
+ return xmlResponse{status: successActionStatus}
}
-var crc32cTable = crc32.MakeTable(crc32.Castagnoli)
+func (s *Server) wrapUploadPreconditions(r *http.Request, bucketName string, objectName string) (generationCondition, error) {
+ result := generationCondition{
+ ifGenerationMatch: nil,
+ ifGenerationNotMatch: nil,
+ }
+ ifGenerationMatch := r.URL.Query().Get("ifGenerationMatch")
+
+ if ifGenerationMatch != "" {
+ gen, err := strconv.ParseInt(ifGenerationMatch, 10, 64)
+ if err != nil {
+ return generationCondition{}, err
+ }
+ result.ifGenerationMatch = &gen
+ }
+
+ ifGenerationNotMatch := r.URL.Query().Get("ifGenerationNotMatch")
-func crc32cChecksum(content []byte) []byte {
- checksummer := crc32.New(crc32cTable)
- checksummer.Write(content)
- return checksummer.Sum(make([]byte, 0, 4))
+ if ifGenerationNotMatch != "" {
+ gen, err := strconv.ParseInt(ifGenerationNotMatch, 10, 64)
+ if err != nil {
+ return generationCondition{}, err
+ }
+ result.ifGenerationNotMatch = &gen
+ }
+
+ return result, nil
}
-func encodedChecksum(checksum []byte) string {
- return base64.StdEncoding.EncodeToString(checksum)
+func (s *Server) simpleUpload(bucketName string, r *http.Request) jsonResponse {
+ defer r.Body.Close()
+ name := r.URL.Query().Get("name")
+ predefinedACL := r.URL.Query().Get("predefinedAcl")
+ contentEncoding := r.URL.Query().Get("contentEncoding")
+ customTime := r.URL.Query().Get("customTime")
+ if name == "" {
+ return jsonResponse{
+ status: http.StatusBadRequest,
+ errorMessage: "name is required for simple uploads",
+ }
+ }
+ obj := StreamingObject{
+ ObjectAttrs: ObjectAttrs{
+ BucketName: bucketName,
+ Name: name,
+ ContentType: r.Header.Get(contentTypeHeader),
+ CacheControl: r.Header.Get(cacheControlHeader),
+ ContentEncoding: contentEncoding,
+ CustomTime: convertTimeWithoutError(customTime),
+ ACL: getObjectACL(predefinedACL),
+ },
+ Content: notImplementedSeeker{r.Body},
+ }
+ obj, err := s.createObject(obj, backend.NoConditions{})
+ if err != nil {
+ return errToJsonResponse(err)
+ }
+ obj.Close()
+ return jsonResponse{data: newObjectResponse(obj.ObjectAttrs, s.externalURL)}
}
-func encodedCrc32cChecksum(content []byte) string {
- return encodedChecksum(crc32cChecksum(content))
+type notImplementedSeeker struct {
+ io.ReadCloser
}
-func md5Hash(b []byte) []byte {
- /* #nosec G401 */
- h := md5.New()
- h.Write(b)
- return h.Sum(nil)
+func (s notImplementedSeeker) Seek(offset int64, whence int) (int64, error) {
+ return 0, errors.New("not implemented")
}
-func encodedHash(hash []byte) string {
- return base64.StdEncoding.EncodeToString(hash)
+func (s *Server) signedUpload(bucketName string, r *http.Request) jsonResponse {
+ defer r.Body.Close()
+ name := unescapeMuxVars(mux.Vars(r))["objectName"]
+ predefinedACL := r.URL.Query().Get("predefinedAcl")
+ contentEncoding := r.URL.Query().Get("contentEncoding")
+ customTime := r.URL.Query().Get("customTime")
+
+ // Load data from HTTP Headers
+ if contentEncoding == "" {
+ contentEncoding = r.Header.Get("Content-Encoding")
+ }
+
+ metaData := make(map[string]string)
+ for key := range r.Header {
+ lowerKey := strings.ToLower(key)
+ if metaDataKey := strings.TrimPrefix(lowerKey, "x-goog-meta-"); metaDataKey != lowerKey {
+ metaData[metaDataKey] = r.Header.Get(key)
+ }
+ }
+
+ obj := StreamingObject{
+ ObjectAttrs: ObjectAttrs{
+ BucketName: bucketName,
+ Name: name,
+ ContentType: r.Header.Get(contentTypeHeader),
+ ContentEncoding: contentEncoding,
+ CustomTime: convertTimeWithoutError(customTime),
+ ACL: getObjectACL(predefinedACL),
+ Metadata: metaData,
+ },
+ Content: notImplementedSeeker{r.Body},
+ }
+ obj, err := s.createObject(obj, backend.NoConditions{})
+ if err != nil {
+ return errToJsonResponse(err)
+ }
+ obj.Close()
+ return jsonResponse{data: newObjectResponse(obj.ObjectAttrs, s.externalURL)}
}
-func encodedMd5Hash(content []byte) string {
- return encodedHash(md5Hash(content))
+func getObjectACL(predefinedACL string) []storage.ACLRule {
+ if predefinedACL == "publicRead" {
+ return []storage.ACLRule{
+ {
+ Entity: "allUsers",
+ Role: "READER",
+ },
+ }
+ }
+
+ return []storage.ACLRule{
+ {
+ Entity: "projectOwner-test-project",
+ Role: "OWNER",
+ },
+ }
}
-func (s *Server) multipartUpload(bucketName string, w http.ResponseWriter, r *http.Request) {
+func (s *Server) multipartUpload(bucketName string, r *http.Request) jsonResponse {
defer r.Body.Close()
- _, params, err := mime.ParseMediaType(r.Header.Get("Content-Type"))
+ params, err := parseContentTypeParams(r.Header.Get(contentTypeHeader))
if err != nil {
- http.Error(w, "invalid Content-Type header", http.StatusBadRequest)
- return
+ return jsonResponse{
+ status: http.StatusBadRequest,
+ errorMessage: "invalid Content-Type header",
+ }
}
var (
metadata *multipartMetadata
content []byte
)
+ var contentType string
reader := multipart.NewReader(r.Body, params["boundary"])
+
+ var partReaders []io.Reader
+
part, err := reader.NextPart()
for ; err == nil; part, err = reader.NextPart() {
if metadata == nil {
metadata, err = loadMetadata(part)
+ contentType = metadata.ContentType
} else {
+ contentType = part.Header.Get(contentTypeHeader)
content, err = loadContent(part)
+ partReaders = append(partReaders, bytes.NewReader(content))
}
if err != nil {
break
}
}
if err != io.EOF {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
+ return jsonResponse{errorMessage: err.Error()}
+ }
+
+ objName := r.URL.Query().Get("name")
+ predefinedACL := r.URL.Query().Get("predefinedAcl")
+ if objName == "" {
+ objName = metadata.Name
}
- obj := Object{BucketName: bucketName, Name: metadata.Name, Content: content, Crc32c: encodedCrc32cChecksum(content), Md5Hash: encodedMd5Hash(content)}
- err = s.createObject(obj)
+
+ conditions, err := s.wrapUploadPreconditions(r, bucketName, objName)
if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
+ return jsonResponse{
+ status: http.StatusBadRequest,
+ errorMessage: err.Error(),
+ }
+ }
+
+ obj := StreamingObject{
+ ObjectAttrs: ObjectAttrs{
+ BucketName: bucketName,
+ Name: objName,
+ ContentType: contentType,
+ CacheControl: metadata.CacheControl,
+ ContentEncoding: metadata.ContentEncoding,
+ ContentDisposition: metadata.ContentDisposition,
+ CustomTime: metadata.CustomTime,
+ ACL: getObjectACL(predefinedACL),
+ Metadata: metadata.Metadata,
+ },
+ Content: notImplementedSeeker{io.NopCloser(io.MultiReader(partReaders...))},
}
- w.WriteHeader(http.StatusOK)
- json.NewEncoder(w).Encode(obj)
+
+ obj, err = s.createObject(obj, conditions)
+ if err != nil {
+ return errToJsonResponse(err)
+ }
+ defer obj.Close()
+ return jsonResponse{data: newObjectResponse(obj.ObjectAttrs, s.externalURL)}
}
-func (s *Server) resumableUpload(bucketName string, w http.ResponseWriter, r *http.Request) {
- objName := r.URL.Query().Get("name")
- if objName == "" {
- metadata, err := loadMetadata(r.Body)
+func parseContentTypeParams(requestContentType string) (map[string]string, error) {
+ requestContentType = gsutilBoundary.ReplaceAllString(requestContentType, `boundary="$1"`)
+ _, params, err := mime.ParseMediaType(requestContentType)
+ return params, err
+}
+
+func (s *Server) resumableUpload(bucketName string, r *http.Request) jsonResponse {
+ if r.URL.Query().Has("upload_id") {
+ return s.uploadFileContent(r)
+ }
+ predefinedACL := r.URL.Query().Get("predefinedAcl")
+ contentEncoding := r.URL.Query().Get("contentEncoding")
+ metadata := new(multipartMetadata)
+ if r.Body != http.NoBody {
+ var err error
+ metadata, err = loadMetadata(r.Body)
if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
+ return jsonResponse{errorMessage: err.Error()}
}
+ }
+ objName := r.URL.Query().Get("name")
+ if objName == "" {
objName = metadata.Name
}
- obj := Object{BucketName: bucketName, Name: objName}
+ if contentEncoding == "" {
+ contentEncoding = metadata.ContentEncoding
+ }
+ obj := Object{
+ ObjectAttrs: ObjectAttrs{
+ BucketName: bucketName,
+ Name: objName,
+ ContentType: metadata.ContentType,
+ CacheControl: metadata.CacheControl,
+ ContentEncoding: contentEncoding,
+ CustomTime: metadata.CustomTime,
+ ACL: getObjectACL(predefinedACL),
+ Metadata: metadata.Metadata,
+ },
+ }
uploadID, err := generateUploadID()
if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
+ return jsonResponse{errorMessage: err.Error()}
+ }
+ s.uploads.Store(uploadID, obj)
+ header := make(http.Header)
+ location := fmt.Sprintf(
+ "%s/upload/storage/v1/b/%s/o?uploadType=resumable&name=%s&upload_id=%s",
+ s.URL(),
+ bucketName,
+ url.PathEscape(objName),
+ uploadID,
+ )
+ header.Set("Location", location)
+ if r.Header.Get("X-Goog-Upload-Command") == "start" {
+ header.Set("X-Goog-Upload-URL", location)
+ header.Set("X-Goog-Upload-Status", "active")
+ }
+ return jsonResponse{
+ data: newObjectResponse(obj.ObjectAttrs, s.externalURL),
+ header: header,
}
- s.uploads[uploadID] = obj
- w.Header().Set("Location", s.URL()+"/upload/resumable/"+uploadID)
- w.WriteHeader(http.StatusOK)
- json.NewEncoder(w).Encode(obj)
}
-func (s *Server) uploadFileContent(w http.ResponseWriter, r *http.Request) {
- uploadID := mux.Vars(r)["uploadId"]
- s.mtx.Lock()
- defer s.mtx.Unlock()
- obj, ok := s.uploads[uploadID]
+// uploadFileContent accepts a chunk of a resumable upload
+//
+// A resumable upload is sent in one or more chunks. The request's
+// "Content-Range" header is used to determine if more data is expected.
+//
+// When sending streaming content, the total size is unknown until the stream
+// is exhausted. The Go client always sends streaming content. The sequence of
+// "Content-Range" headers for 2600-byte content sent in 1000-byte chunks is:
+//
+// Content-Range: bytes 0-999/*
+// Content-Range: bytes 1000-1999/*
+// Content-Range: bytes 2000-2599/*
+// Content-Range: bytes */2600
+//
+// When sending chunked content of a known size, the total size is sent as
+// well. The Python client uses this method to upload files and in-memory
+// content. The sequence of "Content-Range" headers for the 2600-byte content
+// sent in 1000-byte chunks is:
+//
+// Content-Range: bytes 0-999/2600
+// Content-Range: bytes 1000-1999/2600
+// Content-Range: bytes 2000-2599/2600
+//
+// The server collects the content, analyzes the "Content-Range", and returns a
+// "308 Permanent Redirect" response if more chunks are expected, and a
+// "200 OK" response if the upload is complete (the Go client also accepts a
+// "201 Created" response). The "Range" header in the response should be set to
+// the size of the content received so far, such as:
+//
+// Range: bytes 0-2000
+//
+// The client (such as the Go client) can send a header "X-Guploader-No-308" if
+// it can't process a native "308 Permanent Redirect". The in-process response
+// then has a status of "200 OK", with a header "X-Http-Status-Code-Override"
+// set to "308".
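+//
+// As a client-side sketch (illustrative; uploadURL, chunk, start and total
+// are assumed names, not part of this package), one chunk of a known-size
+// upload could be sent as:
+//
+//	req, _ := http.NewRequest(http.MethodPut, uploadURL, bytes.NewReader(chunk))
+//	req.Header.Set("Content-Range",
+//		fmt.Sprintf("bytes %d-%d/%d", start, start+len(chunk)-1, total))
+//	resp, _ := http.DefaultClient.Do(req)
+//	// 308 (or 200 with X-Http-Status-Code-Override: 308) => send the next chunk.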
+func (s *Server) uploadFileContent(r *http.Request) jsonResponse {
+ uploadID := r.URL.Query().Get("upload_id")
+ rawObj, ok := s.uploads.Load(uploadID)
if !ok {
- http.Error(w, "upload not found", http.StatusNotFound)
- return
+ return jsonResponse{status: http.StatusNotFound}
}
+ obj := rawObj.(Object)
+ // TODO: stream upload file content to and from disk (when using the FS
+ // backend, at least) instead of loading the entire content into memory.
content, err := loadContent(r.Body)
if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
+ return jsonResponse{errorMessage: err.Error()}
}
commit := true
- status := http.StatusCreated
- objLength := len(obj.Content)
+ status := http.StatusOK
obj.Content = append(obj.Content, content...)
- obj.Crc32c = encodedCrc32cChecksum(obj.Content)
- obj.Md5Hash = encodedMd5Hash(obj.Content)
+ obj.Crc32c = checksum.EncodedCrc32cChecksum(obj.Content)
+ obj.Md5Hash = checksum.EncodedMd5Hash(obj.Content)
+ obj.Etag = obj.Md5Hash
+ contentTypeHeader := r.Header.Get(contentTypeHeader)
+ if contentTypeHeader != "" {
+ obj.ContentType = contentTypeHeader
+ } else {
+ obj.ContentType = "application/octet-stream"
+ }
+ responseHeader := make(http.Header)
if contentRange := r.Header.Get("Content-Range"); contentRange != "" {
- commit, err = parseRange(contentRange, objLength, len(content), w)
+ parsed, err := parseContentRange(contentRange)
if err != nil {
- http.Error(w, err.Error(), http.StatusBadRequest)
- return
+ return jsonResponse{errorMessage: err.Error(), status: http.StatusBadRequest}
+ }
+ if parsed.KnownRange {
+ // Middle of streaming request, or any part of chunked request
+ responseHeader.Set("Range", fmt.Sprintf("bytes=0-%d", parsed.End))
+ // Complete if the range covers the known total
+ commit = parsed.KnownTotal && (parsed.End+1 >= parsed.Total)
+ } else {
+ // End of a streaming request
+ responseHeader.Set("Range", fmt.Sprintf("bytes=0-%d", len(obj.Content)))
}
}
if commit {
- delete(s.uploads, uploadID)
- err = s.createObject(obj)
+ s.uploads.Delete(uploadID)
+ streamingObject, err := s.createObject(obj.StreamingObject(), backend.NoConditions{})
+ if err != nil {
+ return errToJsonResponse(err)
+ }
+ defer streamingObject.Close()
+ obj, err = streamingObject.BufferedObject()
if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
+ return errToJsonResponse(err)
}
} else {
- status = http.StatusOK
- w.Header().Set("X-Http-Status-Code-Override", "308")
- s.uploads[uploadID] = obj
- }
- data, _ := json.Marshal(obj)
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("Content-Length", strconv.Itoa(len(data)))
- w.WriteHeader(status)
- w.Write(data)
+ if _, no308 := r.Header["X-Guploader-No-308"]; no308 {
+ // Go client
+ responseHeader.Set("X-Http-Status-Code-Override", "308")
+ } else {
+ // Python client
+ status = http.StatusPermanentRedirect
+ }
+ s.uploads.Store(uploadID, obj)
+ }
+ if r.Header.Get("X-Goog-Upload-Command") == "upload, finalize" {
+ responseHeader.Set("X-Goog-Upload-Status", "final")
+ }
+ return jsonResponse{
+ status: status,
+ data: newObjectResponse(obj.ObjectAttrs, s.externalURL),
+ header: responseHeader,
+ }
}
-func parseRange(r string, objLength, bodyLength int, w http.ResponseWriter) (finished bool, err error) {
+// Parse a Content-Range header
+// Some possible valid header values:
+//
+// bytes 0-1023/4096 (first 1024 bytes of a 4096-byte document)
+// bytes 1024-2047/* (second 1024 bytes of a streaming document)
+//	bytes */4096 (the end of a 4096-byte streaming document)
+// bytes 0-*/* (start and end of a streaming document as sent by nodeJS client lib)
+// bytes */* (start and end of a streaming document as sent by the C++ SDK)
+func parseContentRange(r string) (parsed contentRange, err error) {
invalidErr := fmt.Errorf("invalid Content-Range: %v", r)
+
+ // Require that units == "bytes"
const bytesPrefix = "bytes "
- var contentLength int
if !strings.HasPrefix(r, bytesPrefix) {
- return false, invalidErr
+ return parsed, invalidErr
}
+
+ // Split range from total length
parts := strings.SplitN(r[len(bytesPrefix):], "/", 2)
if len(parts) != 2 {
- return false, invalidErr
+ return parsed, invalidErr
}
- var rangeStart, rangeEnd int
+ // Process range
if parts[0] == "*" {
- rangeStart = objLength
- rangeEnd = objLength + bodyLength
+ parsed.Start = -1
+ parsed.End = -1
} else {
rangeParts := strings.SplitN(parts[0], "-", 2)
if len(rangeParts) != 2 {
- return false, invalidErr
+ return parsed, invalidErr
}
- rangeStart, err = strconv.Atoi(rangeParts[0])
+
+ parsed.Start, err = strconv.Atoi(rangeParts[0])
if err != nil {
- return false, invalidErr
+ return parsed, invalidErr
}
- rangeEnd, err = strconv.Atoi(rangeParts[1])
- if err != nil {
- return false, invalidErr
+
+ if rangeParts[1] == "*" {
+ parsed.End = -1
+ } else {
+ parsed.KnownRange = true
+ parsed.End, err = strconv.Atoi(rangeParts[1])
+ if err != nil {
+ return parsed, invalidErr
+ }
}
}
- contentLength = objLength + bodyLength
- finished = rangeEnd == contentLength
- w.Header().Set("Range", fmt.Sprintf("bytes=%d-%d", rangeStart, rangeEnd))
+ // Process total length
+ if parts[1] == "*" {
+ parsed.Total = -1
+ } else {
+ parsed.KnownTotal = true
+ parsed.Total, err = strconv.Atoi(parts[1])
+ if err != nil {
+ return parsed, invalidErr
+ }
+ }
- return finished, nil
+ return parsed, nil
}
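+// Illustrative results under the rules above:
+//
+//	parseContentRange("bytes 0-999/*") // KnownRange=true, Start=0, End=999, KnownTotal=false, Total=-1
+//	parseContentRange("bytes */4096")  // KnownRange=false, Start=-1, End=-1, KnownTotal=true, Total=4096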
func loadMetadata(rc io.ReadCloser) (*multipartMetadata, error) {
@@ -255,7 +639,7 @@ func loadMetadata(rc io.ReadCloser) (*multipartMetadata, error) {
func loadContent(rc io.ReadCloser) ([]byte, error) {
defer rc.Close()
- return ioutil.ReadAll(rc)
+ return io.ReadAll(rc)
}
func generateUploadID() (string, error) {
diff --git a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/xml_response.go b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/xml_response.go
new file mode 100644
index 0000000000000..50d9661df84c8
--- /dev/null
+++ b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/xml_response.go
@@ -0,0 +1,123 @@
+package fakestorage
+
+import (
+ "encoding/xml"
+ "net/http"
+ "strings"
+)
+
+type xmlResponse struct {
+ status int
+ header http.Header
+ data any
+ errorMessage string
+}
+
+type xmlResponseBody struct {
+ XMLName xml.Name `xml:"PostResponse"`
+ Bucket string
+ Etag struct {
+ Value string `xml:",innerxml"`
+ }
+ Key string
+ Location string
+}
+
+type ListBucketResult struct {
+ XMLName xml.Name `xml:"ListBucketResult"`
+ Name string `xml:"Name"`
+ CommonPrefixes []CommonPrefix `xml:"CommonPrefixes,omitempty"`
+ Delimiter string `xml:"Delimiter"`
+ Prefix string `xml:"Prefix"`
+ KeyCount int `xml:"KeyCount"`
+ Contents []Contents `xml:"Contents"`
+}
+
+type Contents struct {
+ XMLName xml.Name `xml:"Contents"`
+ Key string `xml:"Key"`
+ Generation int64 `xml:"Generation"`
+ LastModified string `xml:"LastModified"`
+ ETag ETag
+ Size int64 `xml:"Size"`
+}
+
+type CommonPrefix struct {
+ Prefix string `xml:"Prefix"`
+}
+
+type ETag struct {
+ Value string `xml:",innerxml"`
+}
+
+func (e *ETag) Equals(etag string) bool {
+ trim := func(s string) string {
+ return strings.TrimPrefix(strings.TrimSuffix(s, "\""), "\"")
+ }
+ return trim(e.Value) == trim(etag)
+}
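+// For example (illustrative), surrounding double quotes are ignored on both
+// sides of the comparison:
+//
+//	(&ETag{Value: "\"abc\""}).Equals("abc")     // true
+//	(&ETag{Value: "\"abc\""}).Equals("\"abc\"") // true
+//	(&ETag{Value: "\"abc\""}).Equals("xyz")     // false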
+
+type xmlHandler = func(r *http.Request) xmlResponse
+
+func xmlToHTTPHandler(h xmlHandler) http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+ resp := h(r)
+ w.Header().Set("Content-Type", "application/xml")
+ for name, values := range resp.header {
+ for _, value := range values {
+ w.Header().Add(name, value)
+ }
+ }
+
+ status := resp.getStatus()
+ var data any
+ if status > 399 {
+ data = newErrorResponse(status, resp.getErrorMessage(status), nil)
+ } else {
+ data = resp.data
+ }
+
+ w.WriteHeader(status)
+
+ dataBytes, ok := data.([]byte)
+ if ok {
+ w.Write(dataBytes)
+ } else {
+ xml.NewEncoder(w).Encode(data)
+ }
+ }
+}
+
+func createXmlResponseBody(bucketName, etag, key, location string) []byte {
+ responseBody := xmlResponseBody{
+ Bucket: bucketName,
+ Etag: struct {
+ Value string `xml:",innerxml"`
+ }{etag},
+ Location: location,
+ Key: key,
+ }
+ x, err := xml.Marshal(responseBody)
+ if err != nil {
+ return nil
+ }
+
+ return []byte(xml.Header + string(x))
+}
+
+func (r *xmlResponse) getStatus() int {
+ if r.status > 0 {
+ return r.status
+ }
+ if r.errorMessage != "" {
+ return http.StatusInternalServerError
+ }
+ return http.StatusOK
+}
+
+func (r *xmlResponse) getErrorMessage(status int) string {
+ if r.errorMessage != "" {
+ return r.errorMessage
+ }
+ return http.StatusText(status)
+}
diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/bucket.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/bucket.go
new file mode 100644
index 0000000000000..e56a7aa7950a5
--- /dev/null
+++ b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/bucket.go
@@ -0,0 +1,22 @@
+// Copyright 2019 Francisco Souza. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package backend
+
+import "time"
+
+// Bucket represents the bucket that is stored within the fake server.
+type Bucket struct {
+ Name string
+ VersioningEnabled bool
+ TimeCreated time.Time
+ DefaultEventBasedHold bool
+}
+
+const bucketMetadataSuffix = ".bucketMetadata"
+
+type BucketAttrs struct {
+ DefaultEventBasedHold bool
+ VersioningEnabled bool
+}
diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/fs.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/fs.go
index 24b1b2cb9437e..c96867d802df6 100644
--- a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/fs.go
+++ b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/fs.go
@@ -5,129 +5,465 @@
package backend
import (
+ "bytes"
"encoding/json"
+ "errors"
"fmt"
- "io/ioutil"
+ "io"
+ "io/fs"
"net/url"
"os"
- "path"
"path/filepath"
"strings"
+ "sync"
+ "syscall"
+ "time"
+
+ "github.com/fsouza/fake-gcs-server/internal/checksum"
+ "github.com/pkg/xattr"
)
-// StorageFS is an implementation of the backend storage that stores data on disk
+// storageFS is an implementation of the backend storage that stores data on disk
+//
// The layout is the following:
+//
// - rootDir
-// |- bucket1
-// \- bucket2
-// |- object1
-// \- object2
+//
+// |- bucket1
+// \- bucket2
+// |- object1
+// \- object2
+//
// Bucket and object names are url path escaped, so there's no special meaning of forward slashes.
-type StorageFS struct {
+type storageFS struct {
rootDir string
+ mtx sync.RWMutex
+ mh metadataHandler
}
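+// As a concrete sketch of that layout (names assumed): an object "a/b.txt" in
+// bucket "my bucket" ends up at
+//
+//	rootDir/my%20bucket/a/b.txt
+//
+// where the bucket directory is the url.PathEscape'd bucket name and slashes
+// in object names become nested directories (see CreateObject below).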
-// NewStorageFS creates an instance of StorageMemory
-func NewStorageFS(objects []Object, rootDir string) (Storage, error) {
+// NewStorageFS creates an instance of the filesystem-backed storage backend.
+func NewStorageFS(objects []StreamingObject, rootDir string) (Storage, error) {
if !strings.HasSuffix(rootDir, "/") {
rootDir += "/"
}
- s := &StorageFS{
- rootDir: rootDir,
+ err := os.MkdirAll(rootDir, 0o700)
+ if err != nil {
+ return nil, err
}
+
+ var mh metadataHandler = metadataFile{}
+ // Use xattr for metadata if rootDir supports it.
+ if xattr.XATTR_SUPPORTED {
+ xattrHandler := metadataXattr{}
+ var xerr *xattr.Error
+ _, err = xattrHandler.read(rootDir)
+ if err == nil || (errors.As(err, &xerr) && xerr.Err == xattr.ENOATTR) {
+ mh = xattrHandler
+ }
+ }
+
+ s := &storageFS{rootDir: rootDir, mh: mh}
for _, o := range objects {
- err := s.CreateObject(o)
+ obj, err := s.CreateObject(o, NoConditions{})
if err != nil {
return nil, err
}
+ obj.Close()
}
return s, nil
}
-// CreateBucket creates a bucket
-func (s *StorageFS) CreateBucket(name string) error {
- return os.MkdirAll(filepath.Join(s.rootDir, url.PathEscape(name)), 0700)
+// CreateBucket creates a bucket in the fs backend. A bucket is a folder in the
+// root directory.
+func (s *storageFS) CreateBucket(name string, bucketAttrs BucketAttrs) error {
+ s.mtx.Lock()
+ defer s.mtx.Unlock()
+ return s.createBucket(name, bucketAttrs)
+}
+
+func (s *storageFS) createBucket(name string, bucketAttrs BucketAttrs) error {
+ if bucketAttrs.VersioningEnabled {
+ return errors.New("not implemented: fs storage type does not support versioning yet")
+ }
+ path := filepath.Join(s.rootDir, url.PathEscape(name))
+ err := os.MkdirAll(path, 0o700)
+ if err != nil {
+ return err
+ }
+ encoded, err := json.Marshal(bucketAttrs)
+ if err != nil {
+ return err
+ }
+ return writeFile(path+bucketMetadataSuffix, encoded, 0o600)
}
-// ListBuckets lists buckets
-func (s *StorageFS) ListBuckets() ([]string, error) {
- infos, err := ioutil.ReadDir(s.rootDir)
+// ListBuckets returns a list of buckets from the list of directories in the
+// root directory.
+func (s *storageFS) ListBuckets() ([]Bucket, error) {
+ s.mtx.RLock()
+ defer s.mtx.RUnlock()
+ infos, err := os.ReadDir(s.rootDir)
if err != nil {
return nil, err
}
- buckets := []string{}
+ buckets := []Bucket{}
for _, info := range infos {
if info.IsDir() {
unescaped, err := url.PathUnescape(info.Name())
if err != nil {
- return nil, fmt.Errorf("failed to unescape object name %s: %s", info.Name(), err)
+ return nil, fmt.Errorf("failed to unescape object name %s: %w", info.Name(), err)
+ }
+ fileInfo, err := info.Info()
+ if err != nil {
+ return nil, fmt.Errorf("failed to get file info for %s: %w", info.Name(), err)
}
- buckets = append(buckets, unescaped)
+ buckets = append(buckets, Bucket{Name: unescaped, TimeCreated: timespecToTime(createTimeFromFileInfo(fileInfo))})
}
}
return buckets, nil
}
-// GetBucket checks if a bucket exists
-func (s *StorageFS) GetBucket(name string) error {
- _, err := os.Stat(filepath.Join(s.rootDir, url.PathEscape(name)))
- return err
+func timespecToTime(ts syscall.Timespec) time.Time {
+ return time.Unix(int64(ts.Sec), int64(ts.Nsec))
}
-// CreateObject stores an object
-func (s *StorageFS) CreateObject(obj Object) error {
- err := s.CreateBucket(obj.BucketName)
+func (s *storageFS) UpdateBucket(bucketName string, attrsToUpdate BucketAttrs) error {
+ if attrsToUpdate.VersioningEnabled {
+ return errors.New("not implemented: fs storage type does not support versioning yet")
+ }
+ encoded, err := json.Marshal(attrsToUpdate)
if err != nil {
return err
}
- encoded, err := json.Marshal(obj)
+ path := filepath.Join(s.rootDir, url.PathEscape(bucketName))
+ return writeFile(path+bucketMetadataSuffix, encoded, 0o600)
+}
+
+// GetBucket returns information about the given bucket, or an error if it
+// doesn't exist.
+func (s *storageFS) GetBucket(name string) (Bucket, error) {
+ s.mtx.RLock()
+ defer s.mtx.RUnlock()
+ path := filepath.Join(s.rootDir, url.PathEscape(name))
+ dirInfo, err := os.Stat(path)
if err != nil {
- return err
+ return Bucket{}, err
}
- return ioutil.WriteFile(filepath.Join(s.rootDir, url.PathEscape(obj.BucketName), url.PathEscape(obj.Name)), encoded, 0664)
+ attrs, err := getBucketAttributes(path)
+ if err != nil {
+ return Bucket{}, err
+ }
+ return Bucket{Name: name, VersioningEnabled: false, TimeCreated: timespecToTime(createTimeFromFileInfo(dirInfo)), DefaultEventBasedHold: attrs.DefaultEventBasedHold}, err
}
-// ListObjects lists the objects in a given bucket with a given prefix and delimeter
-func (s *StorageFS) ListObjects(bucketName string) ([]Object, error) {
- infos, err := ioutil.ReadDir(path.Join(s.rootDir, url.PathEscape(bucketName)))
+func getBucketAttributes(path string) (BucketAttrs, error) {
+ content, err := os.ReadFile(path + bucketMetadataSuffix)
if err != nil {
- return nil, err
+ if os.IsNotExist(err) {
+ return BucketAttrs{}, nil
+ }
+ return BucketAttrs{}, err
}
- objects := []Object{}
- for _, info := range infos {
- unescaped, err := url.PathUnescape(info.Name())
+ var attrs BucketAttrs
+ err = json.Unmarshal(content, &attrs)
+ if err != nil {
+ return BucketAttrs{}, err
+ }
+ return attrs, nil
+}
+
+// DeleteBucket removes the bucket from the backend.
+func (s *storageFS) DeleteBucket(name string) error {
+ objs, err := s.ListObjects(name, "", false)
+ if err != nil {
+ return BucketNotFound
+ }
+ if len(objs) > 0 {
+ return BucketNotEmpty
+ }
+
+ s.mtx.Lock()
+ defer s.mtx.Unlock()
+ return os.RemoveAll(filepath.Join(s.rootDir, url.PathEscape(name)))
+}
+
+// CreateObject stores an object as a regular file on disk. The backing content
+// for the object may be in the same file that's being updated, so a temporary
+// file is first created and then moved into place. This also makes it so any
+// object content readers currently open continue reading from the original
+// file instead of the newly created file.
+//
+// The crc32c checksum and md5 hash of the object content is calculated when
+// reading the object content. Any checksum or hash in the passed-in object
+// metadata is overwritten.
+func (s *storageFS) CreateObject(obj StreamingObject, conditions Conditions) (StreamingObject, error) {
+ if obj.Generation > 0 {
+ return StreamingObject{}, errors.New("not implemented: fs storage type does not support objects generation yet")
+ }
+
+ // Note: this was a quick fix for issue #701. Now that we have a way to
+ // persist object attributes, we should implement versioning in the
+ // filesystem backend and handle generations outside of the backends.
+ obj.Generation = time.Now().UnixNano() / 1000
+
+ s.mtx.Lock()
+ defer s.mtx.Unlock()
+ err := s.createBucket(obj.BucketName, BucketAttrs{VersioningEnabled: false})
+ if err != nil {
+ return StreamingObject{}, err
+ }
+
+ var activeGeneration int64
+ existingObj, err := s.getObject(obj.BucketName, obj.Name)
+ if err != nil {
+ activeGeneration = 0
+ } else {
+ activeGeneration = existingObj.Generation
+ }
+
+ if !conditions.ConditionsMet(activeGeneration) {
+ return StreamingObject{}, PreConditionFailed
+ }
+
+ path := filepath.Join(s.rootDir, url.PathEscape(obj.BucketName), obj.Name)
+ if err = os.MkdirAll(filepath.Dir(path), 0o700); err != nil {
+ return StreamingObject{}, err
+ }
+
+ // Nothing to do if this operation only creates directories
+ if strings.HasSuffix(obj.Name, "/") {
+ // TODO: populate Crc32c, Md5Hash, and Etag
+ return StreamingObject{obj.ObjectAttrs, noopSeekCloser{bytes.NewReader([]byte{})}}, nil
+ }
+
+ var buf bytes.Buffer
+ hasher := checksum.NewStreamingHasher()
+ objectContent := io.TeeReader(obj.Content, hasher)
+
+ if _, err = io.Copy(&buf, objectContent); err != nil {
+ return StreamingObject{}, err
+ }
+
+ if obj.Crc32c == "" {
+ obj.Crc32c = hasher.EncodedCrc32cChecksum()
+ }
+ if obj.Md5Hash == "" {
+ obj.Md5Hash = hasher.EncodedMd5Hash()
+ }
+ if obj.Etag == "" {
+ obj.Etag = obj.Md5Hash
+ }
+
+ // TODO: Handle if metadata is not present more gracefully?
+ encoded, err := json.Marshal(obj.ObjectAttrs)
+ if err != nil {
+ return StreamingObject{}, err
+ }
+
+ if err := writeFile(path, buf.Bytes(), 0o600); err != nil {
+ return StreamingObject{}, err
+ }
+
+ if err = s.mh.write(path, encoded); err != nil {
+ return StreamingObject{}, err
+ }
+
+ err = openObjectAndSetSize(&obj, path)
+
+ return obj, err
+}
+
+// ListObjects lists the objects in a given bucket with a given prefix and
+// delimeter.
+func (s *storageFS) ListObjects(bucketName string, prefix string, versions bool) ([]ObjectAttrs, error) {
+ s.mtx.RLock()
+ defer s.mtx.RUnlock()
+
+ objects := []ObjectAttrs{}
+ bucketPath := filepath.Join(s.rootDir, url.PathEscape(bucketName))
+ if err := filepath.Walk(bucketPath, func(path string, info fs.FileInfo, err error) error {
if err != nil {
- return nil, fmt.Errorf("failed to unescape object name %s: %s", info.Name(), err)
+ return err
+ }
+
+ objName, _ := filepath.Rel(bucketPath, path)
+ if s.mh.isSpecialFile(info.Name()) {
+ return nil
+ }
+ if info.IsDir() {
+ return nil
}
- object, err := s.GetObject(bucketName, unescaped)
+ if prefix != "" && !strings.HasPrefix(objName, prefix) {
+ return nil
+ }
+ objAttrs, err := s.getObjectAttrs(bucketName, objName)
if err != nil {
- return nil, err
+ return err
}
- objects = append(objects, object)
+ objects = append(objects, objAttrs)
+ return nil
+ }); err != nil {
+ return nil, err
}
return objects, nil
}
-// GetObject get an object by bucket and name
-func (s *StorageFS) GetObject(bucketName, objectName string) (Object, error) {
- encoded, err := ioutil.ReadFile(filepath.Join(s.rootDir, url.PathEscape(bucketName), url.PathEscape(objectName)))
+// GetObject get an object by bucket and name.
+func (s *storageFS) GetObject(bucketName, objectName string) (StreamingObject, error) {
+ s.mtx.RLock()
+ defer s.mtx.RUnlock()
+ return s.getObject(bucketName, objectName)
+}
+
+// GetObjectWithGeneration retrieves a specific version of the object. Not
+// implemented for this backend.
+func (s *storageFS) GetObjectWithGeneration(bucketName, objectName string, generation int64) (StreamingObject, error) {
+ obj, err := s.GetObject(bucketName, objectName)
if err != nil {
- return Object{}, err
+ return obj, err
}
- var obj Object
- err = json.Unmarshal(encoded, &obj)
- if err != nil {
- return Object{}, err
+ if obj.Generation != generation {
+ return obj, fmt.Errorf("generation mismatch, object generation is %v, requested generation is %v (note: filesystem backend does not support versioning)", obj.Generation, generation)
}
- obj.Name = objectName
- obj.BucketName = bucketName
return obj, nil
}
-// DeleteObject deletes an object by bucket and name
-func (s *StorageFS) DeleteObject(bucketName, objectName string) error {
+func (s *storageFS) getObject(bucketName, objectName string) (StreamingObject, error) {
+ attrs, err := s.getObjectAttrs(bucketName, objectName)
+ if err != nil {
+ return StreamingObject{}, err
+ }
+
+ obj := StreamingObject{ObjectAttrs: attrs}
+ path := filepath.Join(s.rootDir, url.PathEscape(bucketName), objectName)
+ err = openObjectAndSetSize(&obj, path)
+
+ return obj, err
+}
+
+func openObjectAndSetSize(obj *StreamingObject, path string) error {
+ info, err := os.Stat(path)
+ if err != nil {
+ return err
+ }
+
+ obj.Content = newLazyReader(path)
+ obj.Size = info.Size()
+
+ return nil
+}
+
+func (s *storageFS) getObjectAttrs(bucketName, objectName string) (ObjectAttrs, error) {
+ path := filepath.Join(s.rootDir, url.PathEscape(bucketName), objectName)
+ encoded, err := s.mh.read(path)
+ if err != nil {
+ return ObjectAttrs{}, err
+ }
+
+ var attrs ObjectAttrs
+ if err = json.Unmarshal(encoded, &attrs); err != nil {
+ return ObjectAttrs{}, err
+ }
+
+ info, err := os.Stat(path)
+ if err != nil {
+ return ObjectAttrs{}, fmt.Errorf("failed to stat: %w", err)
+ }
+
+ attrs.Name = filepath.ToSlash(objectName)
+ attrs.BucketName = bucketName
+ attrs.Size = info.Size()
+ return attrs, nil
+}
+
+// DeleteObject deletes an object by bucket and name.
+func (s *storageFS) DeleteObject(bucketName, objectName string) error {
+ s.mtx.Lock()
+ defer s.mtx.Unlock()
if objectName == "" {
- return fmt.Errorf("can't delete object with empty name")
+ return errors.New("can't delete object with empty name")
}
- return os.Remove(filepath.Join(s.rootDir, url.PathEscape(bucketName), url.PathEscape(objectName)))
+ path := filepath.Join(s.rootDir, url.PathEscape(bucketName), objectName)
+ if err := s.mh.remove(path); err != nil {
+ return err
+ }
+ return os.Remove(path)
+}
+
+func (s *storageFS) PatchObject(bucketName, objectName string, attrsToUpdate ObjectAttrs) (StreamingObject, error) {
+ obj, err := s.GetObject(bucketName, objectName)
+ if err != nil {
+ return StreamingObject{}, err
+ }
+ defer obj.Close()
+
+ obj.patch(attrsToUpdate)
+ obj.Generation = 0 // reset generation id
+ return s.CreateObject(obj, NoConditions{})
+}
+
+func (s *storageFS) UpdateObject(bucketName, objectName string, attrsToUpdate ObjectAttrs) (StreamingObject, error) {
+ obj, err := s.GetObject(bucketName, objectName)
+ if err != nil {
+ return StreamingObject{}, err
+ }
+ defer obj.Close()
+
+ if attrsToUpdate.Metadata != nil {
+ obj.Metadata = map[string]string{}
+ }
+ obj.patch(attrsToUpdate)
+ obj.Generation = 0 // reset generation id
+ return s.CreateObject(obj, NoConditions{})
+}
+
+type concatenatedContent struct {
+ io.Reader
+}
+
+func (c concatenatedContent) Close() error {
+ return errors.New("not implemented")
+}
+
+func (c concatenatedContent) Seek(offset int64, whence int) (int64, error) {
+ return 0, errors.New("not implemented")
+}
+
+func concatObjectReaders(objects []StreamingObject) io.ReadSeekCloser {
+ readers := make([]io.Reader, len(objects))
+ for i := range objects {
+ readers[i] = objects[i].Content
+ }
+ return concatenatedContent{io.MultiReader(readers...)}
+}
+
+func (s *storageFS) ComposeObject(bucketName string, objectNames []string, destinationName string, metadata map[string]string, contentType string) (StreamingObject, error) {
+ var sourceObjects []StreamingObject
+ for _, n := range objectNames {
+ obj, err := s.GetObject(bucketName, n)
+ if err != nil {
+ return StreamingObject{}, err
+ }
+ defer obj.Close()
+ sourceObjects = append(sourceObjects, obj)
+ }
+
+ dest := StreamingObject{
+ ObjectAttrs: ObjectAttrs{
+ BucketName: bucketName,
+ Name: destinationName,
+ ContentType: contentType,
+ Created: time.Now().String(),
+ },
+ }
+
+ dest.Content = concatObjectReaders(sourceObjects)
+ dest.Metadata = metadata
+
+ result, err := s.CreateObject(dest, NoConditions{})
+ if err != nil {
+ return result, err
+ }
+
+ return result, nil
}
diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/lazy_file.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/lazy_file.go
new file mode 100644
index 0000000000000..8c30a3149213c
--- /dev/null
+++ b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/lazy_file.go
@@ -0,0 +1,53 @@
+// Copyright 2022 Francisco Souza. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package backend
+
+import (
+ "io"
+ "os"
+ "sync"
+)
+
+type lazyReader struct {
+ filename string
+ once *sync.Once
+ f *os.File
+ err error
+}
+
+func newLazyReader(filename string) io.ReadSeekCloser {
+ return &lazyReader{
+ filename: filename,
+ once: &sync.Once{},
+ }
+}
+
+func (r *lazyReader) open() {
+ r.f, r.err = os.Open(r.filename)
+}
+
+func (r *lazyReader) Read(p []byte) (int, error) {
+ r.once.Do(r.open)
+ if r.err != nil {
+ return 0, r.err
+ }
+ return r.f.Read(p)
+}
+
+func (r *lazyReader) Seek(offset int64, whence int) (int64, error) {
+ r.once.Do(r.open)
+ if r.err != nil {
+ return 0, r.err
+ }
+ return r.f.Seek(offset, whence)
+}
+
+func (r *lazyReader) Close() error {
+ r.once.Do(r.open)
+ if r.err != nil {
+ return r.err
+ }
+ return r.f.Close()
+}
diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/memory.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/memory.go
index 257843ad36308..c32f06abf2cd4 100644
--- a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/memory.go
+++ b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/memory.go
@@ -7,118 +7,386 @@ package backend
import (
"errors"
"fmt"
+ "io"
+ "strings"
"sync"
+ "time"
+
+ "github.com/fsouza/fake-gcs-server/internal/checksum"
)
-// StorageMemory is an implementation of the backend storage that stores data in memory
-type StorageMemory struct {
- buckets map[string][]Object
+const timestampFormat = "2006-01-02T15:04:05.999999Z07:00"
+
+// storageMemory is an implementation of the backend storage that stores data
+// in memory.
+type storageMemory struct {
+ buckets map[string]bucketInMemory
mtx sync.RWMutex
}
-// NewStorageMemory creates an instance of StorageMemory
-func NewStorageMemory(objects []Object) Storage {
- s := &StorageMemory{
- buckets: make(map[string][]Object),
+type bucketInMemory struct {
+ Bucket
+ // maybe we can refactor how the memory backend works? no need to store
+ // Object instances.
+ activeObjects []Object
+ archivedObjects []Object
+}
+
+func newBucketInMemory(name string, versioningEnabled bool, bucketAttrs BucketAttrs) bucketInMemory {
+ return bucketInMemory{Bucket{name, versioningEnabled, time.Now(), bucketAttrs.DefaultEventBasedHold}, []Object{}, []Object{}}
+}
+
+func (bm *bucketInMemory) addObject(obj Object) Object {
+ if obj.Crc32c == "" {
+ obj.Crc32c = checksum.EncodedCrc32cChecksum(obj.Content)
+ }
+ if obj.Md5Hash == "" {
+ obj.Md5Hash = checksum.EncodedMd5Hash(obj.Content)
+ }
+ if obj.Etag == "" {
+ obj.Etag = obj.Md5Hash
+ }
+ if obj.Size == 0 {
+ obj.Size = int64(len(obj.Content))
+ }
+ obj.Generation = getNewGenerationIfZero(obj.Generation)
+ index := findObject(obj, bm.activeObjects, false)
+ if index >= 0 {
+ if bm.VersioningEnabled {
+ bm.activeObjects[index].Deleted = time.Now().Format(timestampFormat)
+ bm.cpToArchive(bm.activeObjects[index])
+ }
+ bm.activeObjects[index] = obj
+ } else {
+ bm.activeObjects = append(bm.activeObjects, obj)
+ }
+
+ return obj
+}
+
+func getNewGenerationIfZero(generation int64) int64 {
+ if generation == 0 {
+ return time.Now().UnixNano() / 1000
+ }
+ return generation
+}
+
+func (bm *bucketInMemory) deleteObject(obj Object, matchGeneration bool) {
+ index := findObject(obj, bm.activeObjects, matchGeneration)
+ if index < 0 {
+ return
+ }
+ if bm.VersioningEnabled {
+ obj.Deleted = time.Now().Format(timestampFormat)
+ bm.mvToArchive(obj)
+ } else {
+ bm.deleteFromObjectList(obj, true)
+ }
+}
+
+func (bm *bucketInMemory) cpToArchive(obj Object) {
+ bm.archivedObjects = append(bm.archivedObjects, obj)
+}
+
+func (bm *bucketInMemory) mvToArchive(obj Object) {
+ bm.cpToArchive(obj)
+ bm.deleteFromObjectList(obj, true)
+}
+
+func (bm *bucketInMemory) deleteFromObjectList(obj Object, active bool) {
+ objects := bm.activeObjects
+ if !active {
+ objects = bm.archivedObjects
+ }
+ index := findObject(obj, objects, !active)
+ objects[index] = objects[len(objects)-1]
+ if active {
+ bm.activeObjects = objects[:len(objects)-1]
+ } else {
+ bm.archivedObjects = objects[:len(objects)-1]
+ }
+}
+
+// findObject looks for an object in the given list and returns the index where it
+// was found, or -1 if the object doesn't exist.
+func findObject(obj Object, objectList []Object, matchGeneration bool) int {
+ for i, o := range objectList {
+ if matchGeneration && obj.ID() == o.ID() {
+ return i
+ }
+ if !matchGeneration && obj.IDNoGen() == o.IDNoGen() {
+ return i
+ }
+ }
+ return -1
+}
+
+// findLastObjectGeneration looks for an object in the given list and returns the
+// highest generation found for it, or 0 if the object doesn't exist.
+func findLastObjectGeneration(obj Object, objectList []Object) int64 {
+ highScore := int64(0)
+ for _, o := range objectList {
+ if obj.IDNoGen() == o.IDNoGen() && o.Generation > highScore {
+ highScore = o.Generation
+ }
+ }
+ return highScore
+}
+
+// NewStorageMemory creates an instance of StorageMemory.
+func NewStorageMemory(objects []StreamingObject) (Storage, error) {
+ s := &storageMemory{
+ buckets: make(map[string]bucketInMemory),
}
for _, o := range objects {
- s.buckets[o.BucketName] = append(s.buckets[o.BucketName], o)
+ bufferedObject, err := o.BufferedObject()
+ if err != nil {
+ return nil, err
+ }
+ s.CreateBucket(o.BucketName, BucketAttrs{false, false})
+ bucket := s.buckets[o.BucketName]
+ bucket.addObject(bufferedObject)
+ s.buckets[o.BucketName] = bucket
}
- return s
+ return s, nil
}
-// CreateBucket creates a bucket
-func (s *StorageMemory) CreateBucket(name string) error {
- s.mtx.Lock()
- defer s.mtx.Unlock()
- if _, ok := s.buckets[name]; !ok {
- s.buckets[name] = nil
+func (s *storageMemory) UpdateBucket(bucketName string, attrsToUpdate BucketAttrs) error {
+ bucketInMemory, err := s.getBucketInMemory(bucketName)
+ if err != nil {
+ return err
}
+ bucketInMemory.DefaultEventBasedHold = attrsToUpdate.DefaultEventBasedHold
+ bucketInMemory.VersioningEnabled = attrsToUpdate.VersioningEnabled
+ s.buckets[bucketName] = bucketInMemory
return nil
}
-// ListBuckets lists buckets
-func (s *StorageMemory) ListBuckets() ([]string, error) {
+// CreateBucket creates a bucket.
+func (s *storageMemory) CreateBucket(name string, bucketAttrs BucketAttrs) error {
s.mtx.Lock()
defer s.mtx.Unlock()
- buckets := []string{}
- for bucket := range s.buckets {
- buckets = append(buckets, bucket)
+ bucket, err := s.getBucketInMemory(name)
+ if err == nil {
+ if bucket.VersioningEnabled != bucketAttrs.VersioningEnabled {
+ return fmt.Errorf("a bucket named %s already exists, but with different properties", name)
+ }
+ return nil
+ }
+ s.buckets[name] = newBucketInMemory(name, bucketAttrs.VersioningEnabled, bucketAttrs)
+ return nil
+}
+
+// ListBuckets lists buckets currently registered in the backend.
+func (s *storageMemory) ListBuckets() ([]Bucket, error) {
+ s.mtx.RLock()
+ defer s.mtx.RUnlock()
+ buckets := []Bucket{}
+ for _, bucketInMemory := range s.buckets {
+ buckets = append(buckets, Bucket{bucketInMemory.Name, bucketInMemory.VersioningEnabled, bucketInMemory.TimeCreated, false})
}
return buckets, nil
}
-// GetBucket checks if a bucket exists
-func (s *StorageMemory) GetBucket(name string) error {
- s.mtx.Lock()
- defer s.mtx.Unlock()
+// GetBucket retrieves the bucket information from the backend.
+func (s *storageMemory) GetBucket(name string) (Bucket, error) {
+ s.mtx.RLock()
+ defer s.mtx.RUnlock()
+ bucketInMemory, err := s.getBucketInMemory(name)
+ return Bucket{bucketInMemory.Name, bucketInMemory.VersioningEnabled, bucketInMemory.TimeCreated, bucketInMemory.DefaultEventBasedHold}, err
+}
- if _, ok := s.buckets[name]; !ok {
- return fmt.Errorf("no bucket named %s", name)
+func (s *storageMemory) getBucketInMemory(name string) (bucketInMemory, error) {
+ if bucketInMemory, found := s.buckets[name]; found {
+ return bucketInMemory, nil
}
- return nil
+ return bucketInMemory{}, fmt.Errorf("no bucket named %s", name)
}
-// CreateObject stores an object
-func (s *StorageMemory) CreateObject(obj Object) error {
+// DeleteBucket removes the bucket from the backend.
+func (s *storageMemory) DeleteBucket(name string) error {
+ objs, err := s.ListObjects(name, "", false)
+ if err != nil {
+ return BucketNotFound
+ }
+ if len(objs) > 0 {
+ return BucketNotEmpty
+ }
+
s.mtx.Lock()
defer s.mtx.Unlock()
-
- index := s.findObject(obj)
- if index < 0 {
- s.buckets[obj.BucketName] = append(s.buckets[obj.BucketName], obj)
- } else {
- s.buckets[obj.BucketName][index] = obj
- }
+ delete(s.buckets, name)
return nil
}
-// findObject looks for an object in its bucket and return the index where it
-// was found, or -1 if the object doesn't exist.
-//
-// It doesn't lock the mutex, callers must lock the mutex before calling this
-// method.
-func (s *StorageMemory) findObject(obj Object) int {
- for i, o := range s.buckets[obj.BucketName] {
- if obj.ID() == o.ID() {
- return i
- }
+// CreateObject stores an object in the backend.
+func (s *storageMemory) CreateObject(obj StreamingObject, conditions Conditions) (StreamingObject, error) {
+ s.mtx.Lock()
+ defer s.mtx.Unlock()
+ bucketInMemory, err := s.getBucketInMemory(obj.BucketName)
+ if err != nil {
+ bucketInMemory = newBucketInMemory(obj.BucketName, false, BucketAttrs{})
}
- return -1
+ bufferedObj, err := obj.BufferedObject()
+ currentGeneration := findLastObjectGeneration(bufferedObj, bucketInMemory.activeObjects)
+ if !conditions.ConditionsMet(currentGeneration) {
+ return StreamingObject{}, PreConditionFailed
+ }
+ if err != nil {
+ return StreamingObject{}, err
+ }
+ newObj := bucketInMemory.addObject(bufferedObj)
+ s.buckets[obj.BucketName] = bucketInMemory
+ return newObj.StreamingObject(), nil
}
-// ListObjects lists the objects in a given bucket with a given prefix and delimeter
-func (s *StorageMemory) ListObjects(bucketName string) ([]Object, error) {
+// ListObjects lists the objects in a given bucket with a given prefix and
+// delimiter.
+func (s *storageMemory) ListObjects(bucketName string, prefix string, versions bool) ([]ObjectAttrs, error) {
s.mtx.RLock()
defer s.mtx.RUnlock()
- objects, ok := s.buckets[bucketName]
- if !ok {
- return nil, errors.New("bucket not found")
+ bucketInMemory, err := s.getBucketInMemory(bucketName)
+ if err != nil {
+ return []ObjectAttrs{}, err
+ }
+ objAttrs := make([]ObjectAttrs, 0, len(bucketInMemory.activeObjects))
+ for _, obj := range bucketInMemory.activeObjects {
+ if prefix != "" && !strings.HasPrefix(obj.Name, prefix) {
+ continue
+ }
+ objAttrs = append(objAttrs, obj.ObjectAttrs)
+ }
+ if !versions {
+ return objAttrs, nil
}
- return objects, nil
+
+ archvObjs := make([]ObjectAttrs, 0, len(bucketInMemory.archivedObjects))
+ for _, obj := range bucketInMemory.archivedObjects {
+ if prefix != "" && !strings.HasPrefix(obj.Name, prefix) {
+ continue
+ }
+ archvObjs = append(archvObjs, obj.ObjectAttrs)
+ }
+ return append(objAttrs, archvObjs...), nil
}
-// GetObject get an object by bucket and name
-func (s *StorageMemory) GetObject(bucketName, objectName string) (Object, error) {
- obj := Object{BucketName: bucketName, Name: objectName}
+func (s *storageMemory) GetObject(bucketName, objectName string) (StreamingObject, error) {
+ return s.GetObjectWithGeneration(bucketName, objectName, 0)
+}
+
+// GetObjectWithGeneration retrieves a specific version of the object.
+func (s *storageMemory) GetObjectWithGeneration(bucketName, objectName string, generation int64) (StreamingObject, error) {
s.mtx.RLock()
defer s.mtx.RUnlock()
- index := s.findObject(obj)
+ bucketInMemory, err := s.getBucketInMemory(bucketName)
+ if err != nil {
+ return StreamingObject{}, err
+ }
+ matchGeneration := false
+ obj := Object{ObjectAttrs: ObjectAttrs{BucketName: bucketName, Name: objectName}}
+ listToConsider := bucketInMemory.activeObjects
+ if generation != 0 {
+ matchGeneration = true
+ obj.Generation = generation
+ listToConsider = append(listToConsider, bucketInMemory.archivedObjects...)
+ }
+ index := findObject(obj, listToConsider, matchGeneration)
if index < 0 {
- return obj, errors.New("object not found")
+ return obj.StreamingObject(), errors.New("object not found")
}
- return s.buckets[bucketName][index], nil
+
+ return listToConsider[index].StreamingObject(), nil
}
-// DeleteObject deletes an object by bucket and name
-func (s *StorageMemory) DeleteObject(bucketName, objectName string) error {
- obj := Object{BucketName: bucketName, Name: objectName}
- index := s.findObject(obj)
- if index < 0 {
- return fmt.Errorf("no such object in bucket %s: %s", bucketName, objectName)
+func (s *storageMemory) DeleteObject(bucketName, objectName string) error {
+ obj, err := s.GetObject(bucketName, objectName)
+ if err != nil {
+ return err
+ }
+ s.mtx.Lock()
+ defer s.mtx.Unlock()
+ bucketInMemory, err := s.getBucketInMemory(bucketName)
+ if err != nil {
+ return err
+ }
+ bufferedObject, err := obj.BufferedObject()
+ if err != nil {
+ return err
}
- bucket := s.buckets[obj.BucketName]
- bucket[index] = bucket[len(bucket)-1]
- s.buckets[obj.BucketName] = bucket[:len(bucket)-1]
+ bucketInMemory.deleteObject(bufferedObject, true)
+ s.buckets[bucketName] = bucketInMemory
return nil
}
+
+func (s *storageMemory) PatchObject(bucketName, objectName string, attrsToUpdate ObjectAttrs) (StreamingObject, error) {
+ obj, err := s.GetObject(bucketName, objectName)
+ if err != nil {
+ return StreamingObject{}, err
+ }
+
+ obj.patch(attrsToUpdate)
+ s.CreateObject(obj, NoConditions{})
+ return obj, nil
+}
+
+// UpdateObject replaces an object metadata, custom time, and acl.
+func (s *storageMemory) UpdateObject(bucketName, objectName string, attrsToUpdate ObjectAttrs) (StreamingObject, error) {
+ obj, err := s.GetObject(bucketName, objectName)
+ if err != nil {
+ return StreamingObject{}, err
+ }
+
+ if attrsToUpdate.Metadata != nil {
+ obj.Metadata = map[string]string{}
+ }
+ obj.patch(attrsToUpdate)
+ s.CreateObject(obj, NoConditions{})
+ return obj, nil
+}
+
+func (s *storageMemory) ComposeObject(bucketName string, objectNames []string, destinationName string, metadata map[string]string, contentType string) (StreamingObject, error) {
+ var data []byte
+ for _, n := range objectNames {
+ obj, err := s.GetObject(bucketName, n)
+ if err != nil {
+ return StreamingObject{}, err
+ }
+ objectContent, err := io.ReadAll(obj.Content)
+ if err != nil {
+ return StreamingObject{}, err
+ }
+ data = append(data, objectContent...)
+ }
+
+ var dest Object
+ streamingDest, err := s.GetObject(bucketName, destinationName)
+ if err != nil {
+ dest = Object{
+ ObjectAttrs: ObjectAttrs{
+ BucketName: bucketName,
+ Name: destinationName,
+ ContentType: contentType,
+ Created: time.Now().String(),
+ },
+ }
+ } else {
+ dest, err = streamingDest.BufferedObject()
+ if err != nil {
+ return StreamingObject{}, err
+ }
+ }
+
+ dest.Content = data
+ dest.Crc32c = ""
+ dest.Md5Hash = ""
+ dest.Etag = ""
+ dest.Size = 0
+ dest.Metadata = metadata
+
+ result, err := s.CreateObject(dest.StreamingObject(), NoConditions{})
+ if err != nil {
+ return result, err
+ }
+
+ return result, nil
+}
diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/metadata.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/metadata.go
new file mode 100644
index 0000000000000..6d9d2313d27dc
--- /dev/null
+++ b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/metadata.go
@@ -0,0 +1,13 @@
+// Copyright 2022 Francisco Souza. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package backend
+
+type metadataHandler interface {
+ write(path string, encoded []byte) error
+ read(path string) ([]byte, error)
+ remove(path string) error
+ isSpecialFile(path string) bool
+ rename(pathSrc, pathDst string) error
+}
diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/metadata_file.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/metadata_file.go
new file mode 100644
index 0000000000000..94cce654a8c69
--- /dev/null
+++ b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/metadata_file.go
@@ -0,0 +1,34 @@
+// Copyright 2022 Francisco Souza. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package backend
+
+import (
+ "os"
+ "strings"
+)
+
+const metadataSuffix = ".metadata"
+
+type metadataFile struct{}
+
+func (m metadataFile) write(path string, encoded []byte) error {
+ return writeFile(path+metadataSuffix, encoded, 0o600)
+}
+
+func (m metadataFile) read(path string) ([]byte, error) {
+ return os.ReadFile(path + metadataSuffix)
+}
+
+func (m metadataFile) isSpecialFile(path string) bool {
+ return strings.HasSuffix(path, metadataSuffix)
+}
+
+func (m metadataFile) remove(path string) error {
+ return os.Remove(path + metadataSuffix)
+}
+
+func (m metadataFile) rename(pathSrc, pathDst string) error {
+ return os.Rename(pathSrc+metadataSuffix, pathDst+metadataSuffix)
+}
diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/metadata_xattr.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/metadata_xattr.go
new file mode 100644
index 0000000000000..9d40580120be6
--- /dev/null
+++ b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/metadata_xattr.go
@@ -0,0 +1,33 @@
+// Copyright 2022 Francisco Souza. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package backend
+
+import (
+ "github.com/pkg/xattr"
+)
+
+const xattrKey = "user.metadata"
+
+type metadataXattr struct{}
+
+func (m metadataXattr) write(path string, encoded []byte) error {
+ return xattr.Set(path, xattrKey, encoded)
+}
+
+func (m metadataXattr) read(path string) ([]byte, error) {
+ return xattr.Get(path, xattrKey)
+}
+
+func (m metadataXattr) isSpecialFile(path string) bool {
+ return false
+}
+
+func (m metadataXattr) remove(path string) error {
+ return nil
+}
+
+func (m metadataXattr) rename(pathSrc, pathDst string) error {
+ return nil
+}
diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/object.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/object.go
index e0ca2b12ec571..63bf8d6d147c3 100644
--- a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/object.go
+++ b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/object.go
@@ -4,16 +4,104 @@
package backend
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "reflect"
+
+ "cloud.google.com/go/storage"
+)
+
+// ObjectAttrs represents the meta-data without its contents.
+type ObjectAttrs struct {
+ BucketName string `json:"-"`
+ Name string `json:"-"`
+ Size int64 `json:"-"`
+ ContentType string
+ ContentEncoding string
+ ContentDisposition string
+ CacheControl string
+ Crc32c string
+ Md5Hash string
+ Etag string
+ ACL []storage.ACLRule
+ Metadata map[string]string
+ Created string
+ Deleted string
+ Updated string
+ CustomTime string
+ Generation int64
+}
+
+// ID is used for comparing objects.
+func (o *ObjectAttrs) ID() string {
+ return fmt.Sprintf("%s#%d", o.IDNoGen(), o.Generation)
+}
+
+// IDNoGen does not consider the generation field.
+func (o *ObjectAttrs) IDNoGen() string {
+ return fmt.Sprintf("%s/%s", o.BucketName, o.Name)
+}
+
// Object represents the object that is stored within the fake server.
type Object struct {
- BucketName string `json:"-"`
- Name string `json:"-"`
- Content []byte
- Crc32c string
- Md5Hash string
+ ObjectAttrs
+ Content []byte
+}
+
+type noopSeekCloser struct {
+ io.ReadSeeker
+}
+
+func (n noopSeekCloser) Close() error {
+ return nil
+}
+
+func (o Object) StreamingObject() StreamingObject {
+ return StreamingObject{
+ ObjectAttrs: o.ObjectAttrs,
+ Content: noopSeekCloser{bytes.NewReader(o.Content)},
+ }
+}
+
+type StreamingObject struct {
+ ObjectAttrs
+ Content io.ReadSeekCloser
+}
+
+func (o *StreamingObject) Close() error {
+ if o != nil && o.Content != nil {
+ return o.Content.Close()
+ }
+ return nil
+}
+
+// Convert this StreamingObject to a (buffered) Object.
+func (o *StreamingObject) BufferedObject() (Object, error) {
+ data, err := io.ReadAll(o.Content)
+ return Object{
+ ObjectAttrs: o.ObjectAttrs,
+ Content: data,
+ }, err
}
-// ID is useful for comparing objects
-func (o *Object) ID() string {
- return o.BucketName + "/" + o.Name
+func (o *StreamingObject) patch(attrsToUpdate ObjectAttrs) {
+ currObjValues := reflect.ValueOf(&(o.ObjectAttrs)).Elem()
+ currObjType := currObjValues.Type()
+ newObjValues := reflect.ValueOf(attrsToUpdate)
+ for i := 0; i < newObjValues.NumField(); i++ {
+ if reflect.Value.IsZero(newObjValues.Field(i)) {
+ continue
+ } else if currObjType.Field(i).Name == "Metadata" {
+ if o.Metadata == nil {
+ o.Metadata = map[string]string{}
+ }
+ for k, v := range attrsToUpdate.Metadata {
+ o.Metadata[k] = v
+ }
+ } else {
+ currObjValues.Field(i).Set(newObjValues.Field(i))
+ }
+ }
}
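The reflection-based `patch` above copies every non-zero field from `attrsToUpdate` into the object and merges, rather than replaces, the `Metadata` map. A self-contained sketch of the same merge semantics, using a cut-down illustrative struct (not the real `ObjectAttrs`):

```go
package main

import (
	"fmt"
	"reflect"
)

// attrs is a cut-down stand-in for ObjectAttrs, for illustration only.
type attrs struct {
	ContentType string
	Metadata    map[string]string
}

// patch copies non-zero fields from upd into dst, merging Metadata keys.
func patch(dst *attrs, upd attrs) {
	dstV := reflect.ValueOf(dst).Elem()
	updV := reflect.ValueOf(upd)
	for i := 0; i < updV.NumField(); i++ {
		if updV.Field(i).IsZero() {
			continue
		}
		if dstV.Type().Field(i).Name == "Metadata" {
			if dst.Metadata == nil {
				dst.Metadata = map[string]string{}
			}
			for k, v := range upd.Metadata {
				dst.Metadata[k] = v
			}
			continue
		}
		dstV.Field(i).Set(updV.Field(i))
	}
}

func main() {
	o := attrs{ContentType: "text/plain", Metadata: map[string]string{"a": "1"}}
	patch(&o, attrs{Metadata: map[string]string{"b": "2"}})
	fmt.Println(o.ContentType, o.Metadata) // text/plain map[a:1 b:2]
}
```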
diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/storage.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/storage.go
index c77583462fdb5..da8e8e51e2128 100644
--- a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/storage.go
+++ b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/storage.go
@@ -2,15 +2,43 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+// Package backend provides the backends used by fake-gcs-server.
package backend
-// Storage is the generic interface for implementing the backend storage of the server
+type Conditions interface {
+ ConditionsMet(activeGeneration int64) bool
+}
+
+type NoConditions struct{}
+
+func (NoConditions) ConditionsMet(int64) bool {
+ return true
+}
+
+// Storage is the generic interface for implementing the backend storage of the
+// server.
type Storage interface {
- CreateBucket(name string) error
- ListBuckets() ([]string, error)
- GetBucket(name string) error
- CreateObject(obj Object) error
- ListObjects(bucketName string) ([]Object, error)
- GetObject(bucketName, objectName string) (Object, error)
+ CreateBucket(name string, bucketAttrs BucketAttrs) error
+ ListBuckets() ([]Bucket, error)
+ GetBucket(name string) (Bucket, error)
+ UpdateBucket(name string, attrsToUpdate BucketAttrs) error
+ DeleteBucket(name string) error
+ CreateObject(obj StreamingObject, conditions Conditions) (StreamingObject, error)
+ ListObjects(bucketName string, prefix string, versions bool) ([]ObjectAttrs, error)
+ GetObject(bucketName, objectName string) (StreamingObject, error)
+ GetObjectWithGeneration(bucketName, objectName string, generation int64) (StreamingObject, error)
DeleteObject(bucketName, objectName string) error
+ PatchObject(bucketName, objectName string, attrsToUpdate ObjectAttrs) (StreamingObject, error)
+ UpdateObject(bucketName, objectName string, attrsToUpdate ObjectAttrs) (StreamingObject, error)
+ ComposeObject(bucketName string, objectNames []string, destinationName string, metadata map[string]string, contentType string) (StreamingObject, error)
}
+
+type Error string
+
+func (e Error) Error() string { return string(e) }
+
+const (
+ BucketNotFound = Error("bucket not found")
+ BucketNotEmpty = Error("bucket must be empty prior to deletion")
+ PreConditionFailed = Error("Precondition failed")
+)
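The `Conditions` hook lets `CreateObject` enforce GCS-style generation preconditions (the memory backend returns `PreConditionFailed` when they fail). A minimal sketch of what a condition implementation could look like — `ifGenerationMatch` below is illustrative, not part of this change:

```go
package main

import "fmt"

// conditions mirrors the backend.Conditions interface introduced above.
type conditions interface {
	ConditionsMet(activeGeneration int64) bool
}

// ifGenerationMatch is a hypothetical condition mimicking GCS
// ifGenerationMatch semantics: the write is allowed only when the object's
// current generation equals the expected one (0 meaning "must not exist").
type ifGenerationMatch struct{ expected int64 }

func (c ifGenerationMatch) ConditionsMet(activeGeneration int64) bool {
	return activeGeneration == c.expected
}

func main() {
	var cond conditions = ifGenerationMatch{expected: 0}
	fmt.Println(cond.ConditionsMet(0))  // true: object does not exist yet
	fmt.Println(cond.ConditionsMet(42)) // false: a backend would return PreConditionFailed
}
```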
diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/time_bsd.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/time_bsd.go
new file mode 100644
index 0000000000000..a01893f6be036
--- /dev/null
+++ b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/time_bsd.go
@@ -0,0 +1,19 @@
+// Copyright 2019 Francisco Souza. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build darwin || freebsd
+
+package backend
+
+import (
+ "os"
+ "syscall"
+)
+
+func createTimeFromFileInfo(input os.FileInfo) syscall.Timespec {
+ if statT, ok := input.Sys().(*syscall.Stat_t); ok {
+ return statT.Ctimespec
+ }
+ return syscall.Timespec{}
+}
diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/time_linux.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/time_linux.go
new file mode 100644
index 0000000000000..0f959e9b74c6c
--- /dev/null
+++ b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/time_linux.go
@@ -0,0 +1,18 @@
+// Copyright 2019 Francisco Souza. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package backend
+
+import (
+ "os"
+ "syscall"
+)
+
+func createTimeFromFileInfo(input os.FileInfo) syscall.Timespec {
+ if statT, ok := input.Sys().(*syscall.Stat_t); ok {
+		// Note: Ctime is the inode change time, not the creation time; creation
+		// metadata is not persisted in a separate file yet.
+ return statT.Ctim
+ }
+ return syscall.Timespec{}
+}
diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/time_windows.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/time_windows.go
new file mode 100644
index 0000000000000..54c7bc9b0badd
--- /dev/null
+++ b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/time_windows.go
@@ -0,0 +1,18 @@
+// Copyright 2019 Francisco Souza. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package backend
+
+import (
+ "os"
+ "syscall"
+)
+
+func createTimeFromFileInfo(input os.FileInfo) syscall.Timespec {
+ if statT, ok := input.Sys().(*syscall.Win32FileAttributeData); ok {
+ nsec := statT.CreationTime.Nanoseconds()
+ return syscall.NsecToTimespec(nsec)
+ }
+ return syscall.Timespec{}
+}
diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/writefile_unix.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/writefile_unix.go
new file mode 100644
index 0000000000000..2e5e510fbc3d4
--- /dev/null
+++ b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/writefile_unix.go
@@ -0,0 +1,17 @@
+// Copyright 2022 Francisco Souza. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !windows
+
+package backend
+
+import (
+ "os"
+
+ "github.com/google/renameio/v2"
+)
+
+func writeFile(filename string, data []byte, perm os.FileMode) error {
+ return renameio.WriteFile(filename, data, perm)
+}
diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/writefile_windows.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/writefile_windows.go
new file mode 100644
index 0000000000000..2d6600c803024
--- /dev/null
+++ b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/writefile_windows.go
@@ -0,0 +1,13 @@
+// Copyright 2022 Francisco Souza. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package backend
+
+import (
+ "os"
+)
+
+func writeFile(filename string, data []byte, perm os.FileMode) error {
+ return os.WriteFile(filename, data, perm)
+}
diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/checksum/checksum.go b/vendor/github.com/fsouza/fake-gcs-server/internal/checksum/checksum.go
new file mode 100644
index 0000000000000..c247336d8e65e
--- /dev/null
+++ b/vendor/github.com/fsouza/fake-gcs-server/internal/checksum/checksum.go
@@ -0,0 +1,70 @@
+// Copyright 2021 Francisco Souza. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package checksum
+
+import (
+ "crypto/md5"
+ "encoding/base64"
+ "hash"
+ "hash/crc32"
+)
+
+var crc32cTable = crc32.MakeTable(crc32.Castagnoli)
+
+func crc32cChecksum(content []byte) []byte {
+ checksummer := crc32.New(crc32cTable)
+ checksummer.Write(content)
+ return checksummer.Sum(make([]byte, 0, 4))
+}
+
+func EncodedChecksum(checksum []byte) string {
+ return base64.StdEncoding.EncodeToString(checksum)
+}
+
+func EncodedCrc32cChecksum(content []byte) string {
+ return EncodedChecksum(crc32cChecksum(content))
+}
+
+func MD5Hash(b []byte) []byte {
+ h := md5.New()
+ h.Write(b)
+ return h.Sum(nil)
+}
+
+func EncodedHash(hash []byte) string {
+ return base64.StdEncoding.EncodeToString(hash)
+}
+
+func EncodedMd5Hash(content []byte) string {
+ return EncodedHash(MD5Hash(content))
+}
+
+type StreamingHasher struct {
+ crc32 hash.Hash32
+ md5 hash.Hash
+}
+
+func NewStreamingHasher() *StreamingHasher {
+ return &StreamingHasher{
+ crc32: crc32.New(crc32cTable),
+ md5: md5.New(),
+ }
+}
+
+func (s *StreamingHasher) Write(p []byte) (n int, err error) {
+ n, err = s.crc32.Write(p)
+ if err != nil {
+ return n, err
+ }
+ return s.md5.Write(p)
+}
+
+func (s *StreamingHasher) EncodedCrc32cChecksum() string {
+ return EncodedChecksum(s.crc32.Sum(nil))
+}
+
+func (s *StreamingHasher) EncodedMd5Hash() string {
+ return EncodedHash(s.md5.Sum(nil))
+}
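The `StreamingHasher` computes both checksums in one pass over streamed content. A standalone sketch of the same idea using only the standard library (the chunked input is a placeholder for an upload stream):

```go
package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
	"hash/crc32"
)

func main() {
	// Same single-pass idea as StreamingHasher above: feed chunks to both
	// hashes as they arrive, then base64-encode the sums.
	crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
	sum := md5.New()
	for _, chunk := range [][]byte{[]byte("hello, "), []byte("world")} {
		crc.Write(chunk)
		sum.Write(chunk)
	}
	fmt.Println(base64.StdEncoding.EncodeToString(crc.Sum(nil))) // CRC32C
	fmt.Println(base64.StdEncoding.EncodeToString(sum.Sum(nil))) // MD5
}
```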
diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/notification/event.go b/vendor/github.com/fsouza/fake-gcs-server/internal/notification/event.go
new file mode 100644
index 0000000000000..f20ac8c87a40a
--- /dev/null
+++ b/vendor/github.com/fsouza/fake-gcs-server/internal/notification/event.go
@@ -0,0 +1,222 @@
+package notification
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+ "time"
+
+ "cloud.google.com/go/pubsub"
+ "github.com/fsouza/fake-gcs-server/internal/backend"
+)
+
+// EventType is the type of event to trigger. The descriptions of the events
+// can be found here:
+// https://cloud.google.com/storage/docs/pubsub-notifications#events.
+type EventType string
+
+const (
+ // EventFinalize is triggered when an object is added.
+ EventFinalize EventType = "OBJECT_FINALIZE"
+ // EventDelete is triggered when an object is deleted.
+ EventDelete = "OBJECT_DELETE"
+ // EventMetadata is triggered when an object's metadata is changed.
+ EventMetadata = "OBJECT_METADATA_UPDATE"
+	// EventArchive is triggered when an object becomes a noncurrent version
+	// (requires bucket versioning to be enabled).
+ EventArchive = "OBJECT_ARCHIVE"
+)
+
+// EventNotificationOptions contains per-event flags; when a flag is true,
+// a notification is triggered when the corresponding event occurs.
+type EventNotificationOptions struct {
+ Finalize bool
+ Delete bool
+ MetadataUpdate bool
+ Archive bool
+}
+
+// EventManagerOptions determines what events are triggered and where.
+type EventManagerOptions struct {
+ // ProjectID is the project ID containing the pubsub topic.
+ ProjectID string
+ // TopicName is the pubsub topic name to publish events on.
+ TopicName string
+ // Bucket is the name of the bucket to publish events from.
+ Bucket string
+	// ObjectPrefix, if not empty, restricts trigger events to objects whose
+	// names have this prefix.
+ ObjectPrefix string
+ // NotifyOn determines what events to trigger.
+ NotifyOn EventNotificationOptions
+}
+
+type EventManager interface {
+ Trigger(o *backend.StreamingObject, eventType EventType, extraEventAttr map[string]string)
+}
+
+// PubsubEventManager checks if an event should be published.
+type PubsubEventManager struct {
+	// publishSynchronously, if true, causes events to be published
+	// synchronously instead of in a goroutine. It is used during tests to
+	// prevent race conditions.
+ publishSynchronously bool
+ // notifyOn determines what events are triggered.
+ notifyOn EventNotificationOptions
+ // writer is where logs are written to.
+ writer io.Writer
+	// bucket, if not empty, restricts trigger events to objects from this bucket.
+	bucket string
+	// objectPrefix, if not empty, restricts trigger events to objects whose
+	// names have this prefix.
+ objectPrefix string
+ // publisher is used to publish events on.
+ publisher eventPublisher
+}
+
+func NewPubsubEventManager(options EventManagerOptions, w io.Writer) (*PubsubEventManager, error) {
+ manager := &PubsubEventManager{
+ writer: w,
+ notifyOn: options.NotifyOn,
+ bucket: options.Bucket,
+ objectPrefix: options.ObjectPrefix,
+ }
+ if options.ProjectID != "" && options.TopicName != "" {
+ ctx := context.Background()
+ client, err := pubsub.NewClient(ctx, options.ProjectID)
+ if err != nil {
+ return nil, fmt.Errorf("error creating pubsub client: %v", err)
+ }
+ manager.publisher = client.Topic(options.TopicName)
+ }
+ return manager, nil
+}
+
+// eventPublisher is the interface to publish triggered events.
+type eventPublisher interface {
+ Publish(ctx context.Context, msg *pubsub.Message) *pubsub.PublishResult
+}
+
+// Trigger checks if an event should be triggered. If so, it publishes the
+// event to a pubsub queue.
+func (m *PubsubEventManager) Trigger(o *backend.StreamingObject, eventType EventType, extraEventAttr map[string]string) {
+ if m.publisher == nil {
+ return
+ }
+ if m.bucket != "" && o.BucketName != m.bucket {
+ return
+ }
+ if m.objectPrefix != "" && !strings.HasPrefix(o.Name, m.objectPrefix) {
+ return
+ }
+ switch eventType {
+ case EventFinalize:
+ if !m.notifyOn.Finalize {
+ return
+ }
+ case EventDelete:
+ if !m.notifyOn.Delete {
+ return
+ }
+ case EventMetadata:
+ if !m.notifyOn.MetadataUpdate {
+ return
+ }
+ case EventArchive:
+ if !m.notifyOn.Archive {
+ return
+ }
+ }
+ eventTime := time.Now().Format(time.RFC3339)
+ publishFunc := func() {
+ err := m.publish(o, eventType, eventTime, extraEventAttr)
+ if m.writer != nil {
+ if err != nil {
+ fmt.Fprintf(m.writer, "error publishing event: %v", err)
+ } else {
+ fmt.Fprintf(m.writer, "sent event %s for object %s\n", string(eventType), o.ID())
+ }
+ }
+ }
+ if m.publishSynchronously {
+ publishFunc()
+ } else {
+ go publishFunc()
+ }
+}
+
+func (m *PubsubEventManager) publish(o *backend.StreamingObject, eventType EventType, eventTime string, extraEventAttr map[string]string) error {
+ ctx := context.Background()
+ data, attributes, err := generateEvent(o, eventType, eventTime, extraEventAttr)
+ if err != nil {
+ return err
+ }
+ if r := m.publisher.Publish(ctx, &pubsub.Message{
+ Data: data,
+ Attributes: attributes,
+ }); r != nil {
+ _, err = r.Get(ctx)
+ return err
+ }
+ return nil
+}
+
+// gcsEvent is the payload of a GCS event. Note that all properties are string-quoted.
+// The description of the full object can be found here:
+// https://cloud.google.com/storage/docs/json_api/v1/objects#resource-representations.
+type gcsEvent struct {
+ Kind string `json:"kind"`
+ ID string `json:"id"`
+ Name string `json:"name"`
+ Bucket string `json:"bucket"`
+ Generation int64 `json:"generation,string,omitempty"`
+ ContentType string `json:"contentType"`
+ ContentEncoding string `json:"contentEncoding,omitempty"`
+ Created string `json:"timeCreated,omitempty"`
+ Updated string `json:"updated,omitempty"`
+ StorageClass string `json:"storageClass"`
+ Size int64 `json:"size,string"`
+ MD5Hash string `json:"md5Hash,omitempty"`
+ CRC32c string `json:"crc32c,omitempty"`
+ MetaData map[string]string `json:"metadata,omitempty"`
+}
+
+func generateEvent(o *backend.StreamingObject, eventType EventType, eventTime string, extraEventAttr map[string]string) ([]byte, map[string]string, error) {
+ payload := gcsEvent{
+ Kind: "storage#object",
+ ID: o.ID(),
+ Name: o.Name,
+ Bucket: o.BucketName,
+ Generation: o.Generation,
+ ContentType: o.ContentType,
+ ContentEncoding: o.ContentEncoding,
+ Created: o.Created,
+ Updated: o.Updated,
+ StorageClass: "STANDARD",
+ Size: o.Size,
+ MD5Hash: o.Md5Hash,
+ CRC32c: o.Crc32c,
+ MetaData: o.Metadata,
+ }
+ attributes := map[string]string{
+ "bucketId": o.BucketName,
+ "eventTime": eventTime,
+ "eventType": string(eventType),
+ "objectGeneration": strconv.FormatInt(o.Generation, 10),
+ "objectId": o.Name,
+ "payloadFormat": "JSON_API_V1",
+ }
+ for k, v := range extraEventAttr {
+ if _, exists := attributes[k]; exists {
+ return nil, nil, fmt.Errorf("cannot overwrite duplicate event attribute %s", k)
+ }
+ attributes[k] = v
+ }
+ data, err := json.Marshal(&payload)
+ if err != nil {
+ return nil, nil, err
+ }
+ return data, attributes, nil
+}
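As a wiring sketch for the event manager above (project, topic, bucket, and prefix values are placeholders, and the `internal/` package is only importable within fake-gcs-server itself):

```go
package main

import (
	"log"
	"os"

	"github.com/fsouza/fake-gcs-server/internal/notification"
)

func main() {
	// Publish OBJECT_FINALIZE and OBJECT_DELETE events for objects under
	// "logs/" in bucket "my-bucket" to a Pub/Sub topic.
	manager, err := notification.NewPubsubEventManager(notification.EventManagerOptions{
		ProjectID:    "my-project",
		TopicName:    "gcs-events",
		Bucket:       "my-bucket",
		ObjectPrefix: "logs/",
		NotifyOn: notification.EventNotificationOptions{
			Finalize: true,
			Delete:   true,
		},
	}, os.Stderr)
	if err != nil {
		log.Fatal(err)
	}
	_ = manager // the server calls manager.Trigger(obj, eventType, attrs) after mutations
}
```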
diff --git a/vendor/github.com/gorilla/handlers/.editorconfig b/vendor/github.com/gorilla/handlers/.editorconfig
new file mode 100644
index 0000000000000..c6b74c3e0d0c7
--- /dev/null
+++ b/vendor/github.com/gorilla/handlers/.editorconfig
@@ -0,0 +1,20 @@
+; https://editorconfig.org/
+
+root = true
+
+[*]
+insert_final_newline = true
+charset = utf-8
+trim_trailing_whitespace = true
+indent_style = space
+indent_size = 2
+
+[{Makefile,go.mod,go.sum,*.go,.gitmodules}]
+indent_style = tab
+indent_size = 4
+
+[*.md]
+indent_size = 4
+trim_trailing_whitespace = false
+
+eclint_indent_style = unset
\ No newline at end of file
diff --git a/vendor/github.com/gorilla/handlers/.gitignore b/vendor/github.com/gorilla/handlers/.gitignore
new file mode 100644
index 0000000000000..577a89e813831
--- /dev/null
+++ b/vendor/github.com/gorilla/handlers/.gitignore
@@ -0,0 +1,2 @@
+# Output of the go test coverage tool
+coverage.coverprofile
diff --git a/vendor/github.com/gorilla/handlers/LICENSE b/vendor/github.com/gorilla/handlers/LICENSE
new file mode 100644
index 0000000000000..bb9d80bc9b6bc
--- /dev/null
+++ b/vendor/github.com/gorilla/handlers/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2023 The Gorilla Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/gorilla/handlers/Makefile b/vendor/github.com/gorilla/handlers/Makefile
new file mode 100644
index 0000000000000..003b784f7edbf
--- /dev/null
+++ b/vendor/github.com/gorilla/handlers/Makefile
@@ -0,0 +1,34 @@
+GO_LINT=$(shell which golangci-lint 2> /dev/null || echo '')
+GO_LINT_URI=github.com/golangci/golangci-lint/cmd/golangci-lint@latest
+
+GO_SEC=$(shell which gosec 2> /dev/null || echo '')
+GO_SEC_URI=github.com/securego/gosec/v2/cmd/gosec@latest
+
+GO_VULNCHECK=$(shell which govulncheck 2> /dev/null || echo '')
+GO_VULNCHECK_URI=golang.org/x/vuln/cmd/govulncheck@latest
+
+.PHONY: verify
+verify: sec govulncheck lint test
+
+.PHONY: lint
+lint:
+ $(if $(GO_LINT), ,go install $(GO_LINT_URI))
+ @echo "##### Running golangci-lint #####"
+ golangci-lint run -v
+
+.PHONY: sec
+sec:
+ $(if $(GO_SEC), ,go install $(GO_SEC_URI))
+ @echo "##### Running gosec #####"
+ gosec ./...
+
+.PHONY: govulncheck
+govulncheck:
+ $(if $(GO_VULNCHECK), ,go install $(GO_VULNCHECK_URI))
+ @echo "##### Running govulncheck #####"
+ govulncheck ./...
+
+.PHONY: test
+test:
+ @echo "##### Running tests #####"
+ go test -race -cover -coverprofile=coverage.coverprofile -covermode=atomic -v ./...
diff --git a/vendor/github.com/gorilla/handlers/README.md b/vendor/github.com/gorilla/handlers/README.md
new file mode 100644
index 0000000000000..02555b2642c5f
--- /dev/null
+++ b/vendor/github.com/gorilla/handlers/README.md
@@ -0,0 +1,56 @@
+# gorilla/handlers
+
+
+[](https://codecov.io/github/gorilla/handlers)
+[](https://godoc.org/github.com/gorilla/handlers)
+[](https://sourcegraph.com/github.com/gorilla/handlers?badge)
+
+Package handlers is a collection of handlers (aka "HTTP middleware") for use
+with Go's `net/http` package (or any framework supporting `http.Handler`), including:
+
+* [**LoggingHandler**](https://godoc.org/github.com/gorilla/handlers#LoggingHandler) for logging HTTP requests in the Apache [Common Log
+ Format](http://httpd.apache.org/docs/2.2/logs.html#common).
+* [**CombinedLoggingHandler**](https://godoc.org/github.com/gorilla/handlers#CombinedLoggingHandler) for logging HTTP requests in the Apache [Combined Log
+ Format](http://httpd.apache.org/docs/2.2/logs.html#combined) commonly used by
+ both Apache and nginx.
+* [**CompressHandler**](https://godoc.org/github.com/gorilla/handlers#CompressHandler) for gzipping responses.
+* [**ContentTypeHandler**](https://godoc.org/github.com/gorilla/handlers#ContentTypeHandler) for validating requests against a list of accepted
+ content types.
+* [**MethodHandler**](https://godoc.org/github.com/gorilla/handlers#MethodHandler) for matching HTTP methods against handlers in a
+ `map[string]http.Handler`
+* [**ProxyHeaders**](https://godoc.org/github.com/gorilla/handlers#ProxyHeaders) for populating `r.RemoteAddr` and `r.URL.Scheme` based on the
+ `X-Forwarded-For`, `X-Real-IP`, `X-Forwarded-Proto` and RFC7239 `Forwarded`
+ headers when running a Go server behind a HTTP reverse proxy.
+* [**CanonicalHost**](https://godoc.org/github.com/gorilla/handlers#CanonicalHost) for re-directing to the preferred host when handling multiple
+ domains (i.e. multiple CNAME aliases).
+* [**RecoveryHandler**](https://godoc.org/github.com/gorilla/handlers#RecoveryHandler) for recovering from unexpected panics.
+
+Other handlers are documented [on the Gorilla
+website](https://www.gorillatoolkit.org/pkg/handlers).
+
+## Example
+
+A simple example using `handlers.LoggingHandler` and `handlers.CompressHandler`:
+
+```go
+import (
+ "net/http"
+ "github.com/gorilla/handlers"
+)
+
+func main() {
+ r := http.NewServeMux()
+
+ // Only log requests to our admin dashboard to stdout
+ r.Handle("/admin", handlers.LoggingHandler(os.Stdout, http.HandlerFunc(ShowAdminDashboard)))
+ r.HandleFunc("/", ShowIndex)
+
+ // Wrap our server with our gzip handler to gzip compress all responses.
+ http.ListenAndServe(":8000", handlers.CompressHandler(r))
+}
+```
+
+## License
+
+BSD licensed. See the included LICENSE file for details.
+
diff --git a/vendor/github.com/gorilla/handlers/canonical.go b/vendor/github.com/gorilla/handlers/canonical.go
new file mode 100644
index 0000000000000..7121f5307bec9
--- /dev/null
+++ b/vendor/github.com/gorilla/handlers/canonical.go
@@ -0,0 +1,73 @@
+package handlers
+
+import (
+ "net/http"
+ "net/url"
+ "strings"
+)
+
+type canonical struct {
+ h http.Handler
+ domain string
+ code int
+}
+
+// CanonicalHost is HTTP middleware that re-directs requests to the canonical
+// domain. It accepts a domain and a status code (e.g. 301 or 302) and
+// re-directs clients to this domain. The existing request path is maintained.
+//
+// Note: If the provided domain is considered invalid by url.Parse or otherwise
+// returns an empty scheme or host, clients are not re-directed.
+//
+// Example:
+//
+// r := mux.NewRouter()
+// canonical := handlers.CanonicalHost("http://www.gorillatoolkit.org", 302)
+// r.HandleFunc("/route", YourHandler)
+//
+// log.Fatal(http.ListenAndServe(":7000", canonical(r)))
+func CanonicalHost(domain string, code int) func(h http.Handler) http.Handler {
+ fn := func(h http.Handler) http.Handler {
+ return canonical{h, domain, code}
+ }
+
+ return fn
+}
+
+func (c canonical) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ dest, err := url.Parse(c.domain)
+ if err != nil {
+ // Call the next handler if the provided domain fails to parse.
+ c.h.ServeHTTP(w, r)
+ return
+ }
+
+ if dest.Scheme == "" || dest.Host == "" {
+ // Call the next handler if the scheme or host are empty.
+		// Note that url.Parse won't fail in this case.
+ c.h.ServeHTTP(w, r)
+ return
+ }
+
+ if !strings.EqualFold(cleanHost(r.Host), dest.Host) {
+ // Re-build the destination URL
+ dest := dest.Scheme + "://" + dest.Host + r.URL.Path
+ if r.URL.RawQuery != "" {
+ dest += "?" + r.URL.RawQuery
+ }
+ http.Redirect(w, r, dest, c.code)
+ return
+ }
+
+ c.h.ServeHTTP(w, r)
+}
+
+// cleanHost cleans invalid Host headers by stripping anything after '/' or ' '.
+// This is backported from Go 1.5 (in response to issue #11206) and attempts to
+// mitigate malformed Host headers that do not match the format in RFC7230.
+func cleanHost(in string) string {
+ if i := strings.IndexAny(in, " /"); i != -1 {
+ return in[:i]
+ }
+ return in
+}
diff --git a/vendor/github.com/gorilla/handlers/compress.go b/vendor/github.com/gorilla/handlers/compress.go
new file mode 100644
index 0000000000000..d6f589503b5ea
--- /dev/null
+++ b/vendor/github.com/gorilla/handlers/compress.go
@@ -0,0 +1,143 @@
+// Copyright 2013 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package handlers
+
+import (
+ "compress/flate"
+ "compress/gzip"
+ "io"
+ "net/http"
+ "strings"
+
+ "github.com/felixge/httpsnoop"
+)
+
+const acceptEncoding string = "Accept-Encoding"
+
+type compressResponseWriter struct {
+ compressor io.Writer
+ w http.ResponseWriter
+}
+
+func (cw *compressResponseWriter) WriteHeader(c int) {
+ cw.w.Header().Del("Content-Length")
+ cw.w.WriteHeader(c)
+}
+
+func (cw *compressResponseWriter) Write(b []byte) (int, error) {
+ h := cw.w.Header()
+ if h.Get("Content-Type") == "" {
+ h.Set("Content-Type", http.DetectContentType(b))
+ }
+ h.Del("Content-Length")
+
+ return cw.compressor.Write(b)
+}
+
+func (cw *compressResponseWriter) ReadFrom(r io.Reader) (int64, error) {
+ return io.Copy(cw.compressor, r)
+}
+
+type flusher interface {
+ Flush() error
+}
+
+func (cw *compressResponseWriter) Flush() {
+ // Flush compressed data if compressor supports it.
+ if f, ok := cw.compressor.(flusher); ok {
+ _ = f.Flush()
+ }
+ // Flush HTTP response.
+ if f, ok := cw.w.(http.Flusher); ok {
+ f.Flush()
+ }
+}
+
+// CompressHandler gzip compresses HTTP responses for clients that support it
+// via the 'Accept-Encoding' header.
+//
+// Compressing TLS traffic may leak the page contents to an attacker if the
+// page contains user input: http://security.stackexchange.com/a/102015/12208
+func CompressHandler(h http.Handler) http.Handler {
+ return CompressHandlerLevel(h, gzip.DefaultCompression)
+}
+
+// CompressHandlerLevel gzip compresses HTTP responses with specified compression level
+// for clients that support it via the 'Accept-Encoding' header.
+//
+// The compression level should be gzip.DefaultCompression, gzip.NoCompression,
+// or any integer value between gzip.BestSpeed and gzip.BestCompression inclusive.
+// gzip.DefaultCompression is used in case of invalid compression level.
+func CompressHandlerLevel(h http.Handler, level int) http.Handler {
+ if level < gzip.DefaultCompression || level > gzip.BestCompression {
+ level = gzip.DefaultCompression
+ }
+
+ const (
+ gzipEncoding = "gzip"
+ flateEncoding = "deflate"
+ )
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ // detect what encoding to use
+ var encoding string
+ for _, curEnc := range strings.Split(r.Header.Get(acceptEncoding), ",") {
+ curEnc = strings.TrimSpace(curEnc)
+ if curEnc == gzipEncoding || curEnc == flateEncoding {
+ encoding = curEnc
+ break
+ }
+ }
+
+		// always add Accept-Encoding to Vary to prevent corruption in intermediate caches
+ w.Header().Add("Vary", acceptEncoding)
+
+ // if we weren't able to identify an encoding we're familiar with, pass on the
+ // request to the handler and return
+ if encoding == "" {
+ h.ServeHTTP(w, r)
+ return
+ }
+
+ if r.Header.Get("Upgrade") != "" {
+ h.ServeHTTP(w, r)
+ return
+ }
+
+ // wrap the ResponseWriter with the writer for the chosen encoding
+ var encWriter io.WriteCloser
+ if encoding == gzipEncoding {
+ encWriter, _ = gzip.NewWriterLevel(w, level)
+ } else if encoding == flateEncoding {
+ encWriter, _ = flate.NewWriter(w, level)
+ }
+ defer encWriter.Close()
+
+ w.Header().Set("Content-Encoding", encoding)
+ r.Header.Del(acceptEncoding)
+
+ cw := &compressResponseWriter{
+ w: w,
+ compressor: encWriter,
+ }
+
+ w = httpsnoop.Wrap(w, httpsnoop.Hooks{
+ Write: func(httpsnoop.WriteFunc) httpsnoop.WriteFunc {
+ return cw.Write
+ },
+ WriteHeader: func(httpsnoop.WriteHeaderFunc) httpsnoop.WriteHeaderFunc {
+ return cw.WriteHeader
+ },
+ Flush: func(httpsnoop.FlushFunc) httpsnoop.FlushFunc {
+ return cw.Flush
+ },
+ ReadFrom: func(rff httpsnoop.ReadFromFunc) httpsnoop.ReadFromFunc {
+ return cw.ReadFrom
+ },
+ })
+
+ h.ServeHTTP(w, r)
+ })
+}
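A short usage sketch for the compression middleware above (the port is a placeholder):

```go
package main

import (
	"compress/gzip"
	"log"
	"net/http"

	"github.com/gorilla/handlers"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		_, _ = w.Write([]byte("hello"))
	})

	// Favor speed over ratio; an invalid level silently falls back to
	// gzip.DefaultCompression, per the documentation above.
	log.Fatal(http.ListenAndServe(":8000", handlers.CompressHandlerLevel(mux, gzip.BestSpeed)))
}
```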
diff --git a/vendor/github.com/gorilla/handlers/cors.go b/vendor/github.com/gorilla/handlers/cors.go
new file mode 100644
index 0000000000000..8af9c096e5e40
--- /dev/null
+++ b/vendor/github.com/gorilla/handlers/cors.go
@@ -0,0 +1,352 @@
+package handlers
+
+import (
+ "net/http"
+ "strconv"
+ "strings"
+)
+
+// CORSOption represents a functional option for configuring the CORS middleware.
+type CORSOption func(*cors) error
+
+type cors struct {
+ h http.Handler
+ allowedHeaders []string
+ allowedMethods []string
+ allowedOrigins []string
+ allowedOriginValidator OriginValidator
+ exposedHeaders []string
+ maxAge int
+ ignoreOptions bool
+ allowCredentials bool
+ optionStatusCode int
+}
+
+// OriginValidator takes an origin string and returns whether or not that origin is allowed.
+type OriginValidator func(string) bool
+
+var (
+ defaultCorsOptionStatusCode = http.StatusOK
+ defaultCorsMethods = []string{http.MethodGet, http.MethodHead, http.MethodPost}
+ defaultCorsHeaders = []string{"Accept", "Accept-Language", "Content-Language", "Origin"}
+ // (WebKit/Safari v9 sends the Origin header by default in AJAX requests).
+)
+
+const (
+ corsOptionMethod string = http.MethodOptions
+ corsAllowOriginHeader string = "Access-Control-Allow-Origin"
+ corsExposeHeadersHeader string = "Access-Control-Expose-Headers"
+ corsMaxAgeHeader string = "Access-Control-Max-Age"
+ corsAllowMethodsHeader string = "Access-Control-Allow-Methods"
+ corsAllowHeadersHeader string = "Access-Control-Allow-Headers"
+ corsAllowCredentialsHeader string = "Access-Control-Allow-Credentials"
+ corsRequestMethodHeader string = "Access-Control-Request-Method"
+ corsRequestHeadersHeader string = "Access-Control-Request-Headers"
+ corsOriginHeader string = "Origin"
+ corsVaryHeader string = "Vary"
+ corsOriginMatchAll string = "*"
+)
+
+func (ch *cors) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ origin := r.Header.Get(corsOriginHeader)
+ if !ch.isOriginAllowed(origin) {
+ if r.Method != corsOptionMethod || ch.ignoreOptions {
+ ch.h.ServeHTTP(w, r)
+ }
+
+ return
+ }
+
+ if r.Method == corsOptionMethod {
+ if ch.ignoreOptions {
+ ch.h.ServeHTTP(w, r)
+ return
+ }
+
+ if _, ok := r.Header[corsRequestMethodHeader]; !ok {
+ w.WriteHeader(http.StatusBadRequest)
+ return
+ }
+
+ method := r.Header.Get(corsRequestMethodHeader)
+ if !ch.isMatch(method, ch.allowedMethods) {
+ w.WriteHeader(http.StatusMethodNotAllowed)
+ return
+ }
+
+ requestHeaders := strings.Split(r.Header.Get(corsRequestHeadersHeader), ",")
+ allowedHeaders := []string{}
+ for _, v := range requestHeaders {
+ canonicalHeader := http.CanonicalHeaderKey(strings.TrimSpace(v))
+ if canonicalHeader == "" || ch.isMatch(canonicalHeader, defaultCorsHeaders) {
+ continue
+ }
+
+ if !ch.isMatch(canonicalHeader, ch.allowedHeaders) {
+ w.WriteHeader(http.StatusForbidden)
+ return
+ }
+
+ allowedHeaders = append(allowedHeaders, canonicalHeader)
+ }
+
+ if len(allowedHeaders) > 0 {
+ w.Header().Set(corsAllowHeadersHeader, strings.Join(allowedHeaders, ","))
+ }
+
+ if ch.maxAge > 0 {
+ w.Header().Set(corsMaxAgeHeader, strconv.Itoa(ch.maxAge))
+ }
+
+ if !ch.isMatch(method, defaultCorsMethods) {
+ w.Header().Set(corsAllowMethodsHeader, method)
+ }
+ } else if len(ch.exposedHeaders) > 0 {
+ w.Header().Set(corsExposeHeadersHeader, strings.Join(ch.exposedHeaders, ","))
+ }
+
+ if ch.allowCredentials {
+ w.Header().Set(corsAllowCredentialsHeader, "true")
+ }
+
+ if len(ch.allowedOrigins) > 1 {
+ w.Header().Set(corsVaryHeader, corsOriginHeader)
+ }
+
+ returnOrigin := origin
+ if ch.allowedOriginValidator == nil && len(ch.allowedOrigins) == 0 {
+ returnOrigin = "*"
+ } else {
+ for _, o := range ch.allowedOrigins {
+ // A configuration of * is different than explicitly setting an allowed
+ // origin. Returning arbitrary origin headers in an access control allow
+ // origin header is unsafe and is not required by any use case.
+ if o == corsOriginMatchAll {
+ returnOrigin = "*"
+ break
+ }
+ }
+ }
+ w.Header().Set(corsAllowOriginHeader, returnOrigin)
+
+ if r.Method == corsOptionMethod {
+ w.WriteHeader(ch.optionStatusCode)
+ return
+ }
+ ch.h.ServeHTTP(w, r)
+}
+
+// CORS provides Cross-Origin Resource Sharing middleware.
+// Example:
+//
+// import (
+// "net/http"
+//
+// "github.com/gorilla/handlers"
+// "github.com/gorilla/mux"
+// )
+//
+// func main() {
+// r := mux.NewRouter()
+// r.HandleFunc("/users", UserEndpoint)
+// r.HandleFunc("/projects", ProjectEndpoint)
+//
+// // Apply the CORS middleware to our top-level router, with the defaults.
+// http.ListenAndServe(":8000", handlers.CORS()(r))
+// }
+func CORS(opts ...CORSOption) func(http.Handler) http.Handler {
+ return func(h http.Handler) http.Handler {
+ ch := parseCORSOptions(opts...)
+ ch.h = h
+ return ch
+ }
+}
+
+func parseCORSOptions(opts ...CORSOption) *cors {
+ ch := &cors{
+ allowedMethods: defaultCorsMethods,
+ allowedHeaders: defaultCorsHeaders,
+ allowedOrigins: []string{},
+ optionStatusCode: defaultCorsOptionStatusCode,
+ }
+
+ for _, option := range opts {
+ _ = option(ch) //TODO: @bharat-rajani, return error to caller if not nil?
+ }
+
+ return ch
+}
+
+//
+// Functional options for configuring CORS.
+//
+
+// AllowedHeaders adds the provided headers to the list of allowed headers in a
+// CORS request.
+// This is an append operation so the headers Accept, Accept-Language,
+// and Content-Language are always allowed.
+// Content-Type must be explicitly declared if accepting Content-Types other than
+// application/x-www-form-urlencoded, multipart/form-data, or text/plain.
+func AllowedHeaders(headers []string) CORSOption {
+ return func(ch *cors) error {
+ for _, v := range headers {
+ normalizedHeader := http.CanonicalHeaderKey(strings.TrimSpace(v))
+ if normalizedHeader == "" {
+ continue
+ }
+
+ if !ch.isMatch(normalizedHeader, ch.allowedHeaders) {
+ ch.allowedHeaders = append(ch.allowedHeaders, normalizedHeader)
+ }
+ }
+
+ return nil
+ }
+}
+
+// AllowedMethods can be used to explicitly allow methods in the
+// Access-Control-Allow-Methods header.
+// This is a replacement operation so you must also
+// pass GET, HEAD, and POST if you wish to support those methods.
+func AllowedMethods(methods []string) CORSOption {
+ return func(ch *cors) error {
+ ch.allowedMethods = []string{}
+ for _, v := range methods {
+ normalizedMethod := strings.ToUpper(strings.TrimSpace(v))
+ if normalizedMethod == "" {
+ continue
+ }
+
+ if !ch.isMatch(normalizedMethod, ch.allowedMethods) {
+ ch.allowedMethods = append(ch.allowedMethods, normalizedMethod)
+ }
+ }
+
+ return nil
+ }
+}
+
+// AllowedOrigins sets the allowed origins for CORS requests, as used in the
+// 'Access-Control-Allow-Origin' HTTP header.
+// Note: Passing in a []string{"*"} will allow any domain.
+func AllowedOrigins(origins []string) CORSOption {
+ return func(ch *cors) error {
+ for _, v := range origins {
+ if v == corsOriginMatchAll {
+ ch.allowedOrigins = []string{corsOriginMatchAll}
+ return nil
+ }
+ }
+
+ ch.allowedOrigins = origins
+ return nil
+ }
+}
+
+// AllowedOriginValidator sets a function for evaluating allowed origins in CORS requests, represented by the
+// 'Access-Control-Allow-Origin' HTTP header.
+func AllowedOriginValidator(fn OriginValidator) CORSOption {
+ return func(ch *cors) error {
+ ch.allowedOriginValidator = fn
+ return nil
+ }
+}
+
+// OptionStatusCode sets a custom status code on the OPTIONS requests.
+// Default behaviour sets it to 200 to reflect best practices. This option is
+// not mandatory and can be used if you need a custom status code (e.g. 204).
+//
+// More informations on the spec:
+// https://fetch.spec.whatwg.org/#cors-preflight-fetch
+func OptionStatusCode(code int) CORSOption {
+ return func(ch *cors) error {
+ ch.optionStatusCode = code
+ return nil
+ }
+}
+
+// ExposedHeaders can be used to specify headers that are available
+// and will not be stripped out by the user-agent.
+func ExposedHeaders(headers []string) CORSOption {
+ return func(ch *cors) error {
+ ch.exposedHeaders = []string{}
+ for _, v := range headers {
+ normalizedHeader := http.CanonicalHeaderKey(strings.TrimSpace(v))
+ if normalizedHeader == "" {
+ continue
+ }
+
+ if !ch.isMatch(normalizedHeader, ch.exposedHeaders) {
+ ch.exposedHeaders = append(ch.exposedHeaders, normalizedHeader)
+ }
+ }
+
+ return nil
+ }
+}
+
+// MaxAge determines the maximum age (in seconds) between preflight requests. A
+// maximum of 10 minutes is allowed. An age above this value will default to 10
+// minutes.
+func MaxAge(age int) CORSOption {
+ return func(ch *cors) error {
+ // Maximum of 10 minutes.
+ if age > 600 {
+ age = 600
+ }
+
+ ch.maxAge = age
+ return nil
+ }
+}
+
+// IgnoreOptions causes the CORS middleware to ignore OPTIONS requests, instead
+// passing them through to the next handler. This is useful when your application
+// or framework has a pre-existing mechanism for responding to OPTIONS requests.
+func IgnoreOptions() CORSOption {
+ return func(ch *cors) error {
+ ch.ignoreOptions = true
+ return nil
+ }
+}
+
+// AllowCredentials can be used to specify that the user agent may pass
+// authentication details along with the request.
+func AllowCredentials() CORSOption {
+ return func(ch *cors) error {
+ ch.allowCredentials = true
+ return nil
+ }
+}
+
+func (ch *cors) isOriginAllowed(origin string) bool {
+ if origin == "" {
+ return false
+ }
+
+ if ch.allowedOriginValidator != nil {
+ return ch.allowedOriginValidator(origin)
+ }
+
+ if len(ch.allowedOrigins) == 0 {
+ return true
+ }
+
+ for _, allowedOrigin := range ch.allowedOrigins {
+ if allowedOrigin == origin || allowedOrigin == corsOriginMatchAll {
+ return true
+ }
+ }
+
+ return false
+}
+
+func (ch *cors) isMatch(needle string, haystack []string) bool {
+ for _, v := range haystack {
+ if v == needle {
+ return true
+ }
+ }
+
+ return false
+}
diff --git a/vendor/github.com/gorilla/handlers/doc.go b/vendor/github.com/gorilla/handlers/doc.go
new file mode 100644
index 0000000000000..944e5a8ae9982
--- /dev/null
+++ b/vendor/github.com/gorilla/handlers/doc.go
@@ -0,0 +1,9 @@
+/*
+Package handlers is a collection of handlers (aka "HTTP middleware") for use
+with Go's net/http package (or any framework supporting http.Handler).
+
+The package includes handlers for logging in standardised formats, compressing
+HTTP responses, validating content types and other useful tools for manipulating
+requests and responses.
+*/
+package handlers
diff --git a/vendor/github.com/gorilla/handlers/handlers.go b/vendor/github.com/gorilla/handlers/handlers.go
new file mode 100644
index 0000000000000..9b92fce3333e7
--- /dev/null
+++ b/vendor/github.com/gorilla/handlers/handlers.go
@@ -0,0 +1,150 @@
+// Copyright 2013 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package handlers
+
+import (
+ "bufio"
+ "fmt"
+ "net"
+ "net/http"
+ "sort"
+ "strings"
+)
+
+// MethodHandler is an http.Handler that dispatches to a handler whose key in the
+// MethodHandler's map matches the name of the HTTP request's method, e.g. GET.
+//
+// If the request's method is OPTIONS and OPTIONS is not a key in the map then
+// the handler responds with a status of 200 and sets the Allow header to a
+// comma-separated list of available methods.
+//
+// If the request's method doesn't match any of its keys the handler responds
+// with a status of HTTP 405 "Method Not Allowed" and sets the Allow header to a
+// comma-separated list of available methods.
+type MethodHandler map[string]http.Handler
+
+func (h MethodHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ if handler, ok := h[req.Method]; ok {
+ handler.ServeHTTP(w, req)
+ } else {
+ allow := []string{}
+ for k := range h {
+ allow = append(allow, k)
+ }
+ sort.Strings(allow)
+ w.Header().Set("Allow", strings.Join(allow, ", "))
+ if req.Method == http.MethodOptions {
+ w.WriteHeader(http.StatusOK)
+ } else {
+ http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
+ }
+ }
+}
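+
+// An illustrative use (getHandler and postHandler are hypothetical): the map
+// answers OPTIONS requests and 405 responses automatically for the listed
+// methods.
+//
+//	http.Handle("/resource", handlers.MethodHandler{
+//		"GET":  getHandler,
+//		"POST": postHandler,
+//	})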
+
+// responseLogger is wrapper of http.ResponseWriter that keeps track of its HTTP
+// status code and body size.
+type responseLogger struct {
+ w http.ResponseWriter
+ status int
+ size int
+}
+
+func (l *responseLogger) Write(b []byte) (int, error) {
+ size, err := l.w.Write(b)
+ l.size += size
+ return size, err
+}
+
+func (l *responseLogger) WriteHeader(s int) {
+ l.w.WriteHeader(s)
+ l.status = s
+}
+
+func (l *responseLogger) Status() int {
+ return l.status
+}
+
+func (l *responseLogger) Size() int {
+ return l.size
+}
+
+func (l *responseLogger) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ conn, rw, err := l.w.(http.Hijacker).Hijack()
+ if err == nil && l.status == 0 {
+ // The status will be StatusSwitchingProtocols if there was no error and
+ // WriteHeader has not been called yet
+ l.status = http.StatusSwitchingProtocols
+ }
+ return conn, rw, err
+}
+
+// isContentType validates the Content-Type header matches the supplied
+// contentType. That is, its type and subtype match.
+func isContentType(h http.Header, contentType string) bool {
+ ct := h.Get("Content-Type")
+ if i := strings.IndexRune(ct, ';'); i != -1 {
+ ct = ct[0:i]
+ }
+ return ct == contentType
+}
+
+// ContentTypeHandler wraps and returns an http.Handler, validating that the
+// request content type is compatible with the contentTypes list. It writes an
+// HTTP 415 error if that fails.
+//
+// Only PUT, POST, and PATCH requests are considered.
+func ContentTypeHandler(h http.Handler, contentTypes ...string) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if !(r.Method == http.MethodPut || r.Method == http.MethodPost || r.Method == http.MethodPatch) {
+ h.ServeHTTP(w, r)
+ return
+ }
+
+ for _, ct := range contentTypes {
+ if isContentType(r.Header, ct) {
+ h.ServeHTTP(w, r)
+ return
+ }
+ }
+ http.Error(w, fmt.Sprintf("Unsupported content type %q; expected one of %q",
+ r.Header.Get("Content-Type"),
+ contentTypes),
+ http.StatusUnsupportedMediaType)
+ })
+}
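+
+// A short usage sketch (apiHandler is hypothetical): reject write requests
+// that are not JSON with a 415.
+//
+//	h := handlers.ContentTypeHandler(apiHandler, "application/json")
+//	http.ListenAndServe(":8000", h)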
+
+const (
+ // HTTPMethodOverrideHeader is a commonly used
+ // http header to override a request method.
+ HTTPMethodOverrideHeader = "X-HTTP-Method-Override"
+ // HTTPMethodOverrideFormKey is a commonly used
+ // HTML form key to override a request method.
+ HTTPMethodOverrideFormKey = "_method"
+)
+
+// HTTPMethodOverrideHandler wraps and returns an http.Handler which checks for
+// the X-HTTP-Method-Override header or the _method form key, and overrides (if
+// valid) request.Method with its value.
+//
+// This is especially useful for HTTP clients that don't support many HTTP verbs.
+// It isn't secure to override e.g. a GET to a POST, so only POST requests are
+// considered. Likewise, the override method can only be a "write" method: PUT,
+// PATCH or DELETE.
+//
+// Form method takes precedence over header method.
+func HTTPMethodOverrideHandler(h http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.Method == http.MethodPost {
+ om := r.FormValue(HTTPMethodOverrideFormKey)
+ if om == "" {
+ om = r.Header.Get(HTTPMethodOverrideHeader)
+ }
+ if om == http.MethodPut || om == http.MethodPatch || om == http.MethodDelete {
+ r.Method = om
+ }
+ }
+ h.ServeHTTP(w, r)
+ })
+}
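+
+// A short usage sketch (r is a hypothetical router): a POST carrying
+// _method=DELETE, or the X-HTTP-Method-Override: DELETE header, reaches r as
+// a DELETE.
+//
+//	http.ListenAndServe(":8000", handlers.HTTPMethodOverrideHandler(r))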
diff --git a/vendor/github.com/gorilla/handlers/logging.go b/vendor/github.com/gorilla/handlers/logging.go
new file mode 100644
index 0000000000000..2badb6fbff844
--- /dev/null
+++ b/vendor/github.com/gorilla/handlers/logging.go
@@ -0,0 +1,246 @@
+// Copyright 2013 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package handlers
+
+import (
+ "io"
+ "net"
+ "net/http"
+ "net/url"
+ "strconv"
+ "time"
+ "unicode/utf8"
+
+ "github.com/felixge/httpsnoop"
+)
+
+// Logging
+
+// LogFormatterParams is the structure any formatter will be handed when it is time to log.
+type LogFormatterParams struct {
+ Request *http.Request
+ URL url.URL
+ TimeStamp time.Time
+ StatusCode int
+ Size int
+}
+
+// LogFormatter gives the signature of the formatter function passed to CustomLoggingHandler.
+type LogFormatter func(writer io.Writer, params LogFormatterParams)
+
+// loggingHandler is the http.Handler implementation for LoggingHandler and its
+// friends.
+type loggingHandler struct {
+ writer io.Writer
+ handler http.Handler
+ formatter LogFormatter
+}
+
+func (h loggingHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ t := time.Now()
+ logger, w := makeLogger(w)
+ url := *req.URL
+
+ h.handler.ServeHTTP(w, req)
+ if req.MultipartForm != nil {
+ err := req.MultipartForm.RemoveAll()
+ if err != nil {
+ return
+ }
+ }
+
+ params := LogFormatterParams{
+ Request: req,
+ URL: url,
+ TimeStamp: t,
+ StatusCode: logger.Status(),
+ Size: logger.Size(),
+ }
+
+ h.formatter(h.writer, params)
+}
+
+func makeLogger(w http.ResponseWriter) (*responseLogger, http.ResponseWriter) {
+ logger := &responseLogger{w: w, status: http.StatusOK}
+ return logger, httpsnoop.Wrap(w, httpsnoop.Hooks{
+ Write: func(httpsnoop.WriteFunc) httpsnoop.WriteFunc {
+ return logger.Write
+ },
+ WriteHeader: func(httpsnoop.WriteHeaderFunc) httpsnoop.WriteHeaderFunc {
+ return logger.WriteHeader
+ },
+ })
+}
+
+const lowerhex = "0123456789abcdef"
+
+func appendQuoted(buf []byte, s string) []byte {
+ var runeTmp [utf8.UTFMax]byte
+	for width := 0; len(s) > 0; s = s[width:] { //nolint: wastedassign //TODO: why width starts at 0 and is reassigned as 1
+ r := rune(s[0])
+ width = 1
+ if r >= utf8.RuneSelf {
+ r, width = utf8.DecodeRuneInString(s)
+ }
+ if width == 1 && r == utf8.RuneError {
+ buf = append(buf, `\x`...)
+ buf = append(buf, lowerhex[s[0]>>4])
+ buf = append(buf, lowerhex[s[0]&0xF])
+ continue
+ }
+ if r == rune('"') || r == '\\' { // always backslashed
+ buf = append(buf, '\\')
+ buf = append(buf, byte(r))
+ continue
+ }
+ if strconv.IsPrint(r) {
+ n := utf8.EncodeRune(runeTmp[:], r)
+ buf = append(buf, runeTmp[:n]...)
+ continue
+ }
+ switch r {
+ case '\a':
+ buf = append(buf, `\a`...)
+ case '\b':
+ buf = append(buf, `\b`...)
+ case '\f':
+ buf = append(buf, `\f`...)
+ case '\n':
+ buf = append(buf, `\n`...)
+ case '\r':
+ buf = append(buf, `\r`...)
+ case '\t':
+ buf = append(buf, `\t`...)
+ case '\v':
+ buf = append(buf, `\v`...)
+ default:
+ switch {
+ case r < ' ':
+ buf = append(buf, `\x`...)
+ buf = append(buf, lowerhex[s[0]>>4])
+ buf = append(buf, lowerhex[s[0]&0xF])
+ case r > utf8.MaxRune:
+ r = 0xFFFD
+ fallthrough
+ case r < 0x10000:
+ buf = append(buf, `\u`...)
+ for s := 12; s >= 0; s -= 4 {
+ buf = append(buf, lowerhex[r>>uint(s)&0xF])
+ }
+ default:
+ buf = append(buf, `\U`...)
+ for s := 28; s >= 0; s -= 4 {
+ buf = append(buf, lowerhex[r>>uint(s)&0xF])
+ }
+ }
+ }
+ }
+ return buf
+}
+
+// buildCommonLogLine builds a log entry for req in Apache Common Log Format.
+// ts is the timestamp with which the entry should be logged.
+// status and size are used to provide the response HTTP status and size.
+func buildCommonLogLine(req *http.Request, url url.URL, ts time.Time, status int, size int) []byte {
+ username := "-"
+ if url.User != nil {
+ if name := url.User.Username(); name != "" {
+ username = name
+ }
+ }
+
+ host, _, err := net.SplitHostPort(req.RemoteAddr)
+ if err != nil {
+ host = req.RemoteAddr
+ }
+
+ uri := req.RequestURI
+
+ // Requests using the CONNECT method over HTTP/2.0 must use
+ // the authority field (aka r.Host) to identify the target.
+ // Refer: https://httpwg.github.io/specs/rfc7540.html#CONNECT
+ if req.ProtoMajor == 2 && req.Method == "CONNECT" {
+ uri = req.Host
+ }
+ if uri == "" {
+ uri = url.RequestURI()
+ }
+
+ buf := make([]byte, 0, 3*(len(host)+len(username)+len(req.Method)+len(uri)+len(req.Proto)+50)/2)
+ buf = append(buf, host...)
+ buf = append(buf, " - "...)
+ buf = append(buf, username...)
+ buf = append(buf, " ["...)
+ buf = append(buf, ts.Format("02/Jan/2006:15:04:05 -0700")...)
+ buf = append(buf, `] "`...)
+ buf = append(buf, req.Method...)
+ buf = append(buf, " "...)
+ buf = appendQuoted(buf, uri)
+ buf = append(buf, " "...)
+ buf = append(buf, req.Proto...)
+ buf = append(buf, `" `...)
+ buf = append(buf, strconv.Itoa(status)...)
+ buf = append(buf, " "...)
+ buf = append(buf, strconv.Itoa(size)...)
+ return buf
+}
+
+// writeLog writes a log entry for req to w in Apache Common Log Format.
+// ts is the timestamp with which the entry should be logged.
+// status and size are used to provide the response HTTP status and size.
+func writeLog(writer io.Writer, params LogFormatterParams) {
+ buf := buildCommonLogLine(params.Request, params.URL, params.TimeStamp, params.StatusCode, params.Size)
+ buf = append(buf, '\n')
+ _, _ = writer.Write(buf)
+}
+
+// writeCombinedLog writes a log entry for req to w in Apache Combined Log Format.
+// ts is the timestamp with which the entry should be logged.
+// status and size are used to provide the response HTTP status and size.
+func writeCombinedLog(writer io.Writer, params LogFormatterParams) {
+ buf := buildCommonLogLine(params.Request, params.URL, params.TimeStamp, params.StatusCode, params.Size)
+ buf = append(buf, ` "`...)
+ buf = appendQuoted(buf, params.Request.Referer())
+ buf = append(buf, `" "`...)
+ buf = appendQuoted(buf, params.Request.UserAgent())
+ buf = append(buf, '"', '\n')
+ _, _ = writer.Write(buf)
+}
+
+// CombinedLoggingHandler returns an http.Handler that wraps h and logs requests to out in
+// Apache Combined Log Format.
+//
+// See http://httpd.apache.org/docs/2.2/logs.html#combined for a description of this format.
+//
+// LoggingHandler always sets the ident field of the log to -.
+func CombinedLoggingHandler(out io.Writer, h http.Handler) http.Handler {
+ return loggingHandler{out, h, writeCombinedLog}
+}
+
+// LoggingHandler returns an http.Handler that wraps h and logs requests to out in
+// Apache Common Log Format (CLF).
+//
+// See http://httpd.apache.org/docs/2.2/logs.html#common for a description of this format.
+//
+// LoggingHandler always sets the ident field of the log to -
+//
+// Example:
+//
+// r := mux.NewRouter()
+// r.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+// w.Write([]byte("This is a catch-all route"))
+// })
+// loggedRouter := handlers.LoggingHandler(os.Stdout, r)
+// http.ListenAndServe(":1123", loggedRouter)
+func LoggingHandler(out io.Writer, h http.Handler) http.Handler {
+ return loggingHandler{out, h, writeLog}
+}
+
+// CustomLoggingHandler provides a way to supply a custom log formatter
+// while taking advantage of the mechanisms in this package.
+func CustomLoggingHandler(out io.Writer, h http.Handler, f LogFormatter) http.Handler {
+ return loggingHandler{out, h, f}
+}
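+
+// A minimal sketch of a custom formatter (the output format and r are
+// illustrative):
+//
+//	logFmt := func(w io.Writer, p handlers.LogFormatterParams) {
+//		fmt.Fprintf(w, "%d %s %s\n", p.StatusCode, p.Request.Method, p.URL.Path)
+//	}
+//	http.ListenAndServe(":8000", handlers.CustomLoggingHandler(os.Stdout, r, logFmt))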
diff --git a/vendor/github.com/gorilla/handlers/proxy_headers.go b/vendor/github.com/gorilla/handlers/proxy_headers.go
new file mode 100644
index 0000000000000..281d753e95a28
--- /dev/null
+++ b/vendor/github.com/gorilla/handlers/proxy_headers.go
@@ -0,0 +1,120 @@
+package handlers
+
+import (
+ "net/http"
+ "regexp"
+ "strings"
+)
+
+var (
+ // De-facto standard header keys.
+ xForwardedFor = http.CanonicalHeaderKey("X-Forwarded-For")
+ xForwardedHost = http.CanonicalHeaderKey("X-Forwarded-Host")
+ xForwardedProto = http.CanonicalHeaderKey("X-Forwarded-Proto")
+ xForwardedScheme = http.CanonicalHeaderKey("X-Forwarded-Scheme")
+ xRealIP = http.CanonicalHeaderKey("X-Real-IP")
+)
+
+var (
+ // RFC7239 defines a new "Forwarded: " header designed to replace the
+ // existing use of X-Forwarded-* headers.
+ // e.g. Forwarded: for=192.0.2.60;proto=https;by=203.0.113.43.
+ forwarded = http.CanonicalHeaderKey("Forwarded")
+ // Allows for a sub-match of the first value after 'for=' to the next
+ // comma, semi-colon or space. The match is case-insensitive.
+ forRegex = regexp.MustCompile(`(?i)(?:for=)([^(;|,| )]+)`)
+ // Allows for a sub-match for the first instance of scheme (http|https)
+ // prefixed by 'proto='. The match is case-insensitive.
+ protoRegex = regexp.MustCompile(`(?i)(?:proto=)(https|http)`)
+)
+
+// ProxyHeaders inspects common reverse proxy headers and sets the corresponding
+// fields in the HTTP request struct. These are X-Forwarded-For and X-Real-IP
+// for the remote (client) IP address, X-Forwarded-Proto or X-Forwarded-Scheme
+// for the scheme (http|https), X-Forwarded-Host for the host and the RFC7239
+// Forwarded header, which may include both client IPs and schemes.
+//
+// NOTE: This middleware should only be used when behind a reverse
+// proxy like nginx, HAProxy or Apache. Reverse proxies that don't (or are
+// configured not to) strip these headers from client requests, or where these
+// headers are accepted "as is" from a remote client (e.g. when Go is not behind
+// a proxy), can manifest as a vulnerability if your application uses these
+// headers for validating the 'trustworthiness' of a request.
+func ProxyHeaders(h http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ // Set the remote IP with the value passed from the proxy.
+ if fwd := getIP(r); fwd != "" {
+ r.RemoteAddr = fwd
+ }
+
+ // Set the scheme (proto) with the value passed from the proxy.
+ if scheme := getScheme(r); scheme != "" {
+ r.URL.Scheme = scheme
+ }
+ // Set the host with the value passed by the proxy
+ if r.Header.Get(xForwardedHost) != "" {
+ r.Host = r.Header.Get(xForwardedHost)
+ }
+ // Call the next handler in the chain.
+ h.ServeHTTP(w, r)
+ }
+
+ return http.HandlerFunc(fn)
+}
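+
+// A short usage sketch (r is a hypothetical router); per the NOTE above, only
+// do this when a trusted reverse proxy sits in front of the server:
+//
+//	http.ListenAndServe(":8000", handlers.ProxyHeaders(r))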
+
+// getIP retrieves the IP from the X-Forwarded-For, X-Real-IP and RFC7239
+// Forwarded headers (in that order).
+func getIP(r *http.Request) string {
+ var addr string
+
+ switch {
+ case r.Header.Get(xForwardedFor) != "":
+ fwd := r.Header.Get(xForwardedFor)
+ // Only grab the first (client) address. Note that '192.168.0.1,
+ // 10.1.1.1' is a valid key for X-Forwarded-For where addresses after
+ // the first may represent forwarding proxies earlier in the chain.
+ s := strings.Index(fwd, ", ")
+ if s == -1 {
+ s = len(fwd)
+ }
+ addr = fwd[:s]
+ case r.Header.Get(xRealIP) != "":
+ addr = r.Header.Get(xRealIP)
+ case r.Header.Get(forwarded) != "":
+		// match should contain at least two elements if a 'for=' directive was
+		// present in the Forwarded header. The first element is the full match,
+		// which we ignore. In the case of multiple IP
+ // addresses (for=8.8.8.8, 8.8.4.4,172.16.1.20 is valid) we only
+ // extract the first, which should be the client IP.
+ if match := forRegex.FindStringSubmatch(r.Header.Get(forwarded)); len(match) > 1 {
+ // IPv6 addresses in Forwarded headers are quoted-strings. We strip
+ // these quotes.
+ addr = strings.Trim(match[1], `"`)
+ }
+ }
+
+ return addr
+}
+
+// getScheme retrieves the scheme from the X-Forwarded-Proto and RFC7239
+// Forwarded headers (in that order).
+func getScheme(r *http.Request) string {
+ var scheme string
+
+ // Retrieve the scheme from X-Forwarded-Proto.
+ if proto := r.Header.Get(xForwardedProto); proto != "" {
+ scheme = strings.ToLower(proto)
+ } else if proto = r.Header.Get(xForwardedScheme); proto != "" {
+ scheme = strings.ToLower(proto)
+ } else if proto = r.Header.Get(forwarded); proto != "" {
+		// match should contain at least two elements if the protocol was
+		// specified in the Forwarded header. The first element is the full
+		// match, which we ignore. In the case of multiple proto
+ // parameters (invalid) we only extract the first.
+ if match := protoRegex.FindStringSubmatch(proto); len(match) > 1 {
+ scheme = strings.ToLower(match[1])
+ }
+ }
+
+ return scheme
+}
diff --git a/vendor/github.com/gorilla/handlers/recovery.go b/vendor/github.com/gorilla/handlers/recovery.go
new file mode 100644
index 0000000000000..0d4f955ecbda0
--- /dev/null
+++ b/vendor/github.com/gorilla/handlers/recovery.go
@@ -0,0 +1,98 @@
+package handlers
+
+import (
+ "log"
+ "net/http"
+ "runtime/debug"
+)
+
+// RecoveryHandlerLogger is an interface used by the recovering handler to print logs.
+type RecoveryHandlerLogger interface {
+ Println(...interface{})
+}
+
+type recoveryHandler struct {
+ handler http.Handler
+ logger RecoveryHandlerLogger
+ printStack bool
+}
+
+// RecoveryOption provides a functional approach to defining configuration
+// for a handler, such as setting the logger or whether to print stack
+// traces on panic.
+type RecoveryOption func(http.Handler)
+
+func parseRecoveryOptions(h http.Handler, opts ...RecoveryOption) http.Handler {
+ for _, option := range opts {
+ option(h)
+ }
+
+ return h
+}
+
+// RecoveryHandler is HTTP middleware that recovers from a panic,
+// logs the panic, writes http.StatusInternalServerError, and
+// continues to the next handler.
+//
+// Example:
+//
+// r := mux.NewRouter()
+// r.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+// panic("Unexpected error!")
+// })
+//
+// http.ListenAndServe(":1123", handlers.RecoveryHandler()(r))
+func RecoveryHandler(opts ...RecoveryOption) func(h http.Handler) http.Handler {
+ return func(h http.Handler) http.Handler {
+ r := &recoveryHandler{handler: h}
+ return parseRecoveryOptions(r, opts...)
+ }
+}
+
+// RecoveryLogger is a functional option to override
+// the default logger.
+func RecoveryLogger(logger RecoveryHandlerLogger) RecoveryOption {
+ return func(h http.Handler) {
+ r := h.(*recoveryHandler) //nolint:errcheck //TODO:
+ // @bharat-rajani should return type-assertion error but would break the API?
+ r.logger = logger
+ }
+}
+
+// PrintRecoveryStack is a functional option to enable
+// or disable printing stack traces on panic.
+func PrintRecoveryStack(shouldPrint bool) RecoveryOption {
+ return func(h http.Handler) {
+ r := h.(*recoveryHandler) //nolint:errcheck //TODO:
+ // @bharat-rajani should return type-assertion error but would break the API?
+ r.printStack = shouldPrint
+ }
+}
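+
+// A sketch combining the two options above (the logger destination, prefix,
+// and router r are illustrative):
+//
+//	recoverer := handlers.RecoveryHandler(
+//		handlers.RecoveryLogger(log.New(os.Stderr, "panic: ", log.LstdFlags)),
+//		handlers.PrintRecoveryStack(true),
+//	)
+//	http.ListenAndServe(":1123", recoverer(r))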
+
+func (h recoveryHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ defer func() {
+ if err := recover(); err != nil {
+ w.WriteHeader(http.StatusInternalServerError)
+ h.log(err)
+ }
+ }()
+
+ h.handler.ServeHTTP(w, req)
+}
+
+func (h recoveryHandler) log(v ...interface{}) {
+ if h.logger != nil {
+ h.logger.Println(v...)
+ } else {
+ log.Println(v...)
+ }
+
+ if h.printStack {
+ stack := string(debug.Stack())
+ if h.logger != nil {
+ h.logger.Println(stack)
+ } else {
+ log.Println(stack)
+ }
+ }
+}
diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml
index a22953805c633..4528059ca6815 100644
--- a/vendor/github.com/klauspost/compress/.goreleaser.yml
+++ b/vendor/github.com/klauspost/compress/.goreleaser.yml
@@ -1,5 +1,5 @@
-# This is an example goreleaser.yaml file with some sane defaults.
-# Make sure to check the documentation at http://goreleaser.com
+version: 2
+
before:
hooks:
- ./gen.sh
@@ -99,7 +99,7 @@ archives:
checksum:
name_template: 'checksums.txt'
snapshot:
- name_template: "{{ .Tag }}-next"
+ version_template: "{{ .Tag }}-next"
changelog:
sort: asc
filters:
diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md
index 684a30853ab64..de264c85a5ad4 100644
--- a/vendor/github.com/klauspost/compress/README.md
+++ b/vendor/github.com/klauspost/compress/README.md
@@ -16,6 +16,13 @@ This package provides various compression algorithms.
# changelog
+* Sep 23rd, 2024 - [1.17.10](https://github.com/klauspost/compress/releases/tag/v1.17.10)
+ * gzhttp: Add TransportAlwaysDecompress option. https://github.com/klauspost/compress/pull/978
+  * gzhttp: Add support for decompressing request bodies by @mirecl in https://github.com/klauspost/compress/pull/1002
+ * s2: Add EncodeBuffer buffer recycling callback https://github.com/klauspost/compress/pull/982
+ * zstd: Improve memory usage on small streaming encodes https://github.com/klauspost/compress/pull/1007
+ * flate: read data written with partial flush by @vajexal in https://github.com/klauspost/compress/pull/996
+
* Jun 12th, 2024 - [1.17.9](https://github.com/klauspost/compress/releases/tag/v1.17.9)
* s2: Reduce ReadFrom temporary allocations https://github.com/klauspost/compress/pull/949
* flate, zstd: Shave some bytes off amd64 matchLen by @greatroar in https://github.com/klauspost/compress/pull/963
diff --git a/vendor/github.com/klauspost/compress/s2/encode.go b/vendor/github.com/klauspost/compress/s2/encode.go
index 0c9088adfee0c..20b802270a717 100644
--- a/vendor/github.com/klauspost/compress/s2/encode.go
+++ b/vendor/github.com/klauspost/compress/s2/encode.go
@@ -9,6 +9,9 @@ import (
"encoding/binary"
"math"
"math/bits"
+ "sync"
+
+ "github.com/klauspost/compress/internal/race"
)
// Encode returns the encoded form of src. The returned slice may be a sub-
@@ -52,6 +55,8 @@ func Encode(dst, src []byte) []byte {
return dst[:d]
}
+var estblockPool [2]sync.Pool
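+
+// estblockPool recycles the fixed-size scratch tables used below. Each call
+// site follows the same idiom (a sketch; the size shown is illustrative):
+// fetch a pointer from the pool, allocate on a miss, and return it when done.
+//
+//	tmp, ok := estblockPool[0].Get().(*[2048]byte)
+//	if !ok {
+//		tmp = &[2048]byte{}
+//	}
+//	defer estblockPool[0].Put(tmp)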
+
// EstimateBlockSize will perform a very fast compression
// without outputting the result and return the compressed output size.
// The function returns -1 if no improvement could be achieved.
@@ -61,9 +66,25 @@ func EstimateBlockSize(src []byte) (d int) {
return -1
}
if len(src) <= 1024 {
- d = calcBlockSizeSmall(src)
+ const sz, pool = 2048, 0
+ tmp, ok := estblockPool[pool].Get().(*[sz]byte)
+ if !ok {
+ tmp = &[sz]byte{}
+ }
+ race.WriteSlice(tmp[:])
+ defer estblockPool[pool].Put(tmp)
+
+ d = calcBlockSizeSmall(src, tmp)
} else {
- d = calcBlockSize(src)
+ const sz, pool = 32768, 1
+ tmp, ok := estblockPool[pool].Get().(*[sz]byte)
+ if !ok {
+ tmp = &[sz]byte{}
+ }
+ race.WriteSlice(tmp[:])
+ defer estblockPool[pool].Put(tmp)
+
+ d = calcBlockSize(src, tmp)
}
if d == 0 {
diff --git a/vendor/github.com/klauspost/compress/s2/encode_amd64.go b/vendor/github.com/klauspost/compress/s2/encode_amd64.go
index 4f45206a4ef49..7aadd255fe3b9 100644
--- a/vendor/github.com/klauspost/compress/s2/encode_amd64.go
+++ b/vendor/github.com/klauspost/compress/s2/encode_amd64.go
@@ -3,10 +3,16 @@
package s2
-import "github.com/klauspost/compress/internal/race"
+import (
+ "sync"
+
+ "github.com/klauspost/compress/internal/race"
+)
const hasAmd64Asm = true
+var encPools [4]sync.Pool
+
// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
@@ -29,23 +35,60 @@ func encodeBlock(dst, src []byte) (d int) {
)
if len(src) >= 4<<20 {
- return encodeBlockAsm(dst, src)
+ const sz, pool = 65536, 0
+ tmp, ok := encPools[pool].Get().(*[sz]byte)
+ if !ok {
+ tmp = &[sz]byte{}
+ }
+ race.WriteSlice(tmp[:])
+ defer encPools[pool].Put(tmp)
+ return encodeBlockAsm(dst, src, tmp)
}
if len(src) >= limit12B {
- return encodeBlockAsm4MB(dst, src)
+ const sz, pool = 65536, 0
+ tmp, ok := encPools[pool].Get().(*[sz]byte)
+ if !ok {
+ tmp = &[sz]byte{}
+ }
+ race.WriteSlice(tmp[:])
+ defer encPools[pool].Put(tmp)
+ return encodeBlockAsm4MB(dst, src, tmp)
}
if len(src) >= limit10B {
- return encodeBlockAsm12B(dst, src)
+ const sz, pool = 16384, 1
+ tmp, ok := encPools[pool].Get().(*[sz]byte)
+ if !ok {
+ tmp = &[sz]byte{}
+ }
+ race.WriteSlice(tmp[:])
+ defer encPools[pool].Put(tmp)
+ return encodeBlockAsm12B(dst, src, tmp)
}
if len(src) >= limit8B {
- return encodeBlockAsm10B(dst, src)
+ const sz, pool = 4096, 2
+ tmp, ok := encPools[pool].Get().(*[sz]byte)
+ if !ok {
+ tmp = &[sz]byte{}
+ }
+ race.WriteSlice(tmp[:])
+ defer encPools[pool].Put(tmp)
+ return encodeBlockAsm10B(dst, src, tmp)
}
if len(src) < minNonLiteralBlockSize {
return 0
}
- return encodeBlockAsm8B(dst, src)
+ const sz, pool = 1024, 3
+ tmp, ok := encPools[pool].Get().(*[sz]byte)
+ if !ok {
+ tmp = &[sz]byte{}
+ }
+ race.WriteSlice(tmp[:])
+ defer encPools[pool].Put(tmp)
+ return encodeBlockAsm8B(dst, src, tmp)
}
+var encBetterPools [5]sync.Pool
+
// encodeBlockBetter encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
@@ -68,21 +111,59 @@ func encodeBlockBetter(dst, src []byte) (d int) {
)
if len(src) > 4<<20 {
- return encodeBetterBlockAsm(dst, src)
+ const sz, pool = 589824, 0
+ tmp, ok := encBetterPools[pool].Get().(*[sz]byte)
+ if !ok {
+ tmp = &[sz]byte{}
+ }
+ race.WriteSlice(tmp[:])
+ defer encBetterPools[pool].Put(tmp)
+ return encodeBetterBlockAsm(dst, src, tmp)
}
if len(src) >= limit12B {
- return encodeBetterBlockAsm4MB(dst, src)
+ const sz, pool = 589824, 0
+ tmp, ok := encBetterPools[pool].Get().(*[sz]byte)
+ if !ok {
+ tmp = &[sz]byte{}
+ }
+ race.WriteSlice(tmp[:])
+ defer encBetterPools[pool].Put(tmp)
+
+ return encodeBetterBlockAsm4MB(dst, src, tmp)
}
if len(src) >= limit10B {
- return encodeBetterBlockAsm12B(dst, src)
+ const sz, pool = 81920, 0
+ tmp, ok := encBetterPools[pool].Get().(*[sz]byte)
+ if !ok {
+ tmp = &[sz]byte{}
+ }
+ race.WriteSlice(tmp[:])
+ defer encBetterPools[pool].Put(tmp)
+
+ return encodeBetterBlockAsm12B(dst, src, tmp)
}
if len(src) >= limit8B {
- return encodeBetterBlockAsm10B(dst, src)
+ const sz, pool = 20480, 1
+ tmp, ok := encBetterPools[pool].Get().(*[sz]byte)
+ if !ok {
+ tmp = &[sz]byte{}
+ }
+ race.WriteSlice(tmp[:])
+ defer encBetterPools[pool].Put(tmp)
+ return encodeBetterBlockAsm10B(dst, src, tmp)
}
if len(src) < minNonLiteralBlockSize {
return 0
}
- return encodeBetterBlockAsm8B(dst, src)
+
+ const sz, pool = 5120, 2
+ tmp, ok := encBetterPools[pool].Get().(*[sz]byte)
+ if !ok {
+ tmp = &[sz]byte{}
+ }
+ race.WriteSlice(tmp[:])
+ defer encBetterPools[pool].Put(tmp)
+ return encodeBetterBlockAsm8B(dst, src, tmp)
}
// encodeBlockSnappy encodes a non-empty src to a guaranteed-large-enough dst. It
@@ -105,22 +186,57 @@ func encodeBlockSnappy(dst, src []byte) (d int) {
// Use 8 bit table when less than...
limit8B = 512
)
- if len(src) >= 64<<10 {
- return encodeSnappyBlockAsm(dst, src)
+ if len(src) > 65536 {
+ const sz, pool = 65536, 0
+ tmp, ok := encPools[pool].Get().(*[sz]byte)
+ if !ok {
+ tmp = &[sz]byte{}
+ }
+ race.WriteSlice(tmp[:])
+ defer encPools[pool].Put(tmp)
+ return encodeSnappyBlockAsm(dst, src, tmp)
}
if len(src) >= limit12B {
- return encodeSnappyBlockAsm64K(dst, src)
+ const sz, pool = 65536, 0
+ tmp, ok := encPools[pool].Get().(*[sz]byte)
+ if !ok {
+ tmp = &[sz]byte{}
+ }
+ race.WriteSlice(tmp[:])
+ defer encPools[pool].Put(tmp)
+ return encodeSnappyBlockAsm64K(dst, src, tmp)
}
if len(src) >= limit10B {
- return encodeSnappyBlockAsm12B(dst, src)
+ const sz, pool = 16384, 1
+ tmp, ok := encPools[pool].Get().(*[sz]byte)
+ if !ok {
+ tmp = &[sz]byte{}
+ }
+ race.WriteSlice(tmp[:])
+ defer encPools[pool].Put(tmp)
+ return encodeSnappyBlockAsm12B(dst, src, tmp)
}
if len(src) >= limit8B {
- return encodeSnappyBlockAsm10B(dst, src)
+ const sz, pool = 4096, 2
+ tmp, ok := encPools[pool].Get().(*[sz]byte)
+ if !ok {
+ tmp = &[sz]byte{}
+ }
+ race.WriteSlice(tmp[:])
+ defer encPools[pool].Put(tmp)
+ return encodeSnappyBlockAsm10B(dst, src, tmp)
}
if len(src) < minNonLiteralBlockSize {
return 0
}
- return encodeSnappyBlockAsm8B(dst, src)
+ const sz, pool = 1024, 3
+ tmp, ok := encPools[pool].Get().(*[sz]byte)
+ if !ok {
+ tmp = &[sz]byte{}
+ }
+ race.WriteSlice(tmp[:])
+ defer encPools[pool].Put(tmp)
+ return encodeSnappyBlockAsm8B(dst, src, tmp)
}
// encodeBlockSnappy encodes a non-empty src to a guaranteed-large-enough dst. It
@@ -143,20 +259,59 @@ func encodeBlockBetterSnappy(dst, src []byte) (d int) {
// Use 8 bit table when less than...
limit8B = 512
)
- if len(src) >= 64<<10 {
- return encodeSnappyBetterBlockAsm(dst, src)
+ if len(src) > 65536 {
+ const sz, pool = 589824, 0
+ tmp, ok := encBetterPools[pool].Get().(*[sz]byte)
+ if !ok {
+ tmp = &[sz]byte{}
+ }
+ race.WriteSlice(tmp[:])
+ defer encBetterPools[pool].Put(tmp)
+ return encodeSnappyBetterBlockAsm(dst, src, tmp)
}
+
if len(src) >= limit12B {
- return encodeSnappyBetterBlockAsm64K(dst, src)
+ const sz, pool = 294912, 4
+ tmp, ok := encBetterPools[pool].Get().(*[sz]byte)
+ if !ok {
+ tmp = &[sz]byte{}
+ }
+ race.WriteSlice(tmp[:])
+ defer encBetterPools[pool].Put(tmp)
+
+ return encodeSnappyBetterBlockAsm64K(dst, src, tmp)
}
if len(src) >= limit10B {
- return encodeSnappyBetterBlockAsm12B(dst, src)
+ const sz, pool = 81920, 0
+ tmp, ok := encBetterPools[pool].Get().(*[sz]byte)
+ if !ok {
+ tmp = &[sz]byte{}
+ }
+ race.WriteSlice(tmp[:])
+ defer encBetterPools[pool].Put(tmp)
+
+ return encodeSnappyBetterBlockAsm12B(dst, src, tmp)
}
if len(src) >= limit8B {
- return encodeSnappyBetterBlockAsm10B(dst, src)
+ const sz, pool = 20480, 1
+ tmp, ok := encBetterPools[pool].Get().(*[sz]byte)
+ if !ok {
+ tmp = &[sz]byte{}
+ }
+ race.WriteSlice(tmp[:])
+ defer encBetterPools[pool].Put(tmp)
+ return encodeSnappyBetterBlockAsm10B(dst, src, tmp)
}
if len(src) < minNonLiteralBlockSize {
return 0
}
- return encodeSnappyBetterBlockAsm8B(dst, src)
+
+ const sz, pool = 5120, 2
+ tmp, ok := encBetterPools[pool].Get().(*[sz]byte)
+ if !ok {
+ tmp = &[sz]byte{}
+ }
+ race.WriteSlice(tmp[:])
+ defer encBetterPools[pool].Put(tmp)
+ return encodeSnappyBetterBlockAsm8B(dst, src, tmp)
}
diff --git a/vendor/github.com/klauspost/compress/s2/encode_go.go b/vendor/github.com/klauspost/compress/s2/encode_go.go
index 6b393c34d376c..dd1c973ca51bf 100644
--- a/vendor/github.com/klauspost/compress/s2/encode_go.go
+++ b/vendor/github.com/klauspost/compress/s2/encode_go.go
@@ -317,7 +317,7 @@ func matchLen(a []byte, b []byte) int {
}
// input must be > inputMargin
-func calcBlockSize(src []byte) (d int) {
+func calcBlockSize(src []byte, _ *[32768]byte) (d int) {
// Initialize the hash table.
const (
tableBits = 13
@@ -503,7 +503,7 @@ emitRemainder:
}
// length must be > inputMargin.
-func calcBlockSizeSmall(src []byte) (d int) {
+func calcBlockSizeSmall(src []byte, _ *[2048]byte) (d int) {
// Initialize the hash table.
const (
tableBits = 9
diff --git a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go
index 297e41501ba76..f43aa8154355a 100644
--- a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go
+++ b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go
@@ -11,154 +11,154 @@ func _dummy_()
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
-func encodeBlockAsm(dst []byte, src []byte) int
+func encodeBlockAsm(dst []byte, src []byte, tmp *[65536]byte) int
// encodeBlockAsm4MB encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 4194304 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
-func encodeBlockAsm4MB(dst []byte, src []byte) int
+func encodeBlockAsm4MB(dst []byte, src []byte, tmp *[65536]byte) int
// encodeBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 16383 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
-func encodeBlockAsm12B(dst []byte, src []byte) int
+func encodeBlockAsm12B(dst []byte, src []byte, tmp *[16384]byte) int
// encodeBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 4095 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
-func encodeBlockAsm10B(dst []byte, src []byte) int
+func encodeBlockAsm10B(dst []byte, src []byte, tmp *[4096]byte) int
// encodeBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 511 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
-func encodeBlockAsm8B(dst []byte, src []byte) int
+func encodeBlockAsm8B(dst []byte, src []byte, tmp *[1024]byte) int
// encodeBetterBlockAsm encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 4294967295 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
-func encodeBetterBlockAsm(dst []byte, src []byte) int
+func encodeBetterBlockAsm(dst []byte, src []byte, tmp *[589824]byte) int
// encodeBetterBlockAsm4MB encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 4194304 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
-func encodeBetterBlockAsm4MB(dst []byte, src []byte) int
+func encodeBetterBlockAsm4MB(dst []byte, src []byte, tmp *[589824]byte) int
// encodeBetterBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 16383 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
-func encodeBetterBlockAsm12B(dst []byte, src []byte) int
+func encodeBetterBlockAsm12B(dst []byte, src []byte, tmp *[81920]byte) int
// encodeBetterBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 4095 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
-func encodeBetterBlockAsm10B(dst []byte, src []byte) int
+func encodeBetterBlockAsm10B(dst []byte, src []byte, tmp *[20480]byte) int
// encodeBetterBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 511 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
-func encodeBetterBlockAsm8B(dst []byte, src []byte) int
+func encodeBetterBlockAsm8B(dst []byte, src []byte, tmp *[5120]byte) int
// encodeSnappyBlockAsm encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 4294967295 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
-func encodeSnappyBlockAsm(dst []byte, src []byte) int
+func encodeSnappyBlockAsm(dst []byte, src []byte, tmp *[65536]byte) int
// encodeSnappyBlockAsm64K encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 65535 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
-func encodeSnappyBlockAsm64K(dst []byte, src []byte) int
+func encodeSnappyBlockAsm64K(dst []byte, src []byte, tmp *[65536]byte) int
// encodeSnappyBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 16383 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
-func encodeSnappyBlockAsm12B(dst []byte, src []byte) int
+func encodeSnappyBlockAsm12B(dst []byte, src []byte, tmp *[16384]byte) int
// encodeSnappyBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 4095 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
-func encodeSnappyBlockAsm10B(dst []byte, src []byte) int
+func encodeSnappyBlockAsm10B(dst []byte, src []byte, tmp *[4096]byte) int
// encodeSnappyBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 511 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
-func encodeSnappyBlockAsm8B(dst []byte, src []byte) int
+func encodeSnappyBlockAsm8B(dst []byte, src []byte, tmp *[1024]byte) int
// encodeSnappyBetterBlockAsm encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 4294967295 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
-func encodeSnappyBetterBlockAsm(dst []byte, src []byte) int
+func encodeSnappyBetterBlockAsm(dst []byte, src []byte, tmp *[589824]byte) int
// encodeSnappyBetterBlockAsm64K encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 65535 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
-func encodeSnappyBetterBlockAsm64K(dst []byte, src []byte) int
+func encodeSnappyBetterBlockAsm64K(dst []byte, src []byte, tmp *[294912]byte) int
// encodeSnappyBetterBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 16383 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
-func encodeSnappyBetterBlockAsm12B(dst []byte, src []byte) int
+func encodeSnappyBetterBlockAsm12B(dst []byte, src []byte, tmp *[81920]byte) int
// encodeSnappyBetterBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 4095 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
-func encodeSnappyBetterBlockAsm10B(dst []byte, src []byte) int
+func encodeSnappyBetterBlockAsm10B(dst []byte, src []byte, tmp *[20480]byte) int
// encodeSnappyBetterBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 511 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
-func encodeSnappyBetterBlockAsm8B(dst []byte, src []byte) int
+func encodeSnappyBetterBlockAsm8B(dst []byte, src []byte, tmp *[5120]byte) int
// calcBlockSize encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 4294967295 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
-func calcBlockSize(src []byte) int
+func calcBlockSize(src []byte, tmp *[32768]byte) int
// calcBlockSizeSmall encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 1024 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
//
//go:noescape
-func calcBlockSizeSmall(src []byte) int
+func calcBlockSizeSmall(src []byte, tmp *[2048]byte) int
// emitLiteral writes a literal chunk and returns the number of bytes written.
//
diff --git a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s
index 2ff5b334017aa..df9be687be7a8 100644
--- a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s
+++ b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s
@@ -13,1270 +13,1271 @@ TEXT ·_dummy_(SB), $0
#endif
RET
-// func encodeBlockAsm(dst []byte, src []byte) int
+// func encodeBlockAsm(dst []byte, src []byte, tmp *[65536]byte) int
// Requires: BMI, SSE2
-TEXT ·encodeBlockAsm(SB), $65560-56
- MOVQ dst_base+0(FP), AX
- MOVQ $0x00000200, CX
- LEAQ 24(SP), DX
+TEXT ·encodeBlockAsm(SB), $24-64
+ MOVQ tmp+48(FP), AX
+ MOVQ dst_base+0(FP), CX
+ MOVQ $0x00000200, DX
+ MOVQ AX, BX
PXOR X0, X0
zero_loop_encodeBlockAsm:
- MOVOU X0, (DX)
- MOVOU X0, 16(DX)
- MOVOU X0, 32(DX)
- MOVOU X0, 48(DX)
- MOVOU X0, 64(DX)
- MOVOU X0, 80(DX)
- MOVOU X0, 96(DX)
- MOVOU X0, 112(DX)
- ADDQ $0x80, DX
- DECQ CX
+ MOVOU X0, (BX)
+ MOVOU X0, 16(BX)
+ MOVOU X0, 32(BX)
+ MOVOU X0, 48(BX)
+ MOVOU X0, 64(BX)
+ MOVOU X0, 80(BX)
+ MOVOU X0, 96(BX)
+ MOVOU X0, 112(BX)
+ ADDQ $0x80, BX
+ DECQ DX
JNZ zero_loop_encodeBlockAsm
MOVL $0x00000000, 12(SP)
- MOVQ src_len+32(FP), CX
- LEAQ -9(CX), DX
- LEAQ -8(CX), BX
- MOVL BX, 8(SP)
- SHRQ $0x05, CX
- SUBL CX, DX
- LEAQ (AX)(DX*1), DX
- MOVQ DX, (SP)
- MOVL $0x00000001, CX
- MOVL CX, 16(SP)
- MOVQ src_base+24(FP), DX
+ MOVQ src_len+32(FP), DX
+ LEAQ -9(DX), BX
+ LEAQ -8(DX), SI
+ MOVL SI, 8(SP)
+ SHRQ $0x05, DX
+ SUBL DX, BX
+ LEAQ (CX)(BX*1), BX
+ MOVQ BX, (SP)
+ MOVL $0x00000001, DX
+ MOVL DX, 16(SP)
+ MOVQ src_base+24(FP), BX
search_loop_encodeBlockAsm:
- MOVL CX, BX
- SUBL 12(SP), BX
- SHRL $0x06, BX
- LEAL 4(CX)(BX*1), BX
- CMPL BX, 8(SP)
+ MOVL DX, SI
+ SUBL 12(SP), SI
+ SHRL $0x06, SI
+ LEAL 4(DX)(SI*1), SI
+ CMPL SI, 8(SP)
JAE emit_remainder_encodeBlockAsm
- MOVQ (DX)(CX*1), SI
- MOVL BX, 20(SP)
- MOVQ $0x0000cf1bbcdcbf9b, R8
- MOVQ SI, R9
- MOVQ SI, R10
- SHRQ $0x08, R10
- SHLQ $0x10, R9
- IMULQ R8, R9
- SHRQ $0x32, R9
+ MOVQ (BX)(DX*1), DI
+ MOVL SI, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R9
+ MOVQ DI, R10
+ MOVQ DI, R11
+ SHRQ $0x08, R11
SHLQ $0x10, R10
- IMULQ R8, R10
+ IMULQ R9, R10
SHRQ $0x32, R10
- MOVL 24(SP)(R9*4), BX
- MOVL 24(SP)(R10*4), DI
- MOVL CX, 24(SP)(R9*4)
- LEAL 1(CX), R9
- MOVL R9, 24(SP)(R10*4)
- MOVQ SI, R9
- SHRQ $0x10, R9
- SHLQ $0x10, R9
- IMULQ R8, R9
- SHRQ $0x32, R9
- MOVL CX, R8
- SUBL 16(SP), R8
- MOVL 1(DX)(R8*1), R10
- MOVQ SI, R8
- SHRQ $0x08, R8
- CMPL R8, R10
+ SHLQ $0x10, R11
+ IMULQ R9, R11
+ SHRQ $0x32, R11
+ MOVL (AX)(R10*4), SI
+ MOVL (AX)(R11*4), R8
+ MOVL DX, (AX)(R10*4)
+ LEAL 1(DX), R10
+ MOVL R10, (AX)(R11*4)
+ MOVQ DI, R10
+ SHRQ $0x10, R10
+ SHLQ $0x10, R10
+ IMULQ R9, R10
+ SHRQ $0x32, R10
+ MOVL DX, R9
+ SUBL 16(SP), R9
+ MOVL 1(BX)(R9*1), R11
+ MOVQ DI, R9
+ SHRQ $0x08, R9
+ CMPL R9, R11
JNE no_repeat_found_encodeBlockAsm
- LEAL 1(CX), SI
- MOVL 12(SP), DI
- MOVL SI, BX
- SUBL 16(SP), BX
+ LEAL 1(DX), DI
+ MOVL 12(SP), R8
+ MOVL DI, SI
+ SUBL 16(SP), SI
JZ repeat_extend_back_end_encodeBlockAsm
repeat_extend_back_loop_encodeBlockAsm:
- CMPL SI, DI
+ CMPL DI, R8
JBE repeat_extend_back_end_encodeBlockAsm
- MOVB -1(DX)(BX*1), R8
- MOVB -1(DX)(SI*1), R9
- CMPB R8, R9
+ MOVB -1(BX)(SI*1), R9
+ MOVB -1(BX)(DI*1), R10
+ CMPB R9, R10
JNE repeat_extend_back_end_encodeBlockAsm
- LEAL -1(SI), SI
- DECL BX
+ LEAL -1(DI), DI
+ DECL SI
JNZ repeat_extend_back_loop_encodeBlockAsm
repeat_extend_back_end_encodeBlockAsm:
- MOVL SI, BX
- SUBL 12(SP), BX
- LEAQ 5(AX)(BX*1), BX
- CMPQ BX, (SP)
+ MOVL DI, SI
+ SUBL 12(SP), SI
+ LEAQ 5(CX)(SI*1), SI
+ CMPQ SI, (SP)
JB repeat_dst_size_check_encodeBlockAsm
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
repeat_dst_size_check_encodeBlockAsm:
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_repeat_emit_encodeBlockAsm
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R9
- SUBL BX, R8
- LEAL -1(R8), BX
- CMPL BX, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R10
+ SUBL SI, R9
+ LEAL -1(R9), SI
+ CMPL SI, $0x3c
JB one_byte_repeat_emit_encodeBlockAsm
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_repeat_emit_encodeBlockAsm
- CMPL BX, $0x00010000
+ CMPL SI, $0x00010000
JB three_bytes_repeat_emit_encodeBlockAsm
- CMPL BX, $0x01000000
+ CMPL SI, $0x01000000
JB four_bytes_repeat_emit_encodeBlockAsm
- MOVB $0xfc, (AX)
- MOVL BX, 1(AX)
- ADDQ $0x05, AX
+ MOVB $0xfc, (CX)
+ MOVL SI, 1(CX)
+ ADDQ $0x05, CX
JMP memmove_long_repeat_emit_encodeBlockAsm
four_bytes_repeat_emit_encodeBlockAsm:
- MOVL BX, R10
- SHRL $0x10, R10
- MOVB $0xf8, (AX)
- MOVW BX, 1(AX)
- MOVB R10, 3(AX)
- ADDQ $0x04, AX
+ MOVL SI, R11
+ SHRL $0x10, R11
+ MOVB $0xf8, (CX)
+ MOVW SI, 1(CX)
+ MOVB R11, 3(CX)
+ ADDQ $0x04, CX
JMP memmove_long_repeat_emit_encodeBlockAsm
three_bytes_repeat_emit_encodeBlockAsm:
- MOVB $0xf4, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_repeat_emit_encodeBlockAsm
two_bytes_repeat_emit_encodeBlockAsm:
- MOVB $0xf0, (AX)
- MOVB BL, 1(AX)
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_repeat_emit_encodeBlockAsm
JMP memmove_long_repeat_emit_encodeBlockAsm
one_byte_repeat_emit_encodeBlockAsm:
- SHLB $0x02, BL
- MOVB BL, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
memmove_repeat_emit_encodeBlockAsm:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveShort
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JBE emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_17through32
JMP emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_33through64
emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8:
- MOVQ (R9), R10
- MOVQ R10, (AX)
+ MOVQ (R10), R11
+ MOVQ R11, (CX)
JMP memmove_end_copy_repeat_emit_encodeBlockAsm
emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8through16:
- MOVQ (R9), R10
- MOVQ -8(R9)(R8*1), R9
- MOVQ R10, (AX)
- MOVQ R9, -8(AX)(R8*1)
+ MOVQ (R10), R11
+ MOVQ -8(R10)(R9*1), R10
+ MOVQ R11, (CX)
+ MOVQ R10, -8(CX)(R9*1)
JMP memmove_end_copy_repeat_emit_encodeBlockAsm
emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_17through32:
- MOVOU (R9), X0
- MOVOU -16(R9)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU -16(R10)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_repeat_emit_encodeBlockAsm
emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_33through64:
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_repeat_emit_encodeBlockAsm:
- MOVQ BX, AX
+ MOVQ SI, CX
JMP emit_literal_done_repeat_emit_encodeBlockAsm
memmove_long_repeat_emit_encodeBlockAsm:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveLong
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVQ R8, R11
- SHRQ $0x05, R11
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R12
- SUBQ R10, R12
- DECQ R11
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVQ R9, R12
+ SHRQ $0x05, R12
+ MOVQ CX, R11
+ ANDL $0x0000001f, R11
+ MOVQ $0x00000040, R13
+ SUBQ R11, R13
+ DECQ R12
JA emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_forward_sse_loop_32
- LEAQ -32(R9)(R12*1), R10
- LEAQ -32(AX)(R12*1), R13
+ LEAQ -32(R10)(R13*1), R11
+ LEAQ -32(CX)(R13*1), R14
emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R13)
- MOVOA X5, 16(R13)
+ MOVOU (R11), X4
+ MOVOU 16(R11), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
+ ADDQ $0x20, R14
+ ADDQ $0x20, R11
ADDQ $0x20, R13
- ADDQ $0x20, R10
- ADDQ $0x20, R12
- DECQ R11
+ DECQ R12
JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_big_loop_back
emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_forward_sse_loop_32:
- MOVOU -32(R9)(R12*1), X4
- MOVOU -16(R9)(R12*1), X5
- MOVOA X4, -32(AX)(R12*1)
- MOVOA X5, -16(AX)(R12*1)
- ADDQ $0x20, R12
- CMPQ R8, R12
+ MOVOU -32(R10)(R13*1), X4
+ MOVOU -16(R10)(R13*1), X5
+ MOVOA X4, -32(CX)(R13*1)
+ MOVOA X5, -16(CX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R9, R13
JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ BX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ SI, CX
emit_literal_done_repeat_emit_encodeBlockAsm:
- ADDL $0x05, CX
- MOVL CX, BX
- SUBL 16(SP), BX
- MOVQ src_len+32(FP), R8
- SUBL CX, R8
- LEAQ (DX)(CX*1), R9
- LEAQ (DX)(BX*1), BX
+ ADDL $0x05, DX
+ MOVL DX, SI
+ SUBL 16(SP), SI
+ MOVQ src_len+32(FP), R9
+ SUBL DX, R9
+ LEAQ (BX)(DX*1), R10
+ LEAQ (BX)(SI*1), SI
// matchLen
- XORL R11, R11
+ XORL R12, R12
matchlen_loopback_16_repeat_extend_encodeBlockAsm:
- CMPL R8, $0x10
+ CMPL R9, $0x10
JB matchlen_match8_repeat_extend_encodeBlockAsm
- MOVQ (R9)(R11*1), R10
- MOVQ 8(R9)(R11*1), R12
- XORQ (BX)(R11*1), R10
+ MOVQ (R10)(R12*1), R11
+ MOVQ 8(R10)(R12*1), R13
+ XORQ (SI)(R12*1), R11
JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm
- XORQ 8(BX)(R11*1), R12
+ XORQ 8(SI)(R12*1), R13
JNZ matchlen_bsf_16repeat_extend_encodeBlockAsm
- LEAL -16(R8), R8
- LEAL 16(R11), R11
+ LEAL -16(R9), R9
+ LEAL 16(R12), R12
JMP matchlen_loopback_16_repeat_extend_encodeBlockAsm
matchlen_bsf_16repeat_extend_encodeBlockAsm:
#ifdef GOAMD64_v3
- TZCNTQ R12, R12
+ TZCNTQ R13, R13
#else
- BSFQ R12, R12
+ BSFQ R13, R13
#endif
- SARQ $0x03, R12
- LEAL 8(R11)(R12*1), R11
+ SARQ $0x03, R13
+ LEAL 8(R12)(R13*1), R12
JMP repeat_extend_forward_end_encodeBlockAsm
matchlen_match8_repeat_extend_encodeBlockAsm:
- CMPL R8, $0x08
+ CMPL R9, $0x08
JB matchlen_match4_repeat_extend_encodeBlockAsm
- MOVQ (R9)(R11*1), R10
- XORQ (BX)(R11*1), R10
+ MOVQ (R10)(R12*1), R11
+ XORQ (SI)(R12*1), R11
JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm
- LEAL -8(R8), R8
- LEAL 8(R11), R11
+ LEAL -8(R9), R9
+ LEAL 8(R12), R12
JMP matchlen_match4_repeat_extend_encodeBlockAsm
matchlen_bsf_8_repeat_extend_encodeBlockAsm:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL (R11)(R10*1), R11
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
JMP repeat_extend_forward_end_encodeBlockAsm
matchlen_match4_repeat_extend_encodeBlockAsm:
- CMPL R8, $0x04
+ CMPL R9, $0x04
JB matchlen_match2_repeat_extend_encodeBlockAsm
- MOVL (R9)(R11*1), R10
- CMPL (BX)(R11*1), R10
+ MOVL (R10)(R12*1), R11
+ CMPL (SI)(R12*1), R11
JNE matchlen_match2_repeat_extend_encodeBlockAsm
- LEAL -4(R8), R8
- LEAL 4(R11), R11
+ LEAL -4(R9), R9
+ LEAL 4(R12), R12
matchlen_match2_repeat_extend_encodeBlockAsm:
- CMPL R8, $0x01
+ CMPL R9, $0x01
JE matchlen_match1_repeat_extend_encodeBlockAsm
JB repeat_extend_forward_end_encodeBlockAsm
- MOVW (R9)(R11*1), R10
- CMPW (BX)(R11*1), R10
+ MOVW (R10)(R12*1), R11
+ CMPW (SI)(R12*1), R11
JNE matchlen_match1_repeat_extend_encodeBlockAsm
- LEAL 2(R11), R11
- SUBL $0x02, R8
+ LEAL 2(R12), R12
+ SUBL $0x02, R9
JZ repeat_extend_forward_end_encodeBlockAsm
matchlen_match1_repeat_extend_encodeBlockAsm:
- MOVB (R9)(R11*1), R10
- CMPB (BX)(R11*1), R10
+ MOVB (R10)(R12*1), R11
+ CMPB (SI)(R12*1), R11
JNE repeat_extend_forward_end_encodeBlockAsm
- LEAL 1(R11), R11
+ LEAL 1(R12), R12
repeat_extend_forward_end_encodeBlockAsm:
- ADDL R11, CX
- MOVL CX, BX
- SUBL SI, BX
- MOVL 16(SP), SI
- TESTL DI, DI
+ ADDL R12, DX
+ MOVL DX, SI
+ SUBL DI, SI
+ MOVL 16(SP), DI
+ TESTL R8, R8
JZ repeat_as_copy_encodeBlockAsm
// emitRepeat
emit_repeat_again_match_repeat_encodeBlockAsm:
- MOVL BX, DI
- LEAL -4(BX), BX
- CMPL DI, $0x08
+ MOVL SI, R8
+ LEAL -4(SI), SI
+ CMPL R8, $0x08
JBE repeat_two_match_repeat_encodeBlockAsm
- CMPL DI, $0x0c
+ CMPL R8, $0x0c
JAE cant_repeat_two_offset_match_repeat_encodeBlockAsm
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JB repeat_two_offset_match_repeat_encodeBlockAsm
cant_repeat_two_offset_match_repeat_encodeBlockAsm:
- CMPL BX, $0x00000104
+ CMPL SI, $0x00000104
JB repeat_three_match_repeat_encodeBlockAsm
- CMPL BX, $0x00010100
+ CMPL SI, $0x00010100
JB repeat_four_match_repeat_encodeBlockAsm
- CMPL BX, $0x0100ffff
+ CMPL SI, $0x0100ffff
JB repeat_five_match_repeat_encodeBlockAsm
- LEAL -16842747(BX), BX
- MOVL $0xfffb001d, (AX)
- MOVB $0xff, 4(AX)
- ADDQ $0x05, AX
+ LEAL -16842747(SI), SI
+ MOVL $0xfffb001d, (CX)
+ MOVB $0xff, 4(CX)
+ ADDQ $0x05, CX
JMP emit_repeat_again_match_repeat_encodeBlockAsm
repeat_five_match_repeat_encodeBlockAsm:
- LEAL -65536(BX), BX
- MOVL BX, SI
- MOVW $0x001d, (AX)
- MOVW BX, 2(AX)
- SARL $0x10, SI
- MOVB SI, 4(AX)
- ADDQ $0x05, AX
+ LEAL -65536(SI), SI
+ MOVL SI, DI
+ MOVW $0x001d, (CX)
+ MOVW SI, 2(CX)
+ SARL $0x10, DI
+ MOVB DI, 4(CX)
+ ADDQ $0x05, CX
JMP repeat_end_emit_encodeBlockAsm
repeat_four_match_repeat_encodeBlockAsm:
- LEAL -256(BX), BX
- MOVW $0x0019, (AX)
- MOVW BX, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(SI), SI
+ MOVW $0x0019, (CX)
+ MOVW SI, 2(CX)
+ ADDQ $0x04, CX
JMP repeat_end_emit_encodeBlockAsm
repeat_three_match_repeat_encodeBlockAsm:
- LEAL -4(BX), BX
- MOVW $0x0015, (AX)
- MOVB BL, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(SI), SI
+ MOVW $0x0015, (CX)
+ MOVB SI, 2(CX)
+ ADDQ $0x03, CX
JMP repeat_end_emit_encodeBlockAsm
repeat_two_match_repeat_encodeBlockAsm:
- SHLL $0x02, BX
- ORL $0x01, BX
- MOVW BX, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, SI
+ ORL $0x01, SI
+ MOVW SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm
repeat_two_offset_match_repeat_encodeBlockAsm:
- XORQ DI, DI
- LEAL 1(DI)(BX*4), BX
- MOVB SI, 1(AX)
- SARL $0x08, SI
- SHLL $0x05, SI
- ORL SI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ XORQ R8, R8
+ LEAL 1(R8)(SI*4), SI
+ MOVB DI, 1(CX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm
repeat_as_copy_encodeBlockAsm:
// emitCopy
- CMPL SI, $0x00010000
+ CMPL DI, $0x00010000
JB two_byte_offset_repeat_as_copy_encodeBlockAsm
- CMPL BX, $0x40
+ CMPL SI, $0x40
JBE four_bytes_remain_repeat_as_copy_encodeBlockAsm
- MOVB $0xff, (AX)
- MOVL SI, 1(AX)
- LEAL -64(BX), BX
- ADDQ $0x05, AX
- CMPL BX, $0x04
+ MOVB $0xff, (CX)
+ MOVL DI, 1(CX)
+ LEAL -64(SI), SI
+ ADDQ $0x05, CX
+ CMPL SI, $0x04
JB four_bytes_remain_repeat_as_copy_encodeBlockAsm
// emitRepeat
emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy:
- MOVL BX, DI
- LEAL -4(BX), BX
- CMPL DI, $0x08
+ MOVL SI, R8
+ LEAL -4(SI), SI
+ CMPL R8, $0x08
JBE repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy
- CMPL DI, $0x0c
+ CMPL R8, $0x0c
JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JB repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy
cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy:
- CMPL BX, $0x00000104
+ CMPL SI, $0x00000104
JB repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy
- CMPL BX, $0x00010100
+ CMPL SI, $0x00010100
JB repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy
- CMPL BX, $0x0100ffff
+ CMPL SI, $0x0100ffff
JB repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy
- LEAL -16842747(BX), BX
- MOVL $0xfffb001d, (AX)
- MOVB $0xff, 4(AX)
- ADDQ $0x05, AX
+ LEAL -16842747(SI), SI
+ MOVL $0xfffb001d, (CX)
+ MOVB $0xff, 4(CX)
+ ADDQ $0x05, CX
JMP emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy
repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy:
- LEAL -65536(BX), BX
- MOVL BX, SI
- MOVW $0x001d, (AX)
- MOVW BX, 2(AX)
- SARL $0x10, SI
- MOVB SI, 4(AX)
- ADDQ $0x05, AX
+ LEAL -65536(SI), SI
+ MOVL SI, DI
+ MOVW $0x001d, (CX)
+ MOVW SI, 2(CX)
+ SARL $0x10, DI
+ MOVB DI, 4(CX)
+ ADDQ $0x05, CX
JMP repeat_end_emit_encodeBlockAsm
repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy:
- LEAL -256(BX), BX
- MOVW $0x0019, (AX)
- MOVW BX, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(SI), SI
+ MOVW $0x0019, (CX)
+ MOVW SI, 2(CX)
+ ADDQ $0x04, CX
JMP repeat_end_emit_encodeBlockAsm
repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy:
- LEAL -4(BX), BX
- MOVW $0x0015, (AX)
- MOVB BL, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(SI), SI
+ MOVW $0x0015, (CX)
+ MOVB SI, 2(CX)
+ ADDQ $0x03, CX
JMP repeat_end_emit_encodeBlockAsm
repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy:
- SHLL $0x02, BX
- ORL $0x01, BX
- MOVW BX, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, SI
+ ORL $0x01, SI
+ MOVW SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm
repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy:
- XORQ DI, DI
- LEAL 1(DI)(BX*4), BX
- MOVB SI, 1(AX)
- SARL $0x08, SI
- SHLL $0x05, SI
- ORL SI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ XORQ R8, R8
+ LEAL 1(R8)(SI*4), SI
+ MOVB DI, 1(CX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm
four_bytes_remain_repeat_as_copy_encodeBlockAsm:
- TESTL BX, BX
+ TESTL SI, SI
JZ repeat_end_emit_encodeBlockAsm
- XORL DI, DI
- LEAL -1(DI)(BX*4), BX
- MOVB BL, (AX)
- MOVL SI, 1(AX)
- ADDQ $0x05, AX
+ XORL R8, R8
+ LEAL -1(R8)(SI*4), SI
+ MOVB SI, (CX)
+ MOVL DI, 1(CX)
+ ADDQ $0x05, CX
JMP repeat_end_emit_encodeBlockAsm
two_byte_offset_repeat_as_copy_encodeBlockAsm:
- CMPL BX, $0x40
+ CMPL SI, $0x40
JBE two_byte_offset_short_repeat_as_copy_encodeBlockAsm
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JAE long_offset_short_repeat_as_copy_encodeBlockAsm
- MOVL $0x00000001, DI
- LEAL 16(DI), DI
- MOVB SI, 1(AX)
- MOVL SI, R8
- SHRL $0x08, R8
- SHLL $0x05, R8
- ORL R8, DI
- MOVB DI, (AX)
- ADDQ $0x02, AX
- SUBL $0x08, BX
+ MOVL $0x00000001, R8
+ LEAL 16(R8), R8
+ MOVB DI, 1(CX)
+ MOVL DI, R9
+ SHRL $0x08, R9
+ SHLL $0x05, R9
+ ORL R9, R8
+ MOVB R8, (CX)
+ ADDQ $0x02, CX
+ SUBL $0x08, SI
// emitRepeat
- LEAL -4(BX), BX
+ LEAL -4(SI), SI
JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b:
- MOVL BX, DI
- LEAL -4(BX), BX
- CMPL DI, $0x08
+ MOVL SI, R8
+ LEAL -4(SI), SI
+ CMPL R8, $0x08
JBE repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
- CMPL DI, $0x0c
+ CMPL R8, $0x0c
JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JB repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b:
- CMPL BX, $0x00000104
+ CMPL SI, $0x00000104
JB repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
- CMPL BX, $0x00010100
+ CMPL SI, $0x00010100
JB repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
- CMPL BX, $0x0100ffff
+ CMPL SI, $0x0100ffff
JB repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
- LEAL -16842747(BX), BX
- MOVL $0xfffb001d, (AX)
- MOVB $0xff, 4(AX)
- ADDQ $0x05, AX
+ LEAL -16842747(SI), SI
+ MOVL $0xfffb001d, (CX)
+ MOVB $0xff, 4(CX)
+ ADDQ $0x05, CX
JMP emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b:
- LEAL -65536(BX), BX
- MOVL BX, SI
- MOVW $0x001d, (AX)
- MOVW BX, 2(AX)
- SARL $0x10, SI
- MOVB SI, 4(AX)
- ADDQ $0x05, AX
+ LEAL -65536(SI), SI
+ MOVL SI, DI
+ MOVW $0x001d, (CX)
+ MOVW SI, 2(CX)
+ SARL $0x10, DI
+ MOVB DI, 4(CX)
+ ADDQ $0x05, CX
JMP repeat_end_emit_encodeBlockAsm
repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b:
- LEAL -256(BX), BX
- MOVW $0x0019, (AX)
- MOVW BX, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(SI), SI
+ MOVW $0x0019, (CX)
+ MOVW SI, 2(CX)
+ ADDQ $0x04, CX
JMP repeat_end_emit_encodeBlockAsm
repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b:
- LEAL -4(BX), BX
- MOVW $0x0015, (AX)
- MOVB BL, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(SI), SI
+ MOVW $0x0015, (CX)
+ MOVB SI, 2(CX)
+ ADDQ $0x03, CX
JMP repeat_end_emit_encodeBlockAsm
repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b:
- SHLL $0x02, BX
- ORL $0x01, BX
- MOVW BX, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, SI
+ ORL $0x01, SI
+ MOVW SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm
repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b:
- XORQ DI, DI
- LEAL 1(DI)(BX*4), BX
- MOVB SI, 1(AX)
- SARL $0x08, SI
- SHLL $0x05, SI
- ORL SI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ XORQ R8, R8
+ LEAL 1(R8)(SI*4), SI
+ MOVB DI, 1(CX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm
long_offset_short_repeat_as_copy_encodeBlockAsm:
- MOVB $0xee, (AX)
- MOVW SI, 1(AX)
- LEAL -60(BX), BX
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW DI, 1(CX)
+ LEAL -60(SI), SI
+ ADDQ $0x03, CX
// emitRepeat
emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short:
- MOVL BX, DI
- LEAL -4(BX), BX
- CMPL DI, $0x08
+ MOVL SI, R8
+ LEAL -4(SI), SI
+ CMPL R8, $0x08
JBE repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short
- CMPL DI, $0x0c
+ CMPL R8, $0x0c
JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JB repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short
cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short:
- CMPL BX, $0x00000104
+ CMPL SI, $0x00000104
JB repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short
- CMPL BX, $0x00010100
+ CMPL SI, $0x00010100
JB repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short
- CMPL BX, $0x0100ffff
+ CMPL SI, $0x0100ffff
JB repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short
- LEAL -16842747(BX), BX
- MOVL $0xfffb001d, (AX)
- MOVB $0xff, 4(AX)
- ADDQ $0x05, AX
+ LEAL -16842747(SI), SI
+ MOVL $0xfffb001d, (CX)
+ MOVB $0xff, 4(CX)
+ ADDQ $0x05, CX
JMP emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short
repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short:
- LEAL -65536(BX), BX
- MOVL BX, SI
- MOVW $0x001d, (AX)
- MOVW BX, 2(AX)
- SARL $0x10, SI
- MOVB SI, 4(AX)
- ADDQ $0x05, AX
+ LEAL -65536(SI), SI
+ MOVL SI, DI
+ MOVW $0x001d, (CX)
+ MOVW SI, 2(CX)
+ SARL $0x10, DI
+ MOVB DI, 4(CX)
+ ADDQ $0x05, CX
JMP repeat_end_emit_encodeBlockAsm
repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short:
- LEAL -256(BX), BX
- MOVW $0x0019, (AX)
- MOVW BX, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(SI), SI
+ MOVW $0x0019, (CX)
+ MOVW SI, 2(CX)
+ ADDQ $0x04, CX
JMP repeat_end_emit_encodeBlockAsm
repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short:
- LEAL -4(BX), BX
- MOVW $0x0015, (AX)
- MOVB BL, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(SI), SI
+ MOVW $0x0015, (CX)
+ MOVB SI, 2(CX)
+ ADDQ $0x03, CX
JMP repeat_end_emit_encodeBlockAsm
repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short:
- SHLL $0x02, BX
- ORL $0x01, BX
- MOVW BX, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, SI
+ ORL $0x01, SI
+ MOVW SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm
repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short:
- XORQ DI, DI
- LEAL 1(DI)(BX*4), BX
- MOVB SI, 1(AX)
- SARL $0x08, SI
- SHLL $0x05, SI
- ORL SI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ XORQ R8, R8
+ LEAL 1(R8)(SI*4), SI
+ MOVB DI, 1(CX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm
two_byte_offset_short_repeat_as_copy_encodeBlockAsm:
- MOVL BX, DI
- SHLL $0x02, DI
- CMPL BX, $0x0c
+ MOVL SI, R8
+ SHLL $0x02, R8
+ CMPL SI, $0x0c
JAE emit_copy_three_repeat_as_copy_encodeBlockAsm
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JAE emit_copy_three_repeat_as_copy_encodeBlockAsm
- LEAL -15(DI), DI
- MOVB SI, 1(AX)
- SHRL $0x08, SI
- SHLL $0x05, SI
- ORL SI, DI
- MOVB DI, (AX)
- ADDQ $0x02, AX
+ LEAL -15(R8), R8
+ MOVB DI, 1(CX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R8
+ MOVB R8, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm
emit_copy_three_repeat_as_copy_encodeBlockAsm:
- LEAL -2(DI), DI
- MOVB DI, (AX)
- MOVW SI, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(R8), R8
+ MOVB R8, (CX)
+ MOVW DI, 1(CX)
+ ADDQ $0x03, CX
repeat_end_emit_encodeBlockAsm:
- MOVL CX, 12(SP)
+ MOVL DX, 12(SP)
JMP search_loop_encodeBlockAsm
no_repeat_found_encodeBlockAsm:
- CMPL (DX)(BX*1), SI
+ CMPL (BX)(SI*1), DI
JEQ candidate_match_encodeBlockAsm
- SHRQ $0x08, SI
- MOVL 24(SP)(R9*4), BX
- LEAL 2(CX), R8
- CMPL (DX)(DI*1), SI
+ SHRQ $0x08, DI
+ MOVL (AX)(R10*4), SI
+ LEAL 2(DX), R9
+ CMPL (BX)(R8*1), DI
JEQ candidate2_match_encodeBlockAsm
- MOVL R8, 24(SP)(R9*4)
- SHRQ $0x08, SI
- CMPL (DX)(BX*1), SI
+ MOVL R9, (AX)(R10*4)
+ SHRQ $0x08, DI
+ CMPL (BX)(SI*1), DI
JEQ candidate3_match_encodeBlockAsm
- MOVL 20(SP), CX
+ MOVL 20(SP), DX
JMP search_loop_encodeBlockAsm
candidate3_match_encodeBlockAsm:
- ADDL $0x02, CX
+ ADDL $0x02, DX
JMP candidate_match_encodeBlockAsm
candidate2_match_encodeBlockAsm:
- MOVL R8, 24(SP)(R9*4)
- INCL CX
- MOVL DI, BX
+ MOVL R9, (AX)(R10*4)
+ INCL DX
+ MOVL R8, SI
candidate_match_encodeBlockAsm:
- MOVL 12(SP), SI
- TESTL BX, BX
+ MOVL 12(SP), DI
+ TESTL SI, SI
JZ match_extend_back_end_encodeBlockAsm
match_extend_back_loop_encodeBlockAsm:
- CMPL CX, SI
+ CMPL DX, DI
JBE match_extend_back_end_encodeBlockAsm
- MOVB -1(DX)(BX*1), DI
- MOVB -1(DX)(CX*1), R8
- CMPB DI, R8
+ MOVB -1(BX)(SI*1), R8
+ MOVB -1(BX)(DX*1), R9
+ CMPB R8, R9
JNE match_extend_back_end_encodeBlockAsm
- LEAL -1(CX), CX
- DECL BX
+ LEAL -1(DX), DX
+ DECL SI
JZ match_extend_back_end_encodeBlockAsm
JMP match_extend_back_loop_encodeBlockAsm
match_extend_back_end_encodeBlockAsm:
- MOVL CX, SI
- SUBL 12(SP), SI
- LEAQ 5(AX)(SI*1), SI
- CMPQ SI, (SP)
+ MOVL DX, DI
+ SUBL 12(SP), DI
+ LEAQ 5(CX)(DI*1), DI
+ CMPQ DI, (SP)
JB match_dst_size_check_encodeBlockAsm
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_dst_size_check_encodeBlockAsm:
- MOVL CX, SI
- MOVL 12(SP), DI
- CMPL DI, SI
+ MOVL DX, DI
+ MOVL 12(SP), R8
+ CMPL R8, DI
JEQ emit_literal_done_match_emit_encodeBlockAsm
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(DI*1), SI
- SUBL DI, R8
- LEAL -1(R8), DI
- CMPL DI, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(R8*1), DI
+ SUBL R8, R9
+ LEAL -1(R9), R8
+ CMPL R8, $0x3c
JB one_byte_match_emit_encodeBlockAsm
- CMPL DI, $0x00000100
+ CMPL R8, $0x00000100
JB two_bytes_match_emit_encodeBlockAsm
- CMPL DI, $0x00010000
+ CMPL R8, $0x00010000
JB three_bytes_match_emit_encodeBlockAsm
- CMPL DI, $0x01000000
+ CMPL R8, $0x01000000
JB four_bytes_match_emit_encodeBlockAsm
- MOVB $0xfc, (AX)
- MOVL DI, 1(AX)
- ADDQ $0x05, AX
+ MOVB $0xfc, (CX)
+ MOVL R8, 1(CX)
+ ADDQ $0x05, CX
JMP memmove_long_match_emit_encodeBlockAsm
four_bytes_match_emit_encodeBlockAsm:
- MOVL DI, R9
- SHRL $0x10, R9
- MOVB $0xf8, (AX)
- MOVW DI, 1(AX)
- MOVB R9, 3(AX)
- ADDQ $0x04, AX
+ MOVL R8, R10
+ SHRL $0x10, R10
+ MOVB $0xf8, (CX)
+ MOVW R8, 1(CX)
+ MOVB R10, 3(CX)
+ ADDQ $0x04, CX
JMP memmove_long_match_emit_encodeBlockAsm
three_bytes_match_emit_encodeBlockAsm:
- MOVB $0xf4, (AX)
- MOVW DI, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW R8, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_encodeBlockAsm
two_bytes_match_emit_encodeBlockAsm:
- MOVB $0xf0, (AX)
- MOVB DI, 1(AX)
- ADDQ $0x02, AX
- CMPL DI, $0x40
+ MOVB $0xf0, (CX)
+ MOVB R8, 1(CX)
+ ADDQ $0x02, CX
+ CMPL R8, $0x40
JB memmove_match_emit_encodeBlockAsm
JMP memmove_long_match_emit_encodeBlockAsm
one_byte_match_emit_encodeBlockAsm:
- SHLB $0x02, DI
- MOVB DI, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, R8
+ MOVB R8, (CX)
+ ADDQ $0x01, CX
memmove_match_emit_encodeBlockAsm:
- LEAQ (AX)(R8*1), DI
+ LEAQ (CX)(R9*1), R8
// genMemMoveShort
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JBE emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_33through64
emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8:
- MOVQ (SI), R9
- MOVQ R9, (AX)
+ MOVQ (DI), R10
+ MOVQ R10, (CX)
JMP memmove_end_copy_match_emit_encodeBlockAsm
emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8through16:
- MOVQ (SI), R9
- MOVQ -8(SI)(R8*1), SI
- MOVQ R9, (AX)
- MOVQ SI, -8(AX)(R8*1)
+ MOVQ (DI), R10
+ MOVQ -8(DI)(R9*1), DI
+ MOVQ R10, (CX)
+ MOVQ DI, -8(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeBlockAsm
emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_17through32:
- MOVOU (SI), X0
- MOVOU -16(SI)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (DI), X0
+ MOVOU -16(DI)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeBlockAsm
emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_33through64:
- MOVOU (SI), X0
- MOVOU 16(SI), X1
- MOVOU -32(SI)(R8*1), X2
- MOVOU -16(SI)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (DI), X0
+ MOVOU 16(DI), X1
+ MOVOU -32(DI)(R9*1), X2
+ MOVOU -16(DI)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_match_emit_encodeBlockAsm:
- MOVQ DI, AX
+ MOVQ R8, CX
JMP emit_literal_done_match_emit_encodeBlockAsm
memmove_long_match_emit_encodeBlockAsm:
- LEAQ (AX)(R8*1), DI
+ LEAQ (CX)(R9*1), R8
// genMemMoveLong
- MOVOU (SI), X0
- MOVOU 16(SI), X1
- MOVOU -32(SI)(R8*1), X2
- MOVOU -16(SI)(R8*1), X3
- MOVQ R8, R10
- SHRQ $0x05, R10
- MOVQ AX, R9
- ANDL $0x0000001f, R9
- MOVQ $0x00000040, R11
- SUBQ R9, R11
- DECQ R10
+ MOVOU (DI), X0
+ MOVOU 16(DI), X1
+ MOVOU -32(DI)(R9*1), X2
+ MOVOU -16(DI)(R9*1), X3
+ MOVQ R9, R11
+ SHRQ $0x05, R11
+ MOVQ CX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R12
+ SUBQ R10, R12
+ DECQ R11
JA emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_forward_sse_loop_32
- LEAQ -32(SI)(R11*1), R9
- LEAQ -32(AX)(R11*1), R12
+ LEAQ -32(DI)(R12*1), R10
+ LEAQ -32(CX)(R12*1), R13
emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_big_loop_back:
- MOVOU (R9), X4
- MOVOU 16(R9), X5
- MOVOA X4, (R12)
- MOVOA X5, 16(R12)
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R13)
+ MOVOA X5, 16(R13)
+ ADDQ $0x20, R13
+ ADDQ $0x20, R10
ADDQ $0x20, R12
- ADDQ $0x20, R9
- ADDQ $0x20, R11
- DECQ R10
+ DECQ R11
JNA emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_forward_sse_loop_32:
- MOVOU -32(SI)(R11*1), X4
- MOVOU -16(SI)(R11*1), X5
- MOVOA X4, -32(AX)(R11*1)
- MOVOA X5, -16(AX)(R11*1)
- ADDQ $0x20, R11
- CMPQ R8, R11
+ MOVOU -32(DI)(R12*1), X4
+ MOVOU -16(DI)(R12*1), X5
+ MOVOA X4, -32(CX)(R12*1)
+ MOVOA X5, -16(CX)(R12*1)
+ ADDQ $0x20, R12
+ CMPQ R9, R12
JAE emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ DI, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ R8, CX
emit_literal_done_match_emit_encodeBlockAsm:
match_nolit_loop_encodeBlockAsm:
- MOVL CX, SI
- SUBL BX, SI
- MOVL SI, 16(SP)
- ADDL $0x04, CX
- ADDL $0x04, BX
- MOVQ src_len+32(FP), SI
- SUBL CX, SI
- LEAQ (DX)(CX*1), DI
- LEAQ (DX)(BX*1), BX
+ MOVL DX, DI
+ SUBL SI, DI
+ MOVL DI, 16(SP)
+ ADDL $0x04, DX
+ ADDL $0x04, SI
+ MOVQ src_len+32(FP), DI
+ SUBL DX, DI
+ LEAQ (BX)(DX*1), R8
+ LEAQ (BX)(SI*1), SI
// matchLen
- XORL R9, R9
+ XORL R10, R10
matchlen_loopback_16_match_nolit_encodeBlockAsm:
- CMPL SI, $0x10
+ CMPL DI, $0x10
JB matchlen_match8_match_nolit_encodeBlockAsm
- MOVQ (DI)(R9*1), R8
- MOVQ 8(DI)(R9*1), R10
- XORQ (BX)(R9*1), R8
+ MOVQ (R8)(R10*1), R9
+ MOVQ 8(R8)(R10*1), R11
+ XORQ (SI)(R10*1), R9
JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm
- XORQ 8(BX)(R9*1), R10
+ XORQ 8(SI)(R10*1), R11
JNZ matchlen_bsf_16match_nolit_encodeBlockAsm
- LEAL -16(SI), SI
- LEAL 16(R9), R9
+ LEAL -16(DI), DI
+ LEAL 16(R10), R10
JMP matchlen_loopback_16_match_nolit_encodeBlockAsm
matchlen_bsf_16match_nolit_encodeBlockAsm:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL 8(R9)(R10*1), R9
+ SARQ $0x03, R11
+ LEAL 8(R10)(R11*1), R10
JMP match_nolit_end_encodeBlockAsm
matchlen_match8_match_nolit_encodeBlockAsm:
- CMPL SI, $0x08
+ CMPL DI, $0x08
JB matchlen_match4_match_nolit_encodeBlockAsm
- MOVQ (DI)(R9*1), R8
- XORQ (BX)(R9*1), R8
+ MOVQ (R8)(R10*1), R9
+ XORQ (SI)(R10*1), R9
JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm
- LEAL -8(SI), SI
- LEAL 8(R9), R9
+ LEAL -8(DI), DI
+ LEAL 8(R10), R10
JMP matchlen_match4_match_nolit_encodeBlockAsm
matchlen_bsf_8_match_nolit_encodeBlockAsm:
#ifdef GOAMD64_v3
- TZCNTQ R8, R8
+ TZCNTQ R9, R9
#else
- BSFQ R8, R8
+ BSFQ R9, R9
#endif
- SARQ $0x03, R8
- LEAL (R9)(R8*1), R9
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
JMP match_nolit_end_encodeBlockAsm
matchlen_match4_match_nolit_encodeBlockAsm:
- CMPL SI, $0x04
+ CMPL DI, $0x04
JB matchlen_match2_match_nolit_encodeBlockAsm
- MOVL (DI)(R9*1), R8
- CMPL (BX)(R9*1), R8
+ MOVL (R8)(R10*1), R9
+ CMPL (SI)(R10*1), R9
JNE matchlen_match2_match_nolit_encodeBlockAsm
- LEAL -4(SI), SI
- LEAL 4(R9), R9
+ LEAL -4(DI), DI
+ LEAL 4(R10), R10
matchlen_match2_match_nolit_encodeBlockAsm:
- CMPL SI, $0x01
+ CMPL DI, $0x01
JE matchlen_match1_match_nolit_encodeBlockAsm
JB match_nolit_end_encodeBlockAsm
- MOVW (DI)(R9*1), R8
- CMPW (BX)(R9*1), R8
+ MOVW (R8)(R10*1), R9
+ CMPW (SI)(R10*1), R9
JNE matchlen_match1_match_nolit_encodeBlockAsm
- LEAL 2(R9), R9
- SUBL $0x02, SI
+ LEAL 2(R10), R10
+ SUBL $0x02, DI
JZ match_nolit_end_encodeBlockAsm
matchlen_match1_match_nolit_encodeBlockAsm:
- MOVB (DI)(R9*1), R8
- CMPB (BX)(R9*1), R8
+ MOVB (R8)(R10*1), R9
+ CMPB (SI)(R10*1), R9
JNE match_nolit_end_encodeBlockAsm
- LEAL 1(R9), R9
+ LEAL 1(R10), R10
match_nolit_end_encodeBlockAsm:
- ADDL R9, CX
- MOVL 16(SP), BX
- ADDL $0x04, R9
- MOVL CX, 12(SP)
+ ADDL R10, DX
+ MOVL 16(SP), SI
+ ADDL $0x04, R10
+ MOVL DX, 12(SP)
// emitCopy
- CMPL BX, $0x00010000
+ CMPL SI, $0x00010000
JB two_byte_offset_match_nolit_encodeBlockAsm
- CMPL R9, $0x40
+ CMPL R10, $0x40
JBE four_bytes_remain_match_nolit_encodeBlockAsm
- MOVB $0xff, (AX)
- MOVL BX, 1(AX)
- LEAL -64(R9), R9
- ADDQ $0x05, AX
- CMPL R9, $0x04
+ MOVB $0xff, (CX)
+ MOVL SI, 1(CX)
+ LEAL -64(R10), R10
+ ADDQ $0x05, CX
+ CMPL R10, $0x04
JB four_bytes_remain_match_nolit_encodeBlockAsm
// emitRepeat
emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy:
- MOVL R9, SI
- LEAL -4(R9), R9
- CMPL SI, $0x08
+ MOVL R10, DI
+ LEAL -4(R10), R10
+ CMPL DI, $0x08
JBE repeat_two_match_nolit_encodeBlockAsm_emit_copy
- CMPL SI, $0x0c
+ CMPL DI, $0x0c
JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy
- CMPL BX, $0x00000800
+ CMPL SI, $0x00000800
JB repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy
cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy:
- CMPL R9, $0x00000104
+ CMPL R10, $0x00000104
JB repeat_three_match_nolit_encodeBlockAsm_emit_copy
- CMPL R9, $0x00010100
+ CMPL R10, $0x00010100
JB repeat_four_match_nolit_encodeBlockAsm_emit_copy
- CMPL R9, $0x0100ffff
+ CMPL R10, $0x0100ffff
JB repeat_five_match_nolit_encodeBlockAsm_emit_copy
- LEAL -16842747(R9), R9
- MOVL $0xfffb001d, (AX)
- MOVB $0xff, 4(AX)
- ADDQ $0x05, AX
+ LEAL -16842747(R10), R10
+ MOVL $0xfffb001d, (CX)
+ MOVB $0xff, 4(CX)
+ ADDQ $0x05, CX
JMP emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy
repeat_five_match_nolit_encodeBlockAsm_emit_copy:
- LEAL -65536(R9), R9
- MOVL R9, BX
- MOVW $0x001d, (AX)
- MOVW R9, 2(AX)
- SARL $0x10, BX
- MOVB BL, 4(AX)
- ADDQ $0x05, AX
+ LEAL -65536(R10), R10
+ MOVL R10, SI
+ MOVW $0x001d, (CX)
+ MOVW R10, 2(CX)
+ SARL $0x10, SI
+ MOVB SI, 4(CX)
+ ADDQ $0x05, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm
repeat_four_match_nolit_encodeBlockAsm_emit_copy:
- LEAL -256(R9), R9
- MOVW $0x0019, (AX)
- MOVW R9, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R10), R10
+ MOVW $0x0019, (CX)
+ MOVW R10, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm
repeat_three_match_nolit_encodeBlockAsm_emit_copy:
- LEAL -4(R9), R9
- MOVW $0x0015, (AX)
- MOVB R9, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R10), R10
+ MOVW $0x0015, (CX)
+ MOVB R10, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm
repeat_two_match_nolit_encodeBlockAsm_emit_copy:
- SHLL $0x02, R9
- ORL $0x01, R9
- MOVW R9, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R10
+ ORL $0x01, R10
+ MOVW R10, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm
repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy:
- XORQ SI, SI
- LEAL 1(SI)(R9*4), R9
- MOVB BL, 1(AX)
- SARL $0x08, BX
- SHLL $0x05, BX
- ORL BX, R9
- MOVB R9, (AX)
- ADDQ $0x02, AX
+ XORQ DI, DI
+ LEAL 1(DI)(R10*4), R10
+ MOVB SI, 1(CX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, R10
+ MOVB R10, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm
four_bytes_remain_match_nolit_encodeBlockAsm:
- TESTL R9, R9
+ TESTL R10, R10
JZ match_nolit_emitcopy_end_encodeBlockAsm
- XORL SI, SI
- LEAL -1(SI)(R9*4), R9
- MOVB R9, (AX)
- MOVL BX, 1(AX)
- ADDQ $0x05, AX
+ XORL DI, DI
+ LEAL -1(DI)(R10*4), R10
+ MOVB R10, (CX)
+ MOVL SI, 1(CX)
+ ADDQ $0x05, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm
two_byte_offset_match_nolit_encodeBlockAsm:
- CMPL R9, $0x40
+ CMPL R10, $0x40
JBE two_byte_offset_short_match_nolit_encodeBlockAsm
- CMPL BX, $0x00000800
+ CMPL SI, $0x00000800
JAE long_offset_short_match_nolit_encodeBlockAsm
- MOVL $0x00000001, SI
- LEAL 16(SI), SI
- MOVB BL, 1(AX)
- MOVL BX, DI
- SHRL $0x08, DI
- SHLL $0x05, DI
- ORL DI, SI
- MOVB SI, (AX)
- ADDQ $0x02, AX
- SUBL $0x08, R9
+ MOVL $0x00000001, DI
+ LEAL 16(DI), DI
+ MOVB SI, 1(CX)
+ MOVL SI, R8
+ SHRL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, DI
+ MOVB DI, (CX)
+ ADDQ $0x02, CX
+ SUBL $0x08, R10
// emitRepeat
- LEAL -4(R9), R9
+ LEAL -4(R10), R10
JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b
emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short_2b:
- MOVL R9, SI
- LEAL -4(R9), R9
- CMPL SI, $0x08
+ MOVL R10, DI
+ LEAL -4(R10), R10
+ CMPL DI, $0x08
JBE repeat_two_match_nolit_encodeBlockAsm_emit_copy_short_2b
- CMPL SI, $0x0c
+ CMPL DI, $0x0c
JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b
- CMPL BX, $0x00000800
+ CMPL SI, $0x00000800
JB repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b
cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b:
- CMPL R9, $0x00000104
+ CMPL R10, $0x00000104
JB repeat_three_match_nolit_encodeBlockAsm_emit_copy_short_2b
- CMPL R9, $0x00010100
+ CMPL R10, $0x00010100
JB repeat_four_match_nolit_encodeBlockAsm_emit_copy_short_2b
- CMPL R9, $0x0100ffff
+ CMPL R10, $0x0100ffff
JB repeat_five_match_nolit_encodeBlockAsm_emit_copy_short_2b
- LEAL -16842747(R9), R9
- MOVL $0xfffb001d, (AX)
- MOVB $0xff, 4(AX)
- ADDQ $0x05, AX
+ LEAL -16842747(R10), R10
+ MOVL $0xfffb001d, (CX)
+ MOVB $0xff, 4(CX)
+ ADDQ $0x05, CX
JMP emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short_2b
repeat_five_match_nolit_encodeBlockAsm_emit_copy_short_2b:
- LEAL -65536(R9), R9
- MOVL R9, BX
- MOVW $0x001d, (AX)
- MOVW R9, 2(AX)
- SARL $0x10, BX
- MOVB BL, 4(AX)
- ADDQ $0x05, AX
+ LEAL -65536(R10), R10
+ MOVL R10, SI
+ MOVW $0x001d, (CX)
+ MOVW R10, 2(CX)
+ SARL $0x10, SI
+ MOVB SI, 4(CX)
+ ADDQ $0x05, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm
repeat_four_match_nolit_encodeBlockAsm_emit_copy_short_2b:
- LEAL -256(R9), R9
- MOVW $0x0019, (AX)
- MOVW R9, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R10), R10
+ MOVW $0x0019, (CX)
+ MOVW R10, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm
repeat_three_match_nolit_encodeBlockAsm_emit_copy_short_2b:
- LEAL -4(R9), R9
- MOVW $0x0015, (AX)
- MOVB R9, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R10), R10
+ MOVW $0x0015, (CX)
+ MOVB R10, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm
repeat_two_match_nolit_encodeBlockAsm_emit_copy_short_2b:
- SHLL $0x02, R9
- ORL $0x01, R9
- MOVW R9, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R10
+ ORL $0x01, R10
+ MOVW R10, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm
repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b:
- XORQ SI, SI
- LEAL 1(SI)(R9*4), R9
- MOVB BL, 1(AX)
- SARL $0x08, BX
- SHLL $0x05, BX
- ORL BX, R9
- MOVB R9, (AX)
- ADDQ $0x02, AX
+ XORQ DI, DI
+ LEAL 1(DI)(R10*4), R10
+ MOVB SI, 1(CX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, R10
+ MOVB R10, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm
long_offset_short_match_nolit_encodeBlockAsm:
- MOVB $0xee, (AX)
- MOVW BX, 1(AX)
- LEAL -60(R9), R9
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW SI, 1(CX)
+ LEAL -60(R10), R10
+ ADDQ $0x03, CX
// emitRepeat
emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short:
- MOVL R9, SI
- LEAL -4(R9), R9
- CMPL SI, $0x08
+ MOVL R10, DI
+ LEAL -4(R10), R10
+ CMPL DI, $0x08
JBE repeat_two_match_nolit_encodeBlockAsm_emit_copy_short
- CMPL SI, $0x0c
+ CMPL DI, $0x0c
JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short
- CMPL BX, $0x00000800
+ CMPL SI, $0x00000800
JB repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short
cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short:
- CMPL R9, $0x00000104
+ CMPL R10, $0x00000104
JB repeat_three_match_nolit_encodeBlockAsm_emit_copy_short
- CMPL R9, $0x00010100
+ CMPL R10, $0x00010100
JB repeat_four_match_nolit_encodeBlockAsm_emit_copy_short
- CMPL R9, $0x0100ffff
+ CMPL R10, $0x0100ffff
JB repeat_five_match_nolit_encodeBlockAsm_emit_copy_short
- LEAL -16842747(R9), R9
- MOVL $0xfffb001d, (AX)
- MOVB $0xff, 4(AX)
- ADDQ $0x05, AX
+ LEAL -16842747(R10), R10
+ MOVL $0xfffb001d, (CX)
+ MOVB $0xff, 4(CX)
+ ADDQ $0x05, CX
JMP emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short
repeat_five_match_nolit_encodeBlockAsm_emit_copy_short:
- LEAL -65536(R9), R9
- MOVL R9, BX
- MOVW $0x001d, (AX)
- MOVW R9, 2(AX)
- SARL $0x10, BX
- MOVB BL, 4(AX)
- ADDQ $0x05, AX
+ LEAL -65536(R10), R10
+ MOVL R10, SI
+ MOVW $0x001d, (CX)
+ MOVW R10, 2(CX)
+ SARL $0x10, SI
+ MOVB SI, 4(CX)
+ ADDQ $0x05, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm
repeat_four_match_nolit_encodeBlockAsm_emit_copy_short:
- LEAL -256(R9), R9
- MOVW $0x0019, (AX)
- MOVW R9, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R10), R10
+ MOVW $0x0019, (CX)
+ MOVW R10, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm
repeat_three_match_nolit_encodeBlockAsm_emit_copy_short:
- LEAL -4(R9), R9
- MOVW $0x0015, (AX)
- MOVB R9, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R10), R10
+ MOVW $0x0015, (CX)
+ MOVB R10, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm
repeat_two_match_nolit_encodeBlockAsm_emit_copy_short:
- SHLL $0x02, R9
- ORL $0x01, R9
- MOVW R9, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R10
+ ORL $0x01, R10
+ MOVW R10, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm
repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short:
- XORQ SI, SI
- LEAL 1(SI)(R9*4), R9
- MOVB BL, 1(AX)
- SARL $0x08, BX
- SHLL $0x05, BX
- ORL BX, R9
- MOVB R9, (AX)
- ADDQ $0x02, AX
+ XORQ DI, DI
+ LEAL 1(DI)(R10*4), R10
+ MOVB SI, 1(CX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, R10
+ MOVB R10, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm
two_byte_offset_short_match_nolit_encodeBlockAsm:
- MOVL R9, SI
- SHLL $0x02, SI
- CMPL R9, $0x0c
+ MOVL R10, DI
+ SHLL $0x02, DI
+ CMPL R10, $0x0c
JAE emit_copy_three_match_nolit_encodeBlockAsm
- CMPL BX, $0x00000800
+ CMPL SI, $0x00000800
JAE emit_copy_three_match_nolit_encodeBlockAsm
- LEAL -15(SI), SI
- MOVB BL, 1(AX)
- SHRL $0x08, BX
- SHLL $0x05, BX
- ORL BX, SI
- MOVB SI, (AX)
- ADDQ $0x02, AX
+ LEAL -15(DI), DI
+ MOVB SI, 1(CX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm
emit_copy_three_match_nolit_encodeBlockAsm:
- LEAL -2(SI), SI
- MOVB SI, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(DI), DI
+ MOVB DI, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
match_nolit_emitcopy_end_encodeBlockAsm:
- CMPL CX, 8(SP)
+ CMPL DX, 8(SP)
JAE emit_remainder_encodeBlockAsm
- MOVQ -2(DX)(CX*1), SI
- CMPQ AX, (SP)
+ MOVQ -2(BX)(DX*1), DI
+ CMPQ CX, (SP)
JB match_nolit_dst_ok_encodeBlockAsm
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_nolit_dst_ok_encodeBlockAsm:
- MOVQ $0x0000cf1bbcdcbf9b, R8
- MOVQ SI, DI
- SHRQ $0x10, SI
- MOVQ SI, BX
- SHLQ $0x10, DI
- IMULQ R8, DI
- SHRQ $0x32, DI
- SHLQ $0x10, BX
- IMULQ R8, BX
- SHRQ $0x32, BX
- LEAL -2(CX), R8
- LEAQ 24(SP)(BX*4), R9
- MOVL (R9), BX
- MOVL R8, 24(SP)(DI*4)
- MOVL CX, (R9)
- CMPL (DX)(BX*1), SI
+ MOVQ $0x0000cf1bbcdcbf9b, R9
+ MOVQ DI, R8
+ SHRQ $0x10, DI
+ MOVQ DI, SI
+ SHLQ $0x10, R8
+ IMULQ R9, R8
+ SHRQ $0x32, R8
+ SHLQ $0x10, SI
+ IMULQ R9, SI
+ SHRQ $0x32, SI
+ LEAL -2(DX), R9
+ LEAQ (AX)(SI*4), R10
+ MOVL (R10), SI
+ MOVL R9, (AX)(R8*4)
+ MOVL DX, (R10)
+ CMPL (BX)(SI*1), DI
JEQ match_nolit_loop_encodeBlockAsm
- INCL CX
+ INCL DX
JMP search_loop_encodeBlockAsm
emit_remainder_encodeBlockAsm:
- MOVQ src_len+32(FP), CX
- SUBL 12(SP), CX
- LEAQ 5(AX)(CX*1), CX
- CMPQ CX, (SP)
+ MOVQ src_len+32(FP), AX
+ SUBL 12(SP), AX
+ LEAQ 5(CX)(AX*1), AX
+ CMPQ AX, (SP)
JB emit_remainder_ok_encodeBlockAsm
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
emit_remainder_ok_encodeBlockAsm:
- MOVQ src_len+32(FP), CX
- MOVL 12(SP), BX
- CMPL BX, CX
+ MOVQ src_len+32(FP), AX
+ MOVL 12(SP), DX
+ CMPL DX, AX
JEQ emit_literal_done_emit_remainder_encodeBlockAsm
- MOVL CX, SI
- MOVL CX, 12(SP)
- LEAQ (DX)(BX*1), CX
- SUBL BX, SI
+ MOVL AX, SI
+ MOVL AX, 12(SP)
+ LEAQ (BX)(DX*1), AX
+ SUBL DX, SI
LEAL -1(SI), DX
CMPL DX, $0x3c
JB one_byte_emit_remainder_encodeBlockAsm
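
The size ladder here (one tag byte for short literal runs, then the 0xf0/0xf4/0xf8/0xfc prefixes followed by 1, 2, 3 or 4 little-endian length bytes) is Snappy's literal encoding, which the next hunk writes out byte by byte. A compact Go rendering of that ladder for reference; the function name and the tagLiteral constant are illustrative and not taken from this diff, and dst is assumed to have room:

	const tagLiteral = 0x00 // low two tag bits for a literal run

	// emitLiteral mirrors the ladder above: length-1 packed into the tag
	// byte when it fits below 60, otherwise 0xf0..0xfc followed by 1..4
	// length bytes, little-endian, then the literal bytes themselves.
	func emitLiteral(dst, lit []byte) int {
		if len(lit) == 0 {
			return 0
		}
		i, n := 0, uint32(len(lit)-1)
		switch {
		case n < 60:
			dst[0] = uint8(n)<<2 | tagLiteral
			i = 1
		case n < 1<<8:
			dst[0], dst[1] = 0xf0|tagLiteral, uint8(n)
			i = 2
		case n < 1<<16:
			dst[0], dst[1], dst[2] = 0xf4|tagLiteral, uint8(n), uint8(n>>8)
			i = 3
		case n < 1<<24:
			dst[0], dst[1] = 0xf8|tagLiteral, uint8(n)
			dst[2], dst[3] = uint8(n>>8), uint8(n>>16)
			i = 4
		default:
			dst[0] = 0xfc | tagLiteral
			dst[1], dst[2] = uint8(n), uint8(n>>8)
			dst[3], dst[4] = uint8(n>>16), uint8(n>>24)
			i = 5
		}
		return i + copy(dst[i:], lit)
	}
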
@@ -1286,41 +1287,41 @@ emit_remainder_ok_encodeBlockAsm:
JB three_bytes_emit_remainder_encodeBlockAsm
CMPL DX, $0x01000000
JB four_bytes_emit_remainder_encodeBlockAsm
- MOVB $0xfc, (AX)
- MOVL DX, 1(AX)
- ADDQ $0x05, AX
+ MOVB $0xfc, (CX)
+ MOVL DX, 1(CX)
+ ADDQ $0x05, CX
JMP memmove_long_emit_remainder_encodeBlockAsm
four_bytes_emit_remainder_encodeBlockAsm:
MOVL DX, BX
SHRL $0x10, BX
- MOVB $0xf8, (AX)
- MOVW DX, 1(AX)
- MOVB BL, 3(AX)
- ADDQ $0x04, AX
+ MOVB $0xf8, (CX)
+ MOVW DX, 1(CX)
+ MOVB BL, 3(CX)
+ ADDQ $0x04, CX
JMP memmove_long_emit_remainder_encodeBlockAsm
three_bytes_emit_remainder_encodeBlockAsm:
- MOVB $0xf4, (AX)
- MOVW DX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW DX, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_emit_remainder_encodeBlockAsm
two_bytes_emit_remainder_encodeBlockAsm:
- MOVB $0xf0, (AX)
- MOVB DL, 1(AX)
- ADDQ $0x02, AX
+ MOVB $0xf0, (CX)
+ MOVB DL, 1(CX)
+ ADDQ $0x02, CX
CMPL DX, $0x40
JB memmove_emit_remainder_encodeBlockAsm
JMP memmove_long_emit_remainder_encodeBlockAsm
one_byte_emit_remainder_encodeBlockAsm:
SHLB $0x02, DL
- MOVB DL, (AX)
- ADDQ $0x01, AX
+ MOVB DL, (CX)
+ ADDQ $0x01, CX
memmove_emit_remainder_encodeBlockAsm:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveShort
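
The genMemMoveShort dispatch below picks a branch by size and copies with overlapping vector loads: for 8..16 bytes it reads the first and last 8 bytes, for 17..32 the first and last 16, and stores both so the overlap covers the middle without a loop. A sketch of the 17-through-32 case in Go (a hypothetical helper, for illustration only):

	// move17through32 copies n (17..32) bytes the way the
	// emit_lit_memmove_*_move_17through32 blocks do: read both ends
	// before writing, so the copy stays correct even when dst and src
	// overlap, exactly as the paired MOVOU loads precede the stores.
	func move17through32(dst, src []byte) {
		n := len(src)
		var head, tail [16]byte
		copy(head[:], src[:16])
		copy(tail[:], src[n-16:])
		copy(dst[:16], head[:])
		copy(dst[n-16:], tail[:])
	}
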
@@ -1336,73 +1337,73 @@ memmove_emit_remainder_encodeBlockAsm:
JMP emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_33through64
emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_1or2:
- MOVB (CX), SI
- MOVB -1(CX)(BX*1), CL
- MOVB SI, (AX)
- MOVB CL, -1(AX)(BX*1)
+ MOVB (AX), SI
+ MOVB -1(AX)(BX*1), AL
+ MOVB SI, (CX)
+ MOVB AL, -1(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm
emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_3:
- MOVW (CX), SI
- MOVB 2(CX), CL
- MOVW SI, (AX)
- MOVB CL, 2(AX)
+ MOVW (AX), SI
+ MOVB 2(AX), AL
+ MOVW SI, (CX)
+ MOVB AL, 2(CX)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm
emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_4through7:
- MOVL (CX), SI
- MOVL -4(CX)(BX*1), CX
- MOVL SI, (AX)
- MOVL CX, -4(AX)(BX*1)
+ MOVL (AX), SI
+ MOVL -4(AX)(BX*1), AX
+ MOVL SI, (CX)
+ MOVL AX, -4(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm
emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_8through16:
- MOVQ (CX), SI
- MOVQ -8(CX)(BX*1), CX
- MOVQ SI, (AX)
- MOVQ CX, -8(AX)(BX*1)
+ MOVQ (AX), SI
+ MOVQ -8(AX)(BX*1), AX
+ MOVQ SI, (CX)
+ MOVQ AX, -8(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm
emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_17through32:
- MOVOU (CX), X0
- MOVOU -16(CX)(BX*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU -16(AX)(BX*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm
emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_33through64:
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
memmove_end_copy_emit_remainder_encodeBlockAsm:
- MOVQ DX, AX
+ MOVQ DX, CX
JMP emit_literal_done_emit_remainder_encodeBlockAsm
memmove_long_emit_remainder_encodeBlockAsm:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveLong
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
MOVQ BX, DI
SHRQ $0x05, DI
- MOVQ AX, SI
+ MOVQ CX, SI
ANDL $0x0000001f, SI
MOVQ $0x00000040, R8
SUBQ SI, R8
DECQ DI
JA emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_forward_sse_loop_32
- LEAQ -32(CX)(R8*1), SI
- LEAQ -32(AX)(R8*1), R9
+ LEAQ -32(AX)(R8*1), SI
+ LEAQ -32(CX)(R8*1), R9
emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_big_loop_back:
MOVOU (SI), X4
@@ -1416,1199 +1417,1200 @@ emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_big_loop_back:
JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_big_loop_back
emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_forward_sse_loop_32:
- MOVOU -32(CX)(R8*1), X4
- MOVOU -16(CX)(R8*1), X5
- MOVOA X4, -32(AX)(R8*1)
- MOVOA X5, -16(AX)(R8*1)
+ MOVOU -32(AX)(R8*1), X4
+ MOVOU -16(AX)(R8*1), X5
+ MOVOA X4, -32(CX)(R8*1)
+ MOVOA X5, -16(CX)(R8*1)
ADDQ $0x20, R8
CMPQ BX, R8
JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
- MOVQ DX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
+ MOVQ DX, CX
emit_literal_done_emit_remainder_encodeBlockAsm:
- MOVQ dst_base+0(FP), CX
- SUBQ CX, AX
- MOVQ AX, ret+48(FP)
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, CX
+ MOVQ CX, ret+56(FP)
RET
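
From here the diff moves on to encodeBlockAsm4MB, where the reason for all the register shuffling above becomes explicit: each encodeBlockAsm* function now takes a caller-supplied tmp *[65536]byte hash table instead of reserving it in its own stack frame. The TEXT directive shrinks from $65560-56 to $24-64, table accesses move from 24(SP)(R9*4) to (AX)(R10*4), and the result slot shifts from ret+48(FP) to ret+56(FP) to make room for the extra pointer argument. A caller-side sketch in Go; the pool and the wrapper are assumptions made for this sketch, not the library's actual API:

	import "sync"

	// encodeBlockAsm4MB is implemented in assembly; this Go declaration
	// matches the new signature shown in the hunk below.
	func encodeBlockAsm4MB(dst []byte, src []byte, tmp *[65536]byte) int

	// tablePool recycles the 64 KiB scratch tables the assembly now
	// expects from its caller (pooling is an assumption of this sketch).
	var tablePool = sync.Pool{
		New: func() interface{} { return new([65536]byte) },
	}

	func encodeBlock4MB(dst, src []byte) int {
		tmp := tablePool.Get().(*[65536]byte) // zeroed by the assembly's own zero_loop
		defer tablePool.Put(tmp)
		return encodeBlockAsm4MB(dst, src, tmp)
	}

Keeping the table out of the frame means a call no longer forces a 64 KiB stack growth, at the cost of threading one extra pointer through the generated code, which is what drives the wholesale register renaming throughout this diff.
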
-// func encodeBlockAsm4MB(dst []byte, src []byte) int
+// func encodeBlockAsm4MB(dst []byte, src []byte, tmp *[65536]byte) int
// Requires: BMI, SSE2
-TEXT ·encodeBlockAsm4MB(SB), $65560-56
- MOVQ dst_base+0(FP), AX
- MOVQ $0x00000200, CX
- LEAQ 24(SP), DX
+TEXT ·encodeBlockAsm4MB(SB), $24-64
+ MOVQ tmp+48(FP), AX
+ MOVQ dst_base+0(FP), CX
+ MOVQ $0x00000200, DX
+ MOVQ AX, BX
PXOR X0, X0
zero_loop_encodeBlockAsm4MB:
- MOVOU X0, (DX)
- MOVOU X0, 16(DX)
- MOVOU X0, 32(DX)
- MOVOU X0, 48(DX)
- MOVOU X0, 64(DX)
- MOVOU X0, 80(DX)
- MOVOU X0, 96(DX)
- MOVOU X0, 112(DX)
- ADDQ $0x80, DX
- DECQ CX
+ MOVOU X0, (BX)
+ MOVOU X0, 16(BX)
+ MOVOU X0, 32(BX)
+ MOVOU X0, 48(BX)
+ MOVOU X0, 64(BX)
+ MOVOU X0, 80(BX)
+ MOVOU X0, 96(BX)
+ MOVOU X0, 112(BX)
+ ADDQ $0x80, BX
+ DECQ DX
JNZ zero_loop_encodeBlockAsm4MB
MOVL $0x00000000, 12(SP)
- MOVQ src_len+32(FP), CX
- LEAQ -9(CX), DX
- LEAQ -8(CX), BX
- MOVL BX, 8(SP)
- SHRQ $0x05, CX
- SUBL CX, DX
- LEAQ (AX)(DX*1), DX
- MOVQ DX, (SP)
- MOVL $0x00000001, CX
- MOVL CX, 16(SP)
- MOVQ src_base+24(FP), DX
+ MOVQ src_len+32(FP), DX
+ LEAQ -9(DX), BX
+ LEAQ -8(DX), SI
+ MOVL SI, 8(SP)
+ SHRQ $0x05, DX
+ SUBL DX, BX
+ LEAQ (CX)(BX*1), BX
+ MOVQ BX, (SP)
+ MOVL $0x00000001, DX
+ MOVL DX, 16(SP)
+ MOVQ src_base+24(FP), BX
search_loop_encodeBlockAsm4MB:
- MOVL CX, BX
- SUBL 12(SP), BX
- SHRL $0x06, BX
- LEAL 4(CX)(BX*1), BX
- CMPL BX, 8(SP)
+ MOVL DX, SI
+ SUBL 12(SP), SI
+ SHRL $0x06, SI
+ LEAL 4(DX)(SI*1), SI
+ CMPL SI, 8(SP)
JAE emit_remainder_encodeBlockAsm4MB
- MOVQ (DX)(CX*1), SI
- MOVL BX, 20(SP)
- MOVQ $0x0000cf1bbcdcbf9b, R8
- MOVQ SI, R9
- MOVQ SI, R10
- SHRQ $0x08, R10
- SHLQ $0x10, R9
- IMULQ R8, R9
- SHRQ $0x32, R9
+ MOVQ (BX)(DX*1), DI
+ MOVL SI, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R9
+ MOVQ DI, R10
+ MOVQ DI, R11
+ SHRQ $0x08, R11
SHLQ $0x10, R10
- IMULQ R8, R10
+ IMULQ R9, R10
SHRQ $0x32, R10
- MOVL 24(SP)(R9*4), BX
- MOVL 24(SP)(R10*4), DI
- MOVL CX, 24(SP)(R9*4)
- LEAL 1(CX), R9
- MOVL R9, 24(SP)(R10*4)
- MOVQ SI, R9
- SHRQ $0x10, R9
- SHLQ $0x10, R9
- IMULQ R8, R9
- SHRQ $0x32, R9
- MOVL CX, R8
- SUBL 16(SP), R8
- MOVL 1(DX)(R8*1), R10
- MOVQ SI, R8
- SHRQ $0x08, R8
- CMPL R8, R10
+ SHLQ $0x10, R11
+ IMULQ R9, R11
+ SHRQ $0x32, R11
+ MOVL (AX)(R10*4), SI
+ MOVL (AX)(R11*4), R8
+ MOVL DX, (AX)(R10*4)
+ LEAL 1(DX), R10
+ MOVL R10, (AX)(R11*4)
+ MOVQ DI, R10
+ SHRQ $0x10, R10
+ SHLQ $0x10, R10
+ IMULQ R9, R10
+ SHRQ $0x32, R10
+ MOVL DX, R9
+ SUBL 16(SP), R9
+ MOVL 1(BX)(R9*1), R11
+ MOVQ DI, R9
+ SHRQ $0x08, R9
+ CMPL R9, R11
JNE no_repeat_found_encodeBlockAsm4MB
- LEAL 1(CX), SI
- MOVL 12(SP), DI
- MOVL SI, BX
- SUBL 16(SP), BX
+ LEAL 1(DX), DI
+ MOVL 12(SP), R8
+ MOVL DI, SI
+ SUBL 16(SP), SI
JZ repeat_extend_back_end_encodeBlockAsm4MB
repeat_extend_back_loop_encodeBlockAsm4MB:
- CMPL SI, DI
+ CMPL DI, R8
JBE repeat_extend_back_end_encodeBlockAsm4MB
- MOVB -1(DX)(BX*1), R8
- MOVB -1(DX)(SI*1), R9
- CMPB R8, R9
+ MOVB -1(BX)(SI*1), R9
+ MOVB -1(BX)(DI*1), R10
+ CMPB R9, R10
JNE repeat_extend_back_end_encodeBlockAsm4MB
- LEAL -1(SI), SI
- DECL BX
+ LEAL -1(DI), DI
+ DECL SI
JNZ repeat_extend_back_loop_encodeBlockAsm4MB
repeat_extend_back_end_encodeBlockAsm4MB:
- MOVL SI, BX
- SUBL 12(SP), BX
- LEAQ 4(AX)(BX*1), BX
- CMPQ BX, (SP)
+ MOVL DI, SI
+ SUBL 12(SP), SI
+ LEAQ 4(CX)(SI*1), SI
+ CMPQ SI, (SP)
JB repeat_dst_size_check_encodeBlockAsm4MB
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
repeat_dst_size_check_encodeBlockAsm4MB:
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_repeat_emit_encodeBlockAsm4MB
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R9
- SUBL BX, R8
- LEAL -1(R8), BX
- CMPL BX, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R10
+ SUBL SI, R9
+ LEAL -1(R9), SI
+ CMPL SI, $0x3c
JB one_byte_repeat_emit_encodeBlockAsm4MB
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_repeat_emit_encodeBlockAsm4MB
- CMPL BX, $0x00010000
+ CMPL SI, $0x00010000
JB three_bytes_repeat_emit_encodeBlockAsm4MB
- MOVL BX, R10
- SHRL $0x10, R10
- MOVB $0xf8, (AX)
- MOVW BX, 1(AX)
- MOVB R10, 3(AX)
- ADDQ $0x04, AX
+ MOVL SI, R11
+ SHRL $0x10, R11
+ MOVB $0xf8, (CX)
+ MOVW SI, 1(CX)
+ MOVB R11, 3(CX)
+ ADDQ $0x04, CX
JMP memmove_long_repeat_emit_encodeBlockAsm4MB
three_bytes_repeat_emit_encodeBlockAsm4MB:
- MOVB $0xf4, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_repeat_emit_encodeBlockAsm4MB
two_bytes_repeat_emit_encodeBlockAsm4MB:
- MOVB $0xf0, (AX)
- MOVB BL, 1(AX)
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_repeat_emit_encodeBlockAsm4MB
JMP memmove_long_repeat_emit_encodeBlockAsm4MB
one_byte_repeat_emit_encodeBlockAsm4MB:
- SHLB $0x02, BL
- MOVB BL, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
memmove_repeat_emit_encodeBlockAsm4MB:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveShort
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JBE emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_17through32
JMP emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_33through64
emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8:
- MOVQ (R9), R10
- MOVQ R10, (AX)
+ MOVQ (R10), R11
+ MOVQ R11, (CX)
JMP memmove_end_copy_repeat_emit_encodeBlockAsm4MB
emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8through16:
- MOVQ (R9), R10
- MOVQ -8(R9)(R8*1), R9
- MOVQ R10, (AX)
- MOVQ R9, -8(AX)(R8*1)
+ MOVQ (R10), R11
+ MOVQ -8(R10)(R9*1), R10
+ MOVQ R11, (CX)
+ MOVQ R10, -8(CX)(R9*1)
JMP memmove_end_copy_repeat_emit_encodeBlockAsm4MB
emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_17through32:
- MOVOU (R9), X0
- MOVOU -16(R9)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU -16(R10)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_repeat_emit_encodeBlockAsm4MB
emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_33through64:
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_repeat_emit_encodeBlockAsm4MB:
- MOVQ BX, AX
+ MOVQ SI, CX
JMP emit_literal_done_repeat_emit_encodeBlockAsm4MB
memmove_long_repeat_emit_encodeBlockAsm4MB:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveLong
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVQ R8, R11
- SHRQ $0x05, R11
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R12
- SUBQ R10, R12
- DECQ R11
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVQ R9, R12
+ SHRQ $0x05, R12
+ MOVQ CX, R11
+ ANDL $0x0000001f, R11
+ MOVQ $0x00000040, R13
+ SUBQ R11, R13
+ DECQ R12
JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32
- LEAQ -32(R9)(R12*1), R10
- LEAQ -32(AX)(R12*1), R13
+ LEAQ -32(R10)(R13*1), R11
+ LEAQ -32(CX)(R13*1), R14
emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R13)
- MOVOA X5, 16(R13)
+ MOVOU (R11), X4
+ MOVOU 16(R11), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
+ ADDQ $0x20, R14
+ ADDQ $0x20, R11
ADDQ $0x20, R13
- ADDQ $0x20, R10
- ADDQ $0x20, R12
- DECQ R11
+ DECQ R12
JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_big_loop_back
emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32:
- MOVOU -32(R9)(R12*1), X4
- MOVOU -16(R9)(R12*1), X5
- MOVOA X4, -32(AX)(R12*1)
- MOVOA X5, -16(AX)(R12*1)
- ADDQ $0x20, R12
- CMPQ R8, R12
+ MOVOU -32(R10)(R13*1), X4
+ MOVOU -16(R10)(R13*1), X5
+ MOVOA X4, -32(CX)(R13*1)
+ MOVOA X5, -16(CX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R9, R13
JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ BX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ SI, CX
emit_literal_done_repeat_emit_encodeBlockAsm4MB:
- ADDL $0x05, CX
- MOVL CX, BX
- SUBL 16(SP), BX
- MOVQ src_len+32(FP), R8
- SUBL CX, R8
- LEAQ (DX)(CX*1), R9
- LEAQ (DX)(BX*1), BX
+ ADDL $0x05, DX
+ MOVL DX, SI
+ SUBL 16(SP), SI
+ MOVQ src_len+32(FP), R9
+ SUBL DX, R9
+ LEAQ (BX)(DX*1), R10
+ LEAQ (BX)(SI*1), SI
// matchLen
- XORL R11, R11
+ XORL R12, R12
matchlen_loopback_16_repeat_extend_encodeBlockAsm4MB:
- CMPL R8, $0x10
+ CMPL R9, $0x10
JB matchlen_match8_repeat_extend_encodeBlockAsm4MB
- MOVQ (R9)(R11*1), R10
- MOVQ 8(R9)(R11*1), R12
- XORQ (BX)(R11*1), R10
+ MOVQ (R10)(R12*1), R11
+ MOVQ 8(R10)(R12*1), R13
+ XORQ (SI)(R12*1), R11
JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm4MB
- XORQ 8(BX)(R11*1), R12
+ XORQ 8(SI)(R12*1), R13
JNZ matchlen_bsf_16repeat_extend_encodeBlockAsm4MB
- LEAL -16(R8), R8
- LEAL 16(R11), R11
+ LEAL -16(R9), R9
+ LEAL 16(R12), R12
JMP matchlen_loopback_16_repeat_extend_encodeBlockAsm4MB
matchlen_bsf_16repeat_extend_encodeBlockAsm4MB:
#ifdef GOAMD64_v3
- TZCNTQ R12, R12
+ TZCNTQ R13, R13
#else
- BSFQ R12, R12
+ BSFQ R13, R13
#endif
- SARQ $0x03, R12
- LEAL 8(R11)(R12*1), R11
+ SARQ $0x03, R13
+ LEAL 8(R12)(R13*1), R12
JMP repeat_extend_forward_end_encodeBlockAsm4MB
matchlen_match8_repeat_extend_encodeBlockAsm4MB:
- CMPL R8, $0x08
+ CMPL R9, $0x08
JB matchlen_match4_repeat_extend_encodeBlockAsm4MB
- MOVQ (R9)(R11*1), R10
- XORQ (BX)(R11*1), R10
+ MOVQ (R10)(R12*1), R11
+ XORQ (SI)(R12*1), R11
JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm4MB
- LEAL -8(R8), R8
- LEAL 8(R11), R11
+ LEAL -8(R9), R9
+ LEAL 8(R12), R12
JMP matchlen_match4_repeat_extend_encodeBlockAsm4MB
matchlen_bsf_8_repeat_extend_encodeBlockAsm4MB:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL (R11)(R10*1), R11
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
JMP repeat_extend_forward_end_encodeBlockAsm4MB
matchlen_match4_repeat_extend_encodeBlockAsm4MB:
- CMPL R8, $0x04
+ CMPL R9, $0x04
JB matchlen_match2_repeat_extend_encodeBlockAsm4MB
- MOVL (R9)(R11*1), R10
- CMPL (BX)(R11*1), R10
+ MOVL (R10)(R12*1), R11
+ CMPL (SI)(R12*1), R11
JNE matchlen_match2_repeat_extend_encodeBlockAsm4MB
- LEAL -4(R8), R8
- LEAL 4(R11), R11
+ LEAL -4(R9), R9
+ LEAL 4(R12), R12
matchlen_match2_repeat_extend_encodeBlockAsm4MB:
- CMPL R8, $0x01
+ CMPL R9, $0x01
JE matchlen_match1_repeat_extend_encodeBlockAsm4MB
JB repeat_extend_forward_end_encodeBlockAsm4MB
- MOVW (R9)(R11*1), R10
- CMPW (BX)(R11*1), R10
+ MOVW (R10)(R12*1), R11
+ CMPW (SI)(R12*1), R11
JNE matchlen_match1_repeat_extend_encodeBlockAsm4MB
- LEAL 2(R11), R11
- SUBL $0x02, R8
+ LEAL 2(R12), R12
+ SUBL $0x02, R9
JZ repeat_extend_forward_end_encodeBlockAsm4MB
matchlen_match1_repeat_extend_encodeBlockAsm4MB:
- MOVB (R9)(R11*1), R10
- CMPB (BX)(R11*1), R10
+ MOVB (R10)(R12*1), R11
+ CMPB (SI)(R12*1), R11
JNE repeat_extend_forward_end_encodeBlockAsm4MB
- LEAL 1(R11), R11
+ LEAL 1(R12), R12
repeat_extend_forward_end_encodeBlockAsm4MB:
- ADDL R11, CX
- MOVL CX, BX
- SUBL SI, BX
- MOVL 16(SP), SI
- TESTL DI, DI
+ ADDL R12, DX
+ MOVL DX, SI
+ SUBL DI, SI
+ MOVL 16(SP), DI
+ TESTL R8, R8
JZ repeat_as_copy_encodeBlockAsm4MB
// emitRepeat
- MOVL BX, DI
- LEAL -4(BX), BX
- CMPL DI, $0x08
+ MOVL SI, R8
+ LEAL -4(SI), SI
+ CMPL R8, $0x08
JBE repeat_two_match_repeat_encodeBlockAsm4MB
- CMPL DI, $0x0c
+ CMPL R8, $0x0c
JAE cant_repeat_two_offset_match_repeat_encodeBlockAsm4MB
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JB repeat_two_offset_match_repeat_encodeBlockAsm4MB
cant_repeat_two_offset_match_repeat_encodeBlockAsm4MB:
- CMPL BX, $0x00000104
+ CMPL SI, $0x00000104
JB repeat_three_match_repeat_encodeBlockAsm4MB
- CMPL BX, $0x00010100
+ CMPL SI, $0x00010100
JB repeat_four_match_repeat_encodeBlockAsm4MB
- LEAL -65536(BX), BX
- MOVL BX, SI
- MOVW $0x001d, (AX)
- MOVW BX, 2(AX)
- SARL $0x10, SI
- MOVB SI, 4(AX)
- ADDQ $0x05, AX
+ LEAL -65536(SI), SI
+ MOVL SI, DI
+ MOVW $0x001d, (CX)
+ MOVW SI, 2(CX)
+ SARL $0x10, DI
+ MOVB DI, 4(CX)
+ ADDQ $0x05, CX
JMP repeat_end_emit_encodeBlockAsm4MB
repeat_four_match_repeat_encodeBlockAsm4MB:
- LEAL -256(BX), BX
- MOVW $0x0019, (AX)
- MOVW BX, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(SI), SI
+ MOVW $0x0019, (CX)
+ MOVW SI, 2(CX)
+ ADDQ $0x04, CX
JMP repeat_end_emit_encodeBlockAsm4MB
repeat_three_match_repeat_encodeBlockAsm4MB:
- LEAL -4(BX), BX
- MOVW $0x0015, (AX)
- MOVB BL, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(SI), SI
+ MOVW $0x0015, (CX)
+ MOVB SI, 2(CX)
+ ADDQ $0x03, CX
JMP repeat_end_emit_encodeBlockAsm4MB
repeat_two_match_repeat_encodeBlockAsm4MB:
- SHLL $0x02, BX
- ORL $0x01, BX
- MOVW BX, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, SI
+ ORL $0x01, SI
+ MOVW SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm4MB
repeat_two_offset_match_repeat_encodeBlockAsm4MB:
- XORQ DI, DI
- LEAL 1(DI)(BX*4), BX
- MOVB SI, 1(AX)
- SARL $0x08, SI
- SHLL $0x05, SI
- ORL SI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ XORQ R8, R8
+ LEAL 1(R8)(SI*4), SI
+ MOVB DI, 1(CX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm4MB
repeat_as_copy_encodeBlockAsm4MB:
// emitCopy
- CMPL SI, $0x00010000
+ CMPL DI, $0x00010000
JB two_byte_offset_repeat_as_copy_encodeBlockAsm4MB
- CMPL BX, $0x40
+ CMPL SI, $0x40
JBE four_bytes_remain_repeat_as_copy_encodeBlockAsm4MB
- MOVB $0xff, (AX)
- MOVL SI, 1(AX)
- LEAL -64(BX), BX
- ADDQ $0x05, AX
- CMPL BX, $0x04
+ MOVB $0xff, (CX)
+ MOVL DI, 1(CX)
+ LEAL -64(SI), SI
+ ADDQ $0x05, CX
+ CMPL SI, $0x04
JB four_bytes_remain_repeat_as_copy_encodeBlockAsm4MB
// emitRepeat
- MOVL BX, DI
- LEAL -4(BX), BX
- CMPL DI, $0x08
+ MOVL SI, R8
+ LEAL -4(SI), SI
+ CMPL R8, $0x08
JBE repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy
- CMPL DI, $0x0c
+ CMPL R8, $0x0c
JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JB repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy
cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy:
- CMPL BX, $0x00000104
+ CMPL SI, $0x00000104
JB repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy
- CMPL BX, $0x00010100
+ CMPL SI, $0x00010100
JB repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy
- LEAL -65536(BX), BX
- MOVL BX, SI
- MOVW $0x001d, (AX)
- MOVW BX, 2(AX)
- SARL $0x10, SI
- MOVB SI, 4(AX)
- ADDQ $0x05, AX
+ LEAL -65536(SI), SI
+ MOVL SI, DI
+ MOVW $0x001d, (CX)
+ MOVW SI, 2(CX)
+ SARL $0x10, DI
+ MOVB DI, 4(CX)
+ ADDQ $0x05, CX
JMP repeat_end_emit_encodeBlockAsm4MB
repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy:
- LEAL -256(BX), BX
- MOVW $0x0019, (AX)
- MOVW BX, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(SI), SI
+ MOVW $0x0019, (CX)
+ MOVW SI, 2(CX)
+ ADDQ $0x04, CX
JMP repeat_end_emit_encodeBlockAsm4MB
repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy:
- LEAL -4(BX), BX
- MOVW $0x0015, (AX)
- MOVB BL, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(SI), SI
+ MOVW $0x0015, (CX)
+ MOVB SI, 2(CX)
+ ADDQ $0x03, CX
JMP repeat_end_emit_encodeBlockAsm4MB
repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy:
- SHLL $0x02, BX
- ORL $0x01, BX
- MOVW BX, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, SI
+ ORL $0x01, SI
+ MOVW SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm4MB
repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy:
- XORQ DI, DI
- LEAL 1(DI)(BX*4), BX
- MOVB SI, 1(AX)
- SARL $0x08, SI
- SHLL $0x05, SI
- ORL SI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ XORQ R8, R8
+ LEAL 1(R8)(SI*4), SI
+ MOVB DI, 1(CX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm4MB
four_bytes_remain_repeat_as_copy_encodeBlockAsm4MB:
- TESTL BX, BX
+ TESTL SI, SI
JZ repeat_end_emit_encodeBlockAsm4MB
- XORL DI, DI
- LEAL -1(DI)(BX*4), BX
- MOVB BL, (AX)
- MOVL SI, 1(AX)
- ADDQ $0x05, AX
+ XORL R8, R8
+ LEAL -1(R8)(SI*4), SI
+ MOVB SI, (CX)
+ MOVL DI, 1(CX)
+ ADDQ $0x05, CX
JMP repeat_end_emit_encodeBlockAsm4MB
two_byte_offset_repeat_as_copy_encodeBlockAsm4MB:
- CMPL BX, $0x40
+ CMPL SI, $0x40
JBE two_byte_offset_short_repeat_as_copy_encodeBlockAsm4MB
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JAE long_offset_short_repeat_as_copy_encodeBlockAsm4MB
- MOVL $0x00000001, DI
- LEAL 16(DI), DI
- MOVB SI, 1(AX)
- SHRL $0x08, SI
- SHLL $0x05, SI
- ORL SI, DI
- MOVB DI, (AX)
- ADDQ $0x02, AX
- SUBL $0x08, BX
+ MOVL $0x00000001, R8
+ LEAL 16(R8), R8
+ MOVB DI, 1(CX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R8
+ MOVB R8, (CX)
+ ADDQ $0x02, CX
+ SUBL $0x08, SI
// emitRepeat
- LEAL -4(BX), BX
+ LEAL -4(SI), SI
JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b
- MOVL BX, DI
- LEAL -4(BX), BX
- CMPL DI, $0x08
+ MOVL SI, R8
+ LEAL -4(SI), SI
+ CMPL R8, $0x08
JBE repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b
- CMPL DI, $0x0c
+ CMPL R8, $0x0c
JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JB repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b
cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b:
- CMPL BX, $0x00000104
+ CMPL SI, $0x00000104
JB repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b
- CMPL BX, $0x00010100
+ CMPL SI, $0x00010100
JB repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b
- LEAL -65536(BX), BX
- MOVL BX, SI
- MOVW $0x001d, (AX)
- MOVW BX, 2(AX)
- SARL $0x10, SI
- MOVB SI, 4(AX)
- ADDQ $0x05, AX
+ LEAL -65536(SI), SI
+ MOVL SI, DI
+ MOVW $0x001d, (CX)
+ MOVW SI, 2(CX)
+ SARL $0x10, DI
+ MOVB DI, 4(CX)
+ ADDQ $0x05, CX
JMP repeat_end_emit_encodeBlockAsm4MB
repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b:
- LEAL -256(BX), BX
- MOVW $0x0019, (AX)
- MOVW BX, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(SI), SI
+ MOVW $0x0019, (CX)
+ MOVW SI, 2(CX)
+ ADDQ $0x04, CX
JMP repeat_end_emit_encodeBlockAsm4MB
repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b:
- LEAL -4(BX), BX
- MOVW $0x0015, (AX)
- MOVB BL, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(SI), SI
+ MOVW $0x0015, (CX)
+ MOVB SI, 2(CX)
+ ADDQ $0x03, CX
JMP repeat_end_emit_encodeBlockAsm4MB
repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b:
- SHLL $0x02, BX
- ORL $0x01, BX
- MOVW BX, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, SI
+ ORL $0x01, SI
+ MOVW SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm4MB
repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b:
- XORQ DI, DI
- LEAL 1(DI)(BX*4), BX
- MOVB SI, 1(AX)
- SARL $0x08, SI
- SHLL $0x05, SI
- ORL SI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ XORQ R8, R8
+ LEAL 1(R8)(SI*4), SI
+ MOVB DI, 1(CX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm4MB
long_offset_short_repeat_as_copy_encodeBlockAsm4MB:
- MOVB $0xee, (AX)
- MOVW SI, 1(AX)
- LEAL -60(BX), BX
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW DI, 1(CX)
+ LEAL -60(SI), SI
+ ADDQ $0x03, CX
// emitRepeat
- MOVL BX, DI
- LEAL -4(BX), BX
- CMPL DI, $0x08
+ MOVL SI, R8
+ LEAL -4(SI), SI
+ CMPL R8, $0x08
JBE repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short
- CMPL DI, $0x0c
+ CMPL R8, $0x0c
JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JB repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short
cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short:
- CMPL BX, $0x00000104
+ CMPL SI, $0x00000104
JB repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short
- CMPL BX, $0x00010100
+ CMPL SI, $0x00010100
JB repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short
- LEAL -65536(BX), BX
- MOVL BX, SI
- MOVW $0x001d, (AX)
- MOVW BX, 2(AX)
- SARL $0x10, SI
- MOVB SI, 4(AX)
- ADDQ $0x05, AX
+ LEAL -65536(SI), SI
+ MOVL SI, DI
+ MOVW $0x001d, (CX)
+ MOVW SI, 2(CX)
+ SARL $0x10, DI
+ MOVB DI, 4(CX)
+ ADDQ $0x05, CX
JMP repeat_end_emit_encodeBlockAsm4MB
repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short:
- LEAL -256(BX), BX
- MOVW $0x0019, (AX)
- MOVW BX, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(SI), SI
+ MOVW $0x0019, (CX)
+ MOVW SI, 2(CX)
+ ADDQ $0x04, CX
JMP repeat_end_emit_encodeBlockAsm4MB
repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short:
- LEAL -4(BX), BX
- MOVW $0x0015, (AX)
- MOVB BL, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(SI), SI
+ MOVW $0x0015, (CX)
+ MOVB SI, 2(CX)
+ ADDQ $0x03, CX
JMP repeat_end_emit_encodeBlockAsm4MB
repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short:
- SHLL $0x02, BX
- ORL $0x01, BX
- MOVW BX, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, SI
+ ORL $0x01, SI
+ MOVW SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm4MB
repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short:
- XORQ DI, DI
- LEAL 1(DI)(BX*4), BX
- MOVB SI, 1(AX)
- SARL $0x08, SI
- SHLL $0x05, SI
- ORL SI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ XORQ R8, R8
+ LEAL 1(R8)(SI*4), SI
+ MOVB DI, 1(CX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm4MB
two_byte_offset_short_repeat_as_copy_encodeBlockAsm4MB:
- MOVL BX, DI
- SHLL $0x02, DI
- CMPL BX, $0x0c
+ MOVL SI, R8
+ SHLL $0x02, R8
+ CMPL SI, $0x0c
JAE emit_copy_three_repeat_as_copy_encodeBlockAsm4MB
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JAE emit_copy_three_repeat_as_copy_encodeBlockAsm4MB
- LEAL -15(DI), DI
- MOVB SI, 1(AX)
- SHRL $0x08, SI
- SHLL $0x05, SI
- ORL SI, DI
- MOVB DI, (AX)
- ADDQ $0x02, AX
+ LEAL -15(R8), R8
+ MOVB DI, 1(CX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R8
+ MOVB R8, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm4MB
emit_copy_three_repeat_as_copy_encodeBlockAsm4MB:
- LEAL -2(DI), DI
- MOVB DI, (AX)
- MOVW SI, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(R8), R8
+ MOVB R8, (CX)
+ MOVW DI, 1(CX)
+ ADDQ $0x03, CX
repeat_end_emit_encodeBlockAsm4MB:
- MOVL CX, 12(SP)
+ MOVL DX, 12(SP)
JMP search_loop_encodeBlockAsm4MB
no_repeat_found_encodeBlockAsm4MB:
- CMPL (DX)(BX*1), SI
+ CMPL (BX)(SI*1), DI
JEQ candidate_match_encodeBlockAsm4MB
- SHRQ $0x08, SI
- MOVL 24(SP)(R9*4), BX
- LEAL 2(CX), R8
- CMPL (DX)(DI*1), SI
+ SHRQ $0x08, DI
+ MOVL (AX)(R10*4), SI
+ LEAL 2(DX), R9
+ CMPL (BX)(R8*1), DI
JEQ candidate2_match_encodeBlockAsm4MB
- MOVL R8, 24(SP)(R9*4)
- SHRQ $0x08, SI
- CMPL (DX)(BX*1), SI
+ MOVL R9, (AX)(R10*4)
+ SHRQ $0x08, DI
+ CMPL (BX)(SI*1), DI
JEQ candidate3_match_encodeBlockAsm4MB
- MOVL 20(SP), CX
+ MOVL 20(SP), DX
JMP search_loop_encodeBlockAsm4MB
candidate3_match_encodeBlockAsm4MB:
- ADDL $0x02, CX
+ ADDL $0x02, DX
JMP candidate_match_encodeBlockAsm4MB
candidate2_match_encodeBlockAsm4MB:
- MOVL R8, 24(SP)(R9*4)
- INCL CX
- MOVL DI, BX
+ MOVL R9, (AX)(R10*4)
+ INCL DX
+ MOVL R8, SI
candidate_match_encodeBlockAsm4MB:
- MOVL 12(SP), SI
- TESTL BX, BX
+ MOVL 12(SP), DI
+ TESTL SI, SI
JZ match_extend_back_end_encodeBlockAsm4MB
match_extend_back_loop_encodeBlockAsm4MB:
- CMPL CX, SI
+ CMPL DX, DI
JBE match_extend_back_end_encodeBlockAsm4MB
- MOVB -1(DX)(BX*1), DI
- MOVB -1(DX)(CX*1), R8
- CMPB DI, R8
+ MOVB -1(BX)(SI*1), R8
+ MOVB -1(BX)(DX*1), R9
+ CMPB R8, R9
JNE match_extend_back_end_encodeBlockAsm4MB
- LEAL -1(CX), CX
- DECL BX
+ LEAL -1(DX), DX
+ DECL SI
JZ match_extend_back_end_encodeBlockAsm4MB
JMP match_extend_back_loop_encodeBlockAsm4MB
match_extend_back_end_encodeBlockAsm4MB:
- MOVL CX, SI
- SUBL 12(SP), SI
- LEAQ 4(AX)(SI*1), SI
- CMPQ SI, (SP)
+ MOVL DX, DI
+ SUBL 12(SP), DI
+ LEAQ 4(CX)(DI*1), DI
+ CMPQ DI, (SP)
JB match_dst_size_check_encodeBlockAsm4MB
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_dst_size_check_encodeBlockAsm4MB:
- MOVL CX, SI
- MOVL 12(SP), DI
- CMPL DI, SI
+ MOVL DX, DI
+ MOVL 12(SP), R8
+ CMPL R8, DI
JEQ emit_literal_done_match_emit_encodeBlockAsm4MB
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(DI*1), SI
- SUBL DI, R8
- LEAL -1(R8), DI
- CMPL DI, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(R8*1), DI
+ SUBL R8, R9
+ LEAL -1(R9), R8
+ CMPL R8, $0x3c
JB one_byte_match_emit_encodeBlockAsm4MB
- CMPL DI, $0x00000100
+ CMPL R8, $0x00000100
JB two_bytes_match_emit_encodeBlockAsm4MB
- CMPL DI, $0x00010000
+ CMPL R8, $0x00010000
JB three_bytes_match_emit_encodeBlockAsm4MB
- MOVL DI, R9
- SHRL $0x10, R9
- MOVB $0xf8, (AX)
- MOVW DI, 1(AX)
- MOVB R9, 3(AX)
- ADDQ $0x04, AX
+ MOVL R8, R10
+ SHRL $0x10, R10
+ MOVB $0xf8, (CX)
+ MOVW R8, 1(CX)
+ MOVB R10, 3(CX)
+ ADDQ $0x04, CX
JMP memmove_long_match_emit_encodeBlockAsm4MB
three_bytes_match_emit_encodeBlockAsm4MB:
- MOVB $0xf4, (AX)
- MOVW DI, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW R8, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_encodeBlockAsm4MB
two_bytes_match_emit_encodeBlockAsm4MB:
- MOVB $0xf0, (AX)
- MOVB DI, 1(AX)
- ADDQ $0x02, AX
- CMPL DI, $0x40
+ MOVB $0xf0, (CX)
+ MOVB R8, 1(CX)
+ ADDQ $0x02, CX
+ CMPL R8, $0x40
JB memmove_match_emit_encodeBlockAsm4MB
JMP memmove_long_match_emit_encodeBlockAsm4MB
one_byte_match_emit_encodeBlockAsm4MB:
- SHLB $0x02, DI
- MOVB DI, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, R8
+ MOVB R8, (CX)
+ ADDQ $0x01, CX
memmove_match_emit_encodeBlockAsm4MB:
- LEAQ (AX)(R8*1), DI
+ LEAQ (CX)(R9*1), R8
// genMemMoveShort
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JBE emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_33through64
emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8:
- MOVQ (SI), R9
- MOVQ R9, (AX)
+ MOVQ (DI), R10
+ MOVQ R10, (CX)
JMP memmove_end_copy_match_emit_encodeBlockAsm4MB
emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8through16:
- MOVQ (SI), R9
- MOVQ -8(SI)(R8*1), SI
- MOVQ R9, (AX)
- MOVQ SI, -8(AX)(R8*1)
+ MOVQ (DI), R10
+ MOVQ -8(DI)(R9*1), DI
+ MOVQ R10, (CX)
+ MOVQ DI, -8(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeBlockAsm4MB
emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_17through32:
- MOVOU (SI), X0
- MOVOU -16(SI)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (DI), X0
+ MOVOU -16(DI)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeBlockAsm4MB
emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_33through64:
- MOVOU (SI), X0
- MOVOU 16(SI), X1
- MOVOU -32(SI)(R8*1), X2
- MOVOU -16(SI)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (DI), X0
+ MOVOU 16(DI), X1
+ MOVOU -32(DI)(R9*1), X2
+ MOVOU -16(DI)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_match_emit_encodeBlockAsm4MB:
- MOVQ DI, AX
+ MOVQ R8, CX
JMP emit_literal_done_match_emit_encodeBlockAsm4MB
memmove_long_match_emit_encodeBlockAsm4MB:
- LEAQ (AX)(R8*1), DI
+ LEAQ (CX)(R9*1), R8
// genMemMoveLong
- MOVOU (SI), X0
- MOVOU 16(SI), X1
- MOVOU -32(SI)(R8*1), X2
- MOVOU -16(SI)(R8*1), X3
- MOVQ R8, R10
- SHRQ $0x05, R10
- MOVQ AX, R9
- ANDL $0x0000001f, R9
- MOVQ $0x00000040, R11
- SUBQ R9, R11
- DECQ R10
+ MOVOU (DI), X0
+ MOVOU 16(DI), X1
+ MOVOU -32(DI)(R9*1), X2
+ MOVOU -16(DI)(R9*1), X3
+ MOVQ R9, R11
+ SHRQ $0x05, R11
+ MOVQ CX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R12
+ SUBQ R10, R12
+ DECQ R11
JA emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32
- LEAQ -32(SI)(R11*1), R9
- LEAQ -32(AX)(R11*1), R12
+ LEAQ -32(DI)(R12*1), R10
+ LEAQ -32(CX)(R12*1), R13
emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_big_loop_back:
- MOVOU (R9), X4
- MOVOU 16(R9), X5
- MOVOA X4, (R12)
- MOVOA X5, 16(R12)
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R13)
+ MOVOA X5, 16(R13)
+ ADDQ $0x20, R13
+ ADDQ $0x20, R10
ADDQ $0x20, R12
- ADDQ $0x20, R9
- ADDQ $0x20, R11
- DECQ R10
+ DECQ R11
JNA emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32:
- MOVOU -32(SI)(R11*1), X4
- MOVOU -16(SI)(R11*1), X5
- MOVOA X4, -32(AX)(R11*1)
- MOVOA X5, -16(AX)(R11*1)
- ADDQ $0x20, R11
- CMPQ R8, R11
+ MOVOU -32(DI)(R12*1), X4
+ MOVOU -16(DI)(R12*1), X5
+ MOVOA X4, -32(CX)(R12*1)
+ MOVOA X5, -16(CX)(R12*1)
+ ADDQ $0x20, R12
+ CMPQ R9, R12
JAE emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ DI, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ R8, CX
emit_literal_done_match_emit_encodeBlockAsm4MB:
match_nolit_loop_encodeBlockAsm4MB:
- MOVL CX, SI
- SUBL BX, SI
- MOVL SI, 16(SP)
- ADDL $0x04, CX
- ADDL $0x04, BX
- MOVQ src_len+32(FP), SI
- SUBL CX, SI
- LEAQ (DX)(CX*1), DI
- LEAQ (DX)(BX*1), BX
+ MOVL DX, DI
+ SUBL SI, DI
+ MOVL DI, 16(SP)
+ ADDL $0x04, DX
+ ADDL $0x04, SI
+ MOVQ src_len+32(FP), DI
+ SUBL DX, DI
+ LEAQ (BX)(DX*1), R8
+ LEAQ (BX)(SI*1), SI
// matchLen
- XORL R9, R9
+ XORL R10, R10
matchlen_loopback_16_match_nolit_encodeBlockAsm4MB:
- CMPL SI, $0x10
+ CMPL DI, $0x10
JB matchlen_match8_match_nolit_encodeBlockAsm4MB
- MOVQ (DI)(R9*1), R8
- MOVQ 8(DI)(R9*1), R10
- XORQ (BX)(R9*1), R8
+ MOVQ (R8)(R10*1), R9
+ MOVQ 8(R8)(R10*1), R11
+ XORQ (SI)(R10*1), R9
JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm4MB
- XORQ 8(BX)(R9*1), R10
+ XORQ 8(SI)(R10*1), R11
JNZ matchlen_bsf_16match_nolit_encodeBlockAsm4MB
- LEAL -16(SI), SI
- LEAL 16(R9), R9
+ LEAL -16(DI), DI
+ LEAL 16(R10), R10
JMP matchlen_loopback_16_match_nolit_encodeBlockAsm4MB
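	// The first 8-byte word matched, so the mismatch lies in the second:
	// TZCNT/BSF finds the first differing bit of the XOR result and
	// SARQ $0x03 turns it into a byte count, added to the 8 bytes already
	// matched before jumping out with the final match length.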
matchlen_bsf_16match_nolit_encodeBlockAsm4MB:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL 8(R9)(R10*1), R9
+ SARQ $0x03, R11
+ LEAL 8(R10)(R11*1), R10
JMP match_nolit_end_encodeBlockAsm4MB
matchlen_match8_match_nolit_encodeBlockAsm4MB:
- CMPL SI, $0x08
+ CMPL DI, $0x08
JB matchlen_match4_match_nolit_encodeBlockAsm4MB
- MOVQ (DI)(R9*1), R8
- XORQ (BX)(R9*1), R8
+ MOVQ (R8)(R10*1), R9
+ XORQ (SI)(R10*1), R9
JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm4MB
- LEAL -8(SI), SI
- LEAL 8(R9), R9
+ LEAL -8(DI), DI
+ LEAL 8(R10), R10
JMP matchlen_match4_match_nolit_encodeBlockAsm4MB
matchlen_bsf_8_match_nolit_encodeBlockAsm4MB:
#ifdef GOAMD64_v3
- TZCNTQ R8, R8
+ TZCNTQ R9, R9
#else
- BSFQ R8, R8
+ BSFQ R9, R9
#endif
- SARQ $0x03, R8
- LEAL (R9)(R8*1), R9
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
JMP match_nolit_end_encodeBlockAsm4MB
matchlen_match4_match_nolit_encodeBlockAsm4MB:
- CMPL SI, $0x04
+ CMPL DI, $0x04
JB matchlen_match2_match_nolit_encodeBlockAsm4MB
- MOVL (DI)(R9*1), R8
- CMPL (BX)(R9*1), R8
+ MOVL (R8)(R10*1), R9
+ CMPL (SI)(R10*1), R9
JNE matchlen_match2_match_nolit_encodeBlockAsm4MB
- LEAL -4(SI), SI
- LEAL 4(R9), R9
+ LEAL -4(DI), DI
+ LEAL 4(R10), R10
matchlen_match2_match_nolit_encodeBlockAsm4MB:
- CMPL SI, $0x01
+ CMPL DI, $0x01
JE matchlen_match1_match_nolit_encodeBlockAsm4MB
JB match_nolit_end_encodeBlockAsm4MB
- MOVW (DI)(R9*1), R8
- CMPW (BX)(R9*1), R8
+ MOVW (R8)(R10*1), R9
+ CMPW (SI)(R10*1), R9
JNE matchlen_match1_match_nolit_encodeBlockAsm4MB
- LEAL 2(R9), R9
- SUBL $0x02, SI
+ LEAL 2(R10), R10
+ SUBL $0x02, DI
JZ match_nolit_end_encodeBlockAsm4MB
matchlen_match1_match_nolit_encodeBlockAsm4MB:
- MOVB (DI)(R9*1), R8
- CMPB (BX)(R9*1), R8
+ MOVB (R8)(R10*1), R9
+ CMPB (SI)(R10*1), R9
JNE match_nolit_end_encodeBlockAsm4MB
- LEAL 1(R9), R9
+ LEAL 1(R10), R10
match_nolit_end_encodeBlockAsm4MB:
- ADDL R9, CX
- MOVL 16(SP), BX
- ADDL $0x04, R9
- MOVL CX, 12(SP)
+ ADDL R10, DX
+ MOVL 16(SP), SI
+ ADDL $0x04, R10
+ MOVL DX, 12(SP)
// emitCopy
- CMPL BX, $0x00010000
+ CMPL SI, $0x00010000
JB two_byte_offset_match_nolit_encodeBlockAsm4MB
- CMPL R9, $0x40
+ CMPL R10, $0x40
JBE four_bytes_remain_match_nolit_encodeBlockAsm4MB
- MOVB $0xff, (AX)
- MOVL BX, 1(AX)
- LEAL -64(R9), R9
- ADDQ $0x05, AX
- CMPL R9, $0x04
+ MOVB $0xff, (CX)
+ MOVL SI, 1(CX)
+ LEAL -64(R10), R10
+ ADDQ $0x05, CX
+ CMPL R10, $0x04
JB four_bytes_remain_match_nolit_encodeBlockAsm4MB
// emitRepeat
- MOVL R9, SI
- LEAL -4(R9), R9
- CMPL SI, $0x08
+ MOVL R10, DI
+ LEAL -4(R10), R10
+ CMPL DI, $0x08
JBE repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy
- CMPL SI, $0x0c
+ CMPL DI, $0x0c
JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy
- CMPL BX, $0x00000800
+ CMPL SI, $0x00000800
JB repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy
cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy:
- CMPL R9, $0x00000104
+ CMPL R10, $0x00000104
JB repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy
- CMPL R9, $0x00010100
+ CMPL R10, $0x00010100
JB repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy
- LEAL -65536(R9), R9
- MOVL R9, BX
- MOVW $0x001d, (AX)
- MOVW R9, 2(AX)
- SARL $0x10, BX
- MOVB BL, 4(AX)
- ADDQ $0x05, AX
+ LEAL -65536(R10), R10
+ MOVL R10, SI
+ MOVW $0x001d, (CX)
+ MOVW R10, 2(CX)
+ SARL $0x10, SI
+ MOVB SI, 4(CX)
+ ADDQ $0x05, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy:
- LEAL -256(R9), R9
- MOVW $0x0019, (AX)
- MOVW R9, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R10), R10
+ MOVW $0x0019, (CX)
+ MOVW R10, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy:
- LEAL -4(R9), R9
- MOVW $0x0015, (AX)
- MOVB R9, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R10), R10
+ MOVW $0x0015, (CX)
+ MOVB R10, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy:
- SHLL $0x02, R9
- ORL $0x01, R9
- MOVW R9, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R10
+ ORL $0x01, R10
+ MOVW R10, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy:
- XORQ SI, SI
- LEAL 1(SI)(R9*4), R9
- MOVB BL, 1(AX)
- SARL $0x08, BX
- SHLL $0x05, BX
- ORL BX, R9
- MOVB R9, (AX)
- ADDQ $0x02, AX
+ XORQ DI, DI
+ LEAL 1(DI)(R10*4), R10
+ MOVB SI, 1(CX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, R10
+ MOVB R10, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
four_bytes_remain_match_nolit_encodeBlockAsm4MB:
- TESTL R9, R9
+ TESTL R10, R10
JZ match_nolit_emitcopy_end_encodeBlockAsm4MB
- XORL SI, SI
- LEAL -1(SI)(R9*4), R9
- MOVB R9, (AX)
- MOVL BX, 1(AX)
- ADDQ $0x05, AX
+ XORL DI, DI
+ LEAL -1(DI)(R10*4), R10
+ MOVB R10, (CX)
+ MOVL SI, 1(CX)
+ ADDQ $0x05, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
two_byte_offset_match_nolit_encodeBlockAsm4MB:
- CMPL R9, $0x40
+ CMPL R10, $0x40
JBE two_byte_offset_short_match_nolit_encodeBlockAsm4MB
- CMPL BX, $0x00000800
+ CMPL SI, $0x00000800
JAE long_offset_short_match_nolit_encodeBlockAsm4MB
- MOVL $0x00000001, SI
- LEAL 16(SI), SI
- MOVB BL, 1(AX)
- SHRL $0x08, BX
- SHLL $0x05, BX
- ORL BX, SI
- MOVB SI, (AX)
- ADDQ $0x02, AX
- SUBL $0x08, R9
-
+ MOVL $0x00000001, DI
+ LEAL 16(DI), DI
+ MOVB SI, 1(CX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (CX)
+ ADDQ $0x02, CX
+ SUBL $0x08, R10
+
// emitRepeat
- LEAL -4(R9), R9
+ LEAL -4(R10), R10
JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b
- MOVL R9, SI
- LEAL -4(R9), R9
- CMPL SI, $0x08
+ MOVL R10, DI
+ LEAL -4(R10), R10
+ CMPL DI, $0x08
JBE repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b
- CMPL SI, $0x0c
+ CMPL DI, $0x0c
JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b
- CMPL BX, $0x00000800
+ CMPL SI, $0x00000800
JB repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b
cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b:
- CMPL R9, $0x00000104
+ CMPL R10, $0x00000104
JB repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b
- CMPL R9, $0x00010100
+ CMPL R10, $0x00010100
JB repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b
- LEAL -65536(R9), R9
- MOVL R9, BX
- MOVW $0x001d, (AX)
- MOVW R9, 2(AX)
- SARL $0x10, BX
- MOVB BL, 4(AX)
- ADDQ $0x05, AX
+ LEAL -65536(R10), R10
+ MOVL R10, SI
+ MOVW $0x001d, (CX)
+ MOVW R10, 2(CX)
+ SARL $0x10, SI
+ MOVB SI, 4(CX)
+ ADDQ $0x05, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b:
- LEAL -256(R9), R9
- MOVW $0x0019, (AX)
- MOVW R9, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R10), R10
+ MOVW $0x0019, (CX)
+ MOVW R10, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b:
- LEAL -4(R9), R9
- MOVW $0x0015, (AX)
- MOVB R9, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R10), R10
+ MOVW $0x0015, (CX)
+ MOVB R10, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b:
- SHLL $0x02, R9
- ORL $0x01, R9
- MOVW R9, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R10
+ ORL $0x01, R10
+ MOVW R10, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b:
- XORQ SI, SI
- LEAL 1(SI)(R9*4), R9
- MOVB BL, 1(AX)
- SARL $0x08, BX
- SHLL $0x05, BX
- ORL BX, R9
- MOVB R9, (AX)
- ADDQ $0x02, AX
+ XORQ DI, DI
+ LEAL 1(DI)(R10*4), R10
+ MOVB SI, 1(CX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, R10
+ MOVB R10, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
long_offset_short_match_nolit_encodeBlockAsm4MB:
- MOVB $0xee, (AX)
- MOVW BX, 1(AX)
- LEAL -60(R9), R9
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW SI, 1(CX)
+ LEAL -60(R10), R10
+ ADDQ $0x03, CX
// emitRepeat
- MOVL R9, SI
- LEAL -4(R9), R9
- CMPL SI, $0x08
+ MOVL R10, DI
+ LEAL -4(R10), R10
+ CMPL DI, $0x08
JBE repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short
- CMPL SI, $0x0c
+ CMPL DI, $0x0c
JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short
- CMPL BX, $0x00000800
+ CMPL SI, $0x00000800
JB repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short
cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short:
- CMPL R9, $0x00000104
+ CMPL R10, $0x00000104
JB repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short
- CMPL R9, $0x00010100
+ CMPL R10, $0x00010100
JB repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short
- LEAL -65536(R9), R9
- MOVL R9, BX
- MOVW $0x001d, (AX)
- MOVW R9, 2(AX)
- SARL $0x10, BX
- MOVB BL, 4(AX)
- ADDQ $0x05, AX
+ LEAL -65536(R10), R10
+ MOVL R10, SI
+ MOVW $0x001d, (CX)
+ MOVW R10, 2(CX)
+ SARL $0x10, SI
+ MOVB SI, 4(CX)
+ ADDQ $0x05, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short:
- LEAL -256(R9), R9
- MOVW $0x0019, (AX)
- MOVW R9, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R10), R10
+ MOVW $0x0019, (CX)
+ MOVW R10, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short:
- LEAL -4(R9), R9
- MOVW $0x0015, (AX)
- MOVB R9, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R10), R10
+ MOVW $0x0015, (CX)
+ MOVB R10, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short:
- SHLL $0x02, R9
- ORL $0x01, R9
- MOVW R9, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R10
+ ORL $0x01, R10
+ MOVW R10, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short:
- XORQ SI, SI
- LEAL 1(SI)(R9*4), R9
- MOVB BL, 1(AX)
- SARL $0x08, BX
- SHLL $0x05, BX
- ORL BX, R9
- MOVB R9, (AX)
- ADDQ $0x02, AX
+ XORQ DI, DI
+ LEAL 1(DI)(R10*4), R10
+ MOVB SI, 1(CX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, R10
+ MOVB R10, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
two_byte_offset_short_match_nolit_encodeBlockAsm4MB:
- MOVL R9, SI
- SHLL $0x02, SI
- CMPL R9, $0x0c
+ MOVL R10, DI
+ SHLL $0x02, DI
+ CMPL R10, $0x0c
JAE emit_copy_three_match_nolit_encodeBlockAsm4MB
- CMPL BX, $0x00000800
+ CMPL SI, $0x00000800
JAE emit_copy_three_match_nolit_encodeBlockAsm4MB
- LEAL -15(SI), SI
- MOVB BL, 1(AX)
- SHRL $0x08, BX
- SHLL $0x05, BX
- ORL BX, SI
- MOVB SI, (AX)
- ADDQ $0x02, AX
+ LEAL -15(DI), DI
+ MOVB SI, 1(CX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
emit_copy_three_match_nolit_encodeBlockAsm4MB:
- LEAL -2(SI), SI
- MOVB SI, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(DI), DI
+ MOVB DI, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
match_nolit_emitcopy_end_encodeBlockAsm4MB:
- CMPL CX, 8(SP)
+ CMPL DX, 8(SP)
JAE emit_remainder_encodeBlockAsm4MB
- MOVQ -2(DX)(CX*1), SI
- CMPQ AX, (SP)
+ MOVQ -2(BX)(DX*1), DI
+ CMPQ CX, (SP)
JB match_nolit_dst_ok_encodeBlockAsm4MB
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_nolit_dst_ok_encodeBlockAsm4MB:
- MOVQ $0x0000cf1bbcdcbf9b, R8
- MOVQ SI, DI
- SHRQ $0x10, SI
- MOVQ SI, BX
- SHLQ $0x10, DI
- IMULQ R8, DI
- SHRQ $0x32, DI
- SHLQ $0x10, BX
- IMULQ R8, BX
- SHRQ $0x32, BX
- LEAL -2(CX), R8
- LEAQ 24(SP)(BX*4), R9
- MOVL (R9), BX
- MOVL R8, 24(SP)(DI*4)
- MOVL CX, (R9)
- CMPL (DX)(BX*1), SI
+ MOVQ $0x0000cf1bbcdcbf9b, R9
+ MOVQ DI, R8
+ SHRQ $0x10, DI
+ MOVQ DI, SI
+ SHLQ $0x10, R8
+ IMULQ R9, R8
+ SHRQ $0x32, R8
+ SHLQ $0x10, SI
+ IMULQ R9, SI
+ SHRQ $0x32, SI
+ LEAL -2(DX), R9
+ LEAQ (AX)(SI*4), R10
+ MOVL (R10), SI
+ MOVL R9, (AX)(R8*4)
+ MOVL DX, (R10)
+ CMPL (BX)(SI*1), DI
JEQ match_nolit_loop_encodeBlockAsm4MB
- INCL CX
+ INCL DX
JMP search_loop_encodeBlockAsm4MB
emit_remainder_encodeBlockAsm4MB:
- MOVQ src_len+32(FP), CX
- SUBL 12(SP), CX
- LEAQ 4(AX)(CX*1), CX
- CMPQ CX, (SP)
+ MOVQ src_len+32(FP), AX
+ SUBL 12(SP), AX
+ LEAQ 4(CX)(AX*1), AX
+ CMPQ AX, (SP)
JB emit_remainder_ok_encodeBlockAsm4MB
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
emit_remainder_ok_encodeBlockAsm4MB:
- MOVQ src_len+32(FP), CX
- MOVL 12(SP), BX
- CMPL BX, CX
+ MOVQ src_len+32(FP), AX
+ MOVL 12(SP), DX
+ CMPL DX, AX
JEQ emit_literal_done_emit_remainder_encodeBlockAsm4MB
- MOVL CX, SI
- MOVL CX, 12(SP)
- LEAQ (DX)(BX*1), CX
- SUBL BX, SI
+ MOVL AX, SI
+ MOVL AX, 12(SP)
+ LEAQ (BX)(DX*1), AX
+ SUBL DX, SI
LEAL -1(SI), DX
CMPL DX, $0x3c
JB one_byte_emit_remainder_encodeBlockAsm4MB
@@ -2618,33 +2620,33 @@ emit_remainder_ok_encodeBlockAsm4MB:
JB three_bytes_emit_remainder_encodeBlockAsm4MB
MOVL DX, BX
SHRL $0x10, BX
- MOVB $0xf8, (AX)
- MOVW DX, 1(AX)
- MOVB BL, 3(AX)
- ADDQ $0x04, AX
+ MOVB $0xf8, (CX)
+ MOVW DX, 1(CX)
+ MOVB BL, 3(CX)
+ ADDQ $0x04, CX
JMP memmove_long_emit_remainder_encodeBlockAsm4MB
three_bytes_emit_remainder_encodeBlockAsm4MB:
- MOVB $0xf4, (AX)
- MOVW DX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW DX, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_emit_remainder_encodeBlockAsm4MB
two_bytes_emit_remainder_encodeBlockAsm4MB:
- MOVB $0xf0, (AX)
- MOVB DL, 1(AX)
- ADDQ $0x02, AX
+ MOVB $0xf0, (CX)
+ MOVB DL, 1(CX)
+ ADDQ $0x02, CX
CMPL DX, $0x40
JB memmove_emit_remainder_encodeBlockAsm4MB
JMP memmove_long_emit_remainder_encodeBlockAsm4MB
one_byte_emit_remainder_encodeBlockAsm4MB:
SHLB $0x02, DL
- MOVB DL, (AX)
- ADDQ $0x01, AX
+ MOVB DL, (CX)
+ ADDQ $0x01, CX
memmove_emit_remainder_encodeBlockAsm4MB:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveShort
@@ -2660,73 +2662,73 @@ memmove_emit_remainder_encodeBlockAsm4MB:
JMP emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_33through64
emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_1or2:
- MOVB (CX), SI
- MOVB -1(CX)(BX*1), CL
- MOVB SI, (AX)
- MOVB CL, -1(AX)(BX*1)
+ MOVB (AX), SI
+ MOVB -1(AX)(BX*1), AL
+ MOVB SI, (CX)
+ MOVB AL, -1(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB
emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_3:
- MOVW (CX), SI
- MOVB 2(CX), CL
- MOVW SI, (AX)
- MOVB CL, 2(AX)
+ MOVW (AX), SI
+ MOVB 2(AX), AL
+ MOVW SI, (CX)
+ MOVB AL, 2(CX)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB
emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_4through7:
- MOVL (CX), SI
- MOVL -4(CX)(BX*1), CX
- MOVL SI, (AX)
- MOVL CX, -4(AX)(BX*1)
+ MOVL (AX), SI
+ MOVL -4(AX)(BX*1), AX
+ MOVL SI, (CX)
+ MOVL AX, -4(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB
emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_8through16:
- MOVQ (CX), SI
- MOVQ -8(CX)(BX*1), CX
- MOVQ SI, (AX)
- MOVQ CX, -8(AX)(BX*1)
+ MOVQ (AX), SI
+ MOVQ -8(AX)(BX*1), AX
+ MOVQ SI, (CX)
+ MOVQ AX, -8(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB
emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_17through32:
- MOVOU (CX), X0
- MOVOU -16(CX)(BX*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU -16(AX)(BX*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB
emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_33through64:
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
memmove_end_copy_emit_remainder_encodeBlockAsm4MB:
- MOVQ DX, AX
+ MOVQ DX, CX
JMP emit_literal_done_emit_remainder_encodeBlockAsm4MB
memmove_long_emit_remainder_encodeBlockAsm4MB:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveLong
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
MOVQ BX, DI
SHRQ $0x05, DI
- MOVQ AX, SI
+ MOVQ CX, SI
ANDL $0x0000001f, SI
MOVQ $0x00000040, R8
SUBQ SI, R8
DECQ DI
JA emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_forward_sse_loop_32
- LEAQ -32(CX)(R8*1), SI
- LEAQ -32(AX)(R8*1), R9
+ LEAQ -32(AX)(R8*1), SI
+ LEAQ -32(CX)(R8*1), R9
emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_big_loop_back:
MOVOU (SI), X4
@@ -2740,967 +2742,968 @@ emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_big_loop_back:
JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_big_loop_back
emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_forward_sse_loop_32:
- MOVOU -32(CX)(R8*1), X4
- MOVOU -16(CX)(R8*1), X5
- MOVOA X4, -32(AX)(R8*1)
- MOVOA X5, -16(AX)(R8*1)
+ MOVOU -32(AX)(R8*1), X4
+ MOVOU -16(AX)(R8*1), X5
+ MOVOA X4, -32(CX)(R8*1)
+ MOVOA X5, -16(CX)(R8*1)
ADDQ $0x20, R8
CMPQ BX, R8
JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
- MOVQ DX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
+ MOVQ DX, CX
emit_literal_done_emit_remainder_encodeBlockAsm4MB:
- MOVQ dst_base+0(FP), CX
- SUBQ CX, AX
- MOVQ AX, ret+48(FP)
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, CX
+ MOVQ CX, ret+56(FP)
RET
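// encodeBlockAsm12B now takes its 16384-byte hash table as a caller-provided
// tmp pointer (loaded from tmp+48(FP) into AX) instead of reserving it in the
// stack frame: the frame shrinks from $16408-56 to $24-64, table accesses
// change from 24(SP)(reg*4) to (AX)(reg*4), and the result slot moves from
// ret+48(FP) to ret+56(FP) to account for the extra argument.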
-// func encodeBlockAsm12B(dst []byte, src []byte) int
+// func encodeBlockAsm12B(dst []byte, src []byte, tmp *[16384]byte) int
// Requires: BMI, SSE2
-TEXT ·encodeBlockAsm12B(SB), $16408-56
- MOVQ dst_base+0(FP), AX
- MOVQ $0x00000080, CX
- LEAQ 24(SP), DX
+TEXT ·encodeBlockAsm12B(SB), $24-64
+ MOVQ tmp+48(FP), AX
+ MOVQ dst_base+0(FP), CX
+ MOVQ $0x00000080, DX
+ MOVQ AX, BX
PXOR X0, X0
zero_loop_encodeBlockAsm12B:
- MOVOU X0, (DX)
- MOVOU X0, 16(DX)
- MOVOU X0, 32(DX)
- MOVOU X0, 48(DX)
- MOVOU X0, 64(DX)
- MOVOU X0, 80(DX)
- MOVOU X0, 96(DX)
- MOVOU X0, 112(DX)
- ADDQ $0x80, DX
- DECQ CX
+ MOVOU X0, (BX)
+ MOVOU X0, 16(BX)
+ MOVOU X0, 32(BX)
+ MOVOU X0, 48(BX)
+ MOVOU X0, 64(BX)
+ MOVOU X0, 80(BX)
+ MOVOU X0, 96(BX)
+ MOVOU X0, 112(BX)
+ ADDQ $0x80, BX
+ DECQ DX
JNZ zero_loop_encodeBlockAsm12B
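	// The zero loop above runs 128 iterations of eight 16-byte MOVOU stores,
	// clearing all 16384 bytes of the hash table in the tmp buffer.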
MOVL $0x00000000, 12(SP)
- MOVQ src_len+32(FP), CX
- LEAQ -9(CX), DX
- LEAQ -8(CX), BX
- MOVL BX, 8(SP)
- SHRQ $0x05, CX
- SUBL CX, DX
- LEAQ (AX)(DX*1), DX
- MOVQ DX, (SP)
- MOVL $0x00000001, CX
- MOVL CX, 16(SP)
- MOVQ src_base+24(FP), DX
+ MOVQ src_len+32(FP), DX
+ LEAQ -9(DX), BX
+ LEAQ -8(DX), SI
+ MOVL SI, 8(SP)
+ SHRQ $0x05, DX
+ SUBL DX, BX
+ LEAQ (CX)(BX*1), BX
+ MOVQ BX, (SP)
+ MOVL $0x00000001, DX
+ MOVL DX, 16(SP)
+ MOVQ src_base+24(FP), BX
search_loop_encodeBlockAsm12B:
- MOVL CX, BX
- SUBL 12(SP), BX
- SHRL $0x05, BX
- LEAL 4(CX)(BX*1), BX
- CMPL BX, 8(SP)
+ MOVL DX, SI
+ SUBL 12(SP), SI
+ SHRL $0x05, SI
+ LEAL 4(DX)(SI*1), SI
+ CMPL SI, 8(SP)
JAE emit_remainder_encodeBlockAsm12B
- MOVQ (DX)(CX*1), SI
- MOVL BX, 20(SP)
- MOVQ $0x000000cf1bbcdcbb, R8
- MOVQ SI, R9
- MOVQ SI, R10
- SHRQ $0x08, R10
- SHLQ $0x18, R9
- IMULQ R8, R9
- SHRQ $0x34, R9
+ MOVQ (BX)(DX*1), DI
+ MOVL SI, 20(SP)
+ MOVQ $0x000000cf1bbcdcbb, R9
+ MOVQ DI, R10
+ MOVQ DI, R11
+ SHRQ $0x08, R11
SHLQ $0x18, R10
- IMULQ R8, R10
+ IMULQ R9, R10
SHRQ $0x34, R10
- MOVL 24(SP)(R9*4), BX
- MOVL 24(SP)(R10*4), DI
- MOVL CX, 24(SP)(R9*4)
- LEAL 1(CX), R9
- MOVL R9, 24(SP)(R10*4)
- MOVQ SI, R9
- SHRQ $0x10, R9
- SHLQ $0x18, R9
- IMULQ R8, R9
- SHRQ $0x34, R9
- MOVL CX, R8
- SUBL 16(SP), R8
- MOVL 1(DX)(R8*1), R10
- MOVQ SI, R8
- SHRQ $0x08, R8
- CMPL R8, R10
+ SHLQ $0x18, R11
+ IMULQ R9, R11
+ SHRQ $0x34, R11
+ MOVL (AX)(R10*4), SI
+ MOVL (AX)(R11*4), R8
+ MOVL DX, (AX)(R10*4)
+ LEAL 1(DX), R10
+ MOVL R10, (AX)(R11*4)
+ MOVQ DI, R10
+ SHRQ $0x10, R10
+ SHLQ $0x18, R10
+ IMULQ R9, R10
+ SHRQ $0x34, R10
+ MOVL DX, R9
+ SUBL 16(SP), R9
+ MOVL 1(BX)(R9*1), R11
+ MOVQ DI, R9
+ SHRQ $0x08, R9
+ CMPL R9, R11
JNE no_repeat_found_encodeBlockAsm12B
- LEAL 1(CX), SI
- MOVL 12(SP), DI
- MOVL SI, BX
- SUBL 16(SP), BX
+ LEAL 1(DX), DI
+ MOVL 12(SP), R8
+ MOVL DI, SI
+ SUBL 16(SP), SI
JZ repeat_extend_back_end_encodeBlockAsm12B
repeat_extend_back_loop_encodeBlockAsm12B:
- CMPL SI, DI
+ CMPL DI, R8
JBE repeat_extend_back_end_encodeBlockAsm12B
- MOVB -1(DX)(BX*1), R8
- MOVB -1(DX)(SI*1), R9
- CMPB R8, R9
+ MOVB -1(BX)(SI*1), R9
+ MOVB -1(BX)(DI*1), R10
+ CMPB R9, R10
JNE repeat_extend_back_end_encodeBlockAsm12B
- LEAL -1(SI), SI
- DECL BX
+ LEAL -1(DI), DI
+ DECL SI
JNZ repeat_extend_back_loop_encodeBlockAsm12B
repeat_extend_back_end_encodeBlockAsm12B:
- MOVL SI, BX
- SUBL 12(SP), BX
- LEAQ 3(AX)(BX*1), BX
- CMPQ BX, (SP)
+ MOVL DI, SI
+ SUBL 12(SP), SI
+ LEAQ 3(CX)(SI*1), SI
+ CMPQ SI, (SP)
JB repeat_dst_size_check_encodeBlockAsm12B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
repeat_dst_size_check_encodeBlockAsm12B:
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_repeat_emit_encodeBlockAsm12B
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R9
- SUBL BX, R8
- LEAL -1(R8), BX
- CMPL BX, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R10
+ SUBL SI, R9
+ LEAL -1(R9), SI
+ CMPL SI, $0x3c
JB one_byte_repeat_emit_encodeBlockAsm12B
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_repeat_emit_encodeBlockAsm12B
JB three_bytes_repeat_emit_encodeBlockAsm12B
three_bytes_repeat_emit_encodeBlockAsm12B:
- MOVB $0xf4, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_repeat_emit_encodeBlockAsm12B
two_bytes_repeat_emit_encodeBlockAsm12B:
- MOVB $0xf0, (AX)
- MOVB BL, 1(AX)
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_repeat_emit_encodeBlockAsm12B
JMP memmove_long_repeat_emit_encodeBlockAsm12B
one_byte_repeat_emit_encodeBlockAsm12B:
- SHLB $0x02, BL
- MOVB BL, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
memmove_repeat_emit_encodeBlockAsm12B:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveShort
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JBE emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_17through32
JMP emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_33through64
emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8:
- MOVQ (R9), R10
- MOVQ R10, (AX)
+ MOVQ (R10), R11
+ MOVQ R11, (CX)
JMP memmove_end_copy_repeat_emit_encodeBlockAsm12B
emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8through16:
- MOVQ (R9), R10
- MOVQ -8(R9)(R8*1), R9
- MOVQ R10, (AX)
- MOVQ R9, -8(AX)(R8*1)
+ MOVQ (R10), R11
+ MOVQ -8(R10)(R9*1), R10
+ MOVQ R11, (CX)
+ MOVQ R10, -8(CX)(R9*1)
JMP memmove_end_copy_repeat_emit_encodeBlockAsm12B
emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_17through32:
- MOVOU (R9), X0
- MOVOU -16(R9)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU -16(R10)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_repeat_emit_encodeBlockAsm12B
emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_33through64:
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_repeat_emit_encodeBlockAsm12B:
- MOVQ BX, AX
+ MOVQ SI, CX
JMP emit_literal_done_repeat_emit_encodeBlockAsm12B
memmove_long_repeat_emit_encodeBlockAsm12B:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveLong
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVQ R8, R11
- SHRQ $0x05, R11
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R12
- SUBQ R10, R12
- DECQ R11
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVQ R9, R12
+ SHRQ $0x05, R12
+ MOVQ CX, R11
+ ANDL $0x0000001f, R11
+ MOVQ $0x00000040, R13
+ SUBQ R11, R13
+ DECQ R12
JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_forward_sse_loop_32
- LEAQ -32(R9)(R12*1), R10
- LEAQ -32(AX)(R12*1), R13
+ LEAQ -32(R10)(R13*1), R11
+ LEAQ -32(CX)(R13*1), R14
emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R13)
- MOVOA X5, 16(R13)
+ MOVOU (R11), X4
+ MOVOU 16(R11), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
+ ADDQ $0x20, R14
+ ADDQ $0x20, R11
ADDQ $0x20, R13
- ADDQ $0x20, R10
- ADDQ $0x20, R12
- DECQ R11
+ DECQ R12
JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_big_loop_back
emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_forward_sse_loop_32:
- MOVOU -32(R9)(R12*1), X4
- MOVOU -16(R9)(R12*1), X5
- MOVOA X4, -32(AX)(R12*1)
- MOVOA X5, -16(AX)(R12*1)
- ADDQ $0x20, R12
- CMPQ R8, R12
+ MOVOU -32(R10)(R13*1), X4
+ MOVOU -16(R10)(R13*1), X5
+ MOVOA X4, -32(CX)(R13*1)
+ MOVOA X5, -16(CX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R9, R13
JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ BX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ SI, CX
emit_literal_done_repeat_emit_encodeBlockAsm12B:
- ADDL $0x05, CX
- MOVL CX, BX
- SUBL 16(SP), BX
- MOVQ src_len+32(FP), R8
- SUBL CX, R8
- LEAQ (DX)(CX*1), R9
- LEAQ (DX)(BX*1), BX
+ ADDL $0x05, DX
+ MOVL DX, SI
+ SUBL 16(SP), SI
+ MOVQ src_len+32(FP), R9
+ SUBL DX, R9
+ LEAQ (BX)(DX*1), R10
+ LEAQ (BX)(SI*1), SI
// matchLen
- XORL R11, R11
+ XORL R12, R12
matchlen_loopback_16_repeat_extend_encodeBlockAsm12B:
- CMPL R8, $0x10
+ CMPL R9, $0x10
JB matchlen_match8_repeat_extend_encodeBlockAsm12B
- MOVQ (R9)(R11*1), R10
- MOVQ 8(R9)(R11*1), R12
- XORQ (BX)(R11*1), R10
+ MOVQ (R10)(R12*1), R11
+ MOVQ 8(R10)(R12*1), R13
+ XORQ (SI)(R12*1), R11
JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm12B
- XORQ 8(BX)(R11*1), R12
+ XORQ 8(SI)(R12*1), R13
JNZ matchlen_bsf_16repeat_extend_encodeBlockAsm12B
- LEAL -16(R8), R8
- LEAL 16(R11), R11
+ LEAL -16(R9), R9
+ LEAL 16(R12), R12
JMP matchlen_loopback_16_repeat_extend_encodeBlockAsm12B
matchlen_bsf_16repeat_extend_encodeBlockAsm12B:
#ifdef GOAMD64_v3
- TZCNTQ R12, R12
+ TZCNTQ R13, R13
#else
- BSFQ R12, R12
+ BSFQ R13, R13
#endif
- SARQ $0x03, R12
- LEAL 8(R11)(R12*1), R11
+ SARQ $0x03, R13
+ LEAL 8(R12)(R13*1), R12
JMP repeat_extend_forward_end_encodeBlockAsm12B
matchlen_match8_repeat_extend_encodeBlockAsm12B:
- CMPL R8, $0x08
+ CMPL R9, $0x08
JB matchlen_match4_repeat_extend_encodeBlockAsm12B
- MOVQ (R9)(R11*1), R10
- XORQ (BX)(R11*1), R10
+ MOVQ (R10)(R12*1), R11
+ XORQ (SI)(R12*1), R11
JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm12B
- LEAL -8(R8), R8
- LEAL 8(R11), R11
+ LEAL -8(R9), R9
+ LEAL 8(R12), R12
JMP matchlen_match4_repeat_extend_encodeBlockAsm12B
matchlen_bsf_8_repeat_extend_encodeBlockAsm12B:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL (R11)(R10*1), R11
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
JMP repeat_extend_forward_end_encodeBlockAsm12B
matchlen_match4_repeat_extend_encodeBlockAsm12B:
- CMPL R8, $0x04
+ CMPL R9, $0x04
JB matchlen_match2_repeat_extend_encodeBlockAsm12B
- MOVL (R9)(R11*1), R10
- CMPL (BX)(R11*1), R10
+ MOVL (R10)(R12*1), R11
+ CMPL (SI)(R12*1), R11
JNE matchlen_match2_repeat_extend_encodeBlockAsm12B
- LEAL -4(R8), R8
- LEAL 4(R11), R11
+ LEAL -4(R9), R9
+ LEAL 4(R12), R12
matchlen_match2_repeat_extend_encodeBlockAsm12B:
- CMPL R8, $0x01
+ CMPL R9, $0x01
JE matchlen_match1_repeat_extend_encodeBlockAsm12B
JB repeat_extend_forward_end_encodeBlockAsm12B
- MOVW (R9)(R11*1), R10
- CMPW (BX)(R11*1), R10
+ MOVW (R10)(R12*1), R11
+ CMPW (SI)(R12*1), R11
JNE matchlen_match1_repeat_extend_encodeBlockAsm12B
- LEAL 2(R11), R11
- SUBL $0x02, R8
+ LEAL 2(R12), R12
+ SUBL $0x02, R9
JZ repeat_extend_forward_end_encodeBlockAsm12B
matchlen_match1_repeat_extend_encodeBlockAsm12B:
- MOVB (R9)(R11*1), R10
- CMPB (BX)(R11*1), R10
+ MOVB (R10)(R12*1), R11
+ CMPB (SI)(R12*1), R11
JNE repeat_extend_forward_end_encodeBlockAsm12B
- LEAL 1(R11), R11
+ LEAL 1(R12), R12
repeat_extend_forward_end_encodeBlockAsm12B:
- ADDL R11, CX
- MOVL CX, BX
- SUBL SI, BX
- MOVL 16(SP), SI
- TESTL DI, DI
+ ADDL R12, DX
+ MOVL DX, SI
+ SUBL DI, SI
+ MOVL 16(SP), DI
+ TESTL R8, R8
JZ repeat_as_copy_encodeBlockAsm12B
// emitRepeat
- MOVL BX, DI
- LEAL -4(BX), BX
- CMPL DI, $0x08
+ MOVL SI, R8
+ LEAL -4(SI), SI
+ CMPL R8, $0x08
JBE repeat_two_match_repeat_encodeBlockAsm12B
- CMPL DI, $0x0c
+ CMPL R8, $0x0c
JAE cant_repeat_two_offset_match_repeat_encodeBlockAsm12B
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JB repeat_two_offset_match_repeat_encodeBlockAsm12B
cant_repeat_two_offset_match_repeat_encodeBlockAsm12B:
- CMPL BX, $0x00000104
+ CMPL SI, $0x00000104
JB repeat_three_match_repeat_encodeBlockAsm12B
- LEAL -256(BX), BX
- MOVW $0x0019, (AX)
- MOVW BX, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(SI), SI
+ MOVW $0x0019, (CX)
+ MOVW SI, 2(CX)
+ ADDQ $0x04, CX
JMP repeat_end_emit_encodeBlockAsm12B
repeat_three_match_repeat_encodeBlockAsm12B:
- LEAL -4(BX), BX
- MOVW $0x0015, (AX)
- MOVB BL, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(SI), SI
+ MOVW $0x0015, (CX)
+ MOVB SI, 2(CX)
+ ADDQ $0x03, CX
JMP repeat_end_emit_encodeBlockAsm12B
repeat_two_match_repeat_encodeBlockAsm12B:
- SHLL $0x02, BX
- ORL $0x01, BX
- MOVW BX, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, SI
+ ORL $0x01, SI
+ MOVW SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm12B
repeat_two_offset_match_repeat_encodeBlockAsm12B:
- XORQ DI, DI
- LEAL 1(DI)(BX*4), BX
- MOVB SI, 1(AX)
- SARL $0x08, SI
- SHLL $0x05, SI
- ORL SI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ XORQ R8, R8
+ LEAL 1(R8)(SI*4), SI
+ MOVB DI, 1(CX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm12B
repeat_as_copy_encodeBlockAsm12B:
// emitCopy
- CMPL BX, $0x40
+ CMPL SI, $0x40
JBE two_byte_offset_short_repeat_as_copy_encodeBlockAsm12B
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JAE long_offset_short_repeat_as_copy_encodeBlockAsm12B
- MOVL $0x00000001, DI
- LEAL 16(DI), DI
- MOVB SI, 1(AX)
- SHRL $0x08, SI
- SHLL $0x05, SI
- ORL SI, DI
- MOVB DI, (AX)
- ADDQ $0x02, AX
- SUBL $0x08, BX
+ MOVL $0x00000001, R8
+ LEAL 16(R8), R8
+ MOVB DI, 1(CX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R8
+ MOVB R8, (CX)
+ ADDQ $0x02, CX
+ SUBL $0x08, SI
// emitRepeat
- LEAL -4(BX), BX
+ LEAL -4(SI), SI
JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b
- MOVL BX, DI
- LEAL -4(BX), BX
- CMPL DI, $0x08
+ MOVL SI, R8
+ LEAL -4(SI), SI
+ CMPL R8, $0x08
JBE repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b
- CMPL DI, $0x0c
+ CMPL R8, $0x0c
JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JB repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b
cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b:
- CMPL BX, $0x00000104
+ CMPL SI, $0x00000104
JB repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b
- LEAL -256(BX), BX
- MOVW $0x0019, (AX)
- MOVW BX, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(SI), SI
+ MOVW $0x0019, (CX)
+ MOVW SI, 2(CX)
+ ADDQ $0x04, CX
JMP repeat_end_emit_encodeBlockAsm12B
repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b:
- LEAL -4(BX), BX
- MOVW $0x0015, (AX)
- MOVB BL, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(SI), SI
+ MOVW $0x0015, (CX)
+ MOVB SI, 2(CX)
+ ADDQ $0x03, CX
JMP repeat_end_emit_encodeBlockAsm12B
repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b:
- SHLL $0x02, BX
- ORL $0x01, BX
- MOVW BX, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, SI
+ ORL $0x01, SI
+ MOVW SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm12B
repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b:
- XORQ DI, DI
- LEAL 1(DI)(BX*4), BX
- MOVB SI, 1(AX)
- SARL $0x08, SI
- SHLL $0x05, SI
- ORL SI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ XORQ R8, R8
+ LEAL 1(R8)(SI*4), SI
+ MOVB DI, 1(CX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm12B
long_offset_short_repeat_as_copy_encodeBlockAsm12B:
- MOVB $0xee, (AX)
- MOVW SI, 1(AX)
- LEAL -60(BX), BX
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW DI, 1(CX)
+ LEAL -60(SI), SI
+ ADDQ $0x03, CX
// emitRepeat
- MOVL BX, DI
- LEAL -4(BX), BX
- CMPL DI, $0x08
+ MOVL SI, R8
+ LEAL -4(SI), SI
+ CMPL R8, $0x08
JBE repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short
- CMPL DI, $0x0c
+ CMPL R8, $0x0c
JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JB repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short
cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short:
- CMPL BX, $0x00000104
+ CMPL SI, $0x00000104
JB repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short
- LEAL -256(BX), BX
- MOVW $0x0019, (AX)
- MOVW BX, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(SI), SI
+ MOVW $0x0019, (CX)
+ MOVW SI, 2(CX)
+ ADDQ $0x04, CX
JMP repeat_end_emit_encodeBlockAsm12B
repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short:
- LEAL -4(BX), BX
- MOVW $0x0015, (AX)
- MOVB BL, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(SI), SI
+ MOVW $0x0015, (CX)
+ MOVB SI, 2(CX)
+ ADDQ $0x03, CX
JMP repeat_end_emit_encodeBlockAsm12B
repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short:
- SHLL $0x02, BX
- ORL $0x01, BX
- MOVW BX, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, SI
+ ORL $0x01, SI
+ MOVW SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm12B
repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short:
- XORQ DI, DI
- LEAL 1(DI)(BX*4), BX
- MOVB SI, 1(AX)
- SARL $0x08, SI
- SHLL $0x05, SI
- ORL SI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ XORQ R8, R8
+ LEAL 1(R8)(SI*4), SI
+ MOVB DI, 1(CX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm12B
two_byte_offset_short_repeat_as_copy_encodeBlockAsm12B:
- MOVL BX, DI
- SHLL $0x02, DI
- CMPL BX, $0x0c
+ MOVL SI, R8
+ SHLL $0x02, R8
+ CMPL SI, $0x0c
JAE emit_copy_three_repeat_as_copy_encodeBlockAsm12B
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JAE emit_copy_three_repeat_as_copy_encodeBlockAsm12B
- LEAL -15(DI), DI
- MOVB SI, 1(AX)
- SHRL $0x08, SI
- SHLL $0x05, SI
- ORL SI, DI
- MOVB DI, (AX)
- ADDQ $0x02, AX
+ LEAL -15(R8), R8
+ MOVB DI, 1(CX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R8
+ MOVB R8, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm12B
emit_copy_three_repeat_as_copy_encodeBlockAsm12B:
- LEAL -2(DI), DI
- MOVB DI, (AX)
- MOVW SI, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(R8), R8
+ MOVB R8, (CX)
+ MOVW DI, 1(CX)
+ ADDQ $0x03, CX
repeat_end_emit_encodeBlockAsm12B:
- MOVL CX, 12(SP)
+ MOVL DX, 12(SP)
JMP search_loop_encodeBlockAsm12B
no_repeat_found_encodeBlockAsm12B:
- CMPL (DX)(BX*1), SI
+ CMPL (BX)(SI*1), DI
JEQ candidate_match_encodeBlockAsm12B
- SHRQ $0x08, SI
- MOVL 24(SP)(R9*4), BX
- LEAL 2(CX), R8
- CMPL (DX)(DI*1), SI
+ SHRQ $0x08, DI
+ MOVL (AX)(R10*4), SI
+ LEAL 2(DX), R9
+ CMPL (BX)(R8*1), DI
JEQ candidate2_match_encodeBlockAsm12B
- MOVL R8, 24(SP)(R9*4)
- SHRQ $0x08, SI
- CMPL (DX)(BX*1), SI
+ MOVL R9, (AX)(R10*4)
+ SHRQ $0x08, DI
+ CMPL (BX)(SI*1), DI
JEQ candidate3_match_encodeBlockAsm12B
- MOVL 20(SP), CX
+ MOVL 20(SP), DX
JMP search_loop_encodeBlockAsm12B
candidate3_match_encodeBlockAsm12B:
- ADDL $0x02, CX
+ ADDL $0x02, DX
JMP candidate_match_encodeBlockAsm12B
candidate2_match_encodeBlockAsm12B:
- MOVL R8, 24(SP)(R9*4)
- INCL CX
- MOVL DI, BX
+ MOVL R9, (AX)(R10*4)
+ INCL DX
+ MOVL R8, SI
candidate_match_encodeBlockAsm12B:
- MOVL 12(SP), SI
- TESTL BX, BX
+ MOVL 12(SP), DI
+ TESTL SI, SI
JZ match_extend_back_end_encodeBlockAsm12B
match_extend_back_loop_encodeBlockAsm12B:
- CMPL CX, SI
+ CMPL DX, DI
JBE match_extend_back_end_encodeBlockAsm12B
- MOVB -1(DX)(BX*1), DI
- MOVB -1(DX)(CX*1), R8
- CMPB DI, R8
+ MOVB -1(BX)(SI*1), R8
+ MOVB -1(BX)(DX*1), R9
+ CMPB R8, R9
JNE match_extend_back_end_encodeBlockAsm12B
- LEAL -1(CX), CX
- DECL BX
+ LEAL -1(DX), DX
+ DECL SI
JZ match_extend_back_end_encodeBlockAsm12B
JMP match_extend_back_loop_encodeBlockAsm12B
match_extend_back_end_encodeBlockAsm12B:
- MOVL CX, SI
- SUBL 12(SP), SI
- LEAQ 3(AX)(SI*1), SI
- CMPQ SI, (SP)
+ MOVL DX, DI
+ SUBL 12(SP), DI
+ LEAQ 3(CX)(DI*1), DI
+ CMPQ DI, (SP)
JB match_dst_size_check_encodeBlockAsm12B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_dst_size_check_encodeBlockAsm12B:
- MOVL CX, SI
- MOVL 12(SP), DI
- CMPL DI, SI
+ MOVL DX, DI
+ MOVL 12(SP), R8
+ CMPL R8, DI
JEQ emit_literal_done_match_emit_encodeBlockAsm12B
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(DI*1), SI
- SUBL DI, R8
- LEAL -1(R8), DI
- CMPL DI, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(R8*1), DI
+ SUBL R8, R9
+ LEAL -1(R9), R8
+ CMPL R8, $0x3c
JB one_byte_match_emit_encodeBlockAsm12B
- CMPL DI, $0x00000100
+ CMPL R8, $0x00000100
JB two_bytes_match_emit_encodeBlockAsm12B
JB three_bytes_match_emit_encodeBlockAsm12B
three_bytes_match_emit_encodeBlockAsm12B:
- MOVB $0xf4, (AX)
- MOVW DI, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW R8, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_encodeBlockAsm12B
two_bytes_match_emit_encodeBlockAsm12B:
- MOVB $0xf0, (AX)
- MOVB DI, 1(AX)
- ADDQ $0x02, AX
- CMPL DI, $0x40
+ MOVB $0xf0, (CX)
+ MOVB R8, 1(CX)
+ ADDQ $0x02, CX
+ CMPL R8, $0x40
JB memmove_match_emit_encodeBlockAsm12B
JMP memmove_long_match_emit_encodeBlockAsm12B
one_byte_match_emit_encodeBlockAsm12B:
- SHLB $0x02, DI
- MOVB DI, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, R8
+ MOVB R8, (CX)
+ ADDQ $0x01, CX
memmove_match_emit_encodeBlockAsm12B:
- LEAQ (AX)(R8*1), DI
+ LEAQ (CX)(R9*1), R8
// genMemMoveShort
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JBE emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_33through64
emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8:
- MOVQ (SI), R9
- MOVQ R9, (AX)
+ MOVQ (DI), R10
+ MOVQ R10, (CX)
JMP memmove_end_copy_match_emit_encodeBlockAsm12B
emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8through16:
- MOVQ (SI), R9
- MOVQ -8(SI)(R8*1), SI
- MOVQ R9, (AX)
- MOVQ SI, -8(AX)(R8*1)
+ MOVQ (DI), R10
+ MOVQ -8(DI)(R9*1), DI
+ MOVQ R10, (CX)
+ MOVQ DI, -8(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeBlockAsm12B
emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_17through32:
- MOVOU (SI), X0
- MOVOU -16(SI)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (DI), X0
+ MOVOU -16(DI)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeBlockAsm12B
emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_33through64:
- MOVOU (SI), X0
- MOVOU 16(SI), X1
- MOVOU -32(SI)(R8*1), X2
- MOVOU -16(SI)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (DI), X0
+ MOVOU 16(DI), X1
+ MOVOU -32(DI)(R9*1), X2
+ MOVOU -16(DI)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_match_emit_encodeBlockAsm12B:
- MOVQ DI, AX
+ MOVQ R8, CX
JMP emit_literal_done_match_emit_encodeBlockAsm12B
memmove_long_match_emit_encodeBlockAsm12B:
- LEAQ (AX)(R8*1), DI
+ LEAQ (CX)(R9*1), R8
// genMemMoveLong
- MOVOU (SI), X0
- MOVOU 16(SI), X1
- MOVOU -32(SI)(R8*1), X2
- MOVOU -16(SI)(R8*1), X3
- MOVQ R8, R10
- SHRQ $0x05, R10
- MOVQ AX, R9
- ANDL $0x0000001f, R9
- MOVQ $0x00000040, R11
- SUBQ R9, R11
- DECQ R10
+ MOVOU (DI), X0
+ MOVOU 16(DI), X1
+ MOVOU -32(DI)(R9*1), X2
+ MOVOU -16(DI)(R9*1), X3
+ MOVQ R9, R11
+ SHRQ $0x05, R11
+ MOVQ CX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R12
+ SUBQ R10, R12
+ DECQ R11
JA emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_forward_sse_loop_32
- LEAQ -32(SI)(R11*1), R9
- LEAQ -32(AX)(R11*1), R12
+ LEAQ -32(DI)(R12*1), R10
+ LEAQ -32(CX)(R12*1), R13
emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_big_loop_back:
- MOVOU (R9), X4
- MOVOU 16(R9), X5
- MOVOA X4, (R12)
- MOVOA X5, 16(R12)
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R13)
+ MOVOA X5, 16(R13)
+ ADDQ $0x20, R13
+ ADDQ $0x20, R10
ADDQ $0x20, R12
- ADDQ $0x20, R9
- ADDQ $0x20, R11
- DECQ R10
+ DECQ R11
JNA emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_forward_sse_loop_32:
- MOVOU -32(SI)(R11*1), X4
- MOVOU -16(SI)(R11*1), X5
- MOVOA X4, -32(AX)(R11*1)
- MOVOA X5, -16(AX)(R11*1)
- ADDQ $0x20, R11
- CMPQ R8, R11
+ MOVOU -32(DI)(R12*1), X4
+ MOVOU -16(DI)(R12*1), X5
+ MOVOA X4, -32(CX)(R12*1)
+ MOVOA X5, -16(CX)(R12*1)
+ ADDQ $0x20, R12
+ CMPQ R9, R12
JAE emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ DI, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ R8, CX
emit_literal_done_match_emit_encodeBlockAsm12B:
match_nolit_loop_encodeBlockAsm12B:
- MOVL CX, SI
- SUBL BX, SI
- MOVL SI, 16(SP)
- ADDL $0x04, CX
- ADDL $0x04, BX
- MOVQ src_len+32(FP), SI
- SUBL CX, SI
- LEAQ (DX)(CX*1), DI
- LEAQ (DX)(BX*1), BX
+ MOVL DX, DI
+ SUBL SI, DI
+ MOVL DI, 16(SP)
+ ADDL $0x04, DX
+ ADDL $0x04, SI
+ MOVQ src_len+32(FP), DI
+ SUBL DX, DI
+ LEAQ (BX)(DX*1), R8
+ LEAQ (BX)(SI*1), SI
// matchLen
- XORL R9, R9
+ XORL R10, R10
matchlen_loopback_16_match_nolit_encodeBlockAsm12B:
- CMPL SI, $0x10
+ CMPL DI, $0x10
JB matchlen_match8_match_nolit_encodeBlockAsm12B
- MOVQ (DI)(R9*1), R8
- MOVQ 8(DI)(R9*1), R10
- XORQ (BX)(R9*1), R8
+ MOVQ (R8)(R10*1), R9
+ MOVQ 8(R8)(R10*1), R11
+ XORQ (SI)(R10*1), R9
JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm12B
- XORQ 8(BX)(R9*1), R10
+ XORQ 8(SI)(R10*1), R11
JNZ matchlen_bsf_16match_nolit_encodeBlockAsm12B
- LEAL -16(SI), SI
- LEAL 16(R9), R9
+ LEAL -16(DI), DI
+ LEAL 16(R10), R10
JMP matchlen_loopback_16_match_nolit_encodeBlockAsm12B
matchlen_bsf_16match_nolit_encodeBlockAsm12B:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL 8(R9)(R10*1), R9
+ SARQ $0x03, R11
+ LEAL 8(R10)(R11*1), R10
JMP match_nolit_end_encodeBlockAsm12B
matchlen_match8_match_nolit_encodeBlockAsm12B:
- CMPL SI, $0x08
+ CMPL DI, $0x08
JB matchlen_match4_match_nolit_encodeBlockAsm12B
- MOVQ (DI)(R9*1), R8
- XORQ (BX)(R9*1), R8
+ MOVQ (R8)(R10*1), R9
+ XORQ (SI)(R10*1), R9
JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm12B
- LEAL -8(SI), SI
- LEAL 8(R9), R9
+ LEAL -8(DI), DI
+ LEAL 8(R10), R10
JMP matchlen_match4_match_nolit_encodeBlockAsm12B
matchlen_bsf_8_match_nolit_encodeBlockAsm12B:
#ifdef GOAMD64_v3
- TZCNTQ R8, R8
+ TZCNTQ R9, R9
#else
- BSFQ R8, R8
+ BSFQ R9, R9
#endif
- SARQ $0x03, R8
- LEAL (R9)(R8*1), R9
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
JMP match_nolit_end_encodeBlockAsm12B
matchlen_match4_match_nolit_encodeBlockAsm12B:
- CMPL SI, $0x04
+ CMPL DI, $0x04
JB matchlen_match2_match_nolit_encodeBlockAsm12B
- MOVL (DI)(R9*1), R8
- CMPL (BX)(R9*1), R8
+ MOVL (R8)(R10*1), R9
+ CMPL (SI)(R10*1), R9
JNE matchlen_match2_match_nolit_encodeBlockAsm12B
- LEAL -4(SI), SI
- LEAL 4(R9), R9
+ LEAL -4(DI), DI
+ LEAL 4(R10), R10
matchlen_match2_match_nolit_encodeBlockAsm12B:
- CMPL SI, $0x01
+ CMPL DI, $0x01
JE matchlen_match1_match_nolit_encodeBlockAsm12B
JB match_nolit_end_encodeBlockAsm12B
- MOVW (DI)(R9*1), R8
- CMPW (BX)(R9*1), R8
+ MOVW (R8)(R10*1), R9
+ CMPW (SI)(R10*1), R9
JNE matchlen_match1_match_nolit_encodeBlockAsm12B
- LEAL 2(R9), R9
- SUBL $0x02, SI
+ LEAL 2(R10), R10
+ SUBL $0x02, DI
JZ match_nolit_end_encodeBlockAsm12B
matchlen_match1_match_nolit_encodeBlockAsm12B:
- MOVB (DI)(R9*1), R8
- CMPB (BX)(R9*1), R8
+ MOVB (R8)(R10*1), R9
+ CMPB (SI)(R10*1), R9
JNE match_nolit_end_encodeBlockAsm12B
- LEAL 1(R9), R9
+ LEAL 1(R10), R10
match_nolit_end_encodeBlockAsm12B:
- ADDL R9, CX
- MOVL 16(SP), BX
- ADDL $0x04, R9
- MOVL CX, 12(SP)
+ ADDL R10, DX
+ MOVL 16(SP), SI
+ ADDL $0x04, R10
+ MOVL DX, 12(SP)
// emitCopy
- CMPL R9, $0x40
+ CMPL R10, $0x40
JBE two_byte_offset_short_match_nolit_encodeBlockAsm12B
- CMPL BX, $0x00000800
+ CMPL SI, $0x00000800
JAE long_offset_short_match_nolit_encodeBlockAsm12B
- MOVL $0x00000001, SI
- LEAL 16(SI), SI
- MOVB BL, 1(AX)
- SHRL $0x08, BX
- SHLL $0x05, BX
- ORL BX, SI
- MOVB SI, (AX)
- ADDQ $0x02, AX
- SUBL $0x08, R9
+ MOVL $0x00000001, DI
+ LEAL 16(DI), DI
+ MOVB SI, 1(CX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (CX)
+ ADDQ $0x02, CX
+ SUBL $0x08, R10
// emitRepeat
- LEAL -4(R9), R9
+ LEAL -4(R10), R10
JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b
- MOVL R9, SI
- LEAL -4(R9), R9
- CMPL SI, $0x08
+ MOVL R10, DI
+ LEAL -4(R10), R10
+ CMPL DI, $0x08
JBE repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short_2b
- CMPL SI, $0x0c
+ CMPL DI, $0x0c
JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b
- CMPL BX, $0x00000800
+ CMPL SI, $0x00000800
JB repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b
cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b:
- CMPL R9, $0x00000104
+ CMPL R10, $0x00000104
JB repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short_2b
- LEAL -256(R9), R9
- MOVW $0x0019, (AX)
- MOVW R9, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R10), R10
+ MOVW $0x0019, (CX)
+ MOVW R10, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm12B
repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short_2b:
- LEAL -4(R9), R9
- MOVW $0x0015, (AX)
- MOVB R9, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R10), R10
+ MOVW $0x0015, (CX)
+ MOVB R10, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm12B
repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short_2b:
- SHLL $0x02, R9
- ORL $0x01, R9
- MOVW R9, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R10
+ ORL $0x01, R10
+ MOVW R10, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm12B
repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b:
- XORQ SI, SI
- LEAL 1(SI)(R9*4), R9
- MOVB BL, 1(AX)
- SARL $0x08, BX
- SHLL $0x05, BX
- ORL BX, R9
- MOVB R9, (AX)
- ADDQ $0x02, AX
+ XORQ DI, DI
+ LEAL 1(DI)(R10*4), R10
+ MOVB SI, 1(CX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, R10
+ MOVB R10, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm12B
long_offset_short_match_nolit_encodeBlockAsm12B:
- MOVB $0xee, (AX)
- MOVW BX, 1(AX)
- LEAL -60(R9), R9
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW SI, 1(CX)
+ LEAL -60(R10), R10
+ ADDQ $0x03, CX
// emitRepeat
- MOVL R9, SI
- LEAL -4(R9), R9
- CMPL SI, $0x08
+ MOVL R10, DI
+ LEAL -4(R10), R10
+ CMPL DI, $0x08
JBE repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short
- CMPL SI, $0x0c
+ CMPL DI, $0x0c
JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short
- CMPL BX, $0x00000800
+ CMPL SI, $0x00000800
JB repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short
cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short:
- CMPL R9, $0x00000104
+ CMPL R10, $0x00000104
JB repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short
- LEAL -256(R9), R9
- MOVW $0x0019, (AX)
- MOVW R9, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R10), R10
+ MOVW $0x0019, (CX)
+ MOVW R10, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm12B
repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short:
- LEAL -4(R9), R9
- MOVW $0x0015, (AX)
- MOVB R9, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R10), R10
+ MOVW $0x0015, (CX)
+ MOVB R10, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm12B
repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short:
- SHLL $0x02, R9
- ORL $0x01, R9
- MOVW R9, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R10
+ ORL $0x01, R10
+ MOVW R10, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm12B
repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short:
- XORQ SI, SI
- LEAL 1(SI)(R9*4), R9
- MOVB BL, 1(AX)
- SARL $0x08, BX
- SHLL $0x05, BX
- ORL BX, R9
- MOVB R9, (AX)
- ADDQ $0x02, AX
- JMP match_nolit_emitcopy_end_encodeBlockAsm12B
-
+ XORQ DI, DI
+ LEAL 1(DI)(R10*4), R10
+ MOVB SI, 1(CX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, R10
+ MOVB R10, (CX)
+ ADDQ $0x02, CX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm12B
+
two_byte_offset_short_match_nolit_encodeBlockAsm12B:
- MOVL R9, SI
- SHLL $0x02, SI
- CMPL R9, $0x0c
+ MOVL R10, DI
+ SHLL $0x02, DI
+ CMPL R10, $0x0c
JAE emit_copy_three_match_nolit_encodeBlockAsm12B
- CMPL BX, $0x00000800
+ CMPL SI, $0x00000800
JAE emit_copy_three_match_nolit_encodeBlockAsm12B
- LEAL -15(SI), SI
- MOVB BL, 1(AX)
- SHRL $0x08, BX
- SHLL $0x05, BX
- ORL BX, SI
- MOVB SI, (AX)
- ADDQ $0x02, AX
+ LEAL -15(DI), DI
+ MOVB SI, 1(CX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm12B
emit_copy_three_match_nolit_encodeBlockAsm12B:
- LEAL -2(SI), SI
- MOVB SI, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(DI), DI
+ MOVB DI, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
match_nolit_emitcopy_end_encodeBlockAsm12B:
- CMPL CX, 8(SP)
+ CMPL DX, 8(SP)
JAE emit_remainder_encodeBlockAsm12B
- MOVQ -2(DX)(CX*1), SI
- CMPQ AX, (SP)
+ MOVQ -2(BX)(DX*1), DI
+ CMPQ CX, (SP)
JB match_nolit_dst_ok_encodeBlockAsm12B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_nolit_dst_ok_encodeBlockAsm12B:
- MOVQ $0x000000cf1bbcdcbb, R8
- MOVQ SI, DI
- SHRQ $0x10, SI
- MOVQ SI, BX
- SHLQ $0x18, DI
- IMULQ R8, DI
- SHRQ $0x34, DI
- SHLQ $0x18, BX
- IMULQ R8, BX
- SHRQ $0x34, BX
- LEAL -2(CX), R8
- LEAQ 24(SP)(BX*4), R9
- MOVL (R9), BX
- MOVL R8, 24(SP)(DI*4)
- MOVL CX, (R9)
- CMPL (DX)(BX*1), SI
+ MOVQ $0x000000cf1bbcdcbb, R9
+ MOVQ DI, R8
+ SHRQ $0x10, DI
+ MOVQ DI, SI
+ SHLQ $0x18, R8
+ IMULQ R9, R8
+ SHRQ $0x34, R8
+ SHLQ $0x18, SI
+ IMULQ R9, SI
+ SHRQ $0x34, SI
+ LEAL -2(DX), R9
+ LEAQ (AX)(SI*4), R10
+ MOVL (R10), SI
+ MOVL R9, (AX)(R8*4)
+ MOVL DX, (R10)
+ CMPL (BX)(SI*1), DI
JEQ match_nolit_loop_encodeBlockAsm12B
- INCL CX
+ INCL DX
JMP search_loop_encodeBlockAsm12B
emit_remainder_encodeBlockAsm12B:
- MOVQ src_len+32(FP), CX
- SUBL 12(SP), CX
- LEAQ 3(AX)(CX*1), CX
- CMPQ CX, (SP)
+ MOVQ src_len+32(FP), AX
+ SUBL 12(SP), AX
+ LEAQ 3(CX)(AX*1), AX
+ CMPQ AX, (SP)
JB emit_remainder_ok_encodeBlockAsm12B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
emit_remainder_ok_encodeBlockAsm12B:
- MOVQ src_len+32(FP), CX
- MOVL 12(SP), BX
- CMPL BX, CX
+ MOVQ src_len+32(FP), AX
+ MOVL 12(SP), DX
+ CMPL DX, AX
JEQ emit_literal_done_emit_remainder_encodeBlockAsm12B
- MOVL CX, SI
- MOVL CX, 12(SP)
- LEAQ (DX)(BX*1), CX
- SUBL BX, SI
+ MOVL AX, SI
+ MOVL AX, 12(SP)
+ LEAQ (BX)(DX*1), AX
+ SUBL DX, SI
LEAL -1(SI), DX
CMPL DX, $0x3c
JB one_byte_emit_remainder_encodeBlockAsm12B
@@ -3709,26 +3712,26 @@ emit_remainder_ok_encodeBlockAsm12B:
JB three_bytes_emit_remainder_encodeBlockAsm12B
three_bytes_emit_remainder_encodeBlockAsm12B:
- MOVB $0xf4, (AX)
- MOVW DX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW DX, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_emit_remainder_encodeBlockAsm12B
two_bytes_emit_remainder_encodeBlockAsm12B:
- MOVB $0xf0, (AX)
- MOVB DL, 1(AX)
- ADDQ $0x02, AX
+ MOVB $0xf0, (CX)
+ MOVB DL, 1(CX)
+ ADDQ $0x02, CX
CMPL DX, $0x40
JB memmove_emit_remainder_encodeBlockAsm12B
JMP memmove_long_emit_remainder_encodeBlockAsm12B
one_byte_emit_remainder_encodeBlockAsm12B:
SHLB $0x02, DL
- MOVB DL, (AX)
- ADDQ $0x01, AX
+ MOVB DL, (CX)
+ ADDQ $0x01, CX
memmove_emit_remainder_encodeBlockAsm12B:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveShort
@@ -3744,73 +3747,73 @@ memmove_emit_remainder_encodeBlockAsm12B:
JMP emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_33through64
emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_1or2:
- MOVB (CX), SI
- MOVB -1(CX)(BX*1), CL
- MOVB SI, (AX)
- MOVB CL, -1(AX)(BX*1)
+ MOVB (AX), SI
+ MOVB -1(AX)(BX*1), AL
+ MOVB SI, (CX)
+ MOVB AL, -1(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B
emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_3:
- MOVW (CX), SI
- MOVB 2(CX), CL
- MOVW SI, (AX)
- MOVB CL, 2(AX)
+ MOVW (AX), SI
+ MOVB 2(AX), AL
+ MOVW SI, (CX)
+ MOVB AL, 2(CX)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B
emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_4through7:
- MOVL (CX), SI
- MOVL -4(CX)(BX*1), CX
- MOVL SI, (AX)
- MOVL CX, -4(AX)(BX*1)
+ MOVL (AX), SI
+ MOVL -4(AX)(BX*1), AX
+ MOVL SI, (CX)
+ MOVL AX, -4(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B
emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_8through16:
- MOVQ (CX), SI
- MOVQ -8(CX)(BX*1), CX
- MOVQ SI, (AX)
- MOVQ CX, -8(AX)(BX*1)
+ MOVQ (AX), SI
+ MOVQ -8(AX)(BX*1), AX
+ MOVQ SI, (CX)
+ MOVQ AX, -8(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B
emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_17through32:
- MOVOU (CX), X0
- MOVOU -16(CX)(BX*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU -16(AX)(BX*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B
emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_33through64:
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
memmove_end_copy_emit_remainder_encodeBlockAsm12B:
- MOVQ DX, AX
+ MOVQ DX, CX
JMP emit_literal_done_emit_remainder_encodeBlockAsm12B
memmove_long_emit_remainder_encodeBlockAsm12B:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveLong
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
MOVQ BX, DI
SHRQ $0x05, DI
- MOVQ AX, SI
+ MOVQ CX, SI
ANDL $0x0000001f, SI
MOVQ $0x00000040, R8
SUBQ SI, R8
DECQ DI
JA emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_forward_sse_loop_32
- LEAQ -32(CX)(R8*1), SI
- LEAQ -32(AX)(R8*1), R9
+ LEAQ -32(AX)(R8*1), SI
+ LEAQ -32(CX)(R8*1), R9
emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_big_loop_back:
MOVOU (SI), X4
@@ -3824,967 +3827,968 @@ emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_big_loop_back:
JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_big_loop_back
emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_forward_sse_loop_32:
- MOVOU -32(CX)(R8*1), X4
- MOVOU -16(CX)(R8*1), X5
- MOVOA X4, -32(AX)(R8*1)
- MOVOA X5, -16(AX)(R8*1)
+ MOVOU -32(AX)(R8*1), X4
+ MOVOU -16(AX)(R8*1), X5
+ MOVOA X4, -32(CX)(R8*1)
+ MOVOA X5, -16(CX)(R8*1)
ADDQ $0x20, R8
CMPQ BX, R8
JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
- MOVQ DX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
+ MOVQ DX, CX
emit_literal_done_emit_remainder_encodeBlockAsm12B:
- MOVQ dst_base+0(FP), CX
- SUBQ CX, AX
- MOVQ AX, ret+48(FP)
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, CX
+ MOVQ CX, ret+56(FP)
RET
-// func encodeBlockAsm10B(dst []byte, src []byte) int
+// func encodeBlockAsm10B(dst []byte, src []byte, tmp *[4096]byte) int
// Requires: BMI, SSE2
-TEXT ·encodeBlockAsm10B(SB), $4120-56
- MOVQ dst_base+0(FP), AX
- MOVQ $0x00000020, CX
- LEAQ 24(SP), DX
+TEXT ·encodeBlockAsm10B(SB), $24-64
+ MOVQ tmp+48(FP), AX
+ MOVQ dst_base+0(FP), CX
+ MOVQ $0x00000020, DX
+ MOVQ AX, BX
PXOR X0, X0
zero_loop_encodeBlockAsm10B:
- MOVOU X0, (DX)
- MOVOU X0, 16(DX)
- MOVOU X0, 32(DX)
- MOVOU X0, 48(DX)
- MOVOU X0, 64(DX)
- MOVOU X0, 80(DX)
- MOVOU X0, 96(DX)
- MOVOU X0, 112(DX)
- ADDQ $0x80, DX
- DECQ CX
+ MOVOU X0, (BX)
+ MOVOU X0, 16(BX)
+ MOVOU X0, 32(BX)
+ MOVOU X0, 48(BX)
+ MOVOU X0, 64(BX)
+ MOVOU X0, 80(BX)
+ MOVOU X0, 96(BX)
+ MOVOU X0, 112(BX)
+ ADDQ $0x80, BX
+ DECQ DX
JNZ zero_loop_encodeBlockAsm10B
MOVL $0x00000000, 12(SP)
- MOVQ src_len+32(FP), CX
- LEAQ -9(CX), DX
- LEAQ -8(CX), BX
- MOVL BX, 8(SP)
- SHRQ $0x05, CX
- SUBL CX, DX
- LEAQ (AX)(DX*1), DX
- MOVQ DX, (SP)
- MOVL $0x00000001, CX
- MOVL CX, 16(SP)
- MOVQ src_base+24(FP), DX
+ MOVQ src_len+32(FP), DX
+ LEAQ -9(DX), BX
+ LEAQ -8(DX), SI
+ MOVL SI, 8(SP)
+ SHRQ $0x05, DX
+ SUBL DX, BX
+ LEAQ (CX)(BX*1), BX
+ MOVQ BX, (SP)
+ MOVL $0x00000001, DX
+ MOVL DX, 16(SP)
+ MOVQ src_base+24(FP), BX
search_loop_encodeBlockAsm10B:
- MOVL CX, BX
- SUBL 12(SP), BX
- SHRL $0x05, BX
- LEAL 4(CX)(BX*1), BX
- CMPL BX, 8(SP)
+ MOVL DX, SI
+ SUBL 12(SP), SI
+ SHRL $0x05, SI
+ LEAL 4(DX)(SI*1), SI
+ CMPL SI, 8(SP)
JAE emit_remainder_encodeBlockAsm10B
- MOVQ (DX)(CX*1), SI
- MOVL BX, 20(SP)
- MOVQ $0x9e3779b1, R8
- MOVQ SI, R9
- MOVQ SI, R10
- SHRQ $0x08, R10
- SHLQ $0x20, R9
- IMULQ R8, R9
- SHRQ $0x36, R9
+ MOVQ (BX)(DX*1), DI
+ MOVL SI, 20(SP)
+ MOVQ $0x9e3779b1, R9
+ MOVQ DI, R10
+ MOVQ DI, R11
+ SHRQ $0x08, R11
SHLQ $0x20, R10
- IMULQ R8, R10
+ IMULQ R9, R10
SHRQ $0x36, R10
- MOVL 24(SP)(R9*4), BX
- MOVL 24(SP)(R10*4), DI
- MOVL CX, 24(SP)(R9*4)
- LEAL 1(CX), R9
- MOVL R9, 24(SP)(R10*4)
- MOVQ SI, R9
- SHRQ $0x10, R9
- SHLQ $0x20, R9
- IMULQ R8, R9
- SHRQ $0x36, R9
- MOVL CX, R8
- SUBL 16(SP), R8
- MOVL 1(DX)(R8*1), R10
- MOVQ SI, R8
- SHRQ $0x08, R8
- CMPL R8, R10
+ SHLQ $0x20, R11
+ IMULQ R9, R11
+ SHRQ $0x36, R11
+ MOVL (AX)(R10*4), SI
+ MOVL (AX)(R11*4), R8
+ MOVL DX, (AX)(R10*4)
+ LEAL 1(DX), R10
+ MOVL R10, (AX)(R11*4)
+ MOVQ DI, R10
+ SHRQ $0x10, R10
+ SHLQ $0x20, R10
+ IMULQ R9, R10
+ SHRQ $0x36, R10
+ MOVL DX, R9
+ SUBL 16(SP), R9
+ MOVL 1(BX)(R9*1), R11
+ MOVQ DI, R9
+ SHRQ $0x08, R9
+ CMPL R9, R11
JNE no_repeat_found_encodeBlockAsm10B
- LEAL 1(CX), SI
- MOVL 12(SP), DI
- MOVL SI, BX
- SUBL 16(SP), BX
+ LEAL 1(DX), DI
+ MOVL 12(SP), R8
+ MOVL DI, SI
+ SUBL 16(SP), SI
JZ repeat_extend_back_end_encodeBlockAsm10B
repeat_extend_back_loop_encodeBlockAsm10B:
- CMPL SI, DI
+ CMPL DI, R8
JBE repeat_extend_back_end_encodeBlockAsm10B
- MOVB -1(DX)(BX*1), R8
- MOVB -1(DX)(SI*1), R9
- CMPB R8, R9
+ MOVB -1(BX)(SI*1), R9
+ MOVB -1(BX)(DI*1), R10
+ CMPB R9, R10
JNE repeat_extend_back_end_encodeBlockAsm10B
- LEAL -1(SI), SI
- DECL BX
+ LEAL -1(DI), DI
+ DECL SI
JNZ repeat_extend_back_loop_encodeBlockAsm10B
repeat_extend_back_end_encodeBlockAsm10B:
- MOVL SI, BX
- SUBL 12(SP), BX
- LEAQ 3(AX)(BX*1), BX
- CMPQ BX, (SP)
+ MOVL DI, SI
+ SUBL 12(SP), SI
+ LEAQ 3(CX)(SI*1), SI
+ CMPQ SI, (SP)
JB repeat_dst_size_check_encodeBlockAsm10B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
repeat_dst_size_check_encodeBlockAsm10B:
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_repeat_emit_encodeBlockAsm10B
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R9
- SUBL BX, R8
- LEAL -1(R8), BX
- CMPL BX, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R10
+ SUBL SI, R9
+ LEAL -1(R9), SI
+ CMPL SI, $0x3c
JB one_byte_repeat_emit_encodeBlockAsm10B
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_repeat_emit_encodeBlockAsm10B
JB three_bytes_repeat_emit_encodeBlockAsm10B
three_bytes_repeat_emit_encodeBlockAsm10B:
- MOVB $0xf4, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_repeat_emit_encodeBlockAsm10B
two_bytes_repeat_emit_encodeBlockAsm10B:
- MOVB $0xf0, (AX)
- MOVB BL, 1(AX)
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_repeat_emit_encodeBlockAsm10B
JMP memmove_long_repeat_emit_encodeBlockAsm10B
one_byte_repeat_emit_encodeBlockAsm10B:
- SHLB $0x02, BL
- MOVB BL, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
memmove_repeat_emit_encodeBlockAsm10B:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveShort
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JBE emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_17through32
JMP emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_33through64
emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8:
- MOVQ (R9), R10
- MOVQ R10, (AX)
+ MOVQ (R10), R11
+ MOVQ R11, (CX)
JMP memmove_end_copy_repeat_emit_encodeBlockAsm10B
emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8through16:
- MOVQ (R9), R10
- MOVQ -8(R9)(R8*1), R9
- MOVQ R10, (AX)
- MOVQ R9, -8(AX)(R8*1)
+ MOVQ (R10), R11
+ MOVQ -8(R10)(R9*1), R10
+ MOVQ R11, (CX)
+ MOVQ R10, -8(CX)(R9*1)
JMP memmove_end_copy_repeat_emit_encodeBlockAsm10B
emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_17through32:
- MOVOU (R9), X0
- MOVOU -16(R9)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU -16(R10)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_repeat_emit_encodeBlockAsm10B
emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_33through64:
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_repeat_emit_encodeBlockAsm10B:
- MOVQ BX, AX
+ MOVQ SI, CX
JMP emit_literal_done_repeat_emit_encodeBlockAsm10B
memmove_long_repeat_emit_encodeBlockAsm10B:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveLong
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVQ R8, R11
- SHRQ $0x05, R11
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R12
- SUBQ R10, R12
- DECQ R11
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVQ R9, R12
+ SHRQ $0x05, R12
+ MOVQ CX, R11
+ ANDL $0x0000001f, R11
+ MOVQ $0x00000040, R13
+ SUBQ R11, R13
+ DECQ R12
JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_forward_sse_loop_32
- LEAQ -32(R9)(R12*1), R10
- LEAQ -32(AX)(R12*1), R13
+ LEAQ -32(R10)(R13*1), R11
+ LEAQ -32(CX)(R13*1), R14
emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R13)
- MOVOA X5, 16(R13)
+ MOVOU (R11), X4
+ MOVOU 16(R11), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
+ ADDQ $0x20, R14
+ ADDQ $0x20, R11
ADDQ $0x20, R13
- ADDQ $0x20, R10
- ADDQ $0x20, R12
- DECQ R11
+ DECQ R12
JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_big_loop_back
emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_forward_sse_loop_32:
- MOVOU -32(R9)(R12*1), X4
- MOVOU -16(R9)(R12*1), X5
- MOVOA X4, -32(AX)(R12*1)
- MOVOA X5, -16(AX)(R12*1)
- ADDQ $0x20, R12
- CMPQ R8, R12
+ MOVOU -32(R10)(R13*1), X4
+ MOVOU -16(R10)(R13*1), X5
+ MOVOA X4, -32(CX)(R13*1)
+ MOVOA X5, -16(CX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R9, R13
JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ BX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ SI, CX
emit_literal_done_repeat_emit_encodeBlockAsm10B:
- ADDL $0x05, CX
- MOVL CX, BX
- SUBL 16(SP), BX
- MOVQ src_len+32(FP), R8
- SUBL CX, R8
- LEAQ (DX)(CX*1), R9
- LEAQ (DX)(BX*1), BX
+ ADDL $0x05, DX
+ MOVL DX, SI
+ SUBL 16(SP), SI
+ MOVQ src_len+32(FP), R9
+ SUBL DX, R9
+ LEAQ (BX)(DX*1), R10
+ LEAQ (BX)(SI*1), SI
// matchLen
- XORL R11, R11
+ XORL R12, R12
matchlen_loopback_16_repeat_extend_encodeBlockAsm10B:
- CMPL R8, $0x10
+ CMPL R9, $0x10
JB matchlen_match8_repeat_extend_encodeBlockAsm10B
- MOVQ (R9)(R11*1), R10
- MOVQ 8(R9)(R11*1), R12
- XORQ (BX)(R11*1), R10
+ MOVQ (R10)(R12*1), R11
+ MOVQ 8(R10)(R12*1), R13
+ XORQ (SI)(R12*1), R11
JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm10B
- XORQ 8(BX)(R11*1), R12
+ XORQ 8(SI)(R12*1), R13
JNZ matchlen_bsf_16repeat_extend_encodeBlockAsm10B
- LEAL -16(R8), R8
- LEAL 16(R11), R11
+ LEAL -16(R9), R9
+ LEAL 16(R12), R12
JMP matchlen_loopback_16_repeat_extend_encodeBlockAsm10B
matchlen_bsf_16repeat_extend_encodeBlockAsm10B:
#ifdef GOAMD64_v3
- TZCNTQ R12, R12
+ TZCNTQ R13, R13
#else
- BSFQ R12, R12
+ BSFQ R13, R13
#endif
- SARQ $0x03, R12
- LEAL 8(R11)(R12*1), R11
+ SARQ $0x03, R13
+ LEAL 8(R12)(R13*1), R12
JMP repeat_extend_forward_end_encodeBlockAsm10B
matchlen_match8_repeat_extend_encodeBlockAsm10B:
- CMPL R8, $0x08
+ CMPL R9, $0x08
JB matchlen_match4_repeat_extend_encodeBlockAsm10B
- MOVQ (R9)(R11*1), R10
- XORQ (BX)(R11*1), R10
+ MOVQ (R10)(R12*1), R11
+ XORQ (SI)(R12*1), R11
JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm10B
- LEAL -8(R8), R8
- LEAL 8(R11), R11
+ LEAL -8(R9), R9
+ LEAL 8(R12), R12
JMP matchlen_match4_repeat_extend_encodeBlockAsm10B
matchlen_bsf_8_repeat_extend_encodeBlockAsm10B:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL (R11)(R10*1), R11
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
JMP repeat_extend_forward_end_encodeBlockAsm10B
matchlen_match4_repeat_extend_encodeBlockAsm10B:
- CMPL R8, $0x04
+ CMPL R9, $0x04
JB matchlen_match2_repeat_extend_encodeBlockAsm10B
- MOVL (R9)(R11*1), R10
- CMPL (BX)(R11*1), R10
+ MOVL (R10)(R12*1), R11
+ CMPL (SI)(R12*1), R11
JNE matchlen_match2_repeat_extend_encodeBlockAsm10B
- LEAL -4(R8), R8
- LEAL 4(R11), R11
+ LEAL -4(R9), R9
+ LEAL 4(R12), R12
matchlen_match2_repeat_extend_encodeBlockAsm10B:
- CMPL R8, $0x01
+ CMPL R9, $0x01
JE matchlen_match1_repeat_extend_encodeBlockAsm10B
JB repeat_extend_forward_end_encodeBlockAsm10B
- MOVW (R9)(R11*1), R10
- CMPW (BX)(R11*1), R10
+ MOVW (R10)(R12*1), R11
+ CMPW (SI)(R12*1), R11
JNE matchlen_match1_repeat_extend_encodeBlockAsm10B
- LEAL 2(R11), R11
- SUBL $0x02, R8
+ LEAL 2(R12), R12
+ SUBL $0x02, R9
JZ repeat_extend_forward_end_encodeBlockAsm10B
matchlen_match1_repeat_extend_encodeBlockAsm10B:
- MOVB (R9)(R11*1), R10
- CMPB (BX)(R11*1), R10
+ MOVB (R10)(R12*1), R11
+ CMPB (SI)(R12*1), R11
JNE repeat_extend_forward_end_encodeBlockAsm10B
- LEAL 1(R11), R11
+ LEAL 1(R12), R12
repeat_extend_forward_end_encodeBlockAsm10B:
- ADDL R11, CX
- MOVL CX, BX
- SUBL SI, BX
- MOVL 16(SP), SI
- TESTL DI, DI
+ ADDL R12, DX
+ MOVL DX, SI
+ SUBL DI, SI
+ MOVL 16(SP), DI
+ TESTL R8, R8
JZ repeat_as_copy_encodeBlockAsm10B
// emitRepeat
- MOVL BX, DI
- LEAL -4(BX), BX
- CMPL DI, $0x08
+ MOVL SI, R8
+ LEAL -4(SI), SI
+ CMPL R8, $0x08
JBE repeat_two_match_repeat_encodeBlockAsm10B
- CMPL DI, $0x0c
+ CMPL R8, $0x0c
JAE cant_repeat_two_offset_match_repeat_encodeBlockAsm10B
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JB repeat_two_offset_match_repeat_encodeBlockAsm10B
cant_repeat_two_offset_match_repeat_encodeBlockAsm10B:
- CMPL BX, $0x00000104
+ CMPL SI, $0x00000104
JB repeat_three_match_repeat_encodeBlockAsm10B
- LEAL -256(BX), BX
- MOVW $0x0019, (AX)
- MOVW BX, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(SI), SI
+ MOVW $0x0019, (CX)
+ MOVW SI, 2(CX)
+ ADDQ $0x04, CX
JMP repeat_end_emit_encodeBlockAsm10B
repeat_three_match_repeat_encodeBlockAsm10B:
- LEAL -4(BX), BX
- MOVW $0x0015, (AX)
- MOVB BL, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(SI), SI
+ MOVW $0x0015, (CX)
+ MOVB SI, 2(CX)
+ ADDQ $0x03, CX
JMP repeat_end_emit_encodeBlockAsm10B
repeat_two_match_repeat_encodeBlockAsm10B:
- SHLL $0x02, BX
- ORL $0x01, BX
- MOVW BX, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, SI
+ ORL $0x01, SI
+ MOVW SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm10B
repeat_two_offset_match_repeat_encodeBlockAsm10B:
- XORQ DI, DI
- LEAL 1(DI)(BX*4), BX
- MOVB SI, 1(AX)
- SARL $0x08, SI
- SHLL $0x05, SI
- ORL SI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ XORQ R8, R8
+ LEAL 1(R8)(SI*4), SI
+ MOVB DI, 1(CX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm10B
repeat_as_copy_encodeBlockAsm10B:
// emitCopy
- CMPL BX, $0x40
+ CMPL SI, $0x40
JBE two_byte_offset_short_repeat_as_copy_encodeBlockAsm10B
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JAE long_offset_short_repeat_as_copy_encodeBlockAsm10B
- MOVL $0x00000001, DI
- LEAL 16(DI), DI
- MOVB SI, 1(AX)
- SHRL $0x08, SI
- SHLL $0x05, SI
- ORL SI, DI
- MOVB DI, (AX)
- ADDQ $0x02, AX
- SUBL $0x08, BX
+ MOVL $0x00000001, R8
+ LEAL 16(R8), R8
+ MOVB DI, 1(CX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R8
+ MOVB R8, (CX)
+ ADDQ $0x02, CX
+ SUBL $0x08, SI
// emitRepeat
- LEAL -4(BX), BX
+ LEAL -4(SI), SI
JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b
- MOVL BX, DI
- LEAL -4(BX), BX
- CMPL DI, $0x08
+ MOVL SI, R8
+ LEAL -4(SI), SI
+ CMPL R8, $0x08
JBE repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b
- CMPL DI, $0x0c
+ CMPL R8, $0x0c
JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JB repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b
cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b:
- CMPL BX, $0x00000104
+ CMPL SI, $0x00000104
JB repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b
- LEAL -256(BX), BX
- MOVW $0x0019, (AX)
- MOVW BX, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(SI), SI
+ MOVW $0x0019, (CX)
+ MOVW SI, 2(CX)
+ ADDQ $0x04, CX
JMP repeat_end_emit_encodeBlockAsm10B
repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b:
- LEAL -4(BX), BX
- MOVW $0x0015, (AX)
- MOVB BL, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(SI), SI
+ MOVW $0x0015, (CX)
+ MOVB SI, 2(CX)
+ ADDQ $0x03, CX
JMP repeat_end_emit_encodeBlockAsm10B
repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b:
- SHLL $0x02, BX
- ORL $0x01, BX
- MOVW BX, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, SI
+ ORL $0x01, SI
+ MOVW SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm10B
repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b:
- XORQ DI, DI
- LEAL 1(DI)(BX*4), BX
- MOVB SI, 1(AX)
- SARL $0x08, SI
- SHLL $0x05, SI
- ORL SI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ XORQ R8, R8
+ LEAL 1(R8)(SI*4), SI
+ MOVB DI, 1(CX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm10B
long_offset_short_repeat_as_copy_encodeBlockAsm10B:
- MOVB $0xee, (AX)
- MOVW SI, 1(AX)
- LEAL -60(BX), BX
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW DI, 1(CX)
+ LEAL -60(SI), SI
+ ADDQ $0x03, CX
// emitRepeat
- MOVL BX, DI
- LEAL -4(BX), BX
- CMPL DI, $0x08
+ MOVL SI, R8
+ LEAL -4(SI), SI
+ CMPL R8, $0x08
JBE repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short
- CMPL DI, $0x0c
+ CMPL R8, $0x0c
JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JB repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short
cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short:
- CMPL BX, $0x00000104
+ CMPL SI, $0x00000104
JB repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short
- LEAL -256(BX), BX
- MOVW $0x0019, (AX)
- MOVW BX, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(SI), SI
+ MOVW $0x0019, (CX)
+ MOVW SI, 2(CX)
+ ADDQ $0x04, CX
JMP repeat_end_emit_encodeBlockAsm10B
repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short:
- LEAL -4(BX), BX
- MOVW $0x0015, (AX)
- MOVB BL, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(SI), SI
+ MOVW $0x0015, (CX)
+ MOVB SI, 2(CX)
+ ADDQ $0x03, CX
JMP repeat_end_emit_encodeBlockAsm10B
repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short:
- SHLL $0x02, BX
- ORL $0x01, BX
- MOVW BX, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, SI
+ ORL $0x01, SI
+ MOVW SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm10B
repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short:
- XORQ DI, DI
- LEAL 1(DI)(BX*4), BX
- MOVB SI, 1(AX)
- SARL $0x08, SI
- SHLL $0x05, SI
- ORL SI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ XORQ R8, R8
+ LEAL 1(R8)(SI*4), SI
+ MOVB DI, 1(CX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm10B
two_byte_offset_short_repeat_as_copy_encodeBlockAsm10B:
- MOVL BX, DI
- SHLL $0x02, DI
- CMPL BX, $0x0c
+ MOVL SI, R8
+ SHLL $0x02, R8
+ CMPL SI, $0x0c
JAE emit_copy_three_repeat_as_copy_encodeBlockAsm10B
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JAE emit_copy_three_repeat_as_copy_encodeBlockAsm10B
- LEAL -15(DI), DI
- MOVB SI, 1(AX)
- SHRL $0x08, SI
- SHLL $0x05, SI
- ORL SI, DI
- MOVB DI, (AX)
- ADDQ $0x02, AX
+ LEAL -15(R8), R8
+ MOVB DI, 1(CX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R8
+ MOVB R8, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm10B
emit_copy_three_repeat_as_copy_encodeBlockAsm10B:
- LEAL -2(DI), DI
- MOVB DI, (AX)
- MOVW SI, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(R8), R8
+ MOVB R8, (CX)
+ MOVW DI, 1(CX)
+ ADDQ $0x03, CX
repeat_end_emit_encodeBlockAsm10B:
- MOVL CX, 12(SP)
+ MOVL DX, 12(SP)
JMP search_loop_encodeBlockAsm10B
no_repeat_found_encodeBlockAsm10B:
- CMPL (DX)(BX*1), SI
+ CMPL (BX)(SI*1), DI
JEQ candidate_match_encodeBlockAsm10B
- SHRQ $0x08, SI
- MOVL 24(SP)(R9*4), BX
- LEAL 2(CX), R8
- CMPL (DX)(DI*1), SI
+ SHRQ $0x08, DI
+ MOVL (AX)(R10*4), SI
+ LEAL 2(DX), R9
+ CMPL (BX)(R8*1), DI
JEQ candidate2_match_encodeBlockAsm10B
- MOVL R8, 24(SP)(R9*4)
- SHRQ $0x08, SI
- CMPL (DX)(BX*1), SI
+ MOVL R9, (AX)(R10*4)
+ SHRQ $0x08, DI
+ CMPL (BX)(SI*1), DI
JEQ candidate3_match_encodeBlockAsm10B
- MOVL 20(SP), CX
+ MOVL 20(SP), DX
JMP search_loop_encodeBlockAsm10B
candidate3_match_encodeBlockAsm10B:
- ADDL $0x02, CX
+ ADDL $0x02, DX
JMP candidate_match_encodeBlockAsm10B
candidate2_match_encodeBlockAsm10B:
- MOVL R8, 24(SP)(R9*4)
- INCL CX
- MOVL DI, BX
+ MOVL R9, (AX)(R10*4)
+ INCL DX
+ MOVL R8, SI
candidate_match_encodeBlockAsm10B:
- MOVL 12(SP), SI
- TESTL BX, BX
+ MOVL 12(SP), DI
+ TESTL SI, SI
JZ match_extend_back_end_encodeBlockAsm10B
match_extend_back_loop_encodeBlockAsm10B:
- CMPL CX, SI
+ CMPL DX, DI
JBE match_extend_back_end_encodeBlockAsm10B
- MOVB -1(DX)(BX*1), DI
- MOVB -1(DX)(CX*1), R8
- CMPB DI, R8
+ MOVB -1(BX)(SI*1), R8
+ MOVB -1(BX)(DX*1), R9
+ CMPB R8, R9
JNE match_extend_back_end_encodeBlockAsm10B
- LEAL -1(CX), CX
- DECL BX
+ LEAL -1(DX), DX
+ DECL SI
JZ match_extend_back_end_encodeBlockAsm10B
JMP match_extend_back_loop_encodeBlockAsm10B
match_extend_back_end_encodeBlockAsm10B:
- MOVL CX, SI
- SUBL 12(SP), SI
- LEAQ 3(AX)(SI*1), SI
- CMPQ SI, (SP)
+ MOVL DX, DI
+ SUBL 12(SP), DI
+ LEAQ 3(CX)(DI*1), DI
+ CMPQ DI, (SP)
JB match_dst_size_check_encodeBlockAsm10B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_dst_size_check_encodeBlockAsm10B:
- MOVL CX, SI
- MOVL 12(SP), DI
- CMPL DI, SI
+ MOVL DX, DI
+ MOVL 12(SP), R8
+ CMPL R8, DI
JEQ emit_literal_done_match_emit_encodeBlockAsm10B
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(DI*1), SI
- SUBL DI, R8
- LEAL -1(R8), DI
- CMPL DI, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(R8*1), DI
+ SUBL R8, R9
+ LEAL -1(R9), R8
+ CMPL R8, $0x3c
JB one_byte_match_emit_encodeBlockAsm10B
- CMPL DI, $0x00000100
+ CMPL R8, $0x00000100
JB two_bytes_match_emit_encodeBlockAsm10B
JB three_bytes_match_emit_encodeBlockAsm10B
three_bytes_match_emit_encodeBlockAsm10B:
- MOVB $0xf4, (AX)
- MOVW DI, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW R8, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_encodeBlockAsm10B
two_bytes_match_emit_encodeBlockAsm10B:
- MOVB $0xf0, (AX)
- MOVB DI, 1(AX)
- ADDQ $0x02, AX
- CMPL DI, $0x40
+ MOVB $0xf0, (CX)
+ MOVB R8, 1(CX)
+ ADDQ $0x02, CX
+ CMPL R8, $0x40
JB memmove_match_emit_encodeBlockAsm10B
JMP memmove_long_match_emit_encodeBlockAsm10B
one_byte_match_emit_encodeBlockAsm10B:
- SHLB $0x02, DI
- MOVB DI, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, R8
+ MOVB R8, (CX)
+ ADDQ $0x01, CX
memmove_match_emit_encodeBlockAsm10B:
- LEAQ (AX)(R8*1), DI
+ LEAQ (CX)(R9*1), R8
// genMemMoveShort
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JBE emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_33through64
emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8:
- MOVQ (SI), R9
- MOVQ R9, (AX)
+ MOVQ (DI), R10
+ MOVQ R10, (CX)
JMP memmove_end_copy_match_emit_encodeBlockAsm10B
emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8through16:
- MOVQ (SI), R9
- MOVQ -8(SI)(R8*1), SI
- MOVQ R9, (AX)
- MOVQ SI, -8(AX)(R8*1)
+ MOVQ (DI), R10
+ MOVQ -8(DI)(R9*1), DI
+ MOVQ R10, (CX)
+ MOVQ DI, -8(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeBlockAsm10B
emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_17through32:
- MOVOU (SI), X0
- MOVOU -16(SI)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (DI), X0
+ MOVOU -16(DI)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeBlockAsm10B
emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_33through64:
- MOVOU (SI), X0
- MOVOU 16(SI), X1
- MOVOU -32(SI)(R8*1), X2
- MOVOU -16(SI)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (DI), X0
+ MOVOU 16(DI), X1
+ MOVOU -32(DI)(R9*1), X2
+ MOVOU -16(DI)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_match_emit_encodeBlockAsm10B:
- MOVQ DI, AX
+ MOVQ R8, CX
JMP emit_literal_done_match_emit_encodeBlockAsm10B
memmove_long_match_emit_encodeBlockAsm10B:
- LEAQ (AX)(R8*1), DI
+ LEAQ (CX)(R9*1), R8
// genMemMoveLong
- MOVOU (SI), X0
- MOVOU 16(SI), X1
- MOVOU -32(SI)(R8*1), X2
- MOVOU -16(SI)(R8*1), X3
- MOVQ R8, R10
- SHRQ $0x05, R10
- MOVQ AX, R9
- ANDL $0x0000001f, R9
- MOVQ $0x00000040, R11
- SUBQ R9, R11
- DECQ R10
+ MOVOU (DI), X0
+ MOVOU 16(DI), X1
+ MOVOU -32(DI)(R9*1), X2
+ MOVOU -16(DI)(R9*1), X3
+ MOVQ R9, R11
+ SHRQ $0x05, R11
+ MOVQ CX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R12
+ SUBQ R10, R12
+ DECQ R11
JA emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_forward_sse_loop_32
- LEAQ -32(SI)(R11*1), R9
- LEAQ -32(AX)(R11*1), R12
+ LEAQ -32(DI)(R12*1), R10
+ LEAQ -32(CX)(R12*1), R13
emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_big_loop_back:
- MOVOU (R9), X4
- MOVOU 16(R9), X5
- MOVOA X4, (R12)
- MOVOA X5, 16(R12)
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R13)
+ MOVOA X5, 16(R13)
+ ADDQ $0x20, R13
+ ADDQ $0x20, R10
ADDQ $0x20, R12
- ADDQ $0x20, R9
- ADDQ $0x20, R11
- DECQ R10
+ DECQ R11
JNA emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_forward_sse_loop_32:
- MOVOU -32(SI)(R11*1), X4
- MOVOU -16(SI)(R11*1), X5
- MOVOA X4, -32(AX)(R11*1)
- MOVOA X5, -16(AX)(R11*1)
- ADDQ $0x20, R11
- CMPQ R8, R11
+ MOVOU -32(DI)(R12*1), X4
+ MOVOU -16(DI)(R12*1), X5
+ MOVOA X4, -32(CX)(R12*1)
+ MOVOA X5, -16(CX)(R12*1)
+ ADDQ $0x20, R12
+ CMPQ R9, R12
JAE emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ DI, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ R8, CX
emit_literal_done_match_emit_encodeBlockAsm10B:
match_nolit_loop_encodeBlockAsm10B:
- MOVL CX, SI
- SUBL BX, SI
- MOVL SI, 16(SP)
- ADDL $0x04, CX
- ADDL $0x04, BX
- MOVQ src_len+32(FP), SI
- SUBL CX, SI
- LEAQ (DX)(CX*1), DI
- LEAQ (DX)(BX*1), BX
+ MOVL DX, DI
+ SUBL SI, DI
+ MOVL DI, 16(SP)
+ ADDL $0x04, DX
+ ADDL $0x04, SI
+ MOVQ src_len+32(FP), DI
+ SUBL DX, DI
+ LEAQ (BX)(DX*1), R8
+ LEAQ (BX)(SI*1), SI
// matchLen
- XORL R9, R9
+ XORL R10, R10
matchlen_loopback_16_match_nolit_encodeBlockAsm10B:
- CMPL SI, $0x10
+ CMPL DI, $0x10
JB matchlen_match8_match_nolit_encodeBlockAsm10B
- MOVQ (DI)(R9*1), R8
- MOVQ 8(DI)(R9*1), R10
- XORQ (BX)(R9*1), R8
+ MOVQ (R8)(R10*1), R9
+ MOVQ 8(R8)(R10*1), R11
+ XORQ (SI)(R10*1), R9
JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm10B
- XORQ 8(BX)(R9*1), R10
+ XORQ 8(SI)(R10*1), R11
JNZ matchlen_bsf_16match_nolit_encodeBlockAsm10B
- LEAL -16(SI), SI
- LEAL 16(R9), R9
+ LEAL -16(DI), DI
+ LEAL 16(R10), R10
JMP matchlen_loopback_16_match_nolit_encodeBlockAsm10B
matchlen_bsf_16match_nolit_encodeBlockAsm10B:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL 8(R9)(R10*1), R9
+ SARQ $0x03, R11
+ LEAL 8(R10)(R11*1), R10
JMP match_nolit_end_encodeBlockAsm10B
matchlen_match8_match_nolit_encodeBlockAsm10B:
- CMPL SI, $0x08
+ CMPL DI, $0x08
JB matchlen_match4_match_nolit_encodeBlockAsm10B
- MOVQ (DI)(R9*1), R8
- XORQ (BX)(R9*1), R8
+ MOVQ (R8)(R10*1), R9
+ XORQ (SI)(R10*1), R9
JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm10B
- LEAL -8(SI), SI
- LEAL 8(R9), R9
+ LEAL -8(DI), DI
+ LEAL 8(R10), R10
JMP matchlen_match4_match_nolit_encodeBlockAsm10B
matchlen_bsf_8_match_nolit_encodeBlockAsm10B:
#ifdef GOAMD64_v3
- TZCNTQ R8, R8
+ TZCNTQ R9, R9
#else
- BSFQ R8, R8
+ BSFQ R9, R9
#endif
- SARQ $0x03, R8
- LEAL (R9)(R8*1), R9
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
JMP match_nolit_end_encodeBlockAsm10B
matchlen_match4_match_nolit_encodeBlockAsm10B:
- CMPL SI, $0x04
+ CMPL DI, $0x04
JB matchlen_match2_match_nolit_encodeBlockAsm10B
- MOVL (DI)(R9*1), R8
- CMPL (BX)(R9*1), R8
+ MOVL (R8)(R10*1), R9
+ CMPL (SI)(R10*1), R9
JNE matchlen_match2_match_nolit_encodeBlockAsm10B
- LEAL -4(SI), SI
- LEAL 4(R9), R9
+ LEAL -4(DI), DI
+ LEAL 4(R10), R10
matchlen_match2_match_nolit_encodeBlockAsm10B:
- CMPL SI, $0x01
+ CMPL DI, $0x01
JE matchlen_match1_match_nolit_encodeBlockAsm10B
JB match_nolit_end_encodeBlockAsm10B
- MOVW (DI)(R9*1), R8
- CMPW (BX)(R9*1), R8
+ MOVW (R8)(R10*1), R9
+ CMPW (SI)(R10*1), R9
JNE matchlen_match1_match_nolit_encodeBlockAsm10B
- LEAL 2(R9), R9
- SUBL $0x02, SI
+ LEAL 2(R10), R10
+ SUBL $0x02, DI
JZ match_nolit_end_encodeBlockAsm10B
matchlen_match1_match_nolit_encodeBlockAsm10B:
- MOVB (DI)(R9*1), R8
- CMPB (BX)(R9*1), R8
+ MOVB (R8)(R10*1), R9
+ CMPB (SI)(R10*1), R9
JNE match_nolit_end_encodeBlockAsm10B
- LEAL 1(R9), R9
+ LEAL 1(R10), R10
match_nolit_end_encodeBlockAsm10B:
- ADDL R9, CX
- MOVL 16(SP), BX
- ADDL $0x04, R9
- MOVL CX, 12(SP)
+ ADDL R10, DX
+ MOVL 16(SP), SI
+ ADDL $0x04, R10
+ MOVL DX, 12(SP)
// emitCopy
- CMPL R9, $0x40
+ CMPL R10, $0x40
JBE two_byte_offset_short_match_nolit_encodeBlockAsm10B
- CMPL BX, $0x00000800
+ CMPL SI, $0x00000800
JAE long_offset_short_match_nolit_encodeBlockAsm10B
- MOVL $0x00000001, SI
- LEAL 16(SI), SI
- MOVB BL, 1(AX)
- SHRL $0x08, BX
- SHLL $0x05, BX
- ORL BX, SI
- MOVB SI, (AX)
- ADDQ $0x02, AX
- SUBL $0x08, R9
+ MOVL $0x00000001, DI
+ LEAL 16(DI), DI
+ MOVB SI, 1(CX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (CX)
+ ADDQ $0x02, CX
+ SUBL $0x08, R10
// emitRepeat
- LEAL -4(R9), R9
+ LEAL -4(R10), R10
JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b
- MOVL R9, SI
- LEAL -4(R9), R9
- CMPL SI, $0x08
+ MOVL R10, DI
+ LEAL -4(R10), R10
+ CMPL DI, $0x08
JBE repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short_2b
- CMPL SI, $0x0c
+ CMPL DI, $0x0c
JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b
- CMPL BX, $0x00000800
+ CMPL SI, $0x00000800
JB repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b
cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b:
- CMPL R9, $0x00000104
+ CMPL R10, $0x00000104
JB repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short_2b
- LEAL -256(R9), R9
- MOVW $0x0019, (AX)
- MOVW R9, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R10), R10
+ MOVW $0x0019, (CX)
+ MOVW R10, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm10B
repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short_2b:
- LEAL -4(R9), R9
- MOVW $0x0015, (AX)
- MOVB R9, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R10), R10
+ MOVW $0x0015, (CX)
+ MOVB R10, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm10B
repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short_2b:
- SHLL $0x02, R9
- ORL $0x01, R9
- MOVW R9, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R10
+ ORL $0x01, R10
+ MOVW R10, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm10B
repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b:
- XORQ SI, SI
- LEAL 1(SI)(R9*4), R9
- MOVB BL, 1(AX)
- SARL $0x08, BX
- SHLL $0x05, BX
- ORL BX, R9
- MOVB R9, (AX)
- ADDQ $0x02, AX
+ XORQ DI, DI
+ LEAL 1(DI)(R10*4), R10
+ MOVB SI, 1(CX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, R10
+ MOVB R10, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm10B
long_offset_short_match_nolit_encodeBlockAsm10B:
- MOVB $0xee, (AX)
- MOVW BX, 1(AX)
- LEAL -60(R9), R9
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW SI, 1(CX)
+ LEAL -60(R10), R10
+ ADDQ $0x03, CX
// emitRepeat
- MOVL R9, SI
- LEAL -4(R9), R9
- CMPL SI, $0x08
+ MOVL R10, DI
+ LEAL -4(R10), R10
+ CMPL DI, $0x08
JBE repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short
- CMPL SI, $0x0c
+ CMPL DI, $0x0c
JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short
- CMPL BX, $0x00000800
+ CMPL SI, $0x00000800
JB repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short
cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short:
- CMPL R9, $0x00000104
+ CMPL R10, $0x00000104
JB repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short
- LEAL -256(R9), R9
- MOVW $0x0019, (AX)
- MOVW R9, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R10), R10
+ MOVW $0x0019, (CX)
+ MOVW R10, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm10B
repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short:
- LEAL -4(R9), R9
- MOVW $0x0015, (AX)
- MOVB R9, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R10), R10
+ MOVW $0x0015, (CX)
+ MOVB R10, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm10B
repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short:
- SHLL $0x02, R9
- ORL $0x01, R9
- MOVW R9, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R10
+ ORL $0x01, R10
+ MOVW R10, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm10B
repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short:
- XORQ SI, SI
- LEAL 1(SI)(R9*4), R9
- MOVB BL, 1(AX)
- SARL $0x08, BX
- SHLL $0x05, BX
- ORL BX, R9
- MOVB R9, (AX)
- ADDQ $0x02, AX
+ XORQ DI, DI
+ LEAL 1(DI)(R10*4), R10
+ MOVB SI, 1(CX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, R10
+ MOVB R10, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm10B
two_byte_offset_short_match_nolit_encodeBlockAsm10B:
- MOVL R9, SI
- SHLL $0x02, SI
- CMPL R9, $0x0c
+ MOVL R10, DI
+ SHLL $0x02, DI
+ CMPL R10, $0x0c
JAE emit_copy_three_match_nolit_encodeBlockAsm10B
- CMPL BX, $0x00000800
+ CMPL SI, $0x00000800
JAE emit_copy_three_match_nolit_encodeBlockAsm10B
- LEAL -15(SI), SI
- MOVB BL, 1(AX)
- SHRL $0x08, BX
- SHLL $0x05, BX
- ORL BX, SI
- MOVB SI, (AX)
- ADDQ $0x02, AX
+ LEAL -15(DI), DI
+ MOVB SI, 1(CX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm10B
emit_copy_three_match_nolit_encodeBlockAsm10B:
- LEAL -2(SI), SI
- MOVB SI, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(DI), DI
+ MOVB DI, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
match_nolit_emitcopy_end_encodeBlockAsm10B:
- CMPL CX, 8(SP)
+ CMPL DX, 8(SP)
JAE emit_remainder_encodeBlockAsm10B
- MOVQ -2(DX)(CX*1), SI
- CMPQ AX, (SP)
+ MOVQ -2(BX)(DX*1), DI
+ CMPQ CX, (SP)
JB match_nolit_dst_ok_encodeBlockAsm10B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_nolit_dst_ok_encodeBlockAsm10B:
- MOVQ $0x9e3779b1, R8
- MOVQ SI, DI
- SHRQ $0x10, SI
- MOVQ SI, BX
- SHLQ $0x20, DI
- IMULQ R8, DI
- SHRQ $0x36, DI
- SHLQ $0x20, BX
- IMULQ R8, BX
- SHRQ $0x36, BX
- LEAL -2(CX), R8
- LEAQ 24(SP)(BX*4), R9
- MOVL (R9), BX
- MOVL R8, 24(SP)(DI*4)
- MOVL CX, (R9)
- CMPL (DX)(BX*1), SI
+ MOVQ $0x9e3779b1, R9
+ MOVQ DI, R8
+ SHRQ $0x10, DI
+ MOVQ DI, SI
+ SHLQ $0x20, R8
+ IMULQ R9, R8
+ SHRQ $0x36, R8
+ SHLQ $0x20, SI
+ IMULQ R9, SI
+ SHRQ $0x36, SI
+ LEAL -2(DX), R9
+ LEAQ (AX)(SI*4), R10
+ MOVL (R10), SI
+ MOVL R9, (AX)(R8*4)
+ MOVL DX, (R10)
+ CMPL (BX)(SI*1), DI
JEQ match_nolit_loop_encodeBlockAsm10B
- INCL CX
+ INCL DX
JMP search_loop_encodeBlockAsm10B
emit_remainder_encodeBlockAsm10B:
- MOVQ src_len+32(FP), CX
- SUBL 12(SP), CX
- LEAQ 3(AX)(CX*1), CX
- CMPQ CX, (SP)
+ MOVQ src_len+32(FP), AX
+ SUBL 12(SP), AX
+ LEAQ 3(CX)(AX*1), AX
+ CMPQ AX, (SP)
JB emit_remainder_ok_encodeBlockAsm10B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
emit_remainder_ok_encodeBlockAsm10B:
- MOVQ src_len+32(FP), CX
- MOVL 12(SP), BX
- CMPL BX, CX
+ MOVQ src_len+32(FP), AX
+ MOVL 12(SP), DX
+ CMPL DX, AX
JEQ emit_literal_done_emit_remainder_encodeBlockAsm10B
- MOVL CX, SI
- MOVL CX, 12(SP)
- LEAQ (DX)(BX*1), CX
- SUBL BX, SI
+ MOVL AX, SI
+ MOVL AX, 12(SP)
+ LEAQ (BX)(DX*1), AX
+ SUBL DX, SI
LEAL -1(SI), DX
CMPL DX, $0x3c
JB one_byte_emit_remainder_encodeBlockAsm10B
@@ -4793,26 +4797,26 @@ emit_remainder_ok_encodeBlockAsm10B:
JB three_bytes_emit_remainder_encodeBlockAsm10B
three_bytes_emit_remainder_encodeBlockAsm10B:
- MOVB $0xf4, (AX)
- MOVW DX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW DX, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_emit_remainder_encodeBlockAsm10B
two_bytes_emit_remainder_encodeBlockAsm10B:
- MOVB $0xf0, (AX)
- MOVB DL, 1(AX)
- ADDQ $0x02, AX
+ MOVB $0xf0, (CX)
+ MOVB DL, 1(CX)
+ ADDQ $0x02, CX
CMPL DX, $0x40
JB memmove_emit_remainder_encodeBlockAsm10B
JMP memmove_long_emit_remainder_encodeBlockAsm10B
one_byte_emit_remainder_encodeBlockAsm10B:
SHLB $0x02, DL
- MOVB DL, (AX)
- ADDQ $0x01, AX
+ MOVB DL, (CX)
+ ADDQ $0x01, CX
memmove_emit_remainder_encodeBlockAsm10B:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveShort
@@ -4828,73 +4832,73 @@ memmove_emit_remainder_encodeBlockAsm10B:
JMP emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_33through64
emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_1or2:
- MOVB (CX), SI
- MOVB -1(CX)(BX*1), CL
- MOVB SI, (AX)
- MOVB CL, -1(AX)(BX*1)
+ MOVB (AX), SI
+ MOVB -1(AX)(BX*1), AL
+ MOVB SI, (CX)
+ MOVB AL, -1(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B
emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_3:
- MOVW (CX), SI
- MOVB 2(CX), CL
- MOVW SI, (AX)
- MOVB CL, 2(AX)
+ MOVW (AX), SI
+ MOVB 2(AX), AL
+ MOVW SI, (CX)
+ MOVB AL, 2(CX)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B
emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_4through7:
- MOVL (CX), SI
- MOVL -4(CX)(BX*1), CX
- MOVL SI, (AX)
- MOVL CX, -4(AX)(BX*1)
+ MOVL (AX), SI
+ MOVL -4(AX)(BX*1), AX
+ MOVL SI, (CX)
+ MOVL AX, -4(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B
emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_8through16:
- MOVQ (CX), SI
- MOVQ -8(CX)(BX*1), CX
- MOVQ SI, (AX)
- MOVQ CX, -8(AX)(BX*1)
+ MOVQ (AX), SI
+ MOVQ -8(AX)(BX*1), AX
+ MOVQ SI, (CX)
+ MOVQ AX, -8(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B
emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_17through32:
- MOVOU (CX), X0
- MOVOU -16(CX)(BX*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU -16(AX)(BX*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B
emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_33through64:
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
memmove_end_copy_emit_remainder_encodeBlockAsm10B:
- MOVQ DX, AX
+ MOVQ DX, CX
JMP emit_literal_done_emit_remainder_encodeBlockAsm10B
memmove_long_emit_remainder_encodeBlockAsm10B:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveLong
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
MOVQ BX, DI
SHRQ $0x05, DI
- MOVQ AX, SI
+ MOVQ CX, SI
ANDL $0x0000001f, SI
MOVQ $0x00000040, R8
SUBQ SI, R8
DECQ DI
JA emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_forward_sse_loop_32
- LEAQ -32(CX)(R8*1), SI
- LEAQ -32(AX)(R8*1), R9
+ LEAQ -32(AX)(R8*1), SI
+ LEAQ -32(CX)(R8*1), R9
emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_big_loop_back:
MOVOU (SI), X4
@@ -4908,943 +4912,944 @@ emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_big_loop_back:
JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_big_loop_back
emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_forward_sse_loop_32:
- MOVOU -32(CX)(R8*1), X4
- MOVOU -16(CX)(R8*1), X5
- MOVOA X4, -32(AX)(R8*1)
- MOVOA X5, -16(AX)(R8*1)
+ MOVOU -32(AX)(R8*1), X4
+ MOVOU -16(AX)(R8*1), X5
+ MOVOA X4, -32(CX)(R8*1)
+ MOVOA X5, -16(CX)(R8*1)
ADDQ $0x20, R8
CMPQ BX, R8
JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
- MOVQ DX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
+ MOVQ DX, CX
emit_literal_done_emit_remainder_encodeBlockAsm10B:
- MOVQ dst_base+0(FP), CX
- SUBQ CX, AX
- MOVQ AX, ret+48(FP)
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, CX
+ MOVQ CX, ret+56(FP)
RET
-// func encodeBlockAsm8B(dst []byte, src []byte) int
+// func encodeBlockAsm8B(dst []byte, src []byte, tmp *[1024]byte) int
// Requires: BMI, SSE2
-TEXT ·encodeBlockAsm8B(SB), $1048-56
- MOVQ dst_base+0(FP), AX
- MOVQ $0x00000008, CX
- LEAQ 24(SP), DX
+TEXT ·encodeBlockAsm8B(SB), $24-64
+ MOVQ tmp+48(FP), AX
+ MOVQ dst_base+0(FP), CX
+ MOVQ $0x00000008, DX
+ MOVQ AX, BX
PXOR X0, X0
zero_loop_encodeBlockAsm8B:
- MOVOU X0, (DX)
- MOVOU X0, 16(DX)
- MOVOU X0, 32(DX)
- MOVOU X0, 48(DX)
- MOVOU X0, 64(DX)
- MOVOU X0, 80(DX)
- MOVOU X0, 96(DX)
- MOVOU X0, 112(DX)
- ADDQ $0x80, DX
- DECQ CX
+ MOVOU X0, (BX)
+ MOVOU X0, 16(BX)
+ MOVOU X0, 32(BX)
+ MOVOU X0, 48(BX)
+ MOVOU X0, 64(BX)
+ MOVOU X0, 80(BX)
+ MOVOU X0, 96(BX)
+ MOVOU X0, 112(BX)
+ ADDQ $0x80, BX
+ DECQ DX
JNZ zero_loop_encodeBlockAsm8B
MOVL $0x00000000, 12(SP)
- MOVQ src_len+32(FP), CX
- LEAQ -9(CX), DX
- LEAQ -8(CX), BX
- MOVL BX, 8(SP)
- SHRQ $0x05, CX
- SUBL CX, DX
- LEAQ (AX)(DX*1), DX
- MOVQ DX, (SP)
- MOVL $0x00000001, CX
- MOVL CX, 16(SP)
- MOVQ src_base+24(FP), DX
+ MOVQ src_len+32(FP), DX
+ LEAQ -9(DX), BX
+ LEAQ -8(DX), SI
+ MOVL SI, 8(SP)
+ SHRQ $0x05, DX
+ SUBL DX, BX
+ LEAQ (CX)(BX*1), BX
+ MOVQ BX, (SP)
+ MOVL $0x00000001, DX
+ MOVL DX, 16(SP)
+ MOVQ src_base+24(FP), BX
search_loop_encodeBlockAsm8B:
- MOVL CX, BX
- SUBL 12(SP), BX
- SHRL $0x04, BX
- LEAL 4(CX)(BX*1), BX
- CMPL BX, 8(SP)
+ MOVL DX, SI
+ SUBL 12(SP), SI
+ SHRL $0x04, SI
+ LEAL 4(DX)(SI*1), SI
+ CMPL SI, 8(SP)
JAE emit_remainder_encodeBlockAsm8B
- MOVQ (DX)(CX*1), SI
- MOVL BX, 20(SP)
- MOVQ $0x9e3779b1, R8
- MOVQ SI, R9
- MOVQ SI, R10
- SHRQ $0x08, R10
- SHLQ $0x20, R9
- IMULQ R8, R9
- SHRQ $0x38, R9
+ MOVQ (BX)(DX*1), DI
+ MOVL SI, 20(SP)
+ MOVQ $0x9e3779b1, R9
+ MOVQ DI, R10
+ MOVQ DI, R11
+ SHRQ $0x08, R11
SHLQ $0x20, R10
- IMULQ R8, R10
+ IMULQ R9, R10
SHRQ $0x38, R10
- MOVL 24(SP)(R9*4), BX
- MOVL 24(SP)(R10*4), DI
- MOVL CX, 24(SP)(R9*4)
- LEAL 1(CX), R9
- MOVL R9, 24(SP)(R10*4)
- MOVQ SI, R9
- SHRQ $0x10, R9
- SHLQ $0x20, R9
- IMULQ R8, R9
- SHRQ $0x38, R9
- MOVL CX, R8
- SUBL 16(SP), R8
- MOVL 1(DX)(R8*1), R10
- MOVQ SI, R8
- SHRQ $0x08, R8
- CMPL R8, R10
- JNE no_repeat_found_encodeBlockAsm8B
- LEAL 1(CX), SI
- MOVL 12(SP), DI
- MOVL SI, BX
- SUBL 16(SP), BX
+ SHLQ $0x20, R11
+ IMULQ R9, R11
+ SHRQ $0x38, R11
+ MOVL (AX)(R10*4), SI
+ MOVL (AX)(R11*4), R8
+ MOVL DX, (AX)(R10*4)
+ LEAL 1(DX), R10
+ MOVL R10, (AX)(R11*4)
+ MOVQ DI, R10
+ SHRQ $0x10, R10
+ SHLQ $0x20, R10
+ IMULQ R9, R10
+ SHRQ $0x38, R10
+ MOVL DX, R9
+ SUBL 16(SP), R9
+ MOVL 1(BX)(R9*1), R11
+ MOVQ DI, R9
+ SHRQ $0x08, R9
+ CMPL R9, R11
+ JNE no_repeat_found_encodeBlockAsm8B
+ LEAL 1(DX), DI
+ MOVL 12(SP), R8
+ MOVL DI, SI
+ SUBL 16(SP), SI
JZ repeat_extend_back_end_encodeBlockAsm8B
repeat_extend_back_loop_encodeBlockAsm8B:
- CMPL SI, DI
+ CMPL DI, R8
JBE repeat_extend_back_end_encodeBlockAsm8B
- MOVB -1(DX)(BX*1), R8
- MOVB -1(DX)(SI*1), R9
- CMPB R8, R9
+ MOVB -1(BX)(SI*1), R9
+ MOVB -1(BX)(DI*1), R10
+ CMPB R9, R10
JNE repeat_extend_back_end_encodeBlockAsm8B
- LEAL -1(SI), SI
- DECL BX
+ LEAL -1(DI), DI
+ DECL SI
JNZ repeat_extend_back_loop_encodeBlockAsm8B
repeat_extend_back_end_encodeBlockAsm8B:
- MOVL SI, BX
- SUBL 12(SP), BX
- LEAQ 3(AX)(BX*1), BX
- CMPQ BX, (SP)
+ MOVL DI, SI
+ SUBL 12(SP), SI
+ LEAQ 3(CX)(SI*1), SI
+ CMPQ SI, (SP)
JB repeat_dst_size_check_encodeBlockAsm8B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
repeat_dst_size_check_encodeBlockAsm8B:
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_repeat_emit_encodeBlockAsm8B
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R9
- SUBL BX, R8
- LEAL -1(R8), BX
- CMPL BX, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R10
+ SUBL SI, R9
+ LEAL -1(R9), SI
+ CMPL SI, $0x3c
JB one_byte_repeat_emit_encodeBlockAsm8B
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_repeat_emit_encodeBlockAsm8B
JB three_bytes_repeat_emit_encodeBlockAsm8B
three_bytes_repeat_emit_encodeBlockAsm8B:
- MOVB $0xf4, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_repeat_emit_encodeBlockAsm8B
two_bytes_repeat_emit_encodeBlockAsm8B:
- MOVB $0xf0, (AX)
- MOVB BL, 1(AX)
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_repeat_emit_encodeBlockAsm8B
JMP memmove_long_repeat_emit_encodeBlockAsm8B
one_byte_repeat_emit_encodeBlockAsm8B:
- SHLB $0x02, BL
- MOVB BL, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
memmove_repeat_emit_encodeBlockAsm8B:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveShort
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JBE emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_17through32
JMP emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_33through64
emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8:
- MOVQ (R9), R10
- MOVQ R10, (AX)
+ MOVQ (R10), R11
+ MOVQ R11, (CX)
JMP memmove_end_copy_repeat_emit_encodeBlockAsm8B
emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8through16:
- MOVQ (R9), R10
- MOVQ -8(R9)(R8*1), R9
- MOVQ R10, (AX)
- MOVQ R9, -8(AX)(R8*1)
+ MOVQ (R10), R11
+ MOVQ -8(R10)(R9*1), R10
+ MOVQ R11, (CX)
+ MOVQ R10, -8(CX)(R9*1)
JMP memmove_end_copy_repeat_emit_encodeBlockAsm8B
emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_17through32:
- MOVOU (R9), X0
- MOVOU -16(R9)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU -16(R10)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_repeat_emit_encodeBlockAsm8B
emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_33through64:
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_repeat_emit_encodeBlockAsm8B:
- MOVQ BX, AX
+ MOVQ SI, CX
JMP emit_literal_done_repeat_emit_encodeBlockAsm8B
memmove_long_repeat_emit_encodeBlockAsm8B:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveLong
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVQ R8, R11
- SHRQ $0x05, R11
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R12
- SUBQ R10, R12
- DECQ R11
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVQ R9, R12
+ SHRQ $0x05, R12
+ MOVQ CX, R11
+ ANDL $0x0000001f, R11
+ MOVQ $0x00000040, R13
+ SUBQ R11, R13
+ DECQ R12
JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_forward_sse_loop_32
- LEAQ -32(R9)(R12*1), R10
- LEAQ -32(AX)(R12*1), R13
+ LEAQ -32(R10)(R13*1), R11
+ LEAQ -32(CX)(R13*1), R14
emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R13)
- MOVOA X5, 16(R13)
+ MOVOU (R11), X4
+ MOVOU 16(R11), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
+ ADDQ $0x20, R14
+ ADDQ $0x20, R11
ADDQ $0x20, R13
- ADDQ $0x20, R10
- ADDQ $0x20, R12
- DECQ R11
+ DECQ R12
JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_big_loop_back
emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_forward_sse_loop_32:
- MOVOU -32(R9)(R12*1), X4
- MOVOU -16(R9)(R12*1), X5
- MOVOA X4, -32(AX)(R12*1)
- MOVOA X5, -16(AX)(R12*1)
- ADDQ $0x20, R12
- CMPQ R8, R12
+ MOVOU -32(R10)(R13*1), X4
+ MOVOU -16(R10)(R13*1), X5
+ MOVOA X4, -32(CX)(R13*1)
+ MOVOA X5, -16(CX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R9, R13
JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ BX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ SI, CX
emit_literal_done_repeat_emit_encodeBlockAsm8B:
- ADDL $0x05, CX
- MOVL CX, BX
- SUBL 16(SP), BX
- MOVQ src_len+32(FP), R8
- SUBL CX, R8
- LEAQ (DX)(CX*1), R9
- LEAQ (DX)(BX*1), BX
+ ADDL $0x05, DX
+ MOVL DX, SI
+ SUBL 16(SP), SI
+ MOVQ src_len+32(FP), R9
+ SUBL DX, R9
+ LEAQ (BX)(DX*1), R10
+ LEAQ (BX)(SI*1), SI
// matchLen
- XORL R11, R11
+ XORL R12, R12
matchlen_loopback_16_repeat_extend_encodeBlockAsm8B:
- CMPL R8, $0x10
+ CMPL R9, $0x10
JB matchlen_match8_repeat_extend_encodeBlockAsm8B
- MOVQ (R9)(R11*1), R10
- MOVQ 8(R9)(R11*1), R12
- XORQ (BX)(R11*1), R10
+ MOVQ (R10)(R12*1), R11
+ MOVQ 8(R10)(R12*1), R13
+ XORQ (SI)(R12*1), R11
JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm8B
- XORQ 8(BX)(R11*1), R12
+ XORQ 8(SI)(R12*1), R13
JNZ matchlen_bsf_16repeat_extend_encodeBlockAsm8B
- LEAL -16(R8), R8
- LEAL 16(R11), R11
+ LEAL -16(R9), R9
+ LEAL 16(R12), R12
JMP matchlen_loopback_16_repeat_extend_encodeBlockAsm8B
matchlen_bsf_16repeat_extend_encodeBlockAsm8B:
#ifdef GOAMD64_v3
- TZCNTQ R12, R12
+ TZCNTQ R13, R13
#else
- BSFQ R12, R12
+ BSFQ R13, R13
#endif
- SARQ $0x03, R12
- LEAL 8(R11)(R12*1), R11
+ SARQ $0x03, R13
+ LEAL 8(R12)(R13*1), R12
JMP repeat_extend_forward_end_encodeBlockAsm8B
matchlen_match8_repeat_extend_encodeBlockAsm8B:
- CMPL R8, $0x08
+ CMPL R9, $0x08
JB matchlen_match4_repeat_extend_encodeBlockAsm8B
- MOVQ (R9)(R11*1), R10
- XORQ (BX)(R11*1), R10
+ MOVQ (R10)(R12*1), R11
+ XORQ (SI)(R12*1), R11
JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm8B
- LEAL -8(R8), R8
- LEAL 8(R11), R11
+ LEAL -8(R9), R9
+ LEAL 8(R12), R12
JMP matchlen_match4_repeat_extend_encodeBlockAsm8B
matchlen_bsf_8_repeat_extend_encodeBlockAsm8B:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL (R11)(R10*1), R11
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
JMP repeat_extend_forward_end_encodeBlockAsm8B
matchlen_match4_repeat_extend_encodeBlockAsm8B:
- CMPL R8, $0x04
+ CMPL R9, $0x04
JB matchlen_match2_repeat_extend_encodeBlockAsm8B
- MOVL (R9)(R11*1), R10
- CMPL (BX)(R11*1), R10
+ MOVL (R10)(R12*1), R11
+ CMPL (SI)(R12*1), R11
JNE matchlen_match2_repeat_extend_encodeBlockAsm8B
- LEAL -4(R8), R8
- LEAL 4(R11), R11
+ LEAL -4(R9), R9
+ LEAL 4(R12), R12
matchlen_match2_repeat_extend_encodeBlockAsm8B:
- CMPL R8, $0x01
+ CMPL R9, $0x01
JE matchlen_match1_repeat_extend_encodeBlockAsm8B
JB repeat_extend_forward_end_encodeBlockAsm8B
- MOVW (R9)(R11*1), R10
- CMPW (BX)(R11*1), R10
+ MOVW (R10)(R12*1), R11
+ CMPW (SI)(R12*1), R11
JNE matchlen_match1_repeat_extend_encodeBlockAsm8B
- LEAL 2(R11), R11
- SUBL $0x02, R8
+ LEAL 2(R12), R12
+ SUBL $0x02, R9
JZ repeat_extend_forward_end_encodeBlockAsm8B
matchlen_match1_repeat_extend_encodeBlockAsm8B:
- MOVB (R9)(R11*1), R10
- CMPB (BX)(R11*1), R10
+ MOVB (R10)(R12*1), R11
+ CMPB (SI)(R12*1), R11
JNE repeat_extend_forward_end_encodeBlockAsm8B
- LEAL 1(R11), R11
+ LEAL 1(R12), R12
repeat_extend_forward_end_encodeBlockAsm8B:
- ADDL R11, CX
- MOVL CX, BX
- SUBL SI, BX
- MOVL 16(SP), SI
- TESTL DI, DI
+ ADDL R12, DX
+ MOVL DX, SI
+ SUBL DI, SI
+ MOVL 16(SP), DI
+ TESTL R8, R8
JZ repeat_as_copy_encodeBlockAsm8B
// emitRepeat
- MOVL BX, SI
- LEAL -4(BX), BX
- CMPL SI, $0x08
+ MOVL SI, DI
+ LEAL -4(SI), SI
+ CMPL DI, $0x08
JBE repeat_two_match_repeat_encodeBlockAsm8B
- CMPL SI, $0x0c
+ CMPL DI, $0x0c
JAE cant_repeat_two_offset_match_repeat_encodeBlockAsm8B
cant_repeat_two_offset_match_repeat_encodeBlockAsm8B:
- CMPL BX, $0x00000104
+ CMPL SI, $0x00000104
JB repeat_three_match_repeat_encodeBlockAsm8B
- LEAL -256(BX), BX
- MOVW $0x0019, (AX)
- MOVW BX, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(SI), SI
+ MOVW $0x0019, (CX)
+ MOVW SI, 2(CX)
+ ADDQ $0x04, CX
JMP repeat_end_emit_encodeBlockAsm8B
repeat_three_match_repeat_encodeBlockAsm8B:
- LEAL -4(BX), BX
- MOVW $0x0015, (AX)
- MOVB BL, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(SI), SI
+ MOVW $0x0015, (CX)
+ MOVB SI, 2(CX)
+ ADDQ $0x03, CX
JMP repeat_end_emit_encodeBlockAsm8B
repeat_two_match_repeat_encodeBlockAsm8B:
- SHLL $0x02, BX
- ORL $0x01, BX
- MOVW BX, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, SI
+ ORL $0x01, SI
+ MOVW SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm8B
- XORQ DI, DI
- LEAL 1(DI)(BX*4), BX
- MOVB SI, 1(AX)
- SARL $0x08, SI
- SHLL $0x05, SI
- ORL SI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ XORQ R8, R8
+ LEAL 1(R8)(SI*4), SI
+ MOVB DI, 1(CX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm8B
repeat_as_copy_encodeBlockAsm8B:
// emitCopy
- CMPL BX, $0x40
+ CMPL SI, $0x40
JBE two_byte_offset_short_repeat_as_copy_encodeBlockAsm8B
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JAE long_offset_short_repeat_as_copy_encodeBlockAsm8B
- MOVL $0x00000001, DI
- LEAL 16(DI), DI
- MOVB SI, 1(AX)
- SHRL $0x08, SI
- SHLL $0x05, SI
- ORL SI, DI
- MOVB DI, (AX)
- ADDQ $0x02, AX
- SUBL $0x08, BX
+ MOVL $0x00000001, R8
+ LEAL 16(R8), R8
+ MOVB DI, 1(CX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R8
+ MOVB R8, (CX)
+ ADDQ $0x02, CX
+ SUBL $0x08, SI
// emitRepeat
- LEAL -4(BX), BX
+ LEAL -4(SI), SI
JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b
- MOVL BX, SI
- LEAL -4(BX), BX
- CMPL SI, $0x08
+ MOVL SI, DI
+ LEAL -4(SI), SI
+ CMPL DI, $0x08
JBE repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b
- CMPL SI, $0x0c
+ CMPL DI, $0x0c
JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b
cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b:
- CMPL BX, $0x00000104
+ CMPL SI, $0x00000104
JB repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b
- LEAL -256(BX), BX
- MOVW $0x0019, (AX)
- MOVW BX, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(SI), SI
+ MOVW $0x0019, (CX)
+ MOVW SI, 2(CX)
+ ADDQ $0x04, CX
JMP repeat_end_emit_encodeBlockAsm8B
repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b:
- LEAL -4(BX), BX
- MOVW $0x0015, (AX)
- MOVB BL, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(SI), SI
+ MOVW $0x0015, (CX)
+ MOVB SI, 2(CX)
+ ADDQ $0x03, CX
JMP repeat_end_emit_encodeBlockAsm8B
repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b:
- SHLL $0x02, BX
- ORL $0x01, BX
- MOVW BX, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, SI
+ ORL $0x01, SI
+ MOVW SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm8B
- XORQ DI, DI
- LEAL 1(DI)(BX*4), BX
- MOVB SI, 1(AX)
- SARL $0x08, SI
- SHLL $0x05, SI
- ORL SI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ XORQ R8, R8
+ LEAL 1(R8)(SI*4), SI
+ MOVB DI, 1(CX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm8B
long_offset_short_repeat_as_copy_encodeBlockAsm8B:
- MOVB $0xee, (AX)
- MOVW SI, 1(AX)
- LEAL -60(BX), BX
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW DI, 1(CX)
+ LEAL -60(SI), SI
+ ADDQ $0x03, CX
// emitRepeat
- MOVL BX, SI
- LEAL -4(BX), BX
- CMPL SI, $0x08
+ MOVL SI, DI
+ LEAL -4(SI), SI
+ CMPL DI, $0x08
JBE repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short
- CMPL SI, $0x0c
+ CMPL DI, $0x0c
JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short
cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short:
- CMPL BX, $0x00000104
+ CMPL SI, $0x00000104
JB repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short
- LEAL -256(BX), BX
- MOVW $0x0019, (AX)
- MOVW BX, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(SI), SI
+ MOVW $0x0019, (CX)
+ MOVW SI, 2(CX)
+ ADDQ $0x04, CX
JMP repeat_end_emit_encodeBlockAsm8B
repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short:
- LEAL -4(BX), BX
- MOVW $0x0015, (AX)
- MOVB BL, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(SI), SI
+ MOVW $0x0015, (CX)
+ MOVB SI, 2(CX)
+ ADDQ $0x03, CX
JMP repeat_end_emit_encodeBlockAsm8B
repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short:
- SHLL $0x02, BX
- ORL $0x01, BX
- MOVW BX, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, SI
+ ORL $0x01, SI
+ MOVW SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm8B
- XORQ DI, DI
- LEAL 1(DI)(BX*4), BX
- MOVB SI, 1(AX)
- SARL $0x08, SI
- SHLL $0x05, SI
- ORL SI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ XORQ R8, R8
+ LEAL 1(R8)(SI*4), SI
+ MOVB DI, 1(CX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm8B
two_byte_offset_short_repeat_as_copy_encodeBlockAsm8B:
- MOVL BX, DI
- SHLL $0x02, DI
- CMPL BX, $0x0c
+ MOVL SI, R8
+ SHLL $0x02, R8
+ CMPL SI, $0x0c
JAE emit_copy_three_repeat_as_copy_encodeBlockAsm8B
- LEAL -15(DI), DI
- MOVB SI, 1(AX)
- SHRL $0x08, SI
- SHLL $0x05, SI
- ORL SI, DI
- MOVB DI, (AX)
- ADDQ $0x02, AX
+ LEAL -15(R8), R8
+ MOVB DI, 1(CX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R8
+ MOVB R8, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeBlockAsm8B
emit_copy_three_repeat_as_copy_encodeBlockAsm8B:
- LEAL -2(DI), DI
- MOVB DI, (AX)
- MOVW SI, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(R8), R8
+ MOVB R8, (CX)
+ MOVW DI, 1(CX)
+ ADDQ $0x03, CX
repeat_end_emit_encodeBlockAsm8B:
- MOVL CX, 12(SP)
+ MOVL DX, 12(SP)
JMP search_loop_encodeBlockAsm8B
no_repeat_found_encodeBlockAsm8B:
- CMPL (DX)(BX*1), SI
+ CMPL (BX)(SI*1), DI
JEQ candidate_match_encodeBlockAsm8B
- SHRQ $0x08, SI
- MOVL 24(SP)(R9*4), BX
- LEAL 2(CX), R8
- CMPL (DX)(DI*1), SI
+ SHRQ $0x08, DI
+ MOVL (AX)(R10*4), SI
+ LEAL 2(DX), R9
+ CMPL (BX)(R8*1), DI
JEQ candidate2_match_encodeBlockAsm8B
- MOVL R8, 24(SP)(R9*4)
- SHRQ $0x08, SI
- CMPL (DX)(BX*1), SI
+ MOVL R9, (AX)(R10*4)
+ SHRQ $0x08, DI
+ CMPL (BX)(SI*1), DI
JEQ candidate3_match_encodeBlockAsm8B
- MOVL 20(SP), CX
+ MOVL 20(SP), DX
JMP search_loop_encodeBlockAsm8B
candidate3_match_encodeBlockAsm8B:
- ADDL $0x02, CX
+ ADDL $0x02, DX
JMP candidate_match_encodeBlockAsm8B
candidate2_match_encodeBlockAsm8B:
- MOVL R8, 24(SP)(R9*4)
- INCL CX
- MOVL DI, BX
+ MOVL R9, (AX)(R10*4)
+ INCL DX
+ MOVL R8, SI
candidate_match_encodeBlockAsm8B:
- MOVL 12(SP), SI
- TESTL BX, BX
+ MOVL 12(SP), DI
+ TESTL SI, SI
JZ match_extend_back_end_encodeBlockAsm8B
match_extend_back_loop_encodeBlockAsm8B:
- CMPL CX, SI
+ CMPL DX, DI
JBE match_extend_back_end_encodeBlockAsm8B
- MOVB -1(DX)(BX*1), DI
- MOVB -1(DX)(CX*1), R8
- CMPB DI, R8
+ MOVB -1(BX)(SI*1), R8
+ MOVB -1(BX)(DX*1), R9
+ CMPB R8, R9
JNE match_extend_back_end_encodeBlockAsm8B
- LEAL -1(CX), CX
- DECL BX
+ LEAL -1(DX), DX
+ DECL SI
JZ match_extend_back_end_encodeBlockAsm8B
JMP match_extend_back_loop_encodeBlockAsm8B
match_extend_back_end_encodeBlockAsm8B:
- MOVL CX, SI
- SUBL 12(SP), SI
- LEAQ 3(AX)(SI*1), SI
- CMPQ SI, (SP)
+ MOVL DX, DI
+ SUBL 12(SP), DI
+ LEAQ 3(CX)(DI*1), DI
+ CMPQ DI, (SP)
JB match_dst_size_check_encodeBlockAsm8B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_dst_size_check_encodeBlockAsm8B:
- MOVL CX, SI
- MOVL 12(SP), DI
- CMPL DI, SI
+ MOVL DX, DI
+ MOVL 12(SP), R8
+ CMPL R8, DI
JEQ emit_literal_done_match_emit_encodeBlockAsm8B
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(DI*1), SI
- SUBL DI, R8
- LEAL -1(R8), DI
- CMPL DI, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(R8*1), DI
+ SUBL R8, R9
+ LEAL -1(R9), R8
+ CMPL R8, $0x3c
JB one_byte_match_emit_encodeBlockAsm8B
- CMPL DI, $0x00000100
+ CMPL R8, $0x00000100
JB two_bytes_match_emit_encodeBlockAsm8B
JB three_bytes_match_emit_encodeBlockAsm8B
three_bytes_match_emit_encodeBlockAsm8B:
- MOVB $0xf4, (AX)
- MOVW DI, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW R8, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_encodeBlockAsm8B
two_bytes_match_emit_encodeBlockAsm8B:
- MOVB $0xf0, (AX)
- MOVB DI, 1(AX)
- ADDQ $0x02, AX
- CMPL DI, $0x40
+ MOVB $0xf0, (CX)
+ MOVB R8, 1(CX)
+ ADDQ $0x02, CX
+ CMPL R8, $0x40
JB memmove_match_emit_encodeBlockAsm8B
JMP memmove_long_match_emit_encodeBlockAsm8B
one_byte_match_emit_encodeBlockAsm8B:
- SHLB $0x02, DI
- MOVB DI, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, R8
+ MOVB R8, (CX)
+ ADDQ $0x01, CX
memmove_match_emit_encodeBlockAsm8B:
- LEAQ (AX)(R8*1), DI
+ LEAQ (CX)(R9*1), R8
// genMemMoveShort
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JBE emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_33through64
emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8:
- MOVQ (SI), R9
- MOVQ R9, (AX)
+ MOVQ (DI), R10
+ MOVQ R10, (CX)
JMP memmove_end_copy_match_emit_encodeBlockAsm8B
emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8through16:
- MOVQ (SI), R9
- MOVQ -8(SI)(R8*1), SI
- MOVQ R9, (AX)
- MOVQ SI, -8(AX)(R8*1)
+ MOVQ (DI), R10
+ MOVQ -8(DI)(R9*1), DI
+ MOVQ R10, (CX)
+ MOVQ DI, -8(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeBlockAsm8B
emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_17through32:
- MOVOU (SI), X0
- MOVOU -16(SI)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (DI), X0
+ MOVOU -16(DI)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeBlockAsm8B
emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_33through64:
- MOVOU (SI), X0
- MOVOU 16(SI), X1
- MOVOU -32(SI)(R8*1), X2
- MOVOU -16(SI)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (DI), X0
+ MOVOU 16(DI), X1
+ MOVOU -32(DI)(R9*1), X2
+ MOVOU -16(DI)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_match_emit_encodeBlockAsm8B:
- MOVQ DI, AX
+ MOVQ R8, CX
JMP emit_literal_done_match_emit_encodeBlockAsm8B
memmove_long_match_emit_encodeBlockAsm8B:
- LEAQ (AX)(R8*1), DI
+ LEAQ (CX)(R9*1), R8
// genMemMoveLong
- MOVOU (SI), X0
- MOVOU 16(SI), X1
- MOVOU -32(SI)(R8*1), X2
- MOVOU -16(SI)(R8*1), X3
- MOVQ R8, R10
- SHRQ $0x05, R10
- MOVQ AX, R9
- ANDL $0x0000001f, R9
- MOVQ $0x00000040, R11
- SUBQ R9, R11
- DECQ R10
+ MOVOU (DI), X0
+ MOVOU 16(DI), X1
+ MOVOU -32(DI)(R9*1), X2
+ MOVOU -16(DI)(R9*1), X3
+ MOVQ R9, R11
+ SHRQ $0x05, R11
+ MOVQ CX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R12
+ SUBQ R10, R12
+ DECQ R11
JA emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_forward_sse_loop_32
- LEAQ -32(SI)(R11*1), R9
- LEAQ -32(AX)(R11*1), R12
+ LEAQ -32(DI)(R12*1), R10
+ LEAQ -32(CX)(R12*1), R13
emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_big_loop_back:
- MOVOU (R9), X4
- MOVOU 16(R9), X5
- MOVOA X4, (R12)
- MOVOA X5, 16(R12)
- ADDQ $0x20, R12
- ADDQ $0x20, R9
- ADDQ $0x20, R11
- DECQ R10
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R13)
+ MOVOA X5, 16(R13)
+ ADDQ $0x20, R13
+ ADDQ $0x20, R10
+ ADDQ $0x20, R12
+ DECQ R11
JNA emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_forward_sse_loop_32:
- MOVOU -32(SI)(R11*1), X4
- MOVOU -16(SI)(R11*1), X5
- MOVOA X4, -32(AX)(R11*1)
- MOVOA X5, -16(AX)(R11*1)
- ADDQ $0x20, R11
- CMPQ R8, R11
+ MOVOU -32(DI)(R12*1), X4
+ MOVOU -16(DI)(R12*1), X5
+ MOVOA X4, -32(CX)(R12*1)
+ MOVOA X5, -16(CX)(R12*1)
+ ADDQ $0x20, R12
+ CMPQ R9, R12
JAE emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ DI, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ R8, CX
emit_literal_done_match_emit_encodeBlockAsm8B:
match_nolit_loop_encodeBlockAsm8B:
- MOVL CX, SI
- SUBL BX, SI
- MOVL SI, 16(SP)
- ADDL $0x04, CX
- ADDL $0x04, BX
- MOVQ src_len+32(FP), SI
- SUBL CX, SI
- LEAQ (DX)(CX*1), DI
- LEAQ (DX)(BX*1), BX
+ MOVL DX, DI
+ SUBL SI, DI
+ MOVL DI, 16(SP)
+ ADDL $0x04, DX
+ ADDL $0x04, SI
+ MOVQ src_len+32(FP), DI
+ SUBL DX, DI
+ LEAQ (BX)(DX*1), R8
+ LEAQ (BX)(SI*1), SI
// matchLen
- XORL R9, R9
+ XORL R10, R10
matchlen_loopback_16_match_nolit_encodeBlockAsm8B:
- CMPL SI, $0x10
+ CMPL DI, $0x10
JB matchlen_match8_match_nolit_encodeBlockAsm8B
- MOVQ (DI)(R9*1), R8
- MOVQ 8(DI)(R9*1), R10
- XORQ (BX)(R9*1), R8
+ MOVQ (R8)(R10*1), R9
+ MOVQ 8(R8)(R10*1), R11
+ XORQ (SI)(R10*1), R9
JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm8B
- XORQ 8(BX)(R9*1), R10
+ XORQ 8(SI)(R10*1), R11
JNZ matchlen_bsf_16match_nolit_encodeBlockAsm8B
- LEAL -16(SI), SI
- LEAL 16(R9), R9
+ LEAL -16(DI), DI
+ LEAL 16(R10), R10
JMP matchlen_loopback_16_match_nolit_encodeBlockAsm8B
matchlen_bsf_16match_nolit_encodeBlockAsm8B:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL 8(R9)(R10*1), R9
+ SARQ $0x03, R11
+ LEAL 8(R10)(R11*1), R10
JMP match_nolit_end_encodeBlockAsm8B
matchlen_match8_match_nolit_encodeBlockAsm8B:
- CMPL SI, $0x08
+ CMPL DI, $0x08
JB matchlen_match4_match_nolit_encodeBlockAsm8B
- MOVQ (DI)(R9*1), R8
- XORQ (BX)(R9*1), R8
+ MOVQ (R8)(R10*1), R9
+ XORQ (SI)(R10*1), R9
JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm8B
- LEAL -8(SI), SI
- LEAL 8(R9), R9
+ LEAL -8(DI), DI
+ LEAL 8(R10), R10
JMP matchlen_match4_match_nolit_encodeBlockAsm8B
matchlen_bsf_8_match_nolit_encodeBlockAsm8B:
#ifdef GOAMD64_v3
- TZCNTQ R8, R8
+ TZCNTQ R9, R9
#else
- BSFQ R8, R8
+ BSFQ R9, R9
#endif
- SARQ $0x03, R8
- LEAL (R9)(R8*1), R9
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
JMP match_nolit_end_encodeBlockAsm8B
matchlen_match4_match_nolit_encodeBlockAsm8B:
- CMPL SI, $0x04
+ CMPL DI, $0x04
JB matchlen_match2_match_nolit_encodeBlockAsm8B
- MOVL (DI)(R9*1), R8
- CMPL (BX)(R9*1), R8
+ MOVL (R8)(R10*1), R9
+ CMPL (SI)(R10*1), R9
JNE matchlen_match2_match_nolit_encodeBlockAsm8B
- LEAL -4(SI), SI
- LEAL 4(R9), R9
+ LEAL -4(DI), DI
+ LEAL 4(R10), R10
matchlen_match2_match_nolit_encodeBlockAsm8B:
- CMPL SI, $0x01
+ CMPL DI, $0x01
JE matchlen_match1_match_nolit_encodeBlockAsm8B
JB match_nolit_end_encodeBlockAsm8B
- MOVW (DI)(R9*1), R8
- CMPW (BX)(R9*1), R8
+ MOVW (R8)(R10*1), R9
+ CMPW (SI)(R10*1), R9
JNE matchlen_match1_match_nolit_encodeBlockAsm8B
- LEAL 2(R9), R9
- SUBL $0x02, SI
+ LEAL 2(R10), R10
+ SUBL $0x02, DI
JZ match_nolit_end_encodeBlockAsm8B
matchlen_match1_match_nolit_encodeBlockAsm8B:
- MOVB (DI)(R9*1), R8
- CMPB (BX)(R9*1), R8
+ MOVB (R8)(R10*1), R9
+ CMPB (SI)(R10*1), R9
JNE match_nolit_end_encodeBlockAsm8B
- LEAL 1(R9), R9
+ LEAL 1(R10), R10
match_nolit_end_encodeBlockAsm8B:
- ADDL R9, CX
- MOVL 16(SP), BX
- ADDL $0x04, R9
- MOVL CX, 12(SP)
+ ADDL R10, DX
+ MOVL 16(SP), SI
+ ADDL $0x04, R10
+ MOVL DX, 12(SP)
// emitCopy
- CMPL R9, $0x40
+ CMPL R10, $0x40
JBE two_byte_offset_short_match_nolit_encodeBlockAsm8B
- CMPL BX, $0x00000800
+ CMPL SI, $0x00000800
JAE long_offset_short_match_nolit_encodeBlockAsm8B
- MOVL $0x00000001, SI
- LEAL 16(SI), SI
- MOVB BL, 1(AX)
- SHRL $0x08, BX
- SHLL $0x05, BX
- ORL BX, SI
- MOVB SI, (AX)
- ADDQ $0x02, AX
- SUBL $0x08, R9
+ MOVL $0x00000001, DI
+ LEAL 16(DI), DI
+ MOVB SI, 1(CX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (CX)
+ ADDQ $0x02, CX
+ SUBL $0x08, R10
// emitRepeat
- LEAL -4(R9), R9
+ LEAL -4(R10), R10
JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short_2b
- MOVL R9, BX
- LEAL -4(R9), R9
- CMPL BX, $0x08
+ MOVL R10, SI
+ LEAL -4(R10), R10
+ CMPL SI, $0x08
JBE repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short_2b
- CMPL BX, $0x0c
+ CMPL SI, $0x0c
JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short_2b
cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short_2b:
- CMPL R9, $0x00000104
+ CMPL R10, $0x00000104
JB repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short_2b
- LEAL -256(R9), R9
- MOVW $0x0019, (AX)
- MOVW R9, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R10), R10
+ MOVW $0x0019, (CX)
+ MOVW R10, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm8B
repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short_2b:
- LEAL -4(R9), R9
- MOVW $0x0015, (AX)
- MOVB R9, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R10), R10
+ MOVW $0x0015, (CX)
+ MOVB R10, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm8B
repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short_2b:
- SHLL $0x02, R9
- ORL $0x01, R9
- MOVW R9, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R10
+ ORL $0x01, R10
+ MOVW R10, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm8B
- XORQ SI, SI
- LEAL 1(SI)(R9*4), R9
- MOVB BL, 1(AX)
- SARL $0x08, BX
- SHLL $0x05, BX
- ORL BX, R9
- MOVB R9, (AX)
- ADDQ $0x02, AX
+ XORQ DI, DI
+ LEAL 1(DI)(R10*4), R10
+ MOVB SI, 1(CX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, R10
+ MOVB R10, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm8B
long_offset_short_match_nolit_encodeBlockAsm8B:
- MOVB $0xee, (AX)
- MOVW BX, 1(AX)
- LEAL -60(R9), R9
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW SI, 1(CX)
+ LEAL -60(R10), R10
+ ADDQ $0x03, CX
// emitRepeat
- MOVL R9, BX
- LEAL -4(R9), R9
- CMPL BX, $0x08
+ MOVL R10, SI
+ LEAL -4(R10), R10
+ CMPL SI, $0x08
JBE repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short
- CMPL BX, $0x0c
+ CMPL SI, $0x0c
JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short
cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short:
- CMPL R9, $0x00000104
+ CMPL R10, $0x00000104
JB repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short
- LEAL -256(R9), R9
- MOVW $0x0019, (AX)
- MOVW R9, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R10), R10
+ MOVW $0x0019, (CX)
+ MOVW R10, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm8B
repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short:
- LEAL -4(R9), R9
- MOVW $0x0015, (AX)
- MOVB R9, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R10), R10
+ MOVW $0x0015, (CX)
+ MOVB R10, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm8B
repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short:
- SHLL $0x02, R9
- ORL $0x01, R9
- MOVW R9, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R10
+ ORL $0x01, R10
+ MOVW R10, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm8B
- XORQ SI, SI
- LEAL 1(SI)(R9*4), R9
- MOVB BL, 1(AX)
- SARL $0x08, BX
- SHLL $0x05, BX
- ORL BX, R9
- MOVB R9, (AX)
- ADDQ $0x02, AX
+ XORQ DI, DI
+ LEAL 1(DI)(R10*4), R10
+ MOVB SI, 1(CX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, R10
+ MOVB R10, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm8B
two_byte_offset_short_match_nolit_encodeBlockAsm8B:
- MOVL R9, SI
- SHLL $0x02, SI
- CMPL R9, $0x0c
+ MOVL R10, DI
+ SHLL $0x02, DI
+ CMPL R10, $0x0c
JAE emit_copy_three_match_nolit_encodeBlockAsm8B
- LEAL -15(SI), SI
- MOVB BL, 1(AX)
- SHRL $0x08, BX
- SHLL $0x05, BX
- ORL BX, SI
- MOVB SI, (AX)
- ADDQ $0x02, AX
+ LEAL -15(DI), DI
+ MOVB SI, 1(CX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBlockAsm8B
emit_copy_three_match_nolit_encodeBlockAsm8B:
- LEAL -2(SI), SI
- MOVB SI, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(DI), DI
+ MOVB DI, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
match_nolit_emitcopy_end_encodeBlockAsm8B:
- CMPL CX, 8(SP)
+ CMPL DX, 8(SP)
JAE emit_remainder_encodeBlockAsm8B
- MOVQ -2(DX)(CX*1), SI
- CMPQ AX, (SP)
+ MOVQ -2(BX)(DX*1), DI
+ CMPQ CX, (SP)
JB match_nolit_dst_ok_encodeBlockAsm8B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_nolit_dst_ok_encodeBlockAsm8B:
- MOVQ $0x9e3779b1, R8
- MOVQ SI, DI
- SHRQ $0x10, SI
- MOVQ SI, BX
- SHLQ $0x20, DI
- IMULQ R8, DI
- SHRQ $0x38, DI
- SHLQ $0x20, BX
- IMULQ R8, BX
- SHRQ $0x38, BX
- LEAL -2(CX), R8
- LEAQ 24(SP)(BX*4), R9
- MOVL (R9), BX
- MOVL R8, 24(SP)(DI*4)
- MOVL CX, (R9)
- CMPL (DX)(BX*1), SI
+ MOVQ $0x9e3779b1, R9
+ MOVQ DI, R8
+ SHRQ $0x10, DI
+ MOVQ DI, SI
+ SHLQ $0x20, R8
+ IMULQ R9, R8
+ SHRQ $0x38, R8
+ SHLQ $0x20, SI
+ IMULQ R9, SI
+ SHRQ $0x38, SI
+ LEAL -2(DX), R9
+ LEAQ (AX)(SI*4), R10
+ MOVL (R10), SI
+ MOVL R9, (AX)(R8*4)
+ MOVL DX, (R10)
+ CMPL (BX)(SI*1), DI
JEQ match_nolit_loop_encodeBlockAsm8B
- INCL CX
+ INCL DX
JMP search_loop_encodeBlockAsm8B
emit_remainder_encodeBlockAsm8B:
- MOVQ src_len+32(FP), CX
- SUBL 12(SP), CX
- LEAQ 3(AX)(CX*1), CX
- CMPQ CX, (SP)
+ MOVQ src_len+32(FP), AX
+ SUBL 12(SP), AX
+ LEAQ 3(CX)(AX*1), AX
+ CMPQ AX, (SP)
JB emit_remainder_ok_encodeBlockAsm8B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
emit_remainder_ok_encodeBlockAsm8B:
- MOVQ src_len+32(FP), CX
- MOVL 12(SP), BX
- CMPL BX, CX
+ MOVQ src_len+32(FP), AX
+ MOVL 12(SP), DX
+ CMPL DX, AX
JEQ emit_literal_done_emit_remainder_encodeBlockAsm8B
- MOVL CX, SI
- MOVL CX, 12(SP)
- LEAQ (DX)(BX*1), CX
- SUBL BX, SI
+ MOVL AX, SI
+ MOVL AX, 12(SP)
+ LEAQ (BX)(DX*1), AX
+ SUBL DX, SI
LEAL -1(SI), DX
CMPL DX, $0x3c
JB one_byte_emit_remainder_encodeBlockAsm8B
@@ -5853,26 +5858,26 @@ emit_remainder_ok_encodeBlockAsm8B:
JB three_bytes_emit_remainder_encodeBlockAsm8B
three_bytes_emit_remainder_encodeBlockAsm8B:
- MOVB $0xf4, (AX)
- MOVW DX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW DX, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_emit_remainder_encodeBlockAsm8B
two_bytes_emit_remainder_encodeBlockAsm8B:
- MOVB $0xf0, (AX)
- MOVB DL, 1(AX)
- ADDQ $0x02, AX
+ MOVB $0xf0, (CX)
+ MOVB DL, 1(CX)
+ ADDQ $0x02, CX
CMPL DX, $0x40
JB memmove_emit_remainder_encodeBlockAsm8B
JMP memmove_long_emit_remainder_encodeBlockAsm8B
one_byte_emit_remainder_encodeBlockAsm8B:
SHLB $0x02, DL
- MOVB DL, (AX)
- ADDQ $0x01, AX
+ MOVB DL, (CX)
+ ADDQ $0x01, CX
memmove_emit_remainder_encodeBlockAsm8B:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveShort
@@ -5888,73 +5893,73 @@ memmove_emit_remainder_encodeBlockAsm8B:
JMP emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_33through64
emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_1or2:
- MOVB (CX), SI
- MOVB -1(CX)(BX*1), CL
- MOVB SI, (AX)
- MOVB CL, -1(AX)(BX*1)
+ MOVB (AX), SI
+ MOVB -1(AX)(BX*1), AL
+ MOVB SI, (CX)
+ MOVB AL, -1(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B
emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_3:
- MOVW (CX), SI
- MOVB 2(CX), CL
- MOVW SI, (AX)
- MOVB CL, 2(AX)
+ MOVW (AX), SI
+ MOVB 2(AX), AL
+ MOVW SI, (CX)
+ MOVB AL, 2(CX)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B
emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_4through7:
- MOVL (CX), SI
- MOVL -4(CX)(BX*1), CX
- MOVL SI, (AX)
- MOVL CX, -4(AX)(BX*1)
+ MOVL (AX), SI
+ MOVL -4(AX)(BX*1), AX
+ MOVL SI, (CX)
+ MOVL AX, -4(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B
emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_8through16:
- MOVQ (CX), SI
- MOVQ -8(CX)(BX*1), CX
- MOVQ SI, (AX)
- MOVQ CX, -8(AX)(BX*1)
+ MOVQ (AX), SI
+ MOVQ -8(AX)(BX*1), AX
+ MOVQ SI, (CX)
+ MOVQ AX, -8(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B
emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_17through32:
- MOVOU (CX), X0
- MOVOU -16(CX)(BX*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU -16(AX)(BX*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B
emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_33through64:
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
memmove_end_copy_emit_remainder_encodeBlockAsm8B:
- MOVQ DX, AX
+ MOVQ DX, CX
JMP emit_literal_done_emit_remainder_encodeBlockAsm8B
memmove_long_emit_remainder_encodeBlockAsm8B:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveLong
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
MOVQ BX, DI
SHRQ $0x05, DI
- MOVQ AX, SI
+ MOVQ CX, SI
ANDL $0x0000001f, SI
MOVQ $0x00000040, R8
SUBQ SI, R8
DECQ DI
JA emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_forward_sse_loop_32
- LEAQ -32(CX)(R8*1), SI
- LEAQ -32(AX)(R8*1), R9
+ LEAQ -32(AX)(R8*1), SI
+ LEAQ -32(CX)(R8*1), R9
emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_big_loop_back:
MOVOU (SI), X4
@@ -5968,961 +5973,962 @@ emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_big_loop_back:
JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_big_loop_back
emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_forward_sse_loop_32:
- MOVOU -32(CX)(R8*1), X4
- MOVOU -16(CX)(R8*1), X5
- MOVOA X4, -32(AX)(R8*1)
- MOVOA X5, -16(AX)(R8*1)
+ MOVOU -32(AX)(R8*1), X4
+ MOVOU -16(AX)(R8*1), X5
+ MOVOA X4, -32(CX)(R8*1)
+ MOVOA X5, -16(CX)(R8*1)
ADDQ $0x20, R8
CMPQ BX, R8
JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
- MOVQ DX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
+ MOVQ DX, CX
emit_literal_done_emit_remainder_encodeBlockAsm8B:
- MOVQ dst_base+0(FP), CX
- SUBQ CX, AX
- MOVQ AX, ret+48(FP)
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, CX
+ MOVQ CX, ret+56(FP)
RET
-// func encodeBetterBlockAsm(dst []byte, src []byte) int
+// func encodeBetterBlockAsm(dst []byte, src []byte, tmp *[589824]byte) int
// Requires: BMI, SSE2
-TEXT ·encodeBetterBlockAsm(SB), $589848-56
- MOVQ dst_base+0(FP), AX
- MOVQ $0x00001200, CX
- LEAQ 24(SP), DX
+TEXT ·encodeBetterBlockAsm(SB), $24-64
+ MOVQ tmp+48(FP), AX
+ MOVQ dst_base+0(FP), CX
+ MOVQ $0x00001200, DX
+ MOVQ AX, BX
PXOR X0, X0
zero_loop_encodeBetterBlockAsm:
- MOVOU X0, (DX)
- MOVOU X0, 16(DX)
- MOVOU X0, 32(DX)
- MOVOU X0, 48(DX)
- MOVOU X0, 64(DX)
- MOVOU X0, 80(DX)
- MOVOU X0, 96(DX)
- MOVOU X0, 112(DX)
- ADDQ $0x80, DX
- DECQ CX
+ MOVOU X0, (BX)
+ MOVOU X0, 16(BX)
+ MOVOU X0, 32(BX)
+ MOVOU X0, 48(BX)
+ MOVOU X0, 64(BX)
+ MOVOU X0, 80(BX)
+ MOVOU X0, 96(BX)
+ MOVOU X0, 112(BX)
+ ADDQ $0x80, BX
+ DECQ DX
JNZ zero_loop_encodeBetterBlockAsm
MOVL $0x00000000, 12(SP)
- MOVQ src_len+32(FP), CX
- LEAQ -6(CX), DX
- LEAQ -8(CX), BX
- MOVL BX, 8(SP)
- SHRQ $0x05, CX
- SUBL CX, DX
- LEAQ (AX)(DX*1), DX
- MOVQ DX, (SP)
- MOVL $0x00000001, CX
+ MOVQ src_len+32(FP), DX
+ LEAQ -6(DX), BX
+ LEAQ -8(DX), SI
+ MOVL SI, 8(SP)
+ SHRQ $0x05, DX
+ SUBL DX, BX
+ LEAQ (CX)(BX*1), BX
+ MOVQ BX, (SP)
+ MOVL $0x00000001, DX
MOVL $0x00000000, 16(SP)
- MOVQ src_base+24(FP), DX
+ MOVQ src_base+24(FP), BX
search_loop_encodeBetterBlockAsm:
- MOVL CX, BX
- SUBL 12(SP), BX
- SHRL $0x07, BX
- CMPL BX, $0x63
+ MOVL DX, SI
+ SUBL 12(SP), SI
+ SHRL $0x07, SI
+ CMPL SI, $0x63
JBE check_maxskip_ok_encodeBetterBlockAsm
- LEAL 100(CX), BX
+ LEAL 100(DX), SI
JMP check_maxskip_cont_encodeBetterBlockAsm
check_maxskip_ok_encodeBetterBlockAsm:
- LEAL 1(CX)(BX*1), BX
+ LEAL 1(DX)(SI*1), SI
check_maxskip_cont_encodeBetterBlockAsm:
- CMPL BX, 8(SP)
+ CMPL SI, 8(SP)
JAE emit_remainder_encodeBetterBlockAsm
- MOVQ (DX)(CX*1), SI
- MOVL BX, 20(SP)
- MOVQ $0x00cf1bbcdcbfa563, R8
- MOVQ $0x9e3779b1, BX
- MOVQ SI, R9
- MOVQ SI, R10
- SHLQ $0x08, R9
- IMULQ R8, R9
- SHRQ $0x2f, R9
- SHLQ $0x20, R10
- IMULQ BX, R10
- SHRQ $0x32, R10
- MOVL 24(SP)(R9*4), BX
- MOVL 524312(SP)(R10*4), DI
- MOVL CX, 24(SP)(R9*4)
- MOVL CX, 524312(SP)(R10*4)
- MOVQ (DX)(BX*1), R9
- MOVQ (DX)(DI*1), R10
- CMPQ R9, SI
+ MOVQ (BX)(DX*1), DI
+ MOVL SI, 20(SP)
+ MOVQ $0x00cf1bbcdcbfa563, R9
+ MOVQ $0x9e3779b1, SI
+ MOVQ DI, R10
+ MOVQ DI, R11
+ SHLQ $0x08, R10
+ IMULQ R9, R10
+ SHRQ $0x2f, R10
+ SHLQ $0x20, R11
+ IMULQ SI, R11
+ SHRQ $0x32, R11
+ MOVL (AX)(R10*4), SI
+ MOVL 524288(AX)(R11*4), R8
+ MOVL DX, (AX)(R10*4)
+ MOVL DX, 524288(AX)(R11*4)
+ MOVQ (BX)(SI*1), R10
+ MOVQ (BX)(R8*1), R11
+ CMPQ R10, DI
JEQ candidate_match_encodeBetterBlockAsm
- CMPQ R10, SI
+ CMPQ R11, DI
JNE no_short_found_encodeBetterBlockAsm
- MOVL DI, BX
+ MOVL R8, SI
JMP candidate_match_encodeBetterBlockAsm
no_short_found_encodeBetterBlockAsm:
- CMPL R9, SI
+ CMPL R10, DI
JEQ candidate_match_encodeBetterBlockAsm
- CMPL R10, SI
+ CMPL R11, DI
JEQ candidateS_match_encodeBetterBlockAsm
- MOVL 20(SP), CX
+ MOVL 20(SP), DX
JMP search_loop_encodeBetterBlockAsm
candidateS_match_encodeBetterBlockAsm:
- SHRQ $0x08, SI
- MOVQ SI, R9
- SHLQ $0x08, R9
- IMULQ R8, R9
- SHRQ $0x2f, R9
- MOVL 24(SP)(R9*4), BX
- INCL CX
- MOVL CX, 24(SP)(R9*4)
- CMPL (DX)(BX*1), SI
+ SHRQ $0x08, DI
+ MOVQ DI, R10
+ SHLQ $0x08, R10
+ IMULQ R9, R10
+ SHRQ $0x2f, R10
+ MOVL (AX)(R10*4), SI
+ INCL DX
+ MOVL DX, (AX)(R10*4)
+ CMPL (BX)(SI*1), DI
JEQ candidate_match_encodeBetterBlockAsm
- DECL CX
- MOVL DI, BX
+ DECL DX
+ MOVL R8, SI
candidate_match_encodeBetterBlockAsm:
- MOVL 12(SP), SI
- TESTL BX, BX
+ MOVL 12(SP), DI
+ TESTL SI, SI
JZ match_extend_back_end_encodeBetterBlockAsm
match_extend_back_loop_encodeBetterBlockAsm:
- CMPL CX, SI
+ CMPL DX, DI
JBE match_extend_back_end_encodeBetterBlockAsm
- MOVB -1(DX)(BX*1), DI
- MOVB -1(DX)(CX*1), R8
- CMPB DI, R8
+ MOVB -1(BX)(SI*1), R8
+ MOVB -1(BX)(DX*1), R9
+ CMPB R8, R9
JNE match_extend_back_end_encodeBetterBlockAsm
- LEAL -1(CX), CX
- DECL BX
+ LEAL -1(DX), DX
+ DECL SI
JZ match_extend_back_end_encodeBetterBlockAsm
JMP match_extend_back_loop_encodeBetterBlockAsm
match_extend_back_end_encodeBetterBlockAsm:
- MOVL CX, SI
- SUBL 12(SP), SI
- LEAQ 5(AX)(SI*1), SI
- CMPQ SI, (SP)
+ MOVL DX, DI
+ SUBL 12(SP), DI
+ LEAQ 5(CX)(DI*1), DI
+ CMPQ DI, (SP)
JB match_dst_size_check_encodeBetterBlockAsm
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_dst_size_check_encodeBetterBlockAsm:
- MOVL CX, SI
- ADDL $0x04, CX
- ADDL $0x04, BX
- MOVQ src_len+32(FP), DI
- SUBL CX, DI
- LEAQ (DX)(CX*1), R8
- LEAQ (DX)(BX*1), R9
+ MOVL DX, DI
+ ADDL $0x04, DX
+ ADDL $0x04, SI
+ MOVQ src_len+32(FP), R8
+ SUBL DX, R8
+ LEAQ (BX)(DX*1), R9
+ LEAQ (BX)(SI*1), R10
// matchLen
- XORL R11, R11
+ XORL R12, R12
matchlen_loopback_16_match_nolit_encodeBetterBlockAsm:
- CMPL DI, $0x10
+ CMPL R8, $0x10
JB matchlen_match8_match_nolit_encodeBetterBlockAsm
- MOVQ (R8)(R11*1), R10
- MOVQ 8(R8)(R11*1), R12
- XORQ (R9)(R11*1), R10
+ MOVQ (R9)(R12*1), R11
+ MOVQ 8(R9)(R12*1), R13
+ XORQ (R10)(R12*1), R11
JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm
- XORQ 8(R9)(R11*1), R12
+ XORQ 8(R10)(R12*1), R13
JNZ matchlen_bsf_16match_nolit_encodeBetterBlockAsm
- LEAL -16(DI), DI
- LEAL 16(R11), R11
+ LEAL -16(R8), R8
+ LEAL 16(R12), R12
JMP matchlen_loopback_16_match_nolit_encodeBetterBlockAsm
matchlen_bsf_16match_nolit_encodeBetterBlockAsm:
#ifdef GOAMD64_v3
- TZCNTQ R12, R12
+ TZCNTQ R13, R13
#else
- BSFQ R12, R12
+ BSFQ R13, R13
#endif
- SARQ $0x03, R12
- LEAL 8(R11)(R12*1), R11
+ SARQ $0x03, R13
+ LEAL 8(R12)(R13*1), R12
JMP match_nolit_end_encodeBetterBlockAsm
matchlen_match8_match_nolit_encodeBetterBlockAsm:
- CMPL DI, $0x08
+ CMPL R8, $0x08
JB matchlen_match4_match_nolit_encodeBetterBlockAsm
- MOVQ (R8)(R11*1), R10
- XORQ (R9)(R11*1), R10
+ MOVQ (R9)(R12*1), R11
+ XORQ (R10)(R12*1), R11
JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm
- LEAL -8(DI), DI
- LEAL 8(R11), R11
+ LEAL -8(R8), R8
+ LEAL 8(R12), R12
JMP matchlen_match4_match_nolit_encodeBetterBlockAsm
matchlen_bsf_8_match_nolit_encodeBetterBlockAsm:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL (R11)(R10*1), R11
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
JMP match_nolit_end_encodeBetterBlockAsm
matchlen_match4_match_nolit_encodeBetterBlockAsm:
- CMPL DI, $0x04
+ CMPL R8, $0x04
JB matchlen_match2_match_nolit_encodeBetterBlockAsm
- MOVL (R8)(R11*1), R10
- CMPL (R9)(R11*1), R10
+ MOVL (R9)(R12*1), R11
+ CMPL (R10)(R12*1), R11
JNE matchlen_match2_match_nolit_encodeBetterBlockAsm
- LEAL -4(DI), DI
- LEAL 4(R11), R11
+ LEAL -4(R8), R8
+ LEAL 4(R12), R12
matchlen_match2_match_nolit_encodeBetterBlockAsm:
- CMPL DI, $0x01
+ CMPL R8, $0x01
JE matchlen_match1_match_nolit_encodeBetterBlockAsm
JB match_nolit_end_encodeBetterBlockAsm
- MOVW (R8)(R11*1), R10
- CMPW (R9)(R11*1), R10
+ MOVW (R9)(R12*1), R11
+ CMPW (R10)(R12*1), R11
JNE matchlen_match1_match_nolit_encodeBetterBlockAsm
- LEAL 2(R11), R11
- SUBL $0x02, DI
+ LEAL 2(R12), R12
+ SUBL $0x02, R8
JZ match_nolit_end_encodeBetterBlockAsm
matchlen_match1_match_nolit_encodeBetterBlockAsm:
- MOVB (R8)(R11*1), R10
- CMPB (R9)(R11*1), R10
+ MOVB (R9)(R12*1), R11
+ CMPB (R10)(R12*1), R11
JNE match_nolit_end_encodeBetterBlockAsm
- LEAL 1(R11), R11
+ LEAL 1(R12), R12
match_nolit_end_encodeBetterBlockAsm:
- MOVL CX, DI
- SUBL BX, DI
+ MOVL DX, R8
+ SUBL SI, R8
// Check if repeat
- CMPL 16(SP), DI
+ CMPL 16(SP), R8
JEQ match_is_repeat_encodeBetterBlockAsm
- CMPL R11, $0x01
+ CMPL R12, $0x01
JA match_length_ok_encodeBetterBlockAsm
- CMPL DI, $0x0000ffff
+ CMPL R8, $0x0000ffff
JBE match_length_ok_encodeBetterBlockAsm
- MOVL 20(SP), CX
- INCL CX
+ MOVL 20(SP), DX
+ INCL DX
JMP search_loop_encodeBetterBlockAsm
match_length_ok_encodeBetterBlockAsm:
- MOVL DI, 16(SP)
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL R8, 16(SP)
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_match_emit_encodeBetterBlockAsm
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R9
- SUBL BX, R8
- LEAL -1(R8), BX
- CMPL BX, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R10
+ SUBL SI, R9
+ LEAL -1(R9), SI
+ CMPL SI, $0x3c
JB one_byte_match_emit_encodeBetterBlockAsm
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_match_emit_encodeBetterBlockAsm
- CMPL BX, $0x00010000
+ CMPL SI, $0x00010000
JB three_bytes_match_emit_encodeBetterBlockAsm
- CMPL BX, $0x01000000
+ CMPL SI, $0x01000000
JB four_bytes_match_emit_encodeBetterBlockAsm
- MOVB $0xfc, (AX)
- MOVL BX, 1(AX)
- ADDQ $0x05, AX
+ MOVB $0xfc, (CX)
+ MOVL SI, 1(CX)
+ ADDQ $0x05, CX
JMP memmove_long_match_emit_encodeBetterBlockAsm
four_bytes_match_emit_encodeBetterBlockAsm:
- MOVL BX, R10
- SHRL $0x10, R10
- MOVB $0xf8, (AX)
- MOVW BX, 1(AX)
- MOVB R10, 3(AX)
- ADDQ $0x04, AX
+ MOVL SI, R11
+ SHRL $0x10, R11
+ MOVB $0xf8, (CX)
+ MOVW SI, 1(CX)
+ MOVB R11, 3(CX)
+ ADDQ $0x04, CX
JMP memmove_long_match_emit_encodeBetterBlockAsm
three_bytes_match_emit_encodeBetterBlockAsm:
- MOVB $0xf4, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_encodeBetterBlockAsm
two_bytes_match_emit_encodeBetterBlockAsm:
- MOVB $0xf0, (AX)
- MOVB BL, 1(AX)
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_match_emit_encodeBetterBlockAsm
JMP memmove_long_match_emit_encodeBetterBlockAsm
one_byte_match_emit_encodeBetterBlockAsm:
- SHLB $0x02, BL
- MOVB BL, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
memmove_match_emit_encodeBetterBlockAsm:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveShort
- CMPQ R8, $0x04
+ CMPQ R9, $0x04
JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JB emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4through7
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_33through64
emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4:
- MOVL (R9), R10
- MOVL R10, (AX)
+ MOVL (R10), R11
+ MOVL R11, (CX)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm
emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4through7:
- MOVL (R9), R10
- MOVL -4(R9)(R8*1), R9
- MOVL R10, (AX)
- MOVL R9, -4(AX)(R8*1)
+ MOVL (R10), R11
+ MOVL -4(R10)(R9*1), R10
+ MOVL R11, (CX)
+ MOVL R10, -4(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm
emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_8through16:
- MOVQ (R9), R10
- MOVQ -8(R9)(R8*1), R9
- MOVQ R10, (AX)
- MOVQ R9, -8(AX)(R8*1)
+ MOVQ (R10), R11
+ MOVQ -8(R10)(R9*1), R10
+ MOVQ R11, (CX)
+ MOVQ R10, -8(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm
emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_17through32:
- MOVOU (R9), X0
- MOVOU -16(R9)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU -16(R10)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm
emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_33through64:
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_match_emit_encodeBetterBlockAsm:
- MOVQ BX, AX
+ MOVQ SI, CX
JMP emit_literal_done_match_emit_encodeBetterBlockAsm
memmove_long_match_emit_encodeBetterBlockAsm:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveLong
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVQ R8, R12
- SHRQ $0x05, R12
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R13
- SUBQ R10, R13
- DECQ R12
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVQ R9, R13
+ SHRQ $0x05, R13
+ MOVQ CX, R11
+ ANDL $0x0000001f, R11
+ MOVQ $0x00000040, R14
+ SUBQ R11, R14
+ DECQ R13
JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_forward_sse_loop_32
- LEAQ -32(R9)(R13*1), R10
- LEAQ -32(AX)(R13*1), R14
+ LEAQ -32(R10)(R14*1), R11
+ LEAQ -32(CX)(R14*1), R15
emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R14)
- MOVOA X5, 16(R14)
+ MOVOU (R11), X4
+ MOVOU 16(R11), X5
+ MOVOA X4, (R15)
+ MOVOA X5, 16(R15)
+ ADDQ $0x20, R15
+ ADDQ $0x20, R11
ADDQ $0x20, R14
- ADDQ $0x20, R10
- ADDQ $0x20, R13
- DECQ R12
+ DECQ R13
JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_forward_sse_loop_32:
- MOVOU -32(R9)(R13*1), X4
- MOVOU -16(R9)(R13*1), X5
- MOVOA X4, -32(AX)(R13*1)
- MOVOA X5, -16(AX)(R13*1)
- ADDQ $0x20, R13
- CMPQ R8, R13
+ MOVOU -32(R10)(R14*1), X4
+ MOVOU -16(R10)(R14*1), X5
+ MOVOA X4, -32(CX)(R14*1)
+ MOVOA X5, -16(CX)(R14*1)
+ ADDQ $0x20, R14
+ CMPQ R9, R14
JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ BX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ SI, CX
emit_literal_done_match_emit_encodeBetterBlockAsm:
- ADDL R11, CX
- ADDL $0x04, R11
- MOVL CX, 12(SP)
+ ADDL R12, DX
+ ADDL $0x04, R12
+ MOVL DX, 12(SP)
// emitCopy
- CMPL DI, $0x00010000
+ CMPL R8, $0x00010000
JB two_byte_offset_match_nolit_encodeBetterBlockAsm
- CMPL R11, $0x40
+ CMPL R12, $0x40
JBE four_bytes_remain_match_nolit_encodeBetterBlockAsm
- MOVB $0xff, (AX)
- MOVL DI, 1(AX)
- LEAL -64(R11), R11
- ADDQ $0x05, AX
- CMPL R11, $0x04
+ MOVB $0xff, (CX)
+ MOVL R8, 1(CX)
+ LEAL -64(R12), R12
+ ADDQ $0x05, CX
+ CMPL R12, $0x04
JB four_bytes_remain_match_nolit_encodeBetterBlockAsm
// emitRepeat
emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy:
- MOVL R11, BX
- LEAL -4(R11), R11
- CMPL BX, $0x08
+ MOVL R12, SI
+ LEAL -4(R12), R12
+ CMPL SI, $0x08
JBE repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy
- CMPL BX, $0x0c
+ CMPL SI, $0x0c
JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JB repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy
cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy:
- CMPL R11, $0x00000104
+ CMPL R12, $0x00000104
JB repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy
- CMPL R11, $0x00010100
+ CMPL R12, $0x00010100
JB repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy
- CMPL R11, $0x0100ffff
+ CMPL R12, $0x0100ffff
JB repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy
- LEAL -16842747(R11), R11
- MOVL $0xfffb001d, (AX)
- MOVB $0xff, 4(AX)
- ADDQ $0x05, AX
+ LEAL -16842747(R12), R12
+ MOVL $0xfffb001d, (CX)
+ MOVB $0xff, 4(CX)
+ ADDQ $0x05, CX
JMP emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy
repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy:
- LEAL -65536(R11), R11
- MOVL R11, DI
- MOVW $0x001d, (AX)
- MOVW R11, 2(AX)
- SARL $0x10, DI
- MOVB DI, 4(AX)
- ADDQ $0x05, AX
+ LEAL -65536(R12), R12
+ MOVL R12, R8
+ MOVW $0x001d, (CX)
+ MOVW R12, 2(CX)
+ SARL $0x10, R8
+ MOVB R8, 4(CX)
+ ADDQ $0x05, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy:
- LEAL -256(R11), R11
- MOVW $0x0019, (AX)
- MOVW R11, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R12), R12
+ MOVW $0x0019, (CX)
+ MOVW R12, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy:
- LEAL -4(R11), R11
- MOVW $0x0015, (AX)
- MOVB R11, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R12), R12
+ MOVW $0x0015, (CX)
+ MOVB R12, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy:
- SHLL $0x02, R11
- ORL $0x01, R11
- MOVW R11, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R12
+ ORL $0x01, R12
+ MOVW R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy:
- XORQ BX, BX
- LEAL 1(BX)(R11*4), R11
- MOVB DI, 1(AX)
- SARL $0x08, DI
- SHLL $0x05, DI
- ORL DI, R11
- MOVB R11, (AX)
- ADDQ $0x02, AX
+ XORQ SI, SI
+ LEAL 1(SI)(R12*4), R12
+ MOVB R8, 1(CX)
+ SARL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, R12
+ MOVB R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
four_bytes_remain_match_nolit_encodeBetterBlockAsm:
- TESTL R11, R11
+ TESTL R12, R12
JZ match_nolit_emitcopy_end_encodeBetterBlockAsm
- XORL BX, BX
- LEAL -1(BX)(R11*4), R11
- MOVB R11, (AX)
- MOVL DI, 1(AX)
- ADDQ $0x05, AX
+ XORL SI, SI
+ LEAL -1(SI)(R12*4), R12
+ MOVB R12, (CX)
+ MOVL R8, 1(CX)
+ ADDQ $0x05, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
two_byte_offset_match_nolit_encodeBetterBlockAsm:
- CMPL R11, $0x40
+ CMPL R12, $0x40
JBE two_byte_offset_short_match_nolit_encodeBetterBlockAsm
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JAE long_offset_short_match_nolit_encodeBetterBlockAsm
- MOVL $0x00000001, BX
- LEAL 16(BX), BX
- MOVB DI, 1(AX)
- MOVL DI, R8
- SHRL $0x08, R8
- SHLL $0x05, R8
- ORL R8, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
- SUBL $0x08, R11
+ MOVL $0x00000001, SI
+ LEAL 16(SI), SI
+ MOVB R8, 1(CX)
+ MOVL R8, R9
+ SHRL $0x08, R9
+ SHLL $0x05, R9
+ ORL R9, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
+ SUBL $0x08, R12
// emitRepeat
- LEAL -4(R11), R11
+ LEAL -4(R12), R12
JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
- MOVL R11, BX
- LEAL -4(R11), R11
- CMPL BX, $0x08
+ MOVL R12, SI
+ LEAL -4(R12), R12
+ CMPL SI, $0x08
JBE repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
- CMPL BX, $0x0c
+ CMPL SI, $0x0c
JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JB repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
- CMPL R11, $0x00000104
+ CMPL R12, $0x00000104
JB repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
- CMPL R11, $0x00010100
+ CMPL R12, $0x00010100
JB repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
- CMPL R11, $0x0100ffff
+ CMPL R12, $0x0100ffff
JB repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
- LEAL -16842747(R11), R11
- MOVL $0xfffb001d, (AX)
- MOVB $0xff, 4(AX)
- ADDQ $0x05, AX
+ LEAL -16842747(R12), R12
+ MOVL $0xfffb001d, (CX)
+ MOVB $0xff, 4(CX)
+ ADDQ $0x05, CX
JMP emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
- LEAL -65536(R11), R11
- MOVL R11, DI
- MOVW $0x001d, (AX)
- MOVW R11, 2(AX)
- SARL $0x10, DI
- MOVB DI, 4(AX)
- ADDQ $0x05, AX
+ LEAL -65536(R12), R12
+ MOVL R12, R8
+ MOVW $0x001d, (CX)
+ MOVW R12, 2(CX)
+ SARL $0x10, R8
+ MOVB R8, 4(CX)
+ ADDQ $0x05, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
- LEAL -256(R11), R11
- MOVW $0x0019, (AX)
- MOVW R11, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R12), R12
+ MOVW $0x0019, (CX)
+ MOVW R12, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
- LEAL -4(R11), R11
- MOVW $0x0015, (AX)
- MOVB R11, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R12), R12
+ MOVW $0x0015, (CX)
+ MOVB R12, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
- SHLL $0x02, R11
- ORL $0x01, R11
- MOVW R11, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R12
+ ORL $0x01, R12
+ MOVW R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
- XORQ BX, BX
- LEAL 1(BX)(R11*4), R11
- MOVB DI, 1(AX)
- SARL $0x08, DI
- SHLL $0x05, DI
- ORL DI, R11
- MOVB R11, (AX)
- ADDQ $0x02, AX
+ XORQ SI, SI
+ LEAL 1(SI)(R12*4), R12
+ MOVB R8, 1(CX)
+ SARL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, R12
+ MOVB R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
long_offset_short_match_nolit_encodeBetterBlockAsm:
- MOVB $0xee, (AX)
- MOVW DI, 1(AX)
- LEAL -60(R11), R11
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW R8, 1(CX)
+ LEAL -60(R12), R12
+ ADDQ $0x03, CX
// emitRepeat
emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short:
- MOVL R11, BX
- LEAL -4(R11), R11
- CMPL BX, $0x08
+ MOVL R12, SI
+ LEAL -4(R12), R12
+ CMPL SI, $0x08
JBE repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short
- CMPL BX, $0x0c
+ CMPL SI, $0x0c
JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JB repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short
cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short:
- CMPL R11, $0x00000104
+ CMPL R12, $0x00000104
JB repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short
- CMPL R11, $0x00010100
+ CMPL R12, $0x00010100
JB repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short
- CMPL R11, $0x0100ffff
+ CMPL R12, $0x0100ffff
JB repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short
- LEAL -16842747(R11), R11
- MOVL $0xfffb001d, (AX)
- MOVB $0xff, 4(AX)
- ADDQ $0x05, AX
+ LEAL -16842747(R12), R12
+ MOVL $0xfffb001d, (CX)
+ MOVB $0xff, 4(CX)
+ ADDQ $0x05, CX
JMP emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short
repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short:
- LEAL -65536(R11), R11
- MOVL R11, DI
- MOVW $0x001d, (AX)
- MOVW R11, 2(AX)
- SARL $0x10, DI
- MOVB DI, 4(AX)
- ADDQ $0x05, AX
+ LEAL -65536(R12), R12
+ MOVL R12, R8
+ MOVW $0x001d, (CX)
+ MOVW R12, 2(CX)
+ SARL $0x10, R8
+ MOVB R8, 4(CX)
+ ADDQ $0x05, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short:
- LEAL -256(R11), R11
- MOVW $0x0019, (AX)
- MOVW R11, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R12), R12
+ MOVW $0x0019, (CX)
+ MOVW R12, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short:
- LEAL -4(R11), R11
- MOVW $0x0015, (AX)
- MOVB R11, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R12), R12
+ MOVW $0x0015, (CX)
+ MOVB R12, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short:
- SHLL $0x02, R11
- ORL $0x01, R11
- MOVW R11, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R12
+ ORL $0x01, R12
+ MOVW R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short:
- XORQ BX, BX
- LEAL 1(BX)(R11*4), R11
- MOVB DI, 1(AX)
- SARL $0x08, DI
- SHLL $0x05, DI
- ORL DI, R11
- MOVB R11, (AX)
- ADDQ $0x02, AX
+ XORQ SI, SI
+ LEAL 1(SI)(R12*4), R12
+ MOVB R8, 1(CX)
+ SARL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, R12
+ MOVB R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
two_byte_offset_short_match_nolit_encodeBetterBlockAsm:
- MOVL R11, BX
- SHLL $0x02, BX
- CMPL R11, $0x0c
+ MOVL R12, SI
+ SHLL $0x02, SI
+ CMPL R12, $0x0c
JAE emit_copy_three_match_nolit_encodeBetterBlockAsm
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JAE emit_copy_three_match_nolit_encodeBetterBlockAsm
- LEAL -15(BX), BX
- MOVB DI, 1(AX)
- SHRL $0x08, DI
- SHLL $0x05, DI
- ORL DI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ LEAL -15(SI), SI
+ MOVB R8, 1(CX)
+ SHRL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
emit_copy_three_match_nolit_encodeBetterBlockAsm:
- LEAL -2(BX), BX
- MOVB BL, (AX)
- MOVW DI, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(SI), SI
+ MOVB SI, (CX)
+ MOVW R8, 1(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
match_is_repeat_encodeBetterBlockAsm:
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R9
- SUBL BX, R8
- LEAL -1(R8), BX
- CMPL BX, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R10
+ SUBL SI, R9
+ LEAL -1(R9), SI
+ CMPL SI, $0x3c
JB one_byte_match_emit_repeat_encodeBetterBlockAsm
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_match_emit_repeat_encodeBetterBlockAsm
- CMPL BX, $0x00010000
+ CMPL SI, $0x00010000
JB three_bytes_match_emit_repeat_encodeBetterBlockAsm
- CMPL BX, $0x01000000
+ CMPL SI, $0x01000000
JB four_bytes_match_emit_repeat_encodeBetterBlockAsm
- MOVB $0xfc, (AX)
- MOVL BX, 1(AX)
- ADDQ $0x05, AX
+ MOVB $0xfc, (CX)
+ MOVL SI, 1(CX)
+ ADDQ $0x05, CX
JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm
four_bytes_match_emit_repeat_encodeBetterBlockAsm:
- MOVL BX, R10
- SHRL $0x10, R10
- MOVB $0xf8, (AX)
- MOVW BX, 1(AX)
- MOVB R10, 3(AX)
- ADDQ $0x04, AX
+ MOVL SI, R11
+ SHRL $0x10, R11
+ MOVB $0xf8, (CX)
+ MOVW SI, 1(CX)
+ MOVB R11, 3(CX)
+ ADDQ $0x04, CX
JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm
three_bytes_match_emit_repeat_encodeBetterBlockAsm:
- MOVB $0xf4, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm
two_bytes_match_emit_repeat_encodeBetterBlockAsm:
- MOVB $0xf0, (AX)
- MOVB BL, 1(AX)
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_match_emit_repeat_encodeBetterBlockAsm
JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm
one_byte_match_emit_repeat_encodeBetterBlockAsm:
- SHLB $0x02, BL
- MOVB BL, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
memmove_match_emit_repeat_encodeBetterBlockAsm:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveShort
- CMPQ R8, $0x04
+ CMPQ R9, $0x04
JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4through7
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_17through32
JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_33through64
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4:
- MOVL (R9), R10
- MOVL R10, (AX)
+ MOVL (R10), R11
+ MOVL R11, (CX)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4through7:
- MOVL (R9), R10
- MOVL -4(R9)(R8*1), R9
- MOVL R10, (AX)
- MOVL R9, -4(AX)(R8*1)
+ MOVL (R10), R11
+ MOVL -4(R10)(R9*1), R10
+ MOVL R11, (CX)
+ MOVL R10, -4(CX)(R9*1)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_8through16:
- MOVQ (R9), R10
- MOVQ -8(R9)(R8*1), R9
- MOVQ R10, (AX)
- MOVQ R9, -8(AX)(R8*1)
+ MOVQ (R10), R11
+ MOVQ -8(R10)(R9*1), R10
+ MOVQ R11, (CX)
+ MOVQ R10, -8(CX)(R9*1)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_17through32:
- MOVOU (R9), X0
- MOVOU -16(R9)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU -16(R10)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_33through64:
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm:
- MOVQ BX, AX
+ MOVQ SI, CX
JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm
memmove_long_match_emit_repeat_encodeBetterBlockAsm:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveLong
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVQ R8, R12
- SHRQ $0x05, R12
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R13
- SUBQ R10, R13
- DECQ R12
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVQ R9, R13
+ SHRQ $0x05, R13
+ MOVQ CX, R11
+ ANDL $0x0000001f, R11
+ MOVQ $0x00000040, R14
+ SUBQ R11, R14
+ DECQ R13
JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_forward_sse_loop_32
- LEAQ -32(R9)(R13*1), R10
- LEAQ -32(AX)(R13*1), R14
+ LEAQ -32(R10)(R14*1), R11
+ LEAQ -32(CX)(R14*1), R15
emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R14)
- MOVOA X5, 16(R14)
+ MOVOU (R11), X4
+ MOVOU 16(R11), X5
+ MOVOA X4, (R15)
+ MOVOA X5, 16(R15)
+ ADDQ $0x20, R15
+ ADDQ $0x20, R11
ADDQ $0x20, R14
- ADDQ $0x20, R10
- ADDQ $0x20, R13
- DECQ R12
+ DECQ R13
JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_big_loop_back
emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_forward_sse_loop_32:
- MOVOU -32(R9)(R13*1), X4
- MOVOU -16(R9)(R13*1), X5
- MOVOA X4, -32(AX)(R13*1)
- MOVOA X5, -16(AX)(R13*1)
- ADDQ $0x20, R13
- CMPQ R8, R13
+ MOVOU -32(R10)(R14*1), X4
+ MOVOU -16(R10)(R14*1), X5
+ MOVOA X4, -32(CX)(R14*1)
+ MOVOA X5, -16(CX)(R14*1)
+ ADDQ $0x20, R14
+ CMPQ R9, R14
JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ BX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ SI, CX
emit_literal_done_match_emit_repeat_encodeBetterBlockAsm:
- ADDL R11, CX
- ADDL $0x04, R11
- MOVL CX, 12(SP)
+ ADDL R12, DX
+ ADDL $0x04, R12
+ MOVL DX, 12(SP)
// emitRepeat
emit_repeat_again_match_nolit_repeat_encodeBetterBlockAsm:
- MOVL R11, BX
- LEAL -4(R11), R11
- CMPL BX, $0x08
+ MOVL R12, SI
+ LEAL -4(R12), R12
+ CMPL SI, $0x08
JBE repeat_two_match_nolit_repeat_encodeBetterBlockAsm
- CMPL BX, $0x0c
+ CMPL SI, $0x0c
JAE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JB repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm
cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm:
- CMPL R11, $0x00000104
+ CMPL R12, $0x00000104
JB repeat_three_match_nolit_repeat_encodeBetterBlockAsm
- CMPL R11, $0x00010100
+ CMPL R12, $0x00010100
JB repeat_four_match_nolit_repeat_encodeBetterBlockAsm
- CMPL R11, $0x0100ffff
+ CMPL R12, $0x0100ffff
JB repeat_five_match_nolit_repeat_encodeBetterBlockAsm
- LEAL -16842747(R11), R11
- MOVL $0xfffb001d, (AX)
- MOVB $0xff, 4(AX)
- ADDQ $0x05, AX
+ LEAL -16842747(R12), R12
+ MOVL $0xfffb001d, (CX)
+ MOVB $0xff, 4(CX)
+ ADDQ $0x05, CX
JMP emit_repeat_again_match_nolit_repeat_encodeBetterBlockAsm
repeat_five_match_nolit_repeat_encodeBetterBlockAsm:
- LEAL -65536(R11), R11
- MOVL R11, DI
- MOVW $0x001d, (AX)
- MOVW R11, 2(AX)
- SARL $0x10, DI
- MOVB DI, 4(AX)
- ADDQ $0x05, AX
+ LEAL -65536(R12), R12
+ MOVL R12, R8
+ MOVW $0x001d, (CX)
+ MOVW R12, 2(CX)
+ SARL $0x10, R8
+ MOVB R8, 4(CX)
+ ADDQ $0x05, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
repeat_four_match_nolit_repeat_encodeBetterBlockAsm:
- LEAL -256(R11), R11
- MOVW $0x0019, (AX)
- MOVW R11, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R12), R12
+ MOVW $0x0019, (CX)
+ MOVW R12, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
repeat_three_match_nolit_repeat_encodeBetterBlockAsm:
- LEAL -4(R11), R11
- MOVW $0x0015, (AX)
- MOVB R11, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R12), R12
+ MOVW $0x0015, (CX)
+ MOVB R12, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
repeat_two_match_nolit_repeat_encodeBetterBlockAsm:
- SHLL $0x02, R11
- ORL $0x01, R11
- MOVW R11, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R12
+ ORL $0x01, R12
+ MOVW R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm:
- XORQ BX, BX
- LEAL 1(BX)(R11*4), R11
- MOVB DI, 1(AX)
- SARL $0x08, DI
- SHLL $0x05, DI
- ORL DI, R11
- MOVB R11, (AX)
- ADDQ $0x02, AX
+ XORQ SI, SI
+ LEAL 1(SI)(R12*4), R12
+ MOVB R8, 1(CX)
+ SARL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, R12
+ MOVB R12, (CX)
+ ADDQ $0x02, CX
match_nolit_emitcopy_end_encodeBetterBlockAsm:
- CMPL CX, 8(SP)
+ CMPL DX, 8(SP)
JAE emit_remainder_encodeBetterBlockAsm
- CMPQ AX, (SP)
- JB match_nolit_dst_ok_encodeBetterBlockAsm
- MOVQ $0x00000000, ret+48(FP)
- RET
-
-match_nolit_dst_ok_encodeBetterBlockAsm:
- MOVQ $0x00cf1bbcdcbfa563, BX
- MOVQ $0x9e3779b1, DI
- LEAQ 1(SI), SI
- LEAQ -2(CX), R8
- MOVQ (DX)(SI*1), R9
- MOVQ 1(DX)(SI*1), R10
- MOVQ (DX)(R8*1), R11
- MOVQ 1(DX)(R8*1), R12
- SHLQ $0x08, R9
- IMULQ BX, R9
- SHRQ $0x2f, R9
- SHLQ $0x20, R10
- IMULQ DI, R10
- SHRQ $0x32, R10
- SHLQ $0x08, R11
- IMULQ BX, R11
- SHRQ $0x2f, R11
- SHLQ $0x20, R12
- IMULQ DI, R12
- SHRQ $0x32, R12
- LEAQ 1(SI), DI
- LEAQ 1(R8), R13
- MOVL SI, 24(SP)(R9*4)
- MOVL R8, 24(SP)(R11*4)
- MOVL DI, 524312(SP)(R10*4)
- MOVL R13, 524312(SP)(R12*4)
- LEAQ 1(R8)(SI*1), DI
- SHRQ $0x01, DI
- ADDQ $0x01, SI
- SUBQ $0x01, R8
+ CMPQ CX, (SP)
+ JB match_nolit_dst_ok_encodeBetterBlockAsm
+ MOVQ $0x00000000, ret+56(FP)
+ RET
+
+match_nolit_dst_ok_encodeBetterBlockAsm:
+ MOVQ $0x00cf1bbcdcbfa563, SI
+ MOVQ $0x9e3779b1, R8
+ LEAQ 1(DI), DI
+ LEAQ -2(DX), R9
+ MOVQ (BX)(DI*1), R10
+ MOVQ 1(BX)(DI*1), R11
+ MOVQ (BX)(R9*1), R12
+ MOVQ 1(BX)(R9*1), R13
+ SHLQ $0x08, R10
+ IMULQ SI, R10
+ SHRQ $0x2f, R10
+ SHLQ $0x20, R11
+ IMULQ R8, R11
+ SHRQ $0x32, R11
+ SHLQ $0x08, R12
+ IMULQ SI, R12
+ SHRQ $0x2f, R12
+ SHLQ $0x20, R13
+ IMULQ R8, R13
+ SHRQ $0x32, R13
+ LEAQ 1(DI), R8
+ LEAQ 1(R9), R14
+ MOVL DI, (AX)(R10*4)
+ MOVL R9, (AX)(R12*4)
+ MOVL R8, 524288(AX)(R11*4)
+ MOVL R14, 524288(AX)(R13*4)
+ LEAQ 1(R9)(DI*1), R8
+ SHRQ $0x01, R8
+ ADDQ $0x01, DI
+ SUBQ $0x01, R9
index_loop_encodeBetterBlockAsm:
- CMPQ DI, R8
+ CMPQ R8, R9
JAE search_loop_encodeBetterBlockAsm
- MOVQ (DX)(SI*1), R9
- MOVQ (DX)(DI*1), R10
- SHLQ $0x08, R9
- IMULQ BX, R9
- SHRQ $0x2f, R9
+ MOVQ (BX)(DI*1), R10
+ MOVQ (BX)(R8*1), R11
SHLQ $0x08, R10
- IMULQ BX, R10
+ IMULQ SI, R10
SHRQ $0x2f, R10
- MOVL SI, 24(SP)(R9*4)
- MOVL DI, 24(SP)(R10*4)
- ADDQ $0x02, SI
+ SHLQ $0x08, R11
+ IMULQ SI, R11
+ SHRQ $0x2f, R11
+ MOVL DI, (AX)(R10*4)
+ MOVL R8, (AX)(R11*4)
ADDQ $0x02, DI
+ ADDQ $0x02, R8
JMP index_loop_encodeBetterBlockAsm
emit_remainder_encodeBetterBlockAsm:
- MOVQ src_len+32(FP), CX
- SUBL 12(SP), CX
- LEAQ 5(AX)(CX*1), CX
- CMPQ CX, (SP)
+ MOVQ src_len+32(FP), AX
+ SUBL 12(SP), AX
+ LEAQ 5(CX)(AX*1), AX
+ CMPQ AX, (SP)
JB emit_remainder_ok_encodeBetterBlockAsm
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
emit_remainder_ok_encodeBetterBlockAsm:
- MOVQ src_len+32(FP), CX
- MOVL 12(SP), BX
- CMPL BX, CX
+ MOVQ src_len+32(FP), AX
+ MOVL 12(SP), DX
+ CMPL DX, AX
JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm
- MOVL CX, SI
- MOVL CX, 12(SP)
- LEAQ (DX)(BX*1), CX
- SUBL BX, SI
+ MOVL AX, SI
+ MOVL AX, 12(SP)
+ LEAQ (BX)(DX*1), AX
+ SUBL DX, SI
LEAL -1(SI), DX
CMPL DX, $0x3c
JB one_byte_emit_remainder_encodeBetterBlockAsm
@@ -6932,41 +6938,41 @@ emit_remainder_ok_encodeBetterBlockAsm:
JB three_bytes_emit_remainder_encodeBetterBlockAsm
CMPL DX, $0x01000000
JB four_bytes_emit_remainder_encodeBetterBlockAsm
- MOVB $0xfc, (AX)
- MOVL DX, 1(AX)
- ADDQ $0x05, AX
+ MOVB $0xfc, (CX)
+ MOVL DX, 1(CX)
+ ADDQ $0x05, CX
JMP memmove_long_emit_remainder_encodeBetterBlockAsm
four_bytes_emit_remainder_encodeBetterBlockAsm:
MOVL DX, BX
SHRL $0x10, BX
- MOVB $0xf8, (AX)
- MOVW DX, 1(AX)
- MOVB BL, 3(AX)
- ADDQ $0x04, AX
+ MOVB $0xf8, (CX)
+ MOVW DX, 1(CX)
+ MOVB BL, 3(CX)
+ ADDQ $0x04, CX
JMP memmove_long_emit_remainder_encodeBetterBlockAsm
three_bytes_emit_remainder_encodeBetterBlockAsm:
- MOVB $0xf4, (AX)
- MOVW DX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW DX, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_emit_remainder_encodeBetterBlockAsm
two_bytes_emit_remainder_encodeBetterBlockAsm:
- MOVB $0xf0, (AX)
- MOVB DL, 1(AX)
- ADDQ $0x02, AX
+ MOVB $0xf0, (CX)
+ MOVB DL, 1(CX)
+ ADDQ $0x02, CX
CMPL DX, $0x40
JB memmove_emit_remainder_encodeBetterBlockAsm
JMP memmove_long_emit_remainder_encodeBetterBlockAsm
one_byte_emit_remainder_encodeBetterBlockAsm:
SHLB $0x02, DL
- MOVB DL, (AX)
- ADDQ $0x01, AX
+ MOVB DL, (CX)
+ ADDQ $0x01, CX
memmove_emit_remainder_encodeBetterBlockAsm:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveShort
@@ -6982,73 +6988,73 @@ memmove_emit_remainder_encodeBetterBlockAsm:
JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_33through64
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_1or2:
- MOVB (CX), SI
- MOVB -1(CX)(BX*1), CL
- MOVB SI, (AX)
- MOVB CL, -1(AX)(BX*1)
+ MOVB (AX), SI
+ MOVB -1(AX)(BX*1), AL
+ MOVB SI, (CX)
+ MOVB AL, -1(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_3:
- MOVW (CX), SI
- MOVB 2(CX), CL
- MOVW SI, (AX)
- MOVB CL, 2(AX)
+ MOVW (AX), SI
+ MOVB 2(AX), AL
+ MOVW SI, (CX)
+ MOVB AL, 2(CX)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_4through7:
- MOVL (CX), SI
- MOVL -4(CX)(BX*1), CX
- MOVL SI, (AX)
- MOVL CX, -4(AX)(BX*1)
+ MOVL (AX), SI
+ MOVL -4(AX)(BX*1), AX
+ MOVL SI, (CX)
+ MOVL AX, -4(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_8through16:
- MOVQ (CX), SI
- MOVQ -8(CX)(BX*1), CX
- MOVQ SI, (AX)
- MOVQ CX, -8(AX)(BX*1)
+ MOVQ (AX), SI
+ MOVQ -8(AX)(BX*1), AX
+ MOVQ SI, (CX)
+ MOVQ AX, -8(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_17through32:
- MOVOU (CX), X0
- MOVOU -16(CX)(BX*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU -16(AX)(BX*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_33through64:
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
memmove_end_copy_emit_remainder_encodeBetterBlockAsm:
- MOVQ DX, AX
+ MOVQ DX, CX
JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm
memmove_long_emit_remainder_encodeBetterBlockAsm:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveLong
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
MOVQ BX, DI
SHRQ $0x05, DI
- MOVQ AX, SI
+ MOVQ CX, SI
ANDL $0x0000001f, SI
MOVQ $0x00000040, R8
SUBQ SI, R8
DECQ DI
JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_forward_sse_loop_32
- LEAQ -32(CX)(R8*1), SI
- LEAQ -32(AX)(R8*1), R9
+ LEAQ -32(AX)(R8*1), SI
+ LEAQ -32(CX)(R8*1), R9
emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_big_loop_back:
MOVOU (SI), X4
@@ -7062,903 +7068,904 @@ emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_big_loop_back:
JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_big_loop_back
emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_forward_sse_loop_32:
- MOVOU -32(CX)(R8*1), X4
- MOVOU -16(CX)(R8*1), X5
- MOVOA X4, -32(AX)(R8*1)
- MOVOA X5, -16(AX)(R8*1)
+ MOVOU -32(AX)(R8*1), X4
+ MOVOU -16(AX)(R8*1), X5
+ MOVOA X4, -32(CX)(R8*1)
+ MOVOA X5, -16(CX)(R8*1)
ADDQ $0x20, R8
CMPQ BX, R8
JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
- MOVQ DX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
+ MOVQ DX, CX
emit_literal_done_emit_remainder_encodeBetterBlockAsm:
- MOVQ dst_base+0(FP), CX
- SUBQ CX, AX
- MOVQ AX, ret+48(FP)
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, CX
+ MOVQ CX, ret+56(FP)
RET
-// func encodeBetterBlockAsm4MB(dst []byte, src []byte) int
+// func encodeBetterBlockAsm4MB(dst []byte, src []byte, tmp *[589824]byte) int
// Requires: BMI, SSE2
-TEXT ·encodeBetterBlockAsm4MB(SB), $589848-56
- MOVQ dst_base+0(FP), AX
- MOVQ $0x00001200, CX
- LEAQ 24(SP), DX
+TEXT ·encodeBetterBlockAsm4MB(SB), $24-64
+ MOVQ tmp+48(FP), AX
+ MOVQ dst_base+0(FP), CX
+ MOVQ $0x00001200, DX
+ MOVQ AX, BX
PXOR X0, X0
zero_loop_encodeBetterBlockAsm4MB:
- MOVOU X0, (DX)
- MOVOU X0, 16(DX)
- MOVOU X0, 32(DX)
- MOVOU X0, 48(DX)
- MOVOU X0, 64(DX)
- MOVOU X0, 80(DX)
- MOVOU X0, 96(DX)
- MOVOU X0, 112(DX)
- ADDQ $0x80, DX
- DECQ CX
+ MOVOU X0, (BX)
+ MOVOU X0, 16(BX)
+ MOVOU X0, 32(BX)
+ MOVOU X0, 48(BX)
+ MOVOU X0, 64(BX)
+ MOVOU X0, 80(BX)
+ MOVOU X0, 96(BX)
+ MOVOU X0, 112(BX)
+ ADDQ $0x80, BX
+ DECQ DX
JNZ zero_loop_encodeBetterBlockAsm4MB
MOVL $0x00000000, 12(SP)
- MOVQ src_len+32(FP), CX
- LEAQ -6(CX), DX
- LEAQ -8(CX), BX
- MOVL BX, 8(SP)
- SHRQ $0x05, CX
- SUBL CX, DX
- LEAQ (AX)(DX*1), DX
- MOVQ DX, (SP)
- MOVL $0x00000001, CX
+ MOVQ src_len+32(FP), DX
+ LEAQ -6(DX), BX
+ LEAQ -8(DX), SI
+ MOVL SI, 8(SP)
+ SHRQ $0x05, DX
+ SUBL DX, BX
+ LEAQ (CX)(BX*1), BX
+ MOVQ BX, (SP)
+ MOVL $0x00000001, DX
MOVL $0x00000000, 16(SP)
- MOVQ src_base+24(FP), DX
+ MOVQ src_base+24(FP), BX
search_loop_encodeBetterBlockAsm4MB:
- MOVL CX, BX
- SUBL 12(SP), BX
- SHRL $0x07, BX
- CMPL BX, $0x63
+ MOVL DX, SI
+ SUBL 12(SP), SI
+ SHRL $0x07, SI
+ CMPL SI, $0x63
JBE check_maxskip_ok_encodeBetterBlockAsm4MB
- LEAL 100(CX), BX
+ LEAL 100(DX), SI
JMP check_maxskip_cont_encodeBetterBlockAsm4MB
check_maxskip_ok_encodeBetterBlockAsm4MB:
- LEAL 1(CX)(BX*1), BX
+ LEAL 1(DX)(SI*1), SI
check_maxskip_cont_encodeBetterBlockAsm4MB:
- CMPL BX, 8(SP)
+ CMPL SI, 8(SP)
JAE emit_remainder_encodeBetterBlockAsm4MB
- MOVQ (DX)(CX*1), SI
- MOVL BX, 20(SP)
- MOVQ $0x00cf1bbcdcbfa563, R8
- MOVQ $0x9e3779b1, BX
- MOVQ SI, R9
- MOVQ SI, R10
- SHLQ $0x08, R9
- IMULQ R8, R9
- SHRQ $0x2f, R9
- SHLQ $0x20, R10
- IMULQ BX, R10
- SHRQ $0x32, R10
- MOVL 24(SP)(R9*4), BX
- MOVL 524312(SP)(R10*4), DI
- MOVL CX, 24(SP)(R9*4)
- MOVL CX, 524312(SP)(R10*4)
- MOVQ (DX)(BX*1), R9
- MOVQ (DX)(DI*1), R10
- CMPQ R9, SI
+ MOVQ (BX)(DX*1), DI
+ MOVL SI, 20(SP)
+ MOVQ $0x00cf1bbcdcbfa563, R9
+ MOVQ $0x9e3779b1, SI
+ MOVQ DI, R10
+ MOVQ DI, R11
+ SHLQ $0x08, R10
+ IMULQ R9, R10
+ SHRQ $0x2f, R10
+ SHLQ $0x20, R11
+ IMULQ SI, R11
+ SHRQ $0x32, R11
+ MOVL (AX)(R10*4), SI
+ MOVL 524288(AX)(R11*4), R8
+ MOVL DX, (AX)(R10*4)
+ MOVL DX, 524288(AX)(R11*4)
+ MOVQ (BX)(SI*1), R10
+ MOVQ (BX)(R8*1), R11
+ CMPQ R10, DI
JEQ candidate_match_encodeBetterBlockAsm4MB
- CMPQ R10, SI
+ CMPQ R11, DI
JNE no_short_found_encodeBetterBlockAsm4MB
- MOVL DI, BX
+ MOVL R8, SI
JMP candidate_match_encodeBetterBlockAsm4MB
no_short_found_encodeBetterBlockAsm4MB:
- CMPL R9, SI
+ CMPL R10, DI
JEQ candidate_match_encodeBetterBlockAsm4MB
- CMPL R10, SI
+ CMPL R11, DI
JEQ candidateS_match_encodeBetterBlockAsm4MB
- MOVL 20(SP), CX
+ MOVL 20(SP), DX
JMP search_loop_encodeBetterBlockAsm4MB
candidateS_match_encodeBetterBlockAsm4MB:
- SHRQ $0x08, SI
- MOVQ SI, R9
- SHLQ $0x08, R9
- IMULQ R8, R9
- SHRQ $0x2f, R9
- MOVL 24(SP)(R9*4), BX
- INCL CX
- MOVL CX, 24(SP)(R9*4)
- CMPL (DX)(BX*1), SI
+ SHRQ $0x08, DI
+ MOVQ DI, R10
+ SHLQ $0x08, R10
+ IMULQ R9, R10
+ SHRQ $0x2f, R10
+ MOVL (AX)(R10*4), SI
+ INCL DX
+ MOVL DX, (AX)(R10*4)
+ CMPL (BX)(SI*1), DI
JEQ candidate_match_encodeBetterBlockAsm4MB
- DECL CX
- MOVL DI, BX
+ DECL DX
+ MOVL R8, SI
candidate_match_encodeBetterBlockAsm4MB:
- MOVL 12(SP), SI
- TESTL BX, BX
+ MOVL 12(SP), DI
+ TESTL SI, SI
JZ match_extend_back_end_encodeBetterBlockAsm4MB
match_extend_back_loop_encodeBetterBlockAsm4MB:
- CMPL CX, SI
+ CMPL DX, DI
JBE match_extend_back_end_encodeBetterBlockAsm4MB
- MOVB -1(DX)(BX*1), DI
- MOVB -1(DX)(CX*1), R8
- CMPB DI, R8
+ MOVB -1(BX)(SI*1), R8
+ MOVB -1(BX)(DX*1), R9
+ CMPB R8, R9
JNE match_extend_back_end_encodeBetterBlockAsm4MB
- LEAL -1(CX), CX
- DECL BX
+ LEAL -1(DX), DX
+ DECL SI
JZ match_extend_back_end_encodeBetterBlockAsm4MB
JMP match_extend_back_loop_encodeBetterBlockAsm4MB
match_extend_back_end_encodeBetterBlockAsm4MB:
- MOVL CX, SI
- SUBL 12(SP), SI
- LEAQ 4(AX)(SI*1), SI
- CMPQ SI, (SP)
+ MOVL DX, DI
+ SUBL 12(SP), DI
+ LEAQ 4(CX)(DI*1), DI
+ CMPQ DI, (SP)
JB match_dst_size_check_encodeBetterBlockAsm4MB
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_dst_size_check_encodeBetterBlockAsm4MB:
- MOVL CX, SI
- ADDL $0x04, CX
- ADDL $0x04, BX
- MOVQ src_len+32(FP), DI
- SUBL CX, DI
- LEAQ (DX)(CX*1), R8
- LEAQ (DX)(BX*1), R9
+ MOVL DX, DI
+ ADDL $0x04, DX
+ ADDL $0x04, SI
+ MOVQ src_len+32(FP), R8
+ SUBL DX, R8
+ LEAQ (BX)(DX*1), R9
+ LEAQ (BX)(SI*1), R10
// matchLen
- XORL R11, R11
+ XORL R12, R12
matchlen_loopback_16_match_nolit_encodeBetterBlockAsm4MB:
- CMPL DI, $0x10
+ CMPL R8, $0x10
JB matchlen_match8_match_nolit_encodeBetterBlockAsm4MB
- MOVQ (R8)(R11*1), R10
- MOVQ 8(R8)(R11*1), R12
- XORQ (R9)(R11*1), R10
+ MOVQ (R9)(R12*1), R11
+ MOVQ 8(R9)(R12*1), R13
+ XORQ (R10)(R12*1), R11
JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm4MB
- XORQ 8(R9)(R11*1), R12
+ XORQ 8(R10)(R12*1), R13
JNZ matchlen_bsf_16match_nolit_encodeBetterBlockAsm4MB
- LEAL -16(DI), DI
- LEAL 16(R11), R11
+ LEAL -16(R8), R8
+ LEAL 16(R12), R12
JMP matchlen_loopback_16_match_nolit_encodeBetterBlockAsm4MB
matchlen_bsf_16match_nolit_encodeBetterBlockAsm4MB:
#ifdef GOAMD64_v3
- TZCNTQ R12, R12
+ TZCNTQ R13, R13
#else
- BSFQ R12, R12
+ BSFQ R13, R13
#endif
- SARQ $0x03, R12
- LEAL 8(R11)(R12*1), R11
+ SARQ $0x03, R13
+ LEAL 8(R12)(R13*1), R12
JMP match_nolit_end_encodeBetterBlockAsm4MB
matchlen_match8_match_nolit_encodeBetterBlockAsm4MB:
- CMPL DI, $0x08
+ CMPL R8, $0x08
JB matchlen_match4_match_nolit_encodeBetterBlockAsm4MB
- MOVQ (R8)(R11*1), R10
- XORQ (R9)(R11*1), R10
+ MOVQ (R9)(R12*1), R11
+ XORQ (R10)(R12*1), R11
JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm4MB
- LEAL -8(DI), DI
- LEAL 8(R11), R11
+ LEAL -8(R8), R8
+ LEAL 8(R12), R12
JMP matchlen_match4_match_nolit_encodeBetterBlockAsm4MB
matchlen_bsf_8_match_nolit_encodeBetterBlockAsm4MB:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL (R11)(R10*1), R11
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
JMP match_nolit_end_encodeBetterBlockAsm4MB
matchlen_match4_match_nolit_encodeBetterBlockAsm4MB:
- CMPL DI, $0x04
+ CMPL R8, $0x04
JB matchlen_match2_match_nolit_encodeBetterBlockAsm4MB
- MOVL (R8)(R11*1), R10
- CMPL (R9)(R11*1), R10
+ MOVL (R9)(R12*1), R11
+ CMPL (R10)(R12*1), R11
JNE matchlen_match2_match_nolit_encodeBetterBlockAsm4MB
- LEAL -4(DI), DI
- LEAL 4(R11), R11
+ LEAL -4(R8), R8
+ LEAL 4(R12), R12
matchlen_match2_match_nolit_encodeBetterBlockAsm4MB:
- CMPL DI, $0x01
+ CMPL R8, $0x01
JE matchlen_match1_match_nolit_encodeBetterBlockAsm4MB
JB match_nolit_end_encodeBetterBlockAsm4MB
- MOVW (R8)(R11*1), R10
- CMPW (R9)(R11*1), R10
+ MOVW (R9)(R12*1), R11
+ CMPW (R10)(R12*1), R11
JNE matchlen_match1_match_nolit_encodeBetterBlockAsm4MB
- LEAL 2(R11), R11
- SUBL $0x02, DI
+ LEAL 2(R12), R12
+ SUBL $0x02, R8
JZ match_nolit_end_encodeBetterBlockAsm4MB
matchlen_match1_match_nolit_encodeBetterBlockAsm4MB:
- MOVB (R8)(R11*1), R10
- CMPB (R9)(R11*1), R10
+ MOVB (R9)(R12*1), R11
+ CMPB (R10)(R12*1), R11
JNE match_nolit_end_encodeBetterBlockAsm4MB
- LEAL 1(R11), R11
+ LEAL 1(R12), R12
match_nolit_end_encodeBetterBlockAsm4MB:
- MOVL CX, DI
- SUBL BX, DI
+ MOVL DX, R8
+ SUBL SI, R8
// Check if repeat
- CMPL 16(SP), DI
+ CMPL 16(SP), R8
JEQ match_is_repeat_encodeBetterBlockAsm4MB
- CMPL R11, $0x01
+ CMPL R12, $0x01
JA match_length_ok_encodeBetterBlockAsm4MB
- CMPL DI, $0x0000ffff
+ CMPL R8, $0x0000ffff
JBE match_length_ok_encodeBetterBlockAsm4MB
- MOVL 20(SP), CX
- INCL CX
+ MOVL 20(SP), DX
+ INCL DX
JMP search_loop_encodeBetterBlockAsm4MB
match_length_ok_encodeBetterBlockAsm4MB:
- MOVL DI, 16(SP)
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL R8, 16(SP)
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_match_emit_encodeBetterBlockAsm4MB
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R9
- SUBL BX, R8
- LEAL -1(R8), BX
- CMPL BX, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R10
+ SUBL SI, R9
+ LEAL -1(R9), SI
+ CMPL SI, $0x3c
JB one_byte_match_emit_encodeBetterBlockAsm4MB
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_match_emit_encodeBetterBlockAsm4MB
- CMPL BX, $0x00010000
+ CMPL SI, $0x00010000
JB three_bytes_match_emit_encodeBetterBlockAsm4MB
- MOVL BX, R10
- SHRL $0x10, R10
- MOVB $0xf8, (AX)
- MOVW BX, 1(AX)
- MOVB R10, 3(AX)
- ADDQ $0x04, AX
+ MOVL SI, R11
+ SHRL $0x10, R11
+ MOVB $0xf8, (CX)
+ MOVW SI, 1(CX)
+ MOVB R11, 3(CX)
+ ADDQ $0x04, CX
JMP memmove_long_match_emit_encodeBetterBlockAsm4MB
three_bytes_match_emit_encodeBetterBlockAsm4MB:
- MOVB $0xf4, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_encodeBetterBlockAsm4MB
two_bytes_match_emit_encodeBetterBlockAsm4MB:
- MOVB $0xf0, (AX)
- MOVB BL, 1(AX)
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_match_emit_encodeBetterBlockAsm4MB
JMP memmove_long_match_emit_encodeBetterBlockAsm4MB
one_byte_match_emit_encodeBetterBlockAsm4MB:
- SHLB $0x02, BL
- MOVB BL, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
memmove_match_emit_encodeBetterBlockAsm4MB:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveShort
- CMPQ R8, $0x04
+ CMPQ R9, $0x04
JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JB emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4through7
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_33through64
emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4:
- MOVL (R9), R10
- MOVL R10, (AX)
+ MOVL (R10), R11
+ MOVL R11, (CX)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB
emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4through7:
- MOVL (R9), R10
- MOVL -4(R9)(R8*1), R9
- MOVL R10, (AX)
- MOVL R9, -4(AX)(R8*1)
+ MOVL (R10), R11
+ MOVL -4(R10)(R9*1), R10
+ MOVL R11, (CX)
+ MOVL R10, -4(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB
emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_8through16:
- MOVQ (R9), R10
- MOVQ -8(R9)(R8*1), R9
- MOVQ R10, (AX)
- MOVQ R9, -8(AX)(R8*1)
+ MOVQ (R10), R11
+ MOVQ -8(R10)(R9*1), R10
+ MOVQ R11, (CX)
+ MOVQ R10, -8(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB
emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_17through32:
- MOVOU (R9), X0
- MOVOU -16(R9)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU -16(R10)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB
emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_33through64:
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_match_emit_encodeBetterBlockAsm4MB:
- MOVQ BX, AX
+ MOVQ SI, CX
JMP emit_literal_done_match_emit_encodeBetterBlockAsm4MB
memmove_long_match_emit_encodeBetterBlockAsm4MB:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveLong
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVQ R8, R12
- SHRQ $0x05, R12
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R13
- SUBQ R10, R13
- DECQ R12
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVQ R9, R13
+ SHRQ $0x05, R13
+ MOVQ CX, R11
+ ANDL $0x0000001f, R11
+ MOVQ $0x00000040, R14
+ SUBQ R11, R14
+ DECQ R13
JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32
- LEAQ -32(R9)(R13*1), R10
- LEAQ -32(AX)(R13*1), R14
+ LEAQ -32(R10)(R14*1), R11
+ LEAQ -32(CX)(R14*1), R15
emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R14)
- MOVOA X5, 16(R14)
+ MOVOU (R11), X4
+ MOVOU 16(R11), X5
+ MOVOA X4, (R15)
+ MOVOA X5, 16(R15)
+ ADDQ $0x20, R15
+ ADDQ $0x20, R11
ADDQ $0x20, R14
- ADDQ $0x20, R10
- ADDQ $0x20, R13
- DECQ R12
+ DECQ R13
JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32:
- MOVOU -32(R9)(R13*1), X4
- MOVOU -16(R9)(R13*1), X5
- MOVOA X4, -32(AX)(R13*1)
- MOVOA X5, -16(AX)(R13*1)
- ADDQ $0x20, R13
- CMPQ R8, R13
+ MOVOU -32(R10)(R14*1), X4
+ MOVOU -16(R10)(R14*1), X5
+ MOVOA X4, -32(CX)(R14*1)
+ MOVOA X5, -16(CX)(R14*1)
+ ADDQ $0x20, R14
+ CMPQ R9, R14
JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ BX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ SI, CX
emit_literal_done_match_emit_encodeBetterBlockAsm4MB:
- ADDL R11, CX
- ADDL $0x04, R11
- MOVL CX, 12(SP)
+ ADDL R12, DX
+ ADDL $0x04, R12
+ MOVL DX, 12(SP)
// emitCopy
- CMPL DI, $0x00010000
+ CMPL R8, $0x00010000
JB two_byte_offset_match_nolit_encodeBetterBlockAsm4MB
- CMPL R11, $0x40
+ CMPL R12, $0x40
JBE four_bytes_remain_match_nolit_encodeBetterBlockAsm4MB
- MOVB $0xff, (AX)
- MOVL DI, 1(AX)
- LEAL -64(R11), R11
- ADDQ $0x05, AX
- CMPL R11, $0x04
+ MOVB $0xff, (CX)
+ MOVL R8, 1(CX)
+ LEAL -64(R12), R12
+ ADDQ $0x05, CX
+ CMPL R12, $0x04
JB four_bytes_remain_match_nolit_encodeBetterBlockAsm4MB
// emitRepeat
- MOVL R11, BX
- LEAL -4(R11), R11
- CMPL BX, $0x08
+ MOVL R12, SI
+ LEAL -4(R12), R12
+ CMPL SI, $0x08
JBE repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy
- CMPL BX, $0x0c
+ CMPL SI, $0x0c
JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JB repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy
cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy:
- CMPL R11, $0x00000104
+ CMPL R12, $0x00000104
JB repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy
- CMPL R11, $0x00010100
+ CMPL R12, $0x00010100
JB repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy
- LEAL -65536(R11), R11
- MOVL R11, DI
- MOVW $0x001d, (AX)
- MOVW R11, 2(AX)
- SARL $0x10, DI
- MOVB DI, 4(AX)
- ADDQ $0x05, AX
+ LEAL -65536(R12), R12
+ MOVL R12, R8
+ MOVW $0x001d, (CX)
+ MOVW R12, 2(CX)
+ SARL $0x10, R8
+ MOVB R8, 4(CX)
+ ADDQ $0x05, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy:
- LEAL -256(R11), R11
- MOVW $0x0019, (AX)
- MOVW R11, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R12), R12
+ MOVW $0x0019, (CX)
+ MOVW R12, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy:
- LEAL -4(R11), R11
- MOVW $0x0015, (AX)
- MOVB R11, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R12), R12
+ MOVW $0x0015, (CX)
+ MOVB R12, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy:
- SHLL $0x02, R11
- ORL $0x01, R11
- MOVW R11, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R12
+ ORL $0x01, R12
+ MOVW R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy:
- XORQ BX, BX
- LEAL 1(BX)(R11*4), R11
- MOVB DI, 1(AX)
- SARL $0x08, DI
- SHLL $0x05, DI
- ORL DI, R11
- MOVB R11, (AX)
- ADDQ $0x02, AX
+ XORQ SI, SI
+ LEAL 1(SI)(R12*4), R12
+ MOVB R8, 1(CX)
+ SARL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, R12
+ MOVB R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
four_bytes_remain_match_nolit_encodeBetterBlockAsm4MB:
- TESTL R11, R11
+ TESTL R12, R12
JZ match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
- XORL BX, BX
- LEAL -1(BX)(R11*4), R11
- MOVB R11, (AX)
- MOVL DI, 1(AX)
- ADDQ $0x05, AX
+ XORL SI, SI
+ LEAL -1(SI)(R12*4), R12
+ MOVB R12, (CX)
+ MOVL R8, 1(CX)
+ ADDQ $0x05, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
two_byte_offset_match_nolit_encodeBetterBlockAsm4MB:
- CMPL R11, $0x40
+ CMPL R12, $0x40
JBE two_byte_offset_short_match_nolit_encodeBetterBlockAsm4MB
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JAE long_offset_short_match_nolit_encodeBetterBlockAsm4MB
- MOVL $0x00000001, BX
- LEAL 16(BX), BX
- MOVB DI, 1(AX)
- SHRL $0x08, DI
- SHLL $0x05, DI
- ORL DI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
- SUBL $0x08, R11
+ MOVL $0x00000001, SI
+ LEAL 16(SI), SI
+ MOVB R8, 1(CX)
+ SHRL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
+ SUBL $0x08, R12
// emitRepeat
- LEAL -4(R11), R11
+ LEAL -4(R12), R12
JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b
- MOVL R11, BX
- LEAL -4(R11), R11
- CMPL BX, $0x08
+ MOVL R12, SI
+ LEAL -4(R12), R12
+ CMPL SI, $0x08
JBE repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b
- CMPL BX, $0x0c
+ CMPL SI, $0x0c
JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JB repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b
cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b:
- CMPL R11, $0x00000104
+ CMPL R12, $0x00000104
JB repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b
- CMPL R11, $0x00010100
+ CMPL R12, $0x00010100
JB repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b
- LEAL -65536(R11), R11
- MOVL R11, DI
- MOVW $0x001d, (AX)
- MOVW R11, 2(AX)
- SARL $0x10, DI
- MOVB DI, 4(AX)
- ADDQ $0x05, AX
+ LEAL -65536(R12), R12
+ MOVL R12, R8
+ MOVW $0x001d, (CX)
+ MOVW R12, 2(CX)
+ SARL $0x10, R8
+ MOVB R8, 4(CX)
+ ADDQ $0x05, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b:
- LEAL -256(R11), R11
- MOVW $0x0019, (AX)
- MOVW R11, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R12), R12
+ MOVW $0x0019, (CX)
+ MOVW R12, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b:
- LEAL -4(R11), R11
- MOVW $0x0015, (AX)
- MOVB R11, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R12), R12
+ MOVW $0x0015, (CX)
+ MOVB R12, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b:
- SHLL $0x02, R11
- ORL $0x01, R11
- MOVW R11, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R12
+ ORL $0x01, R12
+ MOVW R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b:
- XORQ BX, BX
- LEAL 1(BX)(R11*4), R11
- MOVB DI, 1(AX)
- SARL $0x08, DI
- SHLL $0x05, DI
- ORL DI, R11
- MOVB R11, (AX)
- ADDQ $0x02, AX
+ XORQ SI, SI
+ LEAL 1(SI)(R12*4), R12
+ MOVB R8, 1(CX)
+ SARL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, R12
+ MOVB R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
long_offset_short_match_nolit_encodeBetterBlockAsm4MB:
- MOVB $0xee, (AX)
- MOVW DI, 1(AX)
- LEAL -60(R11), R11
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW R8, 1(CX)
+ LEAL -60(R12), R12
+ ADDQ $0x03, CX
// emitRepeat
- MOVL R11, BX
- LEAL -4(R11), R11
- CMPL BX, $0x08
+ MOVL R12, SI
+ LEAL -4(R12), R12
+ CMPL SI, $0x08
JBE repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short
- CMPL BX, $0x0c
+ CMPL SI, $0x0c
JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JB repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short
cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short:
- CMPL R11, $0x00000104
+ CMPL R12, $0x00000104
JB repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short
- CMPL R11, $0x00010100
+ CMPL R12, $0x00010100
JB repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short
- LEAL -65536(R11), R11
- MOVL R11, DI
- MOVW $0x001d, (AX)
- MOVW R11, 2(AX)
- SARL $0x10, DI
- MOVB DI, 4(AX)
- ADDQ $0x05, AX
+ LEAL -65536(R12), R12
+ MOVL R12, R8
+ MOVW $0x001d, (CX)
+ MOVW R12, 2(CX)
+ SARL $0x10, R8
+ MOVB R8, 4(CX)
+ ADDQ $0x05, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short:
- LEAL -256(R11), R11
- MOVW $0x0019, (AX)
- MOVW R11, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R12), R12
+ MOVW $0x0019, (CX)
+ MOVW R12, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short:
- LEAL -4(R11), R11
- MOVW $0x0015, (AX)
- MOVB R11, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R12), R12
+ MOVW $0x0015, (CX)
+ MOVB R12, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short:
- SHLL $0x02, R11
- ORL $0x01, R11
- MOVW R11, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R12
+ ORL $0x01, R12
+ MOVW R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short:
- XORQ BX, BX
- LEAL 1(BX)(R11*4), R11
- MOVB DI, 1(AX)
- SARL $0x08, DI
- SHLL $0x05, DI
- ORL DI, R11
- MOVB R11, (AX)
- ADDQ $0x02, AX
+ XORQ SI, SI
+ LEAL 1(SI)(R12*4), R12
+ MOVB R8, 1(CX)
+ SARL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, R12
+ MOVB R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
two_byte_offset_short_match_nolit_encodeBetterBlockAsm4MB:
- MOVL R11, BX
- SHLL $0x02, BX
- CMPL R11, $0x0c
+ MOVL R12, SI
+ SHLL $0x02, SI
+ CMPL R12, $0x0c
JAE emit_copy_three_match_nolit_encodeBetterBlockAsm4MB
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JAE emit_copy_three_match_nolit_encodeBetterBlockAsm4MB
- LEAL -15(BX), BX
- MOVB DI, 1(AX)
- SHRL $0x08, DI
- SHLL $0x05, DI
- ORL DI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ LEAL -15(SI), SI
+ MOVB R8, 1(CX)
+ SHRL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
emit_copy_three_match_nolit_encodeBetterBlockAsm4MB:
- LEAL -2(BX), BX
- MOVB BL, (AX)
- MOVW DI, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(SI), SI
+ MOVB SI, (CX)
+ MOVW R8, 1(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
match_is_repeat_encodeBetterBlockAsm4MB:
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm4MB
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R9
- SUBL BX, R8
- LEAL -1(R8), BX
- CMPL BX, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R10
+ SUBL SI, R9
+ LEAL -1(R9), SI
+ CMPL SI, $0x3c
JB one_byte_match_emit_repeat_encodeBetterBlockAsm4MB
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_match_emit_repeat_encodeBetterBlockAsm4MB
- CMPL BX, $0x00010000
+ CMPL SI, $0x00010000
JB three_bytes_match_emit_repeat_encodeBetterBlockAsm4MB
- MOVL BX, R10
- SHRL $0x10, R10
- MOVB $0xf8, (AX)
- MOVW BX, 1(AX)
- MOVB R10, 3(AX)
- ADDQ $0x04, AX
+ MOVL SI, R11
+ SHRL $0x10, R11
+ MOVB $0xf8, (CX)
+ MOVW SI, 1(CX)
+ MOVB R11, 3(CX)
+ ADDQ $0x04, CX
JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB
three_bytes_match_emit_repeat_encodeBetterBlockAsm4MB:
- MOVB $0xf4, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB
two_bytes_match_emit_repeat_encodeBetterBlockAsm4MB:
- MOVB $0xf0, (AX)
- MOVB BL, 1(AX)
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_match_emit_repeat_encodeBetterBlockAsm4MB
JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB
one_byte_match_emit_repeat_encodeBetterBlockAsm4MB:
- SHLB $0x02, BL
- MOVB BL, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
memmove_match_emit_repeat_encodeBetterBlockAsm4MB:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveShort
- CMPQ R8, $0x04
+ CMPQ R9, $0x04
JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4through7
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_17through32
JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_33through64
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4:
- MOVL (R9), R10
- MOVL R10, (AX)
+ MOVL (R10), R11
+ MOVL R11, (CX)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4through7:
- MOVL (R9), R10
- MOVL -4(R9)(R8*1), R9
- MOVL R10, (AX)
- MOVL R9, -4(AX)(R8*1)
+ MOVL (R10), R11
+ MOVL -4(R10)(R9*1), R10
+ MOVL R11, (CX)
+ MOVL R10, -4(CX)(R9*1)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_8through16:
- MOVQ (R9), R10
- MOVQ -8(R9)(R8*1), R9
- MOVQ R10, (AX)
- MOVQ R9, -8(AX)(R8*1)
+ MOVQ (R10), R11
+ MOVQ -8(R10)(R9*1), R10
+ MOVQ R11, (CX)
+ MOVQ R10, -8(CX)(R9*1)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_17through32:
- MOVOU (R9), X0
- MOVOU -16(R9)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU -16(R10)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_33through64:
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB:
- MOVQ BX, AX
+ MOVQ SI, CX
JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm4MB
memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveLong
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVQ R8, R12
- SHRQ $0x05, R12
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R13
- SUBQ R10, R13
- DECQ R12
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVQ R9, R13
+ SHRQ $0x05, R13
+ MOVQ CX, R11
+ ANDL $0x0000001f, R11
+ MOVQ $0x00000040, R14
+ SUBQ R11, R14
+ DECQ R13
JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32
- LEAQ -32(R9)(R13*1), R10
- LEAQ -32(AX)(R13*1), R14
+ LEAQ -32(R10)(R14*1), R11
+ LEAQ -32(CX)(R14*1), R15
emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R14)
- MOVOA X5, 16(R14)
+ MOVOU (R11), X4
+ MOVOU 16(R11), X5
+ MOVOA X4, (R15)
+ MOVOA X5, 16(R15)
+ ADDQ $0x20, R15
+ ADDQ $0x20, R11
ADDQ $0x20, R14
- ADDQ $0x20, R10
- ADDQ $0x20, R13
- DECQ R12
+ DECQ R13
JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_big_loop_back
emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32:
- MOVOU -32(R9)(R13*1), X4
- MOVOU -16(R9)(R13*1), X5
- MOVOA X4, -32(AX)(R13*1)
- MOVOA X5, -16(AX)(R13*1)
- ADDQ $0x20, R13
- CMPQ R8, R13
+ MOVOU -32(R10)(R14*1), X4
+ MOVOU -16(R10)(R14*1), X5
+ MOVOA X4, -32(CX)(R14*1)
+ MOVOA X5, -16(CX)(R14*1)
+ ADDQ $0x20, R14
+ CMPQ R9, R14
JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ BX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ SI, CX
emit_literal_done_match_emit_repeat_encodeBetterBlockAsm4MB:
- ADDL R11, CX
- ADDL $0x04, R11
- MOVL CX, 12(SP)
+ ADDL R12, DX
+ ADDL $0x04, R12
+ MOVL DX, 12(SP)
// emitRepeat
- MOVL R11, BX
- LEAL -4(R11), R11
- CMPL BX, $0x08
+ MOVL R12, SI
+ LEAL -4(R12), R12
+ CMPL SI, $0x08
JBE repeat_two_match_nolit_repeat_encodeBetterBlockAsm4MB
- CMPL BX, $0x0c
+ CMPL SI, $0x0c
JAE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JB repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB
cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB:
- CMPL R11, $0x00000104
+ CMPL R12, $0x00000104
JB repeat_three_match_nolit_repeat_encodeBetterBlockAsm4MB
- CMPL R11, $0x00010100
+ CMPL R12, $0x00010100
JB repeat_four_match_nolit_repeat_encodeBetterBlockAsm4MB
- LEAL -65536(R11), R11
- MOVL R11, DI
- MOVW $0x001d, (AX)
- MOVW R11, 2(AX)
- SARL $0x10, DI
- MOVB DI, 4(AX)
- ADDQ $0x05, AX
+ LEAL -65536(R12), R12
+ MOVL R12, R8
+ MOVW $0x001d, (CX)
+ MOVW R12, 2(CX)
+ SARL $0x10, R8
+ MOVB R8, 4(CX)
+ ADDQ $0x05, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
repeat_four_match_nolit_repeat_encodeBetterBlockAsm4MB:
- LEAL -256(R11), R11
- MOVW $0x0019, (AX)
- MOVW R11, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R12), R12
+ MOVW $0x0019, (CX)
+ MOVW R12, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
repeat_three_match_nolit_repeat_encodeBetterBlockAsm4MB:
- LEAL -4(R11), R11
- MOVW $0x0015, (AX)
- MOVB R11, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R12), R12
+ MOVW $0x0015, (CX)
+ MOVB R12, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
repeat_two_match_nolit_repeat_encodeBetterBlockAsm4MB:
- SHLL $0x02, R11
- ORL $0x01, R11
- MOVW R11, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R12
+ ORL $0x01, R12
+ MOVW R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB:
- XORQ BX, BX
- LEAL 1(BX)(R11*4), R11
- MOVB DI, 1(AX)
- SARL $0x08, DI
- SHLL $0x05, DI
- ORL DI, R11
- MOVB R11, (AX)
- ADDQ $0x02, AX
+ XORQ SI, SI
+ LEAL 1(SI)(R12*4), R12
+ MOVB R8, 1(CX)
+ SARL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, R12
+ MOVB R12, (CX)
+ ADDQ $0x02, CX
match_nolit_emitcopy_end_encodeBetterBlockAsm4MB:
- CMPL CX, 8(SP)
+ CMPL DX, 8(SP)
JAE emit_remainder_encodeBetterBlockAsm4MB
- CMPQ AX, (SP)
+ CMPQ CX, (SP)
JB match_nolit_dst_ok_encodeBetterBlockAsm4MB
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_nolit_dst_ok_encodeBetterBlockAsm4MB:
- MOVQ $0x00cf1bbcdcbfa563, BX
- MOVQ $0x9e3779b1, DI
- LEAQ 1(SI), SI
- LEAQ -2(CX), R8
- MOVQ (DX)(SI*1), R9
- MOVQ 1(DX)(SI*1), R10
- MOVQ (DX)(R8*1), R11
- MOVQ 1(DX)(R8*1), R12
- SHLQ $0x08, R9
- IMULQ BX, R9
- SHRQ $0x2f, R9
- SHLQ $0x20, R10
- IMULQ DI, R10
- SHRQ $0x32, R10
- SHLQ $0x08, R11
- IMULQ BX, R11
- SHRQ $0x2f, R11
- SHLQ $0x20, R12
- IMULQ DI, R12
- SHRQ $0x32, R12
- LEAQ 1(SI), DI
- LEAQ 1(R8), R13
- MOVL SI, 24(SP)(R9*4)
- MOVL R8, 24(SP)(R11*4)
- MOVL DI, 524312(SP)(R10*4)
- MOVL R13, 524312(SP)(R12*4)
- LEAQ 1(R8)(SI*1), DI
- SHRQ $0x01, DI
- ADDQ $0x01, SI
- SUBQ $0x01, R8
+ MOVQ $0x00cf1bbcdcbfa563, SI
+ MOVQ $0x9e3779b1, R8
+ LEAQ 1(DI), DI
+ LEAQ -2(DX), R9
+ MOVQ (BX)(DI*1), R10
+ MOVQ 1(BX)(DI*1), R11
+ MOVQ (BX)(R9*1), R12
+ MOVQ 1(BX)(R9*1), R13
+ SHLQ $0x08, R10
+ IMULQ SI, R10
+ SHRQ $0x2f, R10
+ SHLQ $0x20, R11
+ IMULQ R8, R11
+ SHRQ $0x32, R11
+ SHLQ $0x08, R12
+ IMULQ SI, R12
+ SHRQ $0x2f, R12
+ SHLQ $0x20, R13
+ IMULQ R8, R13
+ SHRQ $0x32, R13
+ LEAQ 1(DI), R8
+ LEAQ 1(R9), R14
+ MOVL DI, (AX)(R10*4)
+ MOVL R9, (AX)(R12*4)
+ MOVL R8, 524288(AX)(R11*4)
+ MOVL R14, 524288(AX)(R13*4)
+ LEAQ 1(R9)(DI*1), R8
+ SHRQ $0x01, R8
+ ADDQ $0x01, DI
+ SUBQ $0x01, R9
index_loop_encodeBetterBlockAsm4MB:
- CMPQ DI, R8
+ CMPQ R8, R9
JAE search_loop_encodeBetterBlockAsm4MB
- MOVQ (DX)(SI*1), R9
- MOVQ (DX)(DI*1), R10
- SHLQ $0x08, R9
- IMULQ BX, R9
- SHRQ $0x2f, R9
+ MOVQ (BX)(DI*1), R10
+ MOVQ (BX)(R8*1), R11
SHLQ $0x08, R10
- IMULQ BX, R10
+ IMULQ SI, R10
SHRQ $0x2f, R10
- MOVL SI, 24(SP)(R9*4)
- MOVL DI, 24(SP)(R10*4)
- ADDQ $0x02, SI
+ SHLQ $0x08, R11
+ IMULQ SI, R11
+ SHRQ $0x2f, R11
+ MOVL DI, (AX)(R10*4)
+ MOVL R8, (AX)(R11*4)
ADDQ $0x02, DI
+ ADDQ $0x02, R8
JMP index_loop_encodeBetterBlockAsm4MB
emit_remainder_encodeBetterBlockAsm4MB:
- MOVQ src_len+32(FP), CX
- SUBL 12(SP), CX
- LEAQ 4(AX)(CX*1), CX
- CMPQ CX, (SP)
+ MOVQ src_len+32(FP), AX
+ SUBL 12(SP), AX
+ LEAQ 4(CX)(AX*1), AX
+ CMPQ AX, (SP)
JB emit_remainder_ok_encodeBetterBlockAsm4MB
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
emit_remainder_ok_encodeBetterBlockAsm4MB:
- MOVQ src_len+32(FP), CX
- MOVL 12(SP), BX
- CMPL BX, CX
+ MOVQ src_len+32(FP), AX
+ MOVL 12(SP), DX
+ CMPL DX, AX
JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm4MB
- MOVL CX, SI
- MOVL CX, 12(SP)
- LEAQ (DX)(BX*1), CX
- SUBL BX, SI
+ MOVL AX, SI
+ MOVL AX, 12(SP)
+ LEAQ (BX)(DX*1), AX
+ SUBL DX, SI
LEAL -1(SI), DX
CMPL DX, $0x3c
JB one_byte_emit_remainder_encodeBetterBlockAsm4MB
@@ -7968,33 +7975,33 @@ emit_remainder_ok_encodeBetterBlockAsm4MB:
JB three_bytes_emit_remainder_encodeBetterBlockAsm4MB
MOVL DX, BX
SHRL $0x10, BX
- MOVB $0xf8, (AX)
- MOVW DX, 1(AX)
- MOVB BL, 3(AX)
- ADDQ $0x04, AX
+ MOVB $0xf8, (CX)
+ MOVW DX, 1(CX)
+ MOVB BL, 3(CX)
+ ADDQ $0x04, CX
JMP memmove_long_emit_remainder_encodeBetterBlockAsm4MB
three_bytes_emit_remainder_encodeBetterBlockAsm4MB:
- MOVB $0xf4, (AX)
- MOVW DX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW DX, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_emit_remainder_encodeBetterBlockAsm4MB
two_bytes_emit_remainder_encodeBetterBlockAsm4MB:
- MOVB $0xf0, (AX)
- MOVB DL, 1(AX)
- ADDQ $0x02, AX
+ MOVB $0xf0, (CX)
+ MOVB DL, 1(CX)
+ ADDQ $0x02, CX
CMPL DX, $0x40
JB memmove_emit_remainder_encodeBetterBlockAsm4MB
JMP memmove_long_emit_remainder_encodeBetterBlockAsm4MB
one_byte_emit_remainder_encodeBetterBlockAsm4MB:
SHLB $0x02, DL
- MOVB DL, (AX)
- ADDQ $0x01, AX
+ MOVB DL, (CX)
+ ADDQ $0x01, CX
memmove_emit_remainder_encodeBetterBlockAsm4MB:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveShort
@@ -8010,73 +8017,73 @@ memmove_emit_remainder_encodeBetterBlockAsm4MB:
JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_33through64
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_1or2:
- MOVB (CX), SI
- MOVB -1(CX)(BX*1), CL
- MOVB SI, (AX)
- MOVB CL, -1(AX)(BX*1)
+ MOVB (AX), SI
+ MOVB -1(AX)(BX*1), AL
+ MOVB SI, (CX)
+ MOVB AL, -1(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_3:
- MOVW (CX), SI
- MOVB 2(CX), CL
- MOVW SI, (AX)
- MOVB CL, 2(AX)
+ MOVW (AX), SI
+ MOVB 2(AX), AL
+ MOVW SI, (CX)
+ MOVB AL, 2(CX)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_4through7:
- MOVL (CX), SI
- MOVL -4(CX)(BX*1), CX
- MOVL SI, (AX)
- MOVL CX, -4(AX)(BX*1)
+ MOVL (AX), SI
+ MOVL -4(AX)(BX*1), AX
+ MOVL SI, (CX)
+ MOVL AX, -4(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_8through16:
- MOVQ (CX), SI
- MOVQ -8(CX)(BX*1), CX
- MOVQ SI, (AX)
- MOVQ CX, -8(AX)(BX*1)
+ MOVQ (AX), SI
+ MOVQ -8(AX)(BX*1), AX
+ MOVQ SI, (CX)
+ MOVQ AX, -8(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_17through32:
- MOVOU (CX), X0
- MOVOU -16(CX)(BX*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU -16(AX)(BX*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_33through64:
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB:
- MOVQ DX, AX
+ MOVQ DX, CX
JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm4MB
memmove_long_emit_remainder_encodeBetterBlockAsm4MB:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveLong
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
MOVQ BX, DI
SHRQ $0x05, DI
- MOVQ AX, SI
+ MOVQ CX, SI
ANDL $0x0000001f, SI
MOVQ $0x00000040, R8
SUBQ SI, R8
DECQ DI
JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32
- LEAQ -32(CX)(R8*1), SI
- LEAQ -32(AX)(R8*1), R9
+ LEAQ -32(AX)(R8*1), SI
+ LEAQ -32(CX)(R8*1), R9
emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_big_loop_back:
MOVOU (SI), X4
@@ -8090,756 +8097,757 @@ emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_big_loop_back:
JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_big_loop_back
emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32:
- MOVOU -32(CX)(R8*1), X4
- MOVOU -16(CX)(R8*1), X5
- MOVOA X4, -32(AX)(R8*1)
- MOVOA X5, -16(AX)(R8*1)
+ MOVOU -32(AX)(R8*1), X4
+ MOVOU -16(AX)(R8*1), X5
+ MOVOA X4, -32(CX)(R8*1)
+ MOVOA X5, -16(CX)(R8*1)
ADDQ $0x20, R8
CMPQ BX, R8
JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
- MOVQ DX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
+ MOVQ DX, CX
emit_literal_done_emit_remainder_encodeBetterBlockAsm4MB:
- MOVQ dst_base+0(FP), CX
- SUBQ CX, AX
- MOVQ AX, ret+48(FP)
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, CX
+ MOVQ CX, ret+56(FP)
RET
-// func encodeBetterBlockAsm12B(dst []byte, src []byte) int
+// func encodeBetterBlockAsm12B(dst []byte, src []byte, tmp *[81920]byte) int
// Requires: BMI, SSE2
-TEXT ·encodeBetterBlockAsm12B(SB), $81944-56
- MOVQ dst_base+0(FP), AX
- MOVQ $0x00000280, CX
- LEAQ 24(SP), DX
+TEXT ·encodeBetterBlockAsm12B(SB), $24-64
+ MOVQ tmp+48(FP), AX
+ MOVQ dst_base+0(FP), CX
+ MOVQ $0x00000280, DX
+ MOVQ AX, BX
PXOR X0, X0
zero_loop_encodeBetterBlockAsm12B:
- MOVOU X0, (DX)
- MOVOU X0, 16(DX)
- MOVOU X0, 32(DX)
- MOVOU X0, 48(DX)
- MOVOU X0, 64(DX)
- MOVOU X0, 80(DX)
- MOVOU X0, 96(DX)
- MOVOU X0, 112(DX)
- ADDQ $0x80, DX
- DECQ CX
+ MOVOU X0, (BX)
+ MOVOU X0, 16(BX)
+ MOVOU X0, 32(BX)
+ MOVOU X0, 48(BX)
+ MOVOU X0, 64(BX)
+ MOVOU X0, 80(BX)
+ MOVOU X0, 96(BX)
+ MOVOU X0, 112(BX)
+ ADDQ $0x80, BX
+ DECQ DX
JNZ zero_loop_encodeBetterBlockAsm12B
MOVL $0x00000000, 12(SP)
- MOVQ src_len+32(FP), CX
- LEAQ -6(CX), DX
- LEAQ -8(CX), BX
- MOVL BX, 8(SP)
- SHRQ $0x05, CX
- SUBL CX, DX
- LEAQ (AX)(DX*1), DX
- MOVQ DX, (SP)
- MOVL $0x00000001, CX
+ MOVQ src_len+32(FP), DX
+ LEAQ -6(DX), BX
+ LEAQ -8(DX), SI
+ MOVL SI, 8(SP)
+ SHRQ $0x05, DX
+ SUBL DX, BX
+ LEAQ (CX)(BX*1), BX
+ MOVQ BX, (SP)
+ MOVL $0x00000001, DX
MOVL $0x00000000, 16(SP)
- MOVQ src_base+24(FP), DX
+ MOVQ src_base+24(FP), BX
search_loop_encodeBetterBlockAsm12B:
- MOVL CX, BX
- SUBL 12(SP), BX
- SHRL $0x06, BX
- LEAL 1(CX)(BX*1), BX
- CMPL BX, 8(SP)
+ MOVL DX, SI
+ SUBL 12(SP), SI
+ SHRL $0x06, SI
+ LEAL 1(DX)(SI*1), SI
+ CMPL SI, 8(SP)
JAE emit_remainder_encodeBetterBlockAsm12B
- MOVQ (DX)(CX*1), SI
- MOVL BX, 20(SP)
- MOVQ $0x0000cf1bbcdcbf9b, R8
- MOVQ $0x9e3779b1, BX
- MOVQ SI, R9
- MOVQ SI, R10
- SHLQ $0x10, R9
- IMULQ R8, R9
- SHRQ $0x32, R9
- SHLQ $0x20, R10
- IMULQ BX, R10
- SHRQ $0x34, R10
- MOVL 24(SP)(R9*4), BX
- MOVL 65560(SP)(R10*4), DI
- MOVL CX, 24(SP)(R9*4)
- MOVL CX, 65560(SP)(R10*4)
- MOVQ (DX)(BX*1), R9
- MOVQ (DX)(DI*1), R10
- CMPQ R9, SI
+ MOVQ (BX)(DX*1), DI
+ MOVL SI, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R9
+ MOVQ $0x9e3779b1, SI
+ MOVQ DI, R10
+ MOVQ DI, R11
+ SHLQ $0x10, R10
+ IMULQ R9, R10
+ SHRQ $0x32, R10
+ SHLQ $0x20, R11
+ IMULQ SI, R11
+ SHRQ $0x34, R11
+ MOVL (AX)(R10*4), SI
+ MOVL 65536(AX)(R11*4), R8
+ MOVL DX, (AX)(R10*4)
+ MOVL DX, 65536(AX)(R11*4)
+ MOVQ (BX)(SI*1), R10
+ MOVQ (BX)(R8*1), R11
+ CMPQ R10, DI
JEQ candidate_match_encodeBetterBlockAsm12B
- CMPQ R10, SI
+ CMPQ R11, DI
JNE no_short_found_encodeBetterBlockAsm12B
- MOVL DI, BX
+ MOVL R8, SI
JMP candidate_match_encodeBetterBlockAsm12B
no_short_found_encodeBetterBlockAsm12B:
- CMPL R9, SI
+ CMPL R10, DI
JEQ candidate_match_encodeBetterBlockAsm12B
- CMPL R10, SI
+ CMPL R11, DI
JEQ candidateS_match_encodeBetterBlockAsm12B
- MOVL 20(SP), CX
+ MOVL 20(SP), DX
JMP search_loop_encodeBetterBlockAsm12B
candidateS_match_encodeBetterBlockAsm12B:
- SHRQ $0x08, SI
- MOVQ SI, R9
- SHLQ $0x10, R9
- IMULQ R8, R9
- SHRQ $0x32, R9
- MOVL 24(SP)(R9*4), BX
- INCL CX
- MOVL CX, 24(SP)(R9*4)
- CMPL (DX)(BX*1), SI
+ SHRQ $0x08, DI
+ MOVQ DI, R10
+ SHLQ $0x10, R10
+ IMULQ R9, R10
+ SHRQ $0x32, R10
+ MOVL (AX)(R10*4), SI
+ INCL DX
+ MOVL DX, (AX)(R10*4)
+ CMPL (BX)(SI*1), DI
JEQ candidate_match_encodeBetterBlockAsm12B
- DECL CX
- MOVL DI, BX
+ DECL DX
+ MOVL R8, SI
candidate_match_encodeBetterBlockAsm12B:
- MOVL 12(SP), SI
- TESTL BX, BX
+ MOVL 12(SP), DI
+ TESTL SI, SI
JZ match_extend_back_end_encodeBetterBlockAsm12B
match_extend_back_loop_encodeBetterBlockAsm12B:
- CMPL CX, SI
+ CMPL DX, DI
JBE match_extend_back_end_encodeBetterBlockAsm12B
- MOVB -1(DX)(BX*1), DI
- MOVB -1(DX)(CX*1), R8
- CMPB DI, R8
+ MOVB -1(BX)(SI*1), R8
+ MOVB -1(BX)(DX*1), R9
+ CMPB R8, R9
JNE match_extend_back_end_encodeBetterBlockAsm12B
- LEAL -1(CX), CX
- DECL BX
+ LEAL -1(DX), DX
+ DECL SI
JZ match_extend_back_end_encodeBetterBlockAsm12B
JMP match_extend_back_loop_encodeBetterBlockAsm12B
match_extend_back_end_encodeBetterBlockAsm12B:
- MOVL CX, SI
- SUBL 12(SP), SI
- LEAQ 3(AX)(SI*1), SI
- CMPQ SI, (SP)
+ MOVL DX, DI
+ SUBL 12(SP), DI
+ LEAQ 3(CX)(DI*1), DI
+ CMPQ DI, (SP)
JB match_dst_size_check_encodeBetterBlockAsm12B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_dst_size_check_encodeBetterBlockAsm12B:
- MOVL CX, SI
- ADDL $0x04, CX
- ADDL $0x04, BX
- MOVQ src_len+32(FP), DI
- SUBL CX, DI
- LEAQ (DX)(CX*1), R8
- LEAQ (DX)(BX*1), R9
+ MOVL DX, DI
+ ADDL $0x04, DX
+ ADDL $0x04, SI
+ MOVQ src_len+32(FP), R8
+ SUBL DX, R8
+ LEAQ (BX)(DX*1), R9
+ LEAQ (BX)(SI*1), R10
// matchLen
- XORL R11, R11
+ XORL R12, R12
matchlen_loopback_16_match_nolit_encodeBetterBlockAsm12B:
- CMPL DI, $0x10
+ CMPL R8, $0x10
JB matchlen_match8_match_nolit_encodeBetterBlockAsm12B
- MOVQ (R8)(R11*1), R10
- MOVQ 8(R8)(R11*1), R12
- XORQ (R9)(R11*1), R10
+ MOVQ (R9)(R12*1), R11
+ MOVQ 8(R9)(R12*1), R13
+ XORQ (R10)(R12*1), R11
JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm12B
- XORQ 8(R9)(R11*1), R12
+ XORQ 8(R10)(R12*1), R13
JNZ matchlen_bsf_16match_nolit_encodeBetterBlockAsm12B
- LEAL -16(DI), DI
- LEAL 16(R11), R11
+ LEAL -16(R8), R8
+ LEAL 16(R12), R12
JMP matchlen_loopback_16_match_nolit_encodeBetterBlockAsm12B
matchlen_bsf_16match_nolit_encodeBetterBlockAsm12B:
#ifdef GOAMD64_v3
- TZCNTQ R12, R12
+ TZCNTQ R13, R13
#else
- BSFQ R12, R12
+ BSFQ R13, R13
#endif
- SARQ $0x03, R12
- LEAL 8(R11)(R12*1), R11
+ SARQ $0x03, R13
+ LEAL 8(R12)(R13*1), R12
JMP match_nolit_end_encodeBetterBlockAsm12B
matchlen_match8_match_nolit_encodeBetterBlockAsm12B:
- CMPL DI, $0x08
+ CMPL R8, $0x08
JB matchlen_match4_match_nolit_encodeBetterBlockAsm12B
- MOVQ (R8)(R11*1), R10
- XORQ (R9)(R11*1), R10
+ MOVQ (R9)(R12*1), R11
+ XORQ (R10)(R12*1), R11
JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm12B
- LEAL -8(DI), DI
- LEAL 8(R11), R11
+ LEAL -8(R8), R8
+ LEAL 8(R12), R12
JMP matchlen_match4_match_nolit_encodeBetterBlockAsm12B
matchlen_bsf_8_match_nolit_encodeBetterBlockAsm12B:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL (R11)(R10*1), R11
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
JMP match_nolit_end_encodeBetterBlockAsm12B
matchlen_match4_match_nolit_encodeBetterBlockAsm12B:
- CMPL DI, $0x04
+ CMPL R8, $0x04
JB matchlen_match2_match_nolit_encodeBetterBlockAsm12B
- MOVL (R8)(R11*1), R10
- CMPL (R9)(R11*1), R10
+ MOVL (R9)(R12*1), R11
+ CMPL (R10)(R12*1), R11
JNE matchlen_match2_match_nolit_encodeBetterBlockAsm12B
- LEAL -4(DI), DI
- LEAL 4(R11), R11
+ LEAL -4(R8), R8
+ LEAL 4(R12), R12
matchlen_match2_match_nolit_encodeBetterBlockAsm12B:
- CMPL DI, $0x01
+ CMPL R8, $0x01
JE matchlen_match1_match_nolit_encodeBetterBlockAsm12B
JB match_nolit_end_encodeBetterBlockAsm12B
- MOVW (R8)(R11*1), R10
- CMPW (R9)(R11*1), R10
+ MOVW (R9)(R12*1), R11
+ CMPW (R10)(R12*1), R11
JNE matchlen_match1_match_nolit_encodeBetterBlockAsm12B
- LEAL 2(R11), R11
- SUBL $0x02, DI
+ LEAL 2(R12), R12
+ SUBL $0x02, R8
JZ match_nolit_end_encodeBetterBlockAsm12B
matchlen_match1_match_nolit_encodeBetterBlockAsm12B:
- MOVB (R8)(R11*1), R10
- CMPB (R9)(R11*1), R10
+ MOVB (R9)(R12*1), R11
+ CMPB (R10)(R12*1), R11
JNE match_nolit_end_encodeBetterBlockAsm12B
- LEAL 1(R11), R11
+ LEAL 1(R12), R12
match_nolit_end_encodeBetterBlockAsm12B:
- MOVL CX, DI
- SUBL BX, DI
+ MOVL DX, R8
+ SUBL SI, R8
// Check if repeat
- CMPL 16(SP), DI
+ CMPL 16(SP), R8
JEQ match_is_repeat_encodeBetterBlockAsm12B
- MOVL DI, 16(SP)
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL R8, 16(SP)
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_match_emit_encodeBetterBlockAsm12B
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R9
- SUBL BX, R8
- LEAL -1(R8), BX
- CMPL BX, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R10
+ SUBL SI, R9
+ LEAL -1(R9), SI
+ CMPL SI, $0x3c
JB one_byte_match_emit_encodeBetterBlockAsm12B
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_match_emit_encodeBetterBlockAsm12B
JB three_bytes_match_emit_encodeBetterBlockAsm12B
three_bytes_match_emit_encodeBetterBlockAsm12B:
- MOVB $0xf4, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_encodeBetterBlockAsm12B
two_bytes_match_emit_encodeBetterBlockAsm12B:
- MOVB $0xf0, (AX)
- MOVB BL, 1(AX)
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_match_emit_encodeBetterBlockAsm12B
JMP memmove_long_match_emit_encodeBetterBlockAsm12B
one_byte_match_emit_encodeBetterBlockAsm12B:
- SHLB $0x02, BL
- MOVB BL, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
memmove_match_emit_encodeBetterBlockAsm12B:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveShort
- CMPQ R8, $0x04
+ CMPQ R9, $0x04
JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JB emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4through7
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_33through64
emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4:
- MOVL (R9), R10
- MOVL R10, (AX)
+ MOVL (R10), R11
+ MOVL R11, (CX)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm12B
emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4through7:
- MOVL (R9), R10
- MOVL -4(R9)(R8*1), R9
- MOVL R10, (AX)
- MOVL R9, -4(AX)(R8*1)
+ MOVL (R10), R11
+ MOVL -4(R10)(R9*1), R10
+ MOVL R11, (CX)
+ MOVL R10, -4(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm12B
emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_8through16:
- MOVQ (R9), R10
- MOVQ -8(R9)(R8*1), R9
- MOVQ R10, (AX)
- MOVQ R9, -8(AX)(R8*1)
+ MOVQ (R10), R11
+ MOVQ -8(R10)(R9*1), R10
+ MOVQ R11, (CX)
+ MOVQ R10, -8(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm12B
emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_17through32:
- MOVOU (R9), X0
- MOVOU -16(R9)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU -16(R10)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm12B
emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_33through64:
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_match_emit_encodeBetterBlockAsm12B:
- MOVQ BX, AX
+ MOVQ SI, CX
JMP emit_literal_done_match_emit_encodeBetterBlockAsm12B
memmove_long_match_emit_encodeBetterBlockAsm12B:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveLong
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVQ R8, R12
- SHRQ $0x05, R12
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R13
- SUBQ R10, R13
- DECQ R12
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVQ R9, R13
+ SHRQ $0x05, R13
+ MOVQ CX, R11
+ ANDL $0x0000001f, R11
+ MOVQ $0x00000040, R14
+ SUBQ R11, R14
+ DECQ R13
JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_forward_sse_loop_32
- LEAQ -32(R9)(R13*1), R10
- LEAQ -32(AX)(R13*1), R14
+ LEAQ -32(R10)(R14*1), R11
+ LEAQ -32(CX)(R14*1), R15
emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R14)
- MOVOA X5, 16(R14)
+ MOVOU (R11), X4
+ MOVOU 16(R11), X5
+ MOVOA X4, (R15)
+ MOVOA X5, 16(R15)
+ ADDQ $0x20, R15
+ ADDQ $0x20, R11
ADDQ $0x20, R14
- ADDQ $0x20, R10
- ADDQ $0x20, R13
- DECQ R12
+ DECQ R13
JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_forward_sse_loop_32:
- MOVOU -32(R9)(R13*1), X4
- MOVOU -16(R9)(R13*1), X5
- MOVOA X4, -32(AX)(R13*1)
- MOVOA X5, -16(AX)(R13*1)
- ADDQ $0x20, R13
- CMPQ R8, R13
+ MOVOU -32(R10)(R14*1), X4
+ MOVOU -16(R10)(R14*1), X5
+ MOVOA X4, -32(CX)(R14*1)
+ MOVOA X5, -16(CX)(R14*1)
+ ADDQ $0x20, R14
+ CMPQ R9, R14
JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ BX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ SI, CX
emit_literal_done_match_emit_encodeBetterBlockAsm12B:
- ADDL R11, CX
- ADDL $0x04, R11
- MOVL CX, 12(SP)
+ ADDL R12, DX
+ ADDL $0x04, R12
+ MOVL DX, 12(SP)
// emitCopy
- CMPL R11, $0x40
+ CMPL R12, $0x40
JBE two_byte_offset_short_match_nolit_encodeBetterBlockAsm12B
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JAE long_offset_short_match_nolit_encodeBetterBlockAsm12B
- MOVL $0x00000001, BX
- LEAL 16(BX), BX
- MOVB DI, 1(AX)
- SHRL $0x08, DI
- SHLL $0x05, DI
- ORL DI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
- SUBL $0x08, R11
+ MOVL $0x00000001, SI
+ LEAL 16(SI), SI
+ MOVB R8, 1(CX)
+ SHRL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
+ SUBL $0x08, R12
// emitRepeat
- LEAL -4(R11), R11
+ LEAL -4(R12), R12
JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b
- MOVL R11, BX
- LEAL -4(R11), R11
- CMPL BX, $0x08
+ MOVL R12, SI
+ LEAL -4(R12), R12
+ CMPL SI, $0x08
JBE repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b
- CMPL BX, $0x0c
+ CMPL SI, $0x0c
JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JB repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b
cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b:
- CMPL R11, $0x00000104
+ CMPL R12, $0x00000104
JB repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b
- LEAL -256(R11), R11
- MOVW $0x0019, (AX)
- MOVW R11, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R12), R12
+ MOVW $0x0019, (CX)
+ MOVW R12, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b:
- LEAL -4(R11), R11
- MOVW $0x0015, (AX)
- MOVB R11, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R12), R12
+ MOVW $0x0015, (CX)
+ MOVB R12, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b:
- SHLL $0x02, R11
- ORL $0x01, R11
- MOVW R11, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R12
+ ORL $0x01, R12
+ MOVW R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b:
- XORQ BX, BX
- LEAL 1(BX)(R11*4), R11
- MOVB DI, 1(AX)
- SARL $0x08, DI
- SHLL $0x05, DI
- ORL DI, R11
- MOVB R11, (AX)
- ADDQ $0x02, AX
+ XORQ SI, SI
+ LEAL 1(SI)(R12*4), R12
+ MOVB R8, 1(CX)
+ SARL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, R12
+ MOVB R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
long_offset_short_match_nolit_encodeBetterBlockAsm12B:
- MOVB $0xee, (AX)
- MOVW DI, 1(AX)
- LEAL -60(R11), R11
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW R8, 1(CX)
+ LEAL -60(R12), R12
+ ADDQ $0x03, CX
// emitRepeat
- MOVL R11, BX
- LEAL -4(R11), R11
- CMPL BX, $0x08
+ MOVL R12, SI
+ LEAL -4(R12), R12
+ CMPL SI, $0x08
JBE repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short
- CMPL BX, $0x0c
+ CMPL SI, $0x0c
JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JB repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short
cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short:
- CMPL R11, $0x00000104
+ CMPL R12, $0x00000104
JB repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short
- LEAL -256(R11), R11
- MOVW $0x0019, (AX)
- MOVW R11, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R12), R12
+ MOVW $0x0019, (CX)
+ MOVW R12, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short:
- LEAL -4(R11), R11
- MOVW $0x0015, (AX)
- MOVB R11, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R12), R12
+ MOVW $0x0015, (CX)
+ MOVB R12, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short:
- SHLL $0x02, R11
- ORL $0x01, R11
- MOVW R11, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R12
+ ORL $0x01, R12
+ MOVW R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short:
- XORQ BX, BX
- LEAL 1(BX)(R11*4), R11
- MOVB DI, 1(AX)
- SARL $0x08, DI
- SHLL $0x05, DI
- ORL DI, R11
- MOVB R11, (AX)
- ADDQ $0x02, AX
+ XORQ SI, SI
+ LEAL 1(SI)(R12*4), R12
+ MOVB R8, 1(CX)
+ SARL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, R12
+ MOVB R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
two_byte_offset_short_match_nolit_encodeBetterBlockAsm12B:
- MOVL R11, BX
- SHLL $0x02, BX
- CMPL R11, $0x0c
+ MOVL R12, SI
+ SHLL $0x02, SI
+ CMPL R12, $0x0c
JAE emit_copy_three_match_nolit_encodeBetterBlockAsm12B
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JAE emit_copy_three_match_nolit_encodeBetterBlockAsm12B
- LEAL -15(BX), BX
- MOVB DI, 1(AX)
- SHRL $0x08, DI
- SHLL $0x05, DI
- ORL DI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ LEAL -15(SI), SI
+ MOVB R8, 1(CX)
+ SHRL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
emit_copy_three_match_nolit_encodeBetterBlockAsm12B:
- LEAL -2(BX), BX
- MOVB BL, (AX)
- MOVW DI, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(SI), SI
+ MOVB SI, (CX)
+ MOVW R8, 1(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
match_is_repeat_encodeBetterBlockAsm12B:
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm12B
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R9
- SUBL BX, R8
- LEAL -1(R8), BX
- CMPL BX, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R10
+ SUBL SI, R9
+ LEAL -1(R9), SI
+ CMPL SI, $0x3c
JB one_byte_match_emit_repeat_encodeBetterBlockAsm12B
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_match_emit_repeat_encodeBetterBlockAsm12B
JB three_bytes_match_emit_repeat_encodeBetterBlockAsm12B
three_bytes_match_emit_repeat_encodeBetterBlockAsm12B:
- MOVB $0xf4, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm12B
two_bytes_match_emit_repeat_encodeBetterBlockAsm12B:
- MOVB $0xf0, (AX)
- MOVB BL, 1(AX)
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_match_emit_repeat_encodeBetterBlockAsm12B
JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm12B
one_byte_match_emit_repeat_encodeBetterBlockAsm12B:
- SHLB $0x02, BL
- MOVB BL, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
memmove_match_emit_repeat_encodeBetterBlockAsm12B:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveShort
- CMPQ R8, $0x04
+ CMPQ R9, $0x04
JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4through7
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_17through32
JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_33through64
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4:
- MOVL (R9), R10
- MOVL R10, (AX)
+ MOVL (R10), R11
+ MOVL R11, (CX)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4through7:
- MOVL (R9), R10
- MOVL -4(R9)(R8*1), R9
- MOVL R10, (AX)
- MOVL R9, -4(AX)(R8*1)
+ MOVL (R10), R11
+ MOVL -4(R10)(R9*1), R10
+ MOVL R11, (CX)
+ MOVL R10, -4(CX)(R9*1)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_8through16:
- MOVQ (R9), R10
- MOVQ -8(R9)(R8*1), R9
- MOVQ R10, (AX)
- MOVQ R9, -8(AX)(R8*1)
+ MOVQ (R10), R11
+ MOVQ -8(R10)(R9*1), R10
+ MOVQ R11, (CX)
+ MOVQ R10, -8(CX)(R9*1)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_17through32:
- MOVOU (R9), X0
- MOVOU -16(R9)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU -16(R10)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_33through64:
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B:
- MOVQ BX, AX
+ MOVQ SI, CX
JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm12B
memmove_long_match_emit_repeat_encodeBetterBlockAsm12B:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveLong
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVQ R8, R12
- SHRQ $0x05, R12
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R13
- SUBQ R10, R13
- DECQ R12
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVQ R9, R13
+ SHRQ $0x05, R13
+ MOVQ CX, R11
+ ANDL $0x0000001f, R11
+ MOVQ $0x00000040, R14
+ SUBQ R11, R14
+ DECQ R13
JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_forward_sse_loop_32
- LEAQ -32(R9)(R13*1), R10
- LEAQ -32(AX)(R13*1), R14
+ LEAQ -32(R10)(R14*1), R11
+ LEAQ -32(CX)(R14*1), R15
emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R14)
- MOVOA X5, 16(R14)
+ MOVOU (R11), X4
+ MOVOU 16(R11), X5
+ MOVOA X4, (R15)
+ MOVOA X5, 16(R15)
+ ADDQ $0x20, R15
+ ADDQ $0x20, R11
ADDQ $0x20, R14
- ADDQ $0x20, R10
- ADDQ $0x20, R13
- DECQ R12
+ DECQ R13
JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_big_loop_back
emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_forward_sse_loop_32:
- MOVOU -32(R9)(R13*1), X4
- MOVOU -16(R9)(R13*1), X5
- MOVOA X4, -32(AX)(R13*1)
- MOVOA X5, -16(AX)(R13*1)
- ADDQ $0x20, R13
- CMPQ R8, R13
+ MOVOU -32(R10)(R14*1), X4
+ MOVOU -16(R10)(R14*1), X5
+ MOVOA X4, -32(CX)(R14*1)
+ MOVOA X5, -16(CX)(R14*1)
+ ADDQ $0x20, R14
+ CMPQ R9, R14
JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ BX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ SI, CX
emit_literal_done_match_emit_repeat_encodeBetterBlockAsm12B:
- ADDL R11, CX
- ADDL $0x04, R11
- MOVL CX, 12(SP)
+ ADDL R12, DX
+ ADDL $0x04, R12
+ MOVL DX, 12(SP)
// emitRepeat
- MOVL R11, BX
- LEAL -4(R11), R11
- CMPL BX, $0x08
+ MOVL R12, SI
+ LEAL -4(R12), R12
+ CMPL SI, $0x08
JBE repeat_two_match_nolit_repeat_encodeBetterBlockAsm12B
- CMPL BX, $0x0c
+ CMPL SI, $0x0c
JAE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JB repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B
cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B:
- CMPL R11, $0x00000104
+ CMPL R12, $0x00000104
JB repeat_three_match_nolit_repeat_encodeBetterBlockAsm12B
- LEAL -256(R11), R11
- MOVW $0x0019, (AX)
- MOVW R11, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R12), R12
+ MOVW $0x0019, (CX)
+ MOVW R12, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
repeat_three_match_nolit_repeat_encodeBetterBlockAsm12B:
- LEAL -4(R11), R11
- MOVW $0x0015, (AX)
- MOVB R11, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R12), R12
+ MOVW $0x0015, (CX)
+ MOVB R12, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
repeat_two_match_nolit_repeat_encodeBetterBlockAsm12B:
- SHLL $0x02, R11
- ORL $0x01, R11
- MOVW R11, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R12
+ ORL $0x01, R12
+ MOVW R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B:
- XORQ BX, BX
- LEAL 1(BX)(R11*4), R11
- MOVB DI, 1(AX)
- SARL $0x08, DI
- SHLL $0x05, DI
- ORL DI, R11
- MOVB R11, (AX)
- ADDQ $0x02, AX
+ XORQ SI, SI
+ LEAL 1(SI)(R12*4), R12
+ MOVB R8, 1(CX)
+ SARL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, R12
+ MOVB R12, (CX)
+ ADDQ $0x02, CX
match_nolit_emitcopy_end_encodeBetterBlockAsm12B:
- CMPL CX, 8(SP)
+ CMPL DX, 8(SP)
JAE emit_remainder_encodeBetterBlockAsm12B
- CMPQ AX, (SP)
+ CMPQ CX, (SP)
JB match_nolit_dst_ok_encodeBetterBlockAsm12B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_nolit_dst_ok_encodeBetterBlockAsm12B:
- MOVQ $0x0000cf1bbcdcbf9b, BX
- MOVQ $0x9e3779b1, DI
- LEAQ 1(SI), SI
- LEAQ -2(CX), R8
- MOVQ (DX)(SI*1), R9
- MOVQ 1(DX)(SI*1), R10
- MOVQ (DX)(R8*1), R11
- MOVQ 1(DX)(R8*1), R12
- SHLQ $0x10, R9
- IMULQ BX, R9
- SHRQ $0x32, R9
- SHLQ $0x20, R10
- IMULQ DI, R10
- SHRQ $0x34, R10
- SHLQ $0x10, R11
- IMULQ BX, R11
- SHRQ $0x32, R11
- SHLQ $0x20, R12
- IMULQ DI, R12
- SHRQ $0x34, R12
- LEAQ 1(SI), DI
- LEAQ 1(R8), R13
- MOVL SI, 24(SP)(R9*4)
- MOVL R8, 24(SP)(R11*4)
- MOVL DI, 65560(SP)(R10*4)
- MOVL R13, 65560(SP)(R12*4)
- LEAQ 1(R8)(SI*1), DI
- SHRQ $0x01, DI
- ADDQ $0x01, SI
- SUBQ $0x01, R8
+ MOVQ $0x0000cf1bbcdcbf9b, SI
+ MOVQ $0x9e3779b1, R8
+ LEAQ 1(DI), DI
+ LEAQ -2(DX), R9
+ MOVQ (BX)(DI*1), R10
+ MOVQ 1(BX)(DI*1), R11
+ MOVQ (BX)(R9*1), R12
+ MOVQ 1(BX)(R9*1), R13
+ SHLQ $0x10, R10
+ IMULQ SI, R10
+ SHRQ $0x32, R10
+ SHLQ $0x20, R11
+ IMULQ R8, R11
+ SHRQ $0x34, R11
+ SHLQ $0x10, R12
+ IMULQ SI, R12
+ SHRQ $0x32, R12
+ SHLQ $0x20, R13
+ IMULQ R8, R13
+ SHRQ $0x34, R13
+ LEAQ 1(DI), R8
+ LEAQ 1(R9), R14
+ MOVL DI, (AX)(R10*4)
+ MOVL R9, (AX)(R12*4)
+ MOVL R8, 65536(AX)(R11*4)
+ MOVL R14, 65536(AX)(R13*4)
+ LEAQ 1(R9)(DI*1), R8
+ SHRQ $0x01, R8
+ ADDQ $0x01, DI
+ SUBQ $0x01, R9
index_loop_encodeBetterBlockAsm12B:
- CMPQ DI, R8
+ CMPQ R8, R9
JAE search_loop_encodeBetterBlockAsm12B
- MOVQ (DX)(SI*1), R9
- MOVQ (DX)(DI*1), R10
- SHLQ $0x10, R9
- IMULQ BX, R9
- SHRQ $0x32, R9
+ MOVQ (BX)(DI*1), R10
+ MOVQ (BX)(R8*1), R11
SHLQ $0x10, R10
- IMULQ BX, R10
+ IMULQ SI, R10
SHRQ $0x32, R10
- MOVL SI, 24(SP)(R9*4)
- MOVL DI, 24(SP)(R10*4)
- ADDQ $0x02, SI
+ SHLQ $0x10, R11
+ IMULQ SI, R11
+ SHRQ $0x32, R11
+ MOVL DI, (AX)(R10*4)
+ MOVL R8, (AX)(R11*4)
ADDQ $0x02, DI
+ ADDQ $0x02, R8
JMP index_loop_encodeBetterBlockAsm12B
emit_remainder_encodeBetterBlockAsm12B:
- MOVQ src_len+32(FP), CX
- SUBL 12(SP), CX
- LEAQ 3(AX)(CX*1), CX
- CMPQ CX, (SP)
+ MOVQ src_len+32(FP), AX
+ SUBL 12(SP), AX
+ LEAQ 3(CX)(AX*1), AX
+ CMPQ AX, (SP)
JB emit_remainder_ok_encodeBetterBlockAsm12B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
emit_remainder_ok_encodeBetterBlockAsm12B:
- MOVQ src_len+32(FP), CX
- MOVL 12(SP), BX
- CMPL BX, CX
+ MOVQ src_len+32(FP), AX
+ MOVL 12(SP), DX
+ CMPL DX, AX
JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm12B
- MOVL CX, SI
- MOVL CX, 12(SP)
- LEAQ (DX)(BX*1), CX
- SUBL BX, SI
+ MOVL AX, SI
+ MOVL AX, 12(SP)
+ LEAQ (BX)(DX*1), AX
+ SUBL DX, SI
LEAL -1(SI), DX
CMPL DX, $0x3c
JB one_byte_emit_remainder_encodeBetterBlockAsm12B
@@ -8848,26 +8856,26 @@ emit_remainder_ok_encodeBetterBlockAsm12B:
JB three_bytes_emit_remainder_encodeBetterBlockAsm12B
three_bytes_emit_remainder_encodeBetterBlockAsm12B:
- MOVB $0xf4, (AX)
- MOVW DX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW DX, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_emit_remainder_encodeBetterBlockAsm12B
two_bytes_emit_remainder_encodeBetterBlockAsm12B:
- MOVB $0xf0, (AX)
- MOVB DL, 1(AX)
- ADDQ $0x02, AX
+ MOVB $0xf0, (CX)
+ MOVB DL, 1(CX)
+ ADDQ $0x02, CX
CMPL DX, $0x40
JB memmove_emit_remainder_encodeBetterBlockAsm12B
JMP memmove_long_emit_remainder_encodeBetterBlockAsm12B
one_byte_emit_remainder_encodeBetterBlockAsm12B:
SHLB $0x02, DL
- MOVB DL, (AX)
- ADDQ $0x01, AX
+ MOVB DL, (CX)
+ ADDQ $0x01, CX
memmove_emit_remainder_encodeBetterBlockAsm12B:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveShort
@@ -8883,73 +8891,73 @@ memmove_emit_remainder_encodeBetterBlockAsm12B:
JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_33through64
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_1or2:
- MOVB (CX), SI
- MOVB -1(CX)(BX*1), CL
- MOVB SI, (AX)
- MOVB CL, -1(AX)(BX*1)
+ MOVB (AX), SI
+ MOVB -1(AX)(BX*1), AL
+ MOVB SI, (CX)
+ MOVB AL, -1(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_3:
- MOVW (CX), SI
- MOVB 2(CX), CL
- MOVW SI, (AX)
- MOVB CL, 2(AX)
+ MOVW (AX), SI
+ MOVB 2(AX), AL
+ MOVW SI, (CX)
+ MOVB AL, 2(CX)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_4through7:
- MOVL (CX), SI
- MOVL -4(CX)(BX*1), CX
- MOVL SI, (AX)
- MOVL CX, -4(AX)(BX*1)
+ MOVL (AX), SI
+ MOVL -4(AX)(BX*1), AX
+ MOVL SI, (CX)
+ MOVL AX, -4(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_8through16:
- MOVQ (CX), SI
- MOVQ -8(CX)(BX*1), CX
- MOVQ SI, (AX)
- MOVQ CX, -8(AX)(BX*1)
+ MOVQ (AX), SI
+ MOVQ -8(AX)(BX*1), AX
+ MOVQ SI, (CX)
+ MOVQ AX, -8(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_17through32:
- MOVOU (CX), X0
- MOVOU -16(CX)(BX*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU -16(AX)(BX*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_33through64:
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B:
- MOVQ DX, AX
+ MOVQ DX, CX
JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm12B
memmove_long_emit_remainder_encodeBetterBlockAsm12B:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveLong
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
MOVQ BX, DI
SHRQ $0x05, DI
- MOVQ AX, SI
+ MOVQ CX, SI
ANDL $0x0000001f, SI
MOVQ $0x00000040, R8
SUBQ SI, R8
DECQ DI
JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_forward_sse_loop_32
- LEAQ -32(CX)(R8*1), SI
- LEAQ -32(AX)(R8*1), R9
+ LEAQ -32(AX)(R8*1), SI
+ LEAQ -32(CX)(R8*1), R9
emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_big_loop_back:
MOVOU (SI), X4
@@ -8963,756 +8971,757 @@ emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_big_loop_back:
JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_big_loop_back
emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_forward_sse_loop_32:
- MOVOU -32(CX)(R8*1), X4
- MOVOU -16(CX)(R8*1), X5
- MOVOA X4, -32(AX)(R8*1)
- MOVOA X5, -16(AX)(R8*1)
+ MOVOU -32(AX)(R8*1), X4
+ MOVOU -16(AX)(R8*1), X5
+ MOVOA X4, -32(CX)(R8*1)
+ MOVOA X5, -16(CX)(R8*1)
ADDQ $0x20, R8
CMPQ BX, R8
- JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
- MOVQ DX, AX
+ JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_forward_sse_loop_32
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
+ MOVQ DX, CX
emit_literal_done_emit_remainder_encodeBetterBlockAsm12B:
- MOVQ dst_base+0(FP), CX
- SUBQ CX, AX
- MOVQ AX, ret+48(FP)
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, CX
+ MOVQ CX, ret+56(FP)
RET
-// func encodeBetterBlockAsm10B(dst []byte, src []byte) int
+// func encodeBetterBlockAsm10B(dst []byte, src []byte, tmp *[20480]byte) int
// Requires: BMI, SSE2
-TEXT ·encodeBetterBlockAsm10B(SB), $20504-56
- MOVQ dst_base+0(FP), AX
- MOVQ $0x000000a0, CX
- LEAQ 24(SP), DX
+TEXT ·encodeBetterBlockAsm10B(SB), $24-64
+ MOVQ tmp+48(FP), AX
+ MOVQ dst_base+0(FP), CX
+ MOVQ $0x000000a0, DX
+ MOVQ AX, BX
PXOR X0, X0
zero_loop_encodeBetterBlockAsm10B:
- MOVOU X0, (DX)
- MOVOU X0, 16(DX)
- MOVOU X0, 32(DX)
- MOVOU X0, 48(DX)
- MOVOU X0, 64(DX)
- MOVOU X0, 80(DX)
- MOVOU X0, 96(DX)
- MOVOU X0, 112(DX)
- ADDQ $0x80, DX
- DECQ CX
+ MOVOU X0, (BX)
+ MOVOU X0, 16(BX)
+ MOVOU X0, 32(BX)
+ MOVOU X0, 48(BX)
+ MOVOU X0, 64(BX)
+ MOVOU X0, 80(BX)
+ MOVOU X0, 96(BX)
+ MOVOU X0, 112(BX)
+ ADDQ $0x80, BX
+ DECQ DX
JNZ zero_loop_encodeBetterBlockAsm10B
MOVL $0x00000000, 12(SP)
- MOVQ src_len+32(FP), CX
- LEAQ -6(CX), DX
- LEAQ -8(CX), BX
- MOVL BX, 8(SP)
- SHRQ $0x05, CX
- SUBL CX, DX
- LEAQ (AX)(DX*1), DX
- MOVQ DX, (SP)
- MOVL $0x00000001, CX
+ MOVQ src_len+32(FP), DX
+ LEAQ -6(DX), BX
+ LEAQ -8(DX), SI
+ MOVL SI, 8(SP)
+ SHRQ $0x05, DX
+ SUBL DX, BX
+ LEAQ (CX)(BX*1), BX
+ MOVQ BX, (SP)
+ MOVL $0x00000001, DX
MOVL $0x00000000, 16(SP)
- MOVQ src_base+24(FP), DX
+ MOVQ src_base+24(FP), BX
search_loop_encodeBetterBlockAsm10B:
- MOVL CX, BX
- SUBL 12(SP), BX
- SHRL $0x05, BX
- LEAL 1(CX)(BX*1), BX
- CMPL BX, 8(SP)
+ MOVL DX, SI
+ SUBL 12(SP), SI
+ SHRL $0x05, SI
+ LEAL 1(DX)(SI*1), SI
+ CMPL SI, 8(SP)
JAE emit_remainder_encodeBetterBlockAsm10B
- MOVQ (DX)(CX*1), SI
- MOVL BX, 20(SP)
- MOVQ $0x0000cf1bbcdcbf9b, R8
- MOVQ $0x9e3779b1, BX
- MOVQ SI, R9
- MOVQ SI, R10
- SHLQ $0x10, R9
- IMULQ R8, R9
- SHRQ $0x34, R9
- SHLQ $0x20, R10
- IMULQ BX, R10
- SHRQ $0x36, R10
- MOVL 24(SP)(R9*4), BX
- MOVL 16408(SP)(R10*4), DI
- MOVL CX, 24(SP)(R9*4)
- MOVL CX, 16408(SP)(R10*4)
- MOVQ (DX)(BX*1), R9
- MOVQ (DX)(DI*1), R10
- CMPQ R9, SI
+ MOVQ (BX)(DX*1), DI
+ MOVL SI, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R9
+ MOVQ $0x9e3779b1, SI
+ MOVQ DI, R10
+ MOVQ DI, R11
+ SHLQ $0x10, R10
+ IMULQ R9, R10
+ SHRQ $0x34, R10
+ SHLQ $0x20, R11
+ IMULQ SI, R11
+ SHRQ $0x36, R11
+ MOVL (AX)(R10*4), SI
+ MOVL 16384(AX)(R11*4), R8
+ MOVL DX, (AX)(R10*4)
+ MOVL DX, 16384(AX)(R11*4)
+ MOVQ (BX)(SI*1), R10
+ MOVQ (BX)(R8*1), R11
+ CMPQ R10, DI
JEQ candidate_match_encodeBetterBlockAsm10B
- CMPQ R10, SI
+ CMPQ R11, DI
JNE no_short_found_encodeBetterBlockAsm10B
- MOVL DI, BX
+ MOVL R8, SI
JMP candidate_match_encodeBetterBlockAsm10B
no_short_found_encodeBetterBlockAsm10B:
- CMPL R9, SI
+ CMPL R10, DI
JEQ candidate_match_encodeBetterBlockAsm10B
- CMPL R10, SI
+ CMPL R11, DI
JEQ candidateS_match_encodeBetterBlockAsm10B
- MOVL 20(SP), CX
+ MOVL 20(SP), DX
JMP search_loop_encodeBetterBlockAsm10B
candidateS_match_encodeBetterBlockAsm10B:
- SHRQ $0x08, SI
- MOVQ SI, R9
- SHLQ $0x10, R9
- IMULQ R8, R9
- SHRQ $0x34, R9
- MOVL 24(SP)(R9*4), BX
- INCL CX
- MOVL CX, 24(SP)(R9*4)
- CMPL (DX)(BX*1), SI
+ SHRQ $0x08, DI
+ MOVQ DI, R10
+ SHLQ $0x10, R10
+ IMULQ R9, R10
+ SHRQ $0x34, R10
+ MOVL (AX)(R10*4), SI
+ INCL DX
+ MOVL DX, (AX)(R10*4)
+ CMPL (BX)(SI*1), DI
JEQ candidate_match_encodeBetterBlockAsm10B
- DECL CX
- MOVL DI, BX
+ DECL DX
+ MOVL R8, SI
candidate_match_encodeBetterBlockAsm10B:
- MOVL 12(SP), SI
- TESTL BX, BX
+ MOVL 12(SP), DI
+ TESTL SI, SI
JZ match_extend_back_end_encodeBetterBlockAsm10B
match_extend_back_loop_encodeBetterBlockAsm10B:
- CMPL CX, SI
+ CMPL DX, DI
JBE match_extend_back_end_encodeBetterBlockAsm10B
- MOVB -1(DX)(BX*1), DI
- MOVB -1(DX)(CX*1), R8
- CMPB DI, R8
+ MOVB -1(BX)(SI*1), R8
+ MOVB -1(BX)(DX*1), R9
+ CMPB R8, R9
JNE match_extend_back_end_encodeBetterBlockAsm10B
- LEAL -1(CX), CX
- DECL BX
+ LEAL -1(DX), DX
+ DECL SI
JZ match_extend_back_end_encodeBetterBlockAsm10B
JMP match_extend_back_loop_encodeBetterBlockAsm10B
match_extend_back_end_encodeBetterBlockAsm10B:
- MOVL CX, SI
- SUBL 12(SP), SI
- LEAQ 3(AX)(SI*1), SI
- CMPQ SI, (SP)
+ MOVL DX, DI
+ SUBL 12(SP), DI
+ LEAQ 3(CX)(DI*1), DI
+ CMPQ DI, (SP)
JB match_dst_size_check_encodeBetterBlockAsm10B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_dst_size_check_encodeBetterBlockAsm10B:
- MOVL CX, SI
- ADDL $0x04, CX
- ADDL $0x04, BX
- MOVQ src_len+32(FP), DI
- SUBL CX, DI
- LEAQ (DX)(CX*1), R8
- LEAQ (DX)(BX*1), R9
+ MOVL DX, DI
+ ADDL $0x04, DX
+ ADDL $0x04, SI
+ MOVQ src_len+32(FP), R8
+ SUBL DX, R8
+ LEAQ (BX)(DX*1), R9
+ LEAQ (BX)(SI*1), R10
// matchLen
- XORL R11, R11
+ XORL R12, R12
matchlen_loopback_16_match_nolit_encodeBetterBlockAsm10B:
- CMPL DI, $0x10
+ CMPL R8, $0x10
JB matchlen_match8_match_nolit_encodeBetterBlockAsm10B
- MOVQ (R8)(R11*1), R10
- MOVQ 8(R8)(R11*1), R12
- XORQ (R9)(R11*1), R10
+ MOVQ (R9)(R12*1), R11
+ MOVQ 8(R9)(R12*1), R13
+ XORQ (R10)(R12*1), R11
JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm10B
- XORQ 8(R9)(R11*1), R12
+ XORQ 8(R10)(R12*1), R13
JNZ matchlen_bsf_16match_nolit_encodeBetterBlockAsm10B
- LEAL -16(DI), DI
- LEAL 16(R11), R11
+ LEAL -16(R8), R8
+ LEAL 16(R12), R12
JMP matchlen_loopback_16_match_nolit_encodeBetterBlockAsm10B
matchlen_bsf_16match_nolit_encodeBetterBlockAsm10B:
#ifdef GOAMD64_v3
- TZCNTQ R12, R12
+ TZCNTQ R13, R13
#else
- BSFQ R12, R12
+ BSFQ R13, R13
#endif
- SARQ $0x03, R12
- LEAL 8(R11)(R12*1), R11
+ SARQ $0x03, R13
+ LEAL 8(R12)(R13*1), R12
JMP match_nolit_end_encodeBetterBlockAsm10B
matchlen_match8_match_nolit_encodeBetterBlockAsm10B:
- CMPL DI, $0x08
+ CMPL R8, $0x08
JB matchlen_match4_match_nolit_encodeBetterBlockAsm10B
- MOVQ (R8)(R11*1), R10
- XORQ (R9)(R11*1), R10
+ MOVQ (R9)(R12*1), R11
+ XORQ (R10)(R12*1), R11
JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm10B
- LEAL -8(DI), DI
- LEAL 8(R11), R11
+ LEAL -8(R8), R8
+ LEAL 8(R12), R12
JMP matchlen_match4_match_nolit_encodeBetterBlockAsm10B
matchlen_bsf_8_match_nolit_encodeBetterBlockAsm10B:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL (R11)(R10*1), R11
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
JMP match_nolit_end_encodeBetterBlockAsm10B
matchlen_match4_match_nolit_encodeBetterBlockAsm10B:
- CMPL DI, $0x04
+ CMPL R8, $0x04
JB matchlen_match2_match_nolit_encodeBetterBlockAsm10B
- MOVL (R8)(R11*1), R10
- CMPL (R9)(R11*1), R10
+ MOVL (R9)(R12*1), R11
+ CMPL (R10)(R12*1), R11
JNE matchlen_match2_match_nolit_encodeBetterBlockAsm10B
- LEAL -4(DI), DI
- LEAL 4(R11), R11
+ LEAL -4(R8), R8
+ LEAL 4(R12), R12
matchlen_match2_match_nolit_encodeBetterBlockAsm10B:
- CMPL DI, $0x01
+ CMPL R8, $0x01
JE matchlen_match1_match_nolit_encodeBetterBlockAsm10B
JB match_nolit_end_encodeBetterBlockAsm10B
- MOVW (R8)(R11*1), R10
- CMPW (R9)(R11*1), R10
+ MOVW (R9)(R12*1), R11
+ CMPW (R10)(R12*1), R11
JNE matchlen_match1_match_nolit_encodeBetterBlockAsm10B
- LEAL 2(R11), R11
- SUBL $0x02, DI
+ LEAL 2(R12), R12
+ SUBL $0x02, R8
JZ match_nolit_end_encodeBetterBlockAsm10B
matchlen_match1_match_nolit_encodeBetterBlockAsm10B:
- MOVB (R8)(R11*1), R10
- CMPB (R9)(R11*1), R10
+ MOVB (R9)(R12*1), R11
+ CMPB (R10)(R12*1), R11
JNE match_nolit_end_encodeBetterBlockAsm10B
- LEAL 1(R11), R11
+ LEAL 1(R12), R12
match_nolit_end_encodeBetterBlockAsm10B:
- MOVL CX, DI
- SUBL BX, DI
+ MOVL DX, R8
+ SUBL SI, R8
// Check if repeat
- CMPL 16(SP), DI
+ CMPL 16(SP), R8
JEQ match_is_repeat_encodeBetterBlockAsm10B
- MOVL DI, 16(SP)
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL R8, 16(SP)
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_match_emit_encodeBetterBlockAsm10B
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R9
- SUBL BX, R8
- LEAL -1(R8), BX
- CMPL BX, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R10
+ SUBL SI, R9
+ LEAL -1(R9), SI
+ CMPL SI, $0x3c
JB one_byte_match_emit_encodeBetterBlockAsm10B
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_match_emit_encodeBetterBlockAsm10B
JB three_bytes_match_emit_encodeBetterBlockAsm10B
three_bytes_match_emit_encodeBetterBlockAsm10B:
- MOVB $0xf4, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_encodeBetterBlockAsm10B
two_bytes_match_emit_encodeBetterBlockAsm10B:
- MOVB $0xf0, (AX)
- MOVB BL, 1(AX)
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_match_emit_encodeBetterBlockAsm10B
JMP memmove_long_match_emit_encodeBetterBlockAsm10B
one_byte_match_emit_encodeBetterBlockAsm10B:
- SHLB $0x02, BL
- MOVB BL, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
memmove_match_emit_encodeBetterBlockAsm10B:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveShort
- CMPQ R8, $0x04
+ CMPQ R9, $0x04
JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JB emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4through7
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_33through64
emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4:
- MOVL (R9), R10
- MOVL R10, (AX)
+ MOVL (R10), R11
+ MOVL R11, (CX)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B
emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4through7:
- MOVL (R9), R10
- MOVL -4(R9)(R8*1), R9
- MOVL R10, (AX)
- MOVL R9, -4(AX)(R8*1)
+ MOVL (R10), R11
+ MOVL -4(R10)(R9*1), R10
+ MOVL R11, (CX)
+ MOVL R10, -4(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B
emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_8through16:
- MOVQ (R9), R10
- MOVQ -8(R9)(R8*1), R9
- MOVQ R10, (AX)
- MOVQ R9, -8(AX)(R8*1)
+ MOVQ (R10), R11
+ MOVQ -8(R10)(R9*1), R10
+ MOVQ R11, (CX)
+ MOVQ R10, -8(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B
emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_17through32:
- MOVOU (R9), X0
- MOVOU -16(R9)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU -16(R10)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B
emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_33through64:
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_match_emit_encodeBetterBlockAsm10B:
- MOVQ BX, AX
+ MOVQ SI, CX
JMP emit_literal_done_match_emit_encodeBetterBlockAsm10B
memmove_long_match_emit_encodeBetterBlockAsm10B:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveLong
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVQ R8, R12
- SHRQ $0x05, R12
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R13
- SUBQ R10, R13
- DECQ R12
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVQ R9, R13
+ SHRQ $0x05, R13
+ MOVQ CX, R11
+ ANDL $0x0000001f, R11
+ MOVQ $0x00000040, R14
+ SUBQ R11, R14
+ DECQ R13
JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_forward_sse_loop_32
- LEAQ -32(R9)(R13*1), R10
- LEAQ -32(AX)(R13*1), R14
+ LEAQ -32(R10)(R14*1), R11
+ LEAQ -32(CX)(R14*1), R15
emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R14)
- MOVOA X5, 16(R14)
+ MOVOU (R11), X4
+ MOVOU 16(R11), X5
+ MOVOA X4, (R15)
+ MOVOA X5, 16(R15)
+ ADDQ $0x20, R15
+ ADDQ $0x20, R11
ADDQ $0x20, R14
- ADDQ $0x20, R10
- ADDQ $0x20, R13
- DECQ R12
+ DECQ R13
JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_forward_sse_loop_32:
- MOVOU -32(R9)(R13*1), X4
- MOVOU -16(R9)(R13*1), X5
- MOVOA X4, -32(AX)(R13*1)
- MOVOA X5, -16(AX)(R13*1)
- ADDQ $0x20, R13
- CMPQ R8, R13
+ MOVOU -32(R10)(R14*1), X4
+ MOVOU -16(R10)(R14*1), X5
+ MOVOA X4, -32(CX)(R14*1)
+ MOVOA X5, -16(CX)(R14*1)
+ ADDQ $0x20, R14
+ CMPQ R9, R14
JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ BX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ SI, CX
emit_literal_done_match_emit_encodeBetterBlockAsm10B:
- ADDL R11, CX
- ADDL $0x04, R11
- MOVL CX, 12(SP)
+ ADDL R12, DX
+ ADDL $0x04, R12
+ MOVL DX, 12(SP)
// emitCopy
- CMPL R11, $0x40
+ CMPL R12, $0x40
JBE two_byte_offset_short_match_nolit_encodeBetterBlockAsm10B
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JAE long_offset_short_match_nolit_encodeBetterBlockAsm10B
- MOVL $0x00000001, BX
- LEAL 16(BX), BX
- MOVB DI, 1(AX)
- SHRL $0x08, DI
- SHLL $0x05, DI
- ORL DI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
- SUBL $0x08, R11
+ MOVL $0x00000001, SI
+ LEAL 16(SI), SI
+ MOVB R8, 1(CX)
+ SHRL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
+ SUBL $0x08, R12
// emitRepeat
- LEAL -4(R11), R11
+ LEAL -4(R12), R12
JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b
- MOVL R11, BX
- LEAL -4(R11), R11
- CMPL BX, $0x08
+ MOVL R12, SI
+ LEAL -4(R12), R12
+ CMPL SI, $0x08
JBE repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b
- CMPL BX, $0x0c
+ CMPL SI, $0x0c
JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JB repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b
cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b:
- CMPL R11, $0x00000104
+ CMPL R12, $0x00000104
JB repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b
- LEAL -256(R11), R11
- MOVW $0x0019, (AX)
- MOVW R11, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R12), R12
+ MOVW $0x0019, (CX)
+ MOVW R12, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b:
- LEAL -4(R11), R11
- MOVW $0x0015, (AX)
- MOVB R11, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R12), R12
+ MOVW $0x0015, (CX)
+ MOVB R12, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b:
- SHLL $0x02, R11
- ORL $0x01, R11
- MOVW R11, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R12
+ ORL $0x01, R12
+ MOVW R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b:
- XORQ BX, BX
- LEAL 1(BX)(R11*4), R11
- MOVB DI, 1(AX)
- SARL $0x08, DI
- SHLL $0x05, DI
- ORL DI, R11
- MOVB R11, (AX)
- ADDQ $0x02, AX
+ XORQ SI, SI
+ LEAL 1(SI)(R12*4), R12
+ MOVB R8, 1(CX)
+ SARL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, R12
+ MOVB R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
long_offset_short_match_nolit_encodeBetterBlockAsm10B:
- MOVB $0xee, (AX)
- MOVW DI, 1(AX)
- LEAL -60(R11), R11
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW R8, 1(CX)
+ LEAL -60(R12), R12
+ ADDQ $0x03, CX
// emitRepeat
- MOVL R11, BX
- LEAL -4(R11), R11
- CMPL BX, $0x08
+ MOVL R12, SI
+ LEAL -4(R12), R12
+ CMPL SI, $0x08
JBE repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short
- CMPL BX, $0x0c
+ CMPL SI, $0x0c
JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JB repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short
cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short:
- CMPL R11, $0x00000104
+ CMPL R12, $0x00000104
JB repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short
- LEAL -256(R11), R11
- MOVW $0x0019, (AX)
- MOVW R11, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R12), R12
+ MOVW $0x0019, (CX)
+ MOVW R12, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short:
- LEAL -4(R11), R11
- MOVW $0x0015, (AX)
- MOVB R11, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R12), R12
+ MOVW $0x0015, (CX)
+ MOVB R12, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short:
- SHLL $0x02, R11
- ORL $0x01, R11
- MOVW R11, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R12
+ ORL $0x01, R12
+ MOVW R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short:
- XORQ BX, BX
- LEAL 1(BX)(R11*4), R11
- MOVB DI, 1(AX)
- SARL $0x08, DI
- SHLL $0x05, DI
- ORL DI, R11
- MOVB R11, (AX)
- ADDQ $0x02, AX
+ XORQ SI, SI
+ LEAL 1(SI)(R12*4), R12
+ MOVB R8, 1(CX)
+ SARL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, R12
+ MOVB R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
two_byte_offset_short_match_nolit_encodeBetterBlockAsm10B:
- MOVL R11, BX
- SHLL $0x02, BX
- CMPL R11, $0x0c
+ MOVL R12, SI
+ SHLL $0x02, SI
+ CMPL R12, $0x0c
JAE emit_copy_three_match_nolit_encodeBetterBlockAsm10B
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JAE emit_copy_three_match_nolit_encodeBetterBlockAsm10B
- LEAL -15(BX), BX
- MOVB DI, 1(AX)
- SHRL $0x08, DI
- SHLL $0x05, DI
- ORL DI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ LEAL -15(SI), SI
+ MOVB R8, 1(CX)
+ SHRL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
emit_copy_three_match_nolit_encodeBetterBlockAsm10B:
- LEAL -2(BX), BX
- MOVB BL, (AX)
- MOVW DI, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(SI), SI
+ MOVB SI, (CX)
+ MOVW R8, 1(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
match_is_repeat_encodeBetterBlockAsm10B:
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm10B
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R9
- SUBL BX, R8
- LEAL -1(R8), BX
- CMPL BX, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R10
+ SUBL SI, R9
+ LEAL -1(R9), SI
+ CMPL SI, $0x3c
JB one_byte_match_emit_repeat_encodeBetterBlockAsm10B
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_match_emit_repeat_encodeBetterBlockAsm10B
JB three_bytes_match_emit_repeat_encodeBetterBlockAsm10B
three_bytes_match_emit_repeat_encodeBetterBlockAsm10B:
- MOVB $0xf4, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm10B
two_bytes_match_emit_repeat_encodeBetterBlockAsm10B:
- MOVB $0xf0, (AX)
- MOVB BL, 1(AX)
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_match_emit_repeat_encodeBetterBlockAsm10B
JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm10B
one_byte_match_emit_repeat_encodeBetterBlockAsm10B:
- SHLB $0x02, BL
- MOVB BL, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
memmove_match_emit_repeat_encodeBetterBlockAsm10B:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveShort
- CMPQ R8, $0x04
+ CMPQ R9, $0x04
JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4through7
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_17through32
JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_33through64
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4:
- MOVL (R9), R10
- MOVL R10, (AX)
+ MOVL (R10), R11
+ MOVL R11, (CX)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4through7:
- MOVL (R9), R10
- MOVL -4(R9)(R8*1), R9
- MOVL R10, (AX)
- MOVL R9, -4(AX)(R8*1)
+ MOVL (R10), R11
+ MOVL -4(R10)(R9*1), R10
+ MOVL R11, (CX)
+ MOVL R10, -4(CX)(R9*1)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_8through16:
- MOVQ (R9), R10
- MOVQ -8(R9)(R8*1), R9
- MOVQ R10, (AX)
- MOVQ R9, -8(AX)(R8*1)
+ MOVQ (R10), R11
+ MOVQ -8(R10)(R9*1), R10
+ MOVQ R11, (CX)
+ MOVQ R10, -8(CX)(R9*1)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_17through32:
- MOVOU (R9), X0
- MOVOU -16(R9)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU -16(R10)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_33through64:
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B:
- MOVQ BX, AX
+ MOVQ SI, CX
JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm10B
memmove_long_match_emit_repeat_encodeBetterBlockAsm10B:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveLong
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVQ R8, R12
- SHRQ $0x05, R12
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R13
- SUBQ R10, R13
- DECQ R12
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVQ R9, R13
+ SHRQ $0x05, R13
+ MOVQ CX, R11
+ ANDL $0x0000001f, R11
+ MOVQ $0x00000040, R14
+ SUBQ R11, R14
+ DECQ R13
JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_forward_sse_loop_32
- LEAQ -32(R9)(R13*1), R10
- LEAQ -32(AX)(R13*1), R14
+ LEAQ -32(R10)(R14*1), R11
+ LEAQ -32(CX)(R14*1), R15
emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R14)
- MOVOA X5, 16(R14)
+ MOVOU (R11), X4
+ MOVOU 16(R11), X5
+ MOVOA X4, (R15)
+ MOVOA X5, 16(R15)
+ ADDQ $0x20, R15
+ ADDQ $0x20, R11
ADDQ $0x20, R14
- ADDQ $0x20, R10
- ADDQ $0x20, R13
- DECQ R12
+ DECQ R13
JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_big_loop_back
emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_forward_sse_loop_32:
- MOVOU -32(R9)(R13*1), X4
- MOVOU -16(R9)(R13*1), X5
- MOVOA X4, -32(AX)(R13*1)
- MOVOA X5, -16(AX)(R13*1)
- ADDQ $0x20, R13
- CMPQ R8, R13
+ MOVOU -32(R10)(R14*1), X4
+ MOVOU -16(R10)(R14*1), X5
+ MOVOA X4, -32(CX)(R14*1)
+ MOVOA X5, -16(CX)(R14*1)
+ ADDQ $0x20, R14
+ CMPQ R9, R14
JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ BX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ SI, CX
emit_literal_done_match_emit_repeat_encodeBetterBlockAsm10B:
- ADDL R11, CX
- ADDL $0x04, R11
- MOVL CX, 12(SP)
+ ADDL R12, DX
+ ADDL $0x04, R12
+ MOVL DX, 12(SP)
// emitRepeat
- MOVL R11, BX
- LEAL -4(R11), R11
- CMPL BX, $0x08
+ MOVL R12, SI
+ LEAL -4(R12), R12
+ CMPL SI, $0x08
JBE repeat_two_match_nolit_repeat_encodeBetterBlockAsm10B
- CMPL BX, $0x0c
+ CMPL SI, $0x0c
JAE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JB repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B
cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B:
- CMPL R11, $0x00000104
+ CMPL R12, $0x00000104
JB repeat_three_match_nolit_repeat_encodeBetterBlockAsm10B
- LEAL -256(R11), R11
- MOVW $0x0019, (AX)
- MOVW R11, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R12), R12
+ MOVW $0x0019, (CX)
+ MOVW R12, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
repeat_three_match_nolit_repeat_encodeBetterBlockAsm10B:
- LEAL -4(R11), R11
- MOVW $0x0015, (AX)
- MOVB R11, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R12), R12
+ MOVW $0x0015, (CX)
+ MOVB R12, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
repeat_two_match_nolit_repeat_encodeBetterBlockAsm10B:
- SHLL $0x02, R11
- ORL $0x01, R11
- MOVW R11, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R12
+ ORL $0x01, R12
+ MOVW R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B:
- XORQ BX, BX
- LEAL 1(BX)(R11*4), R11
- MOVB DI, 1(AX)
- SARL $0x08, DI
- SHLL $0x05, DI
- ORL DI, R11
- MOVB R11, (AX)
- ADDQ $0x02, AX
+ XORQ SI, SI
+ LEAL 1(SI)(R12*4), R12
+ MOVB R8, 1(CX)
+ SARL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, R12
+ MOVB R12, (CX)
+ ADDQ $0x02, CX
match_nolit_emitcopy_end_encodeBetterBlockAsm10B:
- CMPL CX, 8(SP)
+ CMPL DX, 8(SP)
JAE emit_remainder_encodeBetterBlockAsm10B
- CMPQ AX, (SP)
+ CMPQ CX, (SP)
JB match_nolit_dst_ok_encodeBetterBlockAsm10B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_nolit_dst_ok_encodeBetterBlockAsm10B:
- MOVQ $0x0000cf1bbcdcbf9b, BX
- MOVQ $0x9e3779b1, DI
- LEAQ 1(SI), SI
- LEAQ -2(CX), R8
- MOVQ (DX)(SI*1), R9
- MOVQ 1(DX)(SI*1), R10
- MOVQ (DX)(R8*1), R11
- MOVQ 1(DX)(R8*1), R12
- SHLQ $0x10, R9
- IMULQ BX, R9
- SHRQ $0x34, R9
- SHLQ $0x20, R10
- IMULQ DI, R10
- SHRQ $0x36, R10
- SHLQ $0x10, R11
- IMULQ BX, R11
- SHRQ $0x34, R11
- SHLQ $0x20, R12
- IMULQ DI, R12
- SHRQ $0x36, R12
- LEAQ 1(SI), DI
- LEAQ 1(R8), R13
- MOVL SI, 24(SP)(R9*4)
- MOVL R8, 24(SP)(R11*4)
- MOVL DI, 16408(SP)(R10*4)
- MOVL R13, 16408(SP)(R12*4)
- LEAQ 1(R8)(SI*1), DI
- SHRQ $0x01, DI
- ADDQ $0x01, SI
- SUBQ $0x01, R8
+ MOVQ $0x0000cf1bbcdcbf9b, SI
+ MOVQ $0x9e3779b1, R8
+ LEAQ 1(DI), DI
+ LEAQ -2(DX), R9
+ MOVQ (BX)(DI*1), R10
+ MOVQ 1(BX)(DI*1), R11
+ MOVQ (BX)(R9*1), R12
+ MOVQ 1(BX)(R9*1), R13
+ SHLQ $0x10, R10
+ IMULQ SI, R10
+ SHRQ $0x34, R10
+ SHLQ $0x20, R11
+ IMULQ R8, R11
+ SHRQ $0x36, R11
+ SHLQ $0x10, R12
+ IMULQ SI, R12
+ SHRQ $0x34, R12
+ SHLQ $0x20, R13
+ IMULQ R8, R13
+ SHRQ $0x36, R13
+ LEAQ 1(DI), R8
+ LEAQ 1(R9), R14
+ MOVL DI, (AX)(R10*4)
+ MOVL R9, (AX)(R12*4)
+ MOVL R8, 16384(AX)(R11*4)
+ MOVL R14, 16384(AX)(R13*4)
+ LEAQ 1(R9)(DI*1), R8
+ SHRQ $0x01, R8
+ ADDQ $0x01, DI
+ SUBQ $0x01, R9
index_loop_encodeBetterBlockAsm10B:
- CMPQ DI, R8
+ CMPQ R8, R9
JAE search_loop_encodeBetterBlockAsm10B
- MOVQ (DX)(SI*1), R9
- MOVQ (DX)(DI*1), R10
- SHLQ $0x10, R9
- IMULQ BX, R9
- SHRQ $0x34, R9
+ MOVQ (BX)(DI*1), R10
+ MOVQ (BX)(R8*1), R11
SHLQ $0x10, R10
- IMULQ BX, R10
+ IMULQ SI, R10
SHRQ $0x34, R10
- MOVL SI, 24(SP)(R9*4)
- MOVL DI, 24(SP)(R10*4)
- ADDQ $0x02, SI
+ SHLQ $0x10, R11
+ IMULQ SI, R11
+ SHRQ $0x34, R11
+ MOVL DI, (AX)(R10*4)
+ MOVL R8, (AX)(R11*4)
ADDQ $0x02, DI
+ ADDQ $0x02, R8
JMP index_loop_encodeBetterBlockAsm10B
emit_remainder_encodeBetterBlockAsm10B:
- MOVQ src_len+32(FP), CX
- SUBL 12(SP), CX
- LEAQ 3(AX)(CX*1), CX
- CMPQ CX, (SP)
+ MOVQ src_len+32(FP), AX
+ SUBL 12(SP), AX
+ LEAQ 3(CX)(AX*1), AX
+ CMPQ AX, (SP)
JB emit_remainder_ok_encodeBetterBlockAsm10B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
emit_remainder_ok_encodeBetterBlockAsm10B:
- MOVQ src_len+32(FP), CX
- MOVL 12(SP), BX
- CMPL BX, CX
+ MOVQ src_len+32(FP), AX
+ MOVL 12(SP), DX
+ CMPL DX, AX
JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm10B
- MOVL CX, SI
- MOVL CX, 12(SP)
- LEAQ (DX)(BX*1), CX
- SUBL BX, SI
+ MOVL AX, SI
+ MOVL AX, 12(SP)
+ LEAQ (BX)(DX*1), AX
+ SUBL DX, SI
LEAL -1(SI), DX
CMPL DX, $0x3c
JB one_byte_emit_remainder_encodeBetterBlockAsm10B
@@ -9721,26 +9730,26 @@ emit_remainder_ok_encodeBetterBlockAsm10B:
JB three_bytes_emit_remainder_encodeBetterBlockAsm10B
three_bytes_emit_remainder_encodeBetterBlockAsm10B:
- MOVB $0xf4, (AX)
- MOVW DX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW DX, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_emit_remainder_encodeBetterBlockAsm10B
two_bytes_emit_remainder_encodeBetterBlockAsm10B:
- MOVB $0xf0, (AX)
- MOVB DL, 1(AX)
- ADDQ $0x02, AX
+ MOVB $0xf0, (CX)
+ MOVB DL, 1(CX)
+ ADDQ $0x02, CX
CMPL DX, $0x40
JB memmove_emit_remainder_encodeBetterBlockAsm10B
JMP memmove_long_emit_remainder_encodeBetterBlockAsm10B
one_byte_emit_remainder_encodeBetterBlockAsm10B:
SHLB $0x02, DL
- MOVB DL, (AX)
- ADDQ $0x01, AX
+ MOVB DL, (CX)
+ ADDQ $0x01, CX
memmove_emit_remainder_encodeBetterBlockAsm10B:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveShort
@@ -9756,73 +9765,73 @@ memmove_emit_remainder_encodeBetterBlockAsm10B:
JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_33through64
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_1or2:
- MOVB (CX), SI
- MOVB -1(CX)(BX*1), CL
- MOVB SI, (AX)
- MOVB CL, -1(AX)(BX*1)
+ MOVB (AX), SI
+ MOVB -1(AX)(BX*1), AL
+ MOVB SI, (CX)
+ MOVB AL, -1(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_3:
- MOVW (CX), SI
- MOVB 2(CX), CL
- MOVW SI, (AX)
- MOVB CL, 2(AX)
+ MOVW (AX), SI
+ MOVB 2(AX), AL
+ MOVW SI, (CX)
+ MOVB AL, 2(CX)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_4through7:
- MOVL (CX), SI
- MOVL -4(CX)(BX*1), CX
- MOVL SI, (AX)
- MOVL CX, -4(AX)(BX*1)
+ MOVL (AX), SI
+ MOVL -4(AX)(BX*1), AX
+ MOVL SI, (CX)
+ MOVL AX, -4(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_8through16:
- MOVQ (CX), SI
- MOVQ -8(CX)(BX*1), CX
- MOVQ SI, (AX)
- MOVQ CX, -8(AX)(BX*1)
+ MOVQ (AX), SI
+ MOVQ -8(AX)(BX*1), AX
+ MOVQ SI, (CX)
+ MOVQ AX, -8(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_17through32:
- MOVOU (CX), X0
- MOVOU -16(CX)(BX*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU -16(AX)(BX*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_33through64:
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B:
- MOVQ DX, AX
+ MOVQ DX, CX
JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm10B
memmove_long_emit_remainder_encodeBetterBlockAsm10B:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveLong
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
MOVQ BX, DI
SHRQ $0x05, DI
- MOVQ AX, SI
+ MOVQ CX, SI
ANDL $0x0000001f, SI
MOVQ $0x00000040, R8
SUBQ SI, R8
DECQ DI
JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_forward_sse_loop_32
- LEAQ -32(CX)(R8*1), SI
- LEAQ -32(AX)(R8*1), R9
+ LEAQ -32(AX)(R8*1), SI
+ LEAQ -32(CX)(R8*1), R9
emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_big_loop_back:
MOVOU (SI), X4
@@ -9836,742 +9845,743 @@ emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_big_loop_back:
JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_big_loop_back
emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_forward_sse_loop_32:
- MOVOU -32(CX)(R8*1), X4
- MOVOU -16(CX)(R8*1), X5
- MOVOA X4, -32(AX)(R8*1)
- MOVOA X5, -16(AX)(R8*1)
+ MOVOU -32(AX)(R8*1), X4
+ MOVOU -16(AX)(R8*1), X5
+ MOVOA X4, -32(CX)(R8*1)
+ MOVOA X5, -16(CX)(R8*1)
ADDQ $0x20, R8
CMPQ BX, R8
JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
- MOVQ DX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
+ MOVQ DX, CX
emit_literal_done_emit_remainder_encodeBetterBlockAsm10B:
- MOVQ dst_base+0(FP), CX
- SUBQ CX, AX
- MOVQ AX, ret+48(FP)
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, CX
+ MOVQ CX, ret+56(FP)
RET
-// func encodeBetterBlockAsm8B(dst []byte, src []byte) int
+// func encodeBetterBlockAsm8B(dst []byte, src []byte, tmp *[5120]byte) int
// Requires: BMI, SSE2
-TEXT ·encodeBetterBlockAsm8B(SB), $5144-56
- MOVQ dst_base+0(FP), AX
- MOVQ $0x00000028, CX
- LEAQ 24(SP), DX
+TEXT ·encodeBetterBlockAsm8B(SB), $24-64
+ MOVQ tmp+48(FP), AX
+ MOVQ dst_base+0(FP), CX
+ MOVQ $0x00000028, DX
+ MOVQ AX, BX
PXOR X0, X0
zero_loop_encodeBetterBlockAsm8B:
- MOVOU X0, (DX)
- MOVOU X0, 16(DX)
- MOVOU X0, 32(DX)
- MOVOU X0, 48(DX)
- MOVOU X0, 64(DX)
- MOVOU X0, 80(DX)
- MOVOU X0, 96(DX)
- MOVOU X0, 112(DX)
- ADDQ $0x80, DX
- DECQ CX
+ MOVOU X0, (BX)
+ MOVOU X0, 16(BX)
+ MOVOU X0, 32(BX)
+ MOVOU X0, 48(BX)
+ MOVOU X0, 64(BX)
+ MOVOU X0, 80(BX)
+ MOVOU X0, 96(BX)
+ MOVOU X0, 112(BX)
+ ADDQ $0x80, BX
+ DECQ DX
JNZ zero_loop_encodeBetterBlockAsm8B
MOVL $0x00000000, 12(SP)
- MOVQ src_len+32(FP), CX
- LEAQ -6(CX), DX
- LEAQ -8(CX), BX
- MOVL BX, 8(SP)
- SHRQ $0x05, CX
- SUBL CX, DX
- LEAQ (AX)(DX*1), DX
- MOVQ DX, (SP)
- MOVL $0x00000001, CX
+ MOVQ src_len+32(FP), DX
+ LEAQ -6(DX), BX
+ LEAQ -8(DX), SI
+ MOVL SI, 8(SP)
+ SHRQ $0x05, DX
+ SUBL DX, BX
+ LEAQ (CX)(BX*1), BX
+ MOVQ BX, (SP)
+ MOVL $0x00000001, DX
MOVL $0x00000000, 16(SP)
- MOVQ src_base+24(FP), DX
+ MOVQ src_base+24(FP), BX
search_loop_encodeBetterBlockAsm8B:
- MOVL CX, BX
- SUBL 12(SP), BX
- SHRL $0x04, BX
- LEAL 1(CX)(BX*1), BX
- CMPL BX, 8(SP)
+ MOVL DX, SI
+ SUBL 12(SP), SI
+ SHRL $0x04, SI
+ LEAL 1(DX)(SI*1), SI
+ CMPL SI, 8(SP)
JAE emit_remainder_encodeBetterBlockAsm8B
- MOVQ (DX)(CX*1), SI
- MOVL BX, 20(SP)
- MOVQ $0x0000cf1bbcdcbf9b, R8
- MOVQ $0x9e3779b1, BX
- MOVQ SI, R9
- MOVQ SI, R10
- SHLQ $0x10, R9
- IMULQ R8, R9
- SHRQ $0x36, R9
- SHLQ $0x20, R10
- IMULQ BX, R10
- SHRQ $0x38, R10
- MOVL 24(SP)(R9*4), BX
- MOVL 4120(SP)(R10*4), DI
- MOVL CX, 24(SP)(R9*4)
- MOVL CX, 4120(SP)(R10*4)
- MOVQ (DX)(BX*1), R9
- MOVQ (DX)(DI*1), R10
- CMPQ R9, SI
+ MOVQ (BX)(DX*1), DI
+ MOVL SI, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R9
+ MOVQ $0x9e3779b1, SI
+ MOVQ DI, R10
+ MOVQ DI, R11
+ SHLQ $0x10, R10
+ IMULQ R9, R10
+ SHRQ $0x36, R10
+ SHLQ $0x20, R11
+ IMULQ SI, R11
+ SHRQ $0x38, R11
+ MOVL (AX)(R10*4), SI
+ MOVL 4096(AX)(R11*4), R8
+ MOVL DX, (AX)(R10*4)
+ MOVL DX, 4096(AX)(R11*4)
+ MOVQ (BX)(SI*1), R10
+ MOVQ (BX)(R8*1), R11
+ CMPQ R10, DI
JEQ candidate_match_encodeBetterBlockAsm8B
- CMPQ R10, SI
+ CMPQ R11, DI
JNE no_short_found_encodeBetterBlockAsm8B
- MOVL DI, BX
+ MOVL R8, SI
JMP candidate_match_encodeBetterBlockAsm8B
no_short_found_encodeBetterBlockAsm8B:
- CMPL R9, SI
+ CMPL R10, DI
JEQ candidate_match_encodeBetterBlockAsm8B
- CMPL R10, SI
+ CMPL R11, DI
JEQ candidateS_match_encodeBetterBlockAsm8B
- MOVL 20(SP), CX
+ MOVL 20(SP), DX
JMP search_loop_encodeBetterBlockAsm8B
candidateS_match_encodeBetterBlockAsm8B:
- SHRQ $0x08, SI
- MOVQ SI, R9
- SHLQ $0x10, R9
- IMULQ R8, R9
- SHRQ $0x36, R9
- MOVL 24(SP)(R9*4), BX
- INCL CX
- MOVL CX, 24(SP)(R9*4)
- CMPL (DX)(BX*1), SI
+ SHRQ $0x08, DI
+ MOVQ DI, R10
+ SHLQ $0x10, R10
+ IMULQ R9, R10
+ SHRQ $0x36, R10
+ MOVL (AX)(R10*4), SI
+ INCL DX
+ MOVL DX, (AX)(R10*4)
+ CMPL (BX)(SI*1), DI
JEQ candidate_match_encodeBetterBlockAsm8B
- DECL CX
- MOVL DI, BX
+ DECL DX
+ MOVL R8, SI
candidate_match_encodeBetterBlockAsm8B:
- MOVL 12(SP), SI
- TESTL BX, BX
+ MOVL 12(SP), DI
+ TESTL SI, SI
JZ match_extend_back_end_encodeBetterBlockAsm8B
match_extend_back_loop_encodeBetterBlockAsm8B:
- CMPL CX, SI
+ CMPL DX, DI
JBE match_extend_back_end_encodeBetterBlockAsm8B
- MOVB -1(DX)(BX*1), DI
- MOVB -1(DX)(CX*1), R8
- CMPB DI, R8
+ MOVB -1(BX)(SI*1), R8
+ MOVB -1(BX)(DX*1), R9
+ CMPB R8, R9
JNE match_extend_back_end_encodeBetterBlockAsm8B
- LEAL -1(CX), CX
- DECL BX
+ LEAL -1(DX), DX
+ DECL SI
JZ match_extend_back_end_encodeBetterBlockAsm8B
JMP match_extend_back_loop_encodeBetterBlockAsm8B
match_extend_back_end_encodeBetterBlockAsm8B:
- MOVL CX, SI
- SUBL 12(SP), SI
- LEAQ 3(AX)(SI*1), SI
- CMPQ SI, (SP)
+ MOVL DX, DI
+ SUBL 12(SP), DI
+ LEAQ 3(CX)(DI*1), DI
+ CMPQ DI, (SP)
JB match_dst_size_check_encodeBetterBlockAsm8B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_dst_size_check_encodeBetterBlockAsm8B:
- MOVL CX, SI
- ADDL $0x04, CX
- ADDL $0x04, BX
- MOVQ src_len+32(FP), DI
- SUBL CX, DI
- LEAQ (DX)(CX*1), R8
- LEAQ (DX)(BX*1), R9
+ MOVL DX, DI
+ ADDL $0x04, DX
+ ADDL $0x04, SI
+ MOVQ src_len+32(FP), R8
+ SUBL DX, R8
+ LEAQ (BX)(DX*1), R9
+ LEAQ (BX)(SI*1), R10
// matchLen
- XORL R11, R11
+ XORL R12, R12
matchlen_loopback_16_match_nolit_encodeBetterBlockAsm8B:
- CMPL DI, $0x10
+ CMPL R8, $0x10
JB matchlen_match8_match_nolit_encodeBetterBlockAsm8B
- MOVQ (R8)(R11*1), R10
- MOVQ 8(R8)(R11*1), R12
- XORQ (R9)(R11*1), R10
+ MOVQ (R9)(R12*1), R11
+ MOVQ 8(R9)(R12*1), R13
+ XORQ (R10)(R12*1), R11
JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm8B
- XORQ 8(R9)(R11*1), R12
+ XORQ 8(R10)(R12*1), R13
JNZ matchlen_bsf_16match_nolit_encodeBetterBlockAsm8B
- LEAL -16(DI), DI
- LEAL 16(R11), R11
+ LEAL -16(R8), R8
+ LEAL 16(R12), R12
JMP matchlen_loopback_16_match_nolit_encodeBetterBlockAsm8B
matchlen_bsf_16match_nolit_encodeBetterBlockAsm8B:
#ifdef GOAMD64_v3
- TZCNTQ R12, R12
+ TZCNTQ R13, R13
#else
- BSFQ R12, R12
+ BSFQ R13, R13
#endif
- SARQ $0x03, R12
- LEAL 8(R11)(R12*1), R11
+ SARQ $0x03, R13
+ LEAL 8(R12)(R13*1), R12
JMP match_nolit_end_encodeBetterBlockAsm8B
matchlen_match8_match_nolit_encodeBetterBlockAsm8B:
- CMPL DI, $0x08
+ CMPL R8, $0x08
JB matchlen_match4_match_nolit_encodeBetterBlockAsm8B
- MOVQ (R8)(R11*1), R10
- XORQ (R9)(R11*1), R10
+ MOVQ (R9)(R12*1), R11
+ XORQ (R10)(R12*1), R11
JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm8B
- LEAL -8(DI), DI
- LEAL 8(R11), R11
+ LEAL -8(R8), R8
+ LEAL 8(R12), R12
JMP matchlen_match4_match_nolit_encodeBetterBlockAsm8B
matchlen_bsf_8_match_nolit_encodeBetterBlockAsm8B:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL (R11)(R10*1), R11
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
JMP match_nolit_end_encodeBetterBlockAsm8B
matchlen_match4_match_nolit_encodeBetterBlockAsm8B:
- CMPL DI, $0x04
+ CMPL R8, $0x04
JB matchlen_match2_match_nolit_encodeBetterBlockAsm8B
- MOVL (R8)(R11*1), R10
- CMPL (R9)(R11*1), R10
+ MOVL (R9)(R12*1), R11
+ CMPL (R10)(R12*1), R11
JNE matchlen_match2_match_nolit_encodeBetterBlockAsm8B
- LEAL -4(DI), DI
- LEAL 4(R11), R11
+ LEAL -4(R8), R8
+ LEAL 4(R12), R12
matchlen_match2_match_nolit_encodeBetterBlockAsm8B:
- CMPL DI, $0x01
+ CMPL R8, $0x01
JE matchlen_match1_match_nolit_encodeBetterBlockAsm8B
JB match_nolit_end_encodeBetterBlockAsm8B
- MOVW (R8)(R11*1), R10
- CMPW (R9)(R11*1), R10
+ MOVW (R9)(R12*1), R11
+ CMPW (R10)(R12*1), R11
JNE matchlen_match1_match_nolit_encodeBetterBlockAsm8B
- LEAL 2(R11), R11
- SUBL $0x02, DI
+ LEAL 2(R12), R12
+ SUBL $0x02, R8
JZ match_nolit_end_encodeBetterBlockAsm8B
matchlen_match1_match_nolit_encodeBetterBlockAsm8B:
- MOVB (R8)(R11*1), R10
- CMPB (R9)(R11*1), R10
+ MOVB (R9)(R12*1), R11
+ CMPB (R10)(R12*1), R11
JNE match_nolit_end_encodeBetterBlockAsm8B
- LEAL 1(R11), R11
+ LEAL 1(R12), R12
match_nolit_end_encodeBetterBlockAsm8B:
- MOVL CX, DI
- SUBL BX, DI
+ MOVL DX, R8
+ SUBL SI, R8
// Check if repeat
- CMPL 16(SP), DI
+ CMPL 16(SP), R8
JEQ match_is_repeat_encodeBetterBlockAsm8B
- MOVL DI, 16(SP)
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL R8, 16(SP)
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_match_emit_encodeBetterBlockAsm8B
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R9
- SUBL BX, R8
- LEAL -1(R8), BX
- CMPL BX, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R10
+ SUBL SI, R9
+ LEAL -1(R9), SI
+ CMPL SI, $0x3c
JB one_byte_match_emit_encodeBetterBlockAsm8B
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_match_emit_encodeBetterBlockAsm8B
JB three_bytes_match_emit_encodeBetterBlockAsm8B
three_bytes_match_emit_encodeBetterBlockAsm8B:
- MOVB $0xf4, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_encodeBetterBlockAsm8B
two_bytes_match_emit_encodeBetterBlockAsm8B:
- MOVB $0xf0, (AX)
- MOVB BL, 1(AX)
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_match_emit_encodeBetterBlockAsm8B
JMP memmove_long_match_emit_encodeBetterBlockAsm8B
one_byte_match_emit_encodeBetterBlockAsm8B:
- SHLB $0x02, BL
- MOVB BL, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
memmove_match_emit_encodeBetterBlockAsm8B:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveShort
- CMPQ R8, $0x04
+ CMPQ R9, $0x04
JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JB emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4through7
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_33through64
emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4:
- MOVL (R9), R10
- MOVL R10, (AX)
+ MOVL (R10), R11
+ MOVL R11, (CX)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B
emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4through7:
- MOVL (R9), R10
- MOVL -4(R9)(R8*1), R9
- MOVL R10, (AX)
- MOVL R9, -4(AX)(R8*1)
+ MOVL (R10), R11
+ MOVL -4(R10)(R9*1), R10
+ MOVL R11, (CX)
+ MOVL R10, -4(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B
emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_8through16:
- MOVQ (R9), R10
- MOVQ -8(R9)(R8*1), R9
- MOVQ R10, (AX)
- MOVQ R9, -8(AX)(R8*1)
+ MOVQ (R10), R11
+ MOVQ -8(R10)(R9*1), R10
+ MOVQ R11, (CX)
+ MOVQ R10, -8(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B
emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_17through32:
- MOVOU (R9), X0
- MOVOU -16(R9)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU -16(R10)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B
emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_33through64:
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_match_emit_encodeBetterBlockAsm8B:
- MOVQ BX, AX
+ MOVQ SI, CX
JMP emit_literal_done_match_emit_encodeBetterBlockAsm8B
memmove_long_match_emit_encodeBetterBlockAsm8B:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveLong
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVQ R8, R12
- SHRQ $0x05, R12
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R13
- SUBQ R10, R13
- DECQ R12
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVQ R9, R13
+ SHRQ $0x05, R13
+ MOVQ CX, R11
+ ANDL $0x0000001f, R11
+ MOVQ $0x00000040, R14
+ SUBQ R11, R14
+ DECQ R13
JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_forward_sse_loop_32
- LEAQ -32(R9)(R13*1), R10
- LEAQ -32(AX)(R13*1), R14
+ LEAQ -32(R10)(R14*1), R11
+ LEAQ -32(CX)(R14*1), R15
emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R14)
- MOVOA X5, 16(R14)
+ MOVOU (R11), X4
+ MOVOU 16(R11), X5
+ MOVOA X4, (R15)
+ MOVOA X5, 16(R15)
+ ADDQ $0x20, R15
+ ADDQ $0x20, R11
ADDQ $0x20, R14
- ADDQ $0x20, R10
- ADDQ $0x20, R13
- DECQ R12
+ DECQ R13
JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_forward_sse_loop_32:
- MOVOU -32(R9)(R13*1), X4
- MOVOU -16(R9)(R13*1), X5
- MOVOA X4, -32(AX)(R13*1)
- MOVOA X5, -16(AX)(R13*1)
- ADDQ $0x20, R13
- CMPQ R8, R13
+ MOVOU -32(R10)(R14*1), X4
+ MOVOU -16(R10)(R14*1), X5
+ MOVOA X4, -32(CX)(R14*1)
+ MOVOA X5, -16(CX)(R14*1)
+ ADDQ $0x20, R14
+ CMPQ R9, R14
JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ BX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ SI, CX
emit_literal_done_match_emit_encodeBetterBlockAsm8B:
- ADDL R11, CX
- ADDL $0x04, R11
- MOVL CX, 12(SP)
+ ADDL R12, DX
+ ADDL $0x04, R12
+ MOVL DX, 12(SP)
// emitCopy
- CMPL R11, $0x40
+ CMPL R12, $0x40
JBE two_byte_offset_short_match_nolit_encodeBetterBlockAsm8B
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JAE long_offset_short_match_nolit_encodeBetterBlockAsm8B
- MOVL $0x00000001, BX
- LEAL 16(BX), BX
- MOVB DI, 1(AX)
- SHRL $0x08, DI
- SHLL $0x05, DI
- ORL DI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
- SUBL $0x08, R11
+ MOVL $0x00000001, SI
+ LEAL 16(SI), SI
+ MOVB R8, 1(CX)
+ SHRL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
+ SUBL $0x08, R12
// emitRepeat
- LEAL -4(R11), R11
+ LEAL -4(R12), R12
JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b
- MOVL R11, BX
- LEAL -4(R11), R11
- CMPL BX, $0x08
+ MOVL R12, SI
+ LEAL -4(R12), R12
+ CMPL SI, $0x08
JBE repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b
- CMPL BX, $0x0c
+ CMPL SI, $0x0c
JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b
cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b:
- CMPL R11, $0x00000104
+ CMPL R12, $0x00000104
JB repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b
- LEAL -256(R11), R11
- MOVW $0x0019, (AX)
- MOVW R11, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R12), R12
+ MOVW $0x0019, (CX)
+ MOVW R12, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b:
- LEAL -4(R11), R11
- MOVW $0x0015, (AX)
- MOVB R11, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R12), R12
+ MOVW $0x0015, (CX)
+ MOVB R12, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b:
- SHLL $0x02, R11
- ORL $0x01, R11
- MOVW R11, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R12
+ ORL $0x01, R12
+ MOVW R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
- XORQ BX, BX
- LEAL 1(BX)(R11*4), R11
- MOVB DI, 1(AX)
- SARL $0x08, DI
- SHLL $0x05, DI
- ORL DI, R11
- MOVB R11, (AX)
- ADDQ $0x02, AX
+ XORQ SI, SI
+ LEAL 1(SI)(R12*4), R12
+ MOVB R8, 1(CX)
+ SARL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, R12
+ MOVB R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
long_offset_short_match_nolit_encodeBetterBlockAsm8B:
- MOVB $0xee, (AX)
- MOVW DI, 1(AX)
- LEAL -60(R11), R11
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW R8, 1(CX)
+ LEAL -60(R12), R12
+ ADDQ $0x03, CX
// emitRepeat
- MOVL R11, BX
- LEAL -4(R11), R11
- CMPL BX, $0x08
+ MOVL R12, SI
+ LEAL -4(R12), R12
+ CMPL SI, $0x08
JBE repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short
- CMPL BX, $0x0c
+ CMPL SI, $0x0c
JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short
cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short:
- CMPL R11, $0x00000104
+ CMPL R12, $0x00000104
JB repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short
- LEAL -256(R11), R11
- MOVW $0x0019, (AX)
- MOVW R11, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R12), R12
+ MOVW $0x0019, (CX)
+ MOVW R12, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short:
- LEAL -4(R11), R11
- MOVW $0x0015, (AX)
- MOVB R11, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R12), R12
+ MOVW $0x0015, (CX)
+ MOVB R12, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
-repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short:
- SHLL $0x02, R11
- ORL $0x01, R11
- MOVW R11, (AX)
- ADDQ $0x02, AX
- JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
- XORQ BX, BX
- LEAL 1(BX)(R11*4), R11
- MOVB DI, 1(AX)
- SARL $0x08, DI
- SHLL $0x05, DI
- ORL DI, R11
- MOVB R11, (AX)
- ADDQ $0x02, AX
+repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short:
+ SHLL $0x02, R12
+ ORL $0x01, R12
+ MOVW R12, (CX)
+ ADDQ $0x02, CX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
+ XORQ SI, SI
+ LEAL 1(SI)(R12*4), R12
+ MOVB R8, 1(CX)
+ SARL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, R12
+ MOVB R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
two_byte_offset_short_match_nolit_encodeBetterBlockAsm8B:
- MOVL R11, BX
- SHLL $0x02, BX
- CMPL R11, $0x0c
+ MOVL R12, SI
+ SHLL $0x02, SI
+ CMPL R12, $0x0c
JAE emit_copy_three_match_nolit_encodeBetterBlockAsm8B
- LEAL -15(BX), BX
- MOVB DI, 1(AX)
- SHRL $0x08, DI
- SHLL $0x05, DI
- ORL DI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ LEAL -15(SI), SI
+ MOVB R8, 1(CX)
+ SHRL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
emit_copy_three_match_nolit_encodeBetterBlockAsm8B:
- LEAL -2(BX), BX
- MOVB BL, (AX)
- MOVW DI, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(SI), SI
+ MOVB SI, (CX)
+ MOVW R8, 1(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
match_is_repeat_encodeBetterBlockAsm8B:
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm8B
- MOVL SI, DI
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R8
- SUBL BX, DI
- LEAL -1(DI), BX
- CMPL BX, $0x3c
+ MOVL DI, R8
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R9
+ SUBL SI, R8
+ LEAL -1(R8), SI
+ CMPL SI, $0x3c
JB one_byte_match_emit_repeat_encodeBetterBlockAsm8B
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_match_emit_repeat_encodeBetterBlockAsm8B
JB three_bytes_match_emit_repeat_encodeBetterBlockAsm8B
three_bytes_match_emit_repeat_encodeBetterBlockAsm8B:
- MOVB $0xf4, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm8B
two_bytes_match_emit_repeat_encodeBetterBlockAsm8B:
- MOVB $0xf0, (AX)
- MOVB BL, 1(AX)
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_match_emit_repeat_encodeBetterBlockAsm8B
JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm8B
one_byte_match_emit_repeat_encodeBetterBlockAsm8B:
- SHLB $0x02, BL
- MOVB BL, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
memmove_match_emit_repeat_encodeBetterBlockAsm8B:
- LEAQ (AX)(DI*1), BX
+ LEAQ (CX)(R8*1), SI
// genMemMoveShort
- CMPQ DI, $0x04
+ CMPQ R8, $0x04
JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4
- CMPQ DI, $0x08
+ CMPQ R8, $0x08
JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4through7
- CMPQ DI, $0x10
+ CMPQ R8, $0x10
JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_8through16
- CMPQ DI, $0x20
+ CMPQ R8, $0x20
JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_17through32
JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_33through64
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4:
- MOVL (R8), R9
- MOVL R9, (AX)
+ MOVL (R9), R10
+ MOVL R10, (CX)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4through7:
- MOVL (R8), R9
- MOVL -4(R8)(DI*1), R8
- MOVL R9, (AX)
- MOVL R8, -4(AX)(DI*1)
+ MOVL (R9), R10
+ MOVL -4(R9)(R8*1), R9
+ MOVL R10, (CX)
+ MOVL R9, -4(CX)(R8*1)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_8through16:
- MOVQ (R8), R9
- MOVQ -8(R8)(DI*1), R8
- MOVQ R9, (AX)
- MOVQ R8, -8(AX)(DI*1)
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (CX)
+ MOVQ R9, -8(CX)(R8*1)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_17through32:
- MOVOU (R8), X0
- MOVOU -16(R8)(DI*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(DI*1)
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R8*1)
JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_33through64:
- MOVOU (R8), X0
- MOVOU 16(R8), X1
- MOVOU -32(R8)(DI*1), X2
- MOVOU -16(R8)(DI*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(DI*1)
- MOVOU X3, -16(AX)(DI*1)
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R8*1)
+ MOVOU X3, -16(CX)(R8*1)
memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B:
- MOVQ BX, AX
+ MOVQ SI, CX
JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm8B
memmove_long_match_emit_repeat_encodeBetterBlockAsm8B:
- LEAQ (AX)(DI*1), BX
+ LEAQ (CX)(R8*1), SI
// genMemMoveLong
- MOVOU (R8), X0
- MOVOU 16(R8), X1
- MOVOU -32(R8)(DI*1), X2
- MOVOU -16(R8)(DI*1), X3
- MOVQ DI, R10
- SHRQ $0x05, R10
- MOVQ AX, R9
- ANDL $0x0000001f, R9
- MOVQ $0x00000040, R12
- SUBQ R9, R12
- DECQ R10
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R11
+ SHRQ $0x05, R11
+ MOVQ CX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R13
+ SUBQ R10, R13
+ DECQ R11
JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_forward_sse_loop_32
- LEAQ -32(R8)(R12*1), R9
- LEAQ -32(AX)(R12*1), R13
+ LEAQ -32(R9)(R13*1), R10
+ LEAQ -32(CX)(R13*1), R14
emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_big_loop_back:
- MOVOU (R9), X4
- MOVOU 16(R9), X5
- MOVOA X4, (R13)
- MOVOA X5, 16(R13)
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R14)
+ MOVOA X5, 16(R14)
+ ADDQ $0x20, R14
+ ADDQ $0x20, R10
ADDQ $0x20, R13
- ADDQ $0x20, R9
- ADDQ $0x20, R12
- DECQ R10
+ DECQ R11
JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_big_loop_back
emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_forward_sse_loop_32:
- MOVOU -32(R8)(R12*1), X4
- MOVOU -16(R8)(R12*1), X5
- MOVOA X4, -32(AX)(R12*1)
- MOVOA X5, -16(AX)(R12*1)
- ADDQ $0x20, R12
- CMPQ DI, R12
+ MOVOU -32(R9)(R13*1), X4
+ MOVOU -16(R9)(R13*1), X5
+ MOVOA X4, -32(CX)(R13*1)
+ MOVOA X5, -16(CX)(R13*1)
+ ADDQ $0x20, R13
+ CMPQ R8, R13
JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(DI*1)
- MOVOU X3, -16(AX)(DI*1)
- MOVQ BX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R8*1)
+ MOVOU X3, -16(CX)(R8*1)
+ MOVQ SI, CX
emit_literal_done_match_emit_repeat_encodeBetterBlockAsm8B:
- ADDL R11, CX
- ADDL $0x04, R11
- MOVL CX, 12(SP)
+ ADDL R12, DX
+ ADDL $0x04, R12
+ MOVL DX, 12(SP)
// emitRepeat
- MOVL R11, BX
- LEAL -4(R11), R11
- CMPL BX, $0x08
+ MOVL R12, SI
+ LEAL -4(R12), R12
+ CMPL SI, $0x08
JBE repeat_two_match_nolit_repeat_encodeBetterBlockAsm8B
- CMPL BX, $0x0c
+ CMPL SI, $0x0c
JAE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm8B
cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm8B:
- CMPL R11, $0x00000104
+ CMPL R12, $0x00000104
JB repeat_three_match_nolit_repeat_encodeBetterBlockAsm8B
- LEAL -256(R11), R11
- MOVW $0x0019, (AX)
- MOVW R11, 2(AX)
- ADDQ $0x04, AX
+ LEAL -256(R12), R12
+ MOVW $0x0019, (CX)
+ MOVW R12, 2(CX)
+ ADDQ $0x04, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
repeat_three_match_nolit_repeat_encodeBetterBlockAsm8B:
- LEAL -4(R11), R11
- MOVW $0x0015, (AX)
- MOVB R11, 2(AX)
- ADDQ $0x03, AX
+ LEAL -4(R12), R12
+ MOVW $0x0015, (CX)
+ MOVB R12, 2(CX)
+ ADDQ $0x03, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
repeat_two_match_nolit_repeat_encodeBetterBlockAsm8B:
- SHLL $0x02, R11
- ORL $0x01, R11
- MOVW R11, (AX)
- ADDQ $0x02, AX
+ SHLL $0x02, R12
+ ORL $0x01, R12
+ MOVW R12, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
- XORQ BX, BX
- LEAL 1(BX)(R11*4), R11
- MOVB DI, 1(AX)
- SARL $0x08, DI
- SHLL $0x05, DI
- ORL DI, R11
- MOVB R11, (AX)
- ADDQ $0x02, AX
+ XORQ SI, SI
+ LEAL 1(SI)(R12*4), R12
+ MOVB R8, 1(CX)
+ SARL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, R12
+ MOVB R12, (CX)
+ ADDQ $0x02, CX
match_nolit_emitcopy_end_encodeBetterBlockAsm8B:
- CMPL CX, 8(SP)
+ CMPL DX, 8(SP)
JAE emit_remainder_encodeBetterBlockAsm8B
- CMPQ AX, (SP)
+ CMPQ CX, (SP)
JB match_nolit_dst_ok_encodeBetterBlockAsm8B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_nolit_dst_ok_encodeBetterBlockAsm8B:
- MOVQ $0x0000cf1bbcdcbf9b, BX
- MOVQ $0x9e3779b1, DI
- LEAQ 1(SI), SI
- LEAQ -2(CX), R8
- MOVQ (DX)(SI*1), R9
- MOVQ 1(DX)(SI*1), R10
- MOVQ (DX)(R8*1), R11
- MOVQ 1(DX)(R8*1), R12
- SHLQ $0x10, R9
- IMULQ BX, R9
- SHRQ $0x36, R9
- SHLQ $0x20, R10
- IMULQ DI, R10
- SHRQ $0x38, R10
- SHLQ $0x10, R11
- IMULQ BX, R11
- SHRQ $0x36, R11
- SHLQ $0x20, R12
- IMULQ DI, R12
- SHRQ $0x38, R12
- LEAQ 1(SI), DI
- LEAQ 1(R8), R13
- MOVL SI, 24(SP)(R9*4)
- MOVL R8, 24(SP)(R11*4)
- MOVL DI, 4120(SP)(R10*4)
- MOVL R13, 4120(SP)(R12*4)
- LEAQ 1(R8)(SI*1), DI
- SHRQ $0x01, DI
- ADDQ $0x01, SI
- SUBQ $0x01, R8
+ MOVQ $0x0000cf1bbcdcbf9b, SI
+ MOVQ $0x9e3779b1, R8
+ LEAQ 1(DI), DI
+ LEAQ -2(DX), R9
+ MOVQ (BX)(DI*1), R10
+ MOVQ 1(BX)(DI*1), R11
+ MOVQ (BX)(R9*1), R12
+ MOVQ 1(BX)(R9*1), R13
+ SHLQ $0x10, R10
+ IMULQ SI, R10
+ SHRQ $0x36, R10
+ SHLQ $0x20, R11
+ IMULQ R8, R11
+ SHRQ $0x38, R11
+ SHLQ $0x10, R12
+ IMULQ SI, R12
+ SHRQ $0x36, R12
+ SHLQ $0x20, R13
+ IMULQ R8, R13
+ SHRQ $0x38, R13
+ LEAQ 1(DI), R8
+ LEAQ 1(R9), R14
+ MOVL DI, (AX)(R10*4)
+ MOVL R9, (AX)(R12*4)
+ MOVL R8, 4096(AX)(R11*4)
+ MOVL R14, 4096(AX)(R13*4)
+ LEAQ 1(R9)(DI*1), R8
+ SHRQ $0x01, R8
+ ADDQ $0x01, DI
+ SUBQ $0x01, R9
index_loop_encodeBetterBlockAsm8B:
- CMPQ DI, R8
+ CMPQ R8, R9
JAE search_loop_encodeBetterBlockAsm8B
- MOVQ (DX)(SI*1), R9
- MOVQ (DX)(DI*1), R10
- SHLQ $0x10, R9
- IMULQ BX, R9
- SHRQ $0x36, R9
+ MOVQ (BX)(DI*1), R10
+ MOVQ (BX)(R8*1), R11
SHLQ $0x10, R10
- IMULQ BX, R10
+ IMULQ SI, R10
SHRQ $0x36, R10
- MOVL SI, 24(SP)(R9*4)
- MOVL DI, 24(SP)(R10*4)
- ADDQ $0x02, SI
+ SHLQ $0x10, R11
+ IMULQ SI, R11
+ SHRQ $0x36, R11
+ MOVL DI, (AX)(R10*4)
+ MOVL R8, (AX)(R11*4)
ADDQ $0x02, DI
+ ADDQ $0x02, R8
JMP index_loop_encodeBetterBlockAsm8B
emit_remainder_encodeBetterBlockAsm8B:
- MOVQ src_len+32(FP), CX
- SUBL 12(SP), CX
- LEAQ 3(AX)(CX*1), CX
- CMPQ CX, (SP)
+ MOVQ src_len+32(FP), AX
+ SUBL 12(SP), AX
+ LEAQ 3(CX)(AX*1), AX
+ CMPQ AX, (SP)
JB emit_remainder_ok_encodeBetterBlockAsm8B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
emit_remainder_ok_encodeBetterBlockAsm8B:
- MOVQ src_len+32(FP), CX
- MOVL 12(SP), BX
- CMPL BX, CX
+ MOVQ src_len+32(FP), AX
+ MOVL 12(SP), DX
+ CMPL DX, AX
JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm8B
- MOVL CX, SI
- MOVL CX, 12(SP)
- LEAQ (DX)(BX*1), CX
- SUBL BX, SI
+ MOVL AX, SI
+ MOVL AX, 12(SP)
+ LEAQ (BX)(DX*1), AX
+ SUBL DX, SI
LEAL -1(SI), DX
CMPL DX, $0x3c
JB one_byte_emit_remainder_encodeBetterBlockAsm8B
@@ -10580,26 +10590,26 @@ emit_remainder_ok_encodeBetterBlockAsm8B:
JB three_bytes_emit_remainder_encodeBetterBlockAsm8B
three_bytes_emit_remainder_encodeBetterBlockAsm8B:
- MOVB $0xf4, (AX)
- MOVW DX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW DX, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_emit_remainder_encodeBetterBlockAsm8B
two_bytes_emit_remainder_encodeBetterBlockAsm8B:
- MOVB $0xf0, (AX)
- MOVB DL, 1(AX)
- ADDQ $0x02, AX
+ MOVB $0xf0, (CX)
+ MOVB DL, 1(CX)
+ ADDQ $0x02, CX
CMPL DX, $0x40
JB memmove_emit_remainder_encodeBetterBlockAsm8B
JMP memmove_long_emit_remainder_encodeBetterBlockAsm8B
one_byte_emit_remainder_encodeBetterBlockAsm8B:
SHLB $0x02, DL
- MOVB DL, (AX)
- ADDQ $0x01, AX
+ MOVB DL, (CX)
+ ADDQ $0x01, CX
memmove_emit_remainder_encodeBetterBlockAsm8B:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveShort
@@ -10615,73 +10625,73 @@ memmove_emit_remainder_encodeBetterBlockAsm8B:
JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_33through64
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_1or2:
- MOVB (CX), SI
- MOVB -1(CX)(BX*1), CL
- MOVB SI, (AX)
- MOVB CL, -1(AX)(BX*1)
+ MOVB (AX), SI
+ MOVB -1(AX)(BX*1), AL
+ MOVB SI, (CX)
+ MOVB AL, -1(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_3:
- MOVW (CX), SI
- MOVB 2(CX), CL
- MOVW SI, (AX)
- MOVB CL, 2(AX)
+ MOVW (AX), SI
+ MOVB 2(AX), AL
+ MOVW SI, (CX)
+ MOVB AL, 2(CX)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_4through7:
- MOVL (CX), SI
- MOVL -4(CX)(BX*1), CX
- MOVL SI, (AX)
- MOVL CX, -4(AX)(BX*1)
+ MOVL (AX), SI
+ MOVL -4(AX)(BX*1), AX
+ MOVL SI, (CX)
+ MOVL AX, -4(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_8through16:
- MOVQ (CX), SI
- MOVQ -8(CX)(BX*1), CX
- MOVQ SI, (AX)
- MOVQ CX, -8(AX)(BX*1)
+ MOVQ (AX), SI
+ MOVQ -8(AX)(BX*1), AX
+ MOVQ SI, (CX)
+ MOVQ AX, -8(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_17through32:
- MOVOU (CX), X0
- MOVOU -16(CX)(BX*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU -16(AX)(BX*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_33through64:
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B:
- MOVQ DX, AX
+ MOVQ DX, CX
JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm8B
memmove_long_emit_remainder_encodeBetterBlockAsm8B:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveLong
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
MOVQ BX, DI
SHRQ $0x05, DI
- MOVQ AX, SI
+ MOVQ CX, SI
ANDL $0x0000001f, SI
MOVQ $0x00000040, R8
SUBQ SI, R8
DECQ DI
JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_forward_sse_loop_32
- LEAQ -32(CX)(R8*1), SI
- LEAQ -32(AX)(R8*1), R9
+ LEAQ -32(AX)(R8*1), SI
+ LEAQ -32(CX)(R8*1), R9
emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_big_loop_back:
MOVOU (SI), X4
@@ -10695,798 +10705,799 @@ emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_big_loop_back:
JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_big_loop_back
emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_forward_sse_loop_32:
- MOVOU -32(CX)(R8*1), X4
- MOVOU -16(CX)(R8*1), X5
- MOVOA X4, -32(AX)(R8*1)
- MOVOA X5, -16(AX)(R8*1)
+ MOVOU -32(AX)(R8*1), X4
+ MOVOU -16(AX)(R8*1), X5
+ MOVOA X4, -32(CX)(R8*1)
+ MOVOA X5, -16(CX)(R8*1)
ADDQ $0x20, R8
CMPQ BX, R8
JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
- MOVQ DX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
+ MOVQ DX, CX
emit_literal_done_emit_remainder_encodeBetterBlockAsm8B:
- MOVQ dst_base+0(FP), CX
- SUBQ CX, AX
- MOVQ AX, ret+48(FP)
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, CX
+ MOVQ CX, ret+56(FP)
RET
-// func encodeSnappyBlockAsm(dst []byte, src []byte) int
+// func encodeSnappyBlockAsm(dst []byte, src []byte, tmp *[65536]byte) int
// Requires: BMI, SSE2
-TEXT ·encodeSnappyBlockAsm(SB), $65560-56
- MOVQ dst_base+0(FP), AX
- MOVQ $0x00000200, CX
- LEAQ 24(SP), DX
+TEXT ·encodeSnappyBlockAsm(SB), $24-64
+ MOVQ tmp+48(FP), AX
+ MOVQ dst_base+0(FP), CX
+ MOVQ $0x00000200, DX
+ MOVQ AX, BX
PXOR X0, X0
zero_loop_encodeSnappyBlockAsm:
- MOVOU X0, (DX)
- MOVOU X0, 16(DX)
- MOVOU X0, 32(DX)
- MOVOU X0, 48(DX)
- MOVOU X0, 64(DX)
- MOVOU X0, 80(DX)
- MOVOU X0, 96(DX)
- MOVOU X0, 112(DX)
- ADDQ $0x80, DX
- DECQ CX
+ MOVOU X0, (BX)
+ MOVOU X0, 16(BX)
+ MOVOU X0, 32(BX)
+ MOVOU X0, 48(BX)
+ MOVOU X0, 64(BX)
+ MOVOU X0, 80(BX)
+ MOVOU X0, 96(BX)
+ MOVOU X0, 112(BX)
+ ADDQ $0x80, BX
+ DECQ DX
JNZ zero_loop_encodeSnappyBlockAsm
MOVL $0x00000000, 12(SP)
- MOVQ src_len+32(FP), CX
- LEAQ -9(CX), DX
- LEAQ -8(CX), BX
- MOVL BX, 8(SP)
- SHRQ $0x05, CX
- SUBL CX, DX
- LEAQ (AX)(DX*1), DX
- MOVQ DX, (SP)
- MOVL $0x00000001, CX
- MOVL CX, 16(SP)
- MOVQ src_base+24(FP), DX
+ MOVQ src_len+32(FP), DX
+ LEAQ -9(DX), BX
+ LEAQ -8(DX), SI
+ MOVL SI, 8(SP)
+ SHRQ $0x05, DX
+ SUBL DX, BX
+ LEAQ (CX)(BX*1), BX
+ MOVQ BX, (SP)
+ MOVL $0x00000001, DX
+ MOVL DX, 16(SP)
+ MOVQ src_base+24(FP), BX
search_loop_encodeSnappyBlockAsm:
- MOVL CX, BX
- SUBL 12(SP), BX
- SHRL $0x06, BX
- LEAL 4(CX)(BX*1), BX
- CMPL BX, 8(SP)
+ MOVL DX, SI
+ SUBL 12(SP), SI
+ SHRL $0x06, SI
+ LEAL 4(DX)(SI*1), SI
+ CMPL SI, 8(SP)
JAE emit_remainder_encodeSnappyBlockAsm
- MOVQ (DX)(CX*1), SI
- MOVL BX, 20(SP)
- MOVQ $0x0000cf1bbcdcbf9b, R8
- MOVQ SI, R9
- MOVQ SI, R10
- SHRQ $0x08, R10
- SHLQ $0x10, R9
- IMULQ R8, R9
- SHRQ $0x32, R9
+ MOVQ (BX)(DX*1), DI
+ MOVL SI, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R9
+ MOVQ DI, R10
+ MOVQ DI, R11
+ SHRQ $0x08, R11
SHLQ $0x10, R10
- IMULQ R8, R10
+ IMULQ R9, R10
SHRQ $0x32, R10
- MOVL 24(SP)(R9*4), BX
- MOVL 24(SP)(R10*4), DI
- MOVL CX, 24(SP)(R9*4)
- LEAL 1(CX), R9
- MOVL R9, 24(SP)(R10*4)
- MOVQ SI, R9
- SHRQ $0x10, R9
- SHLQ $0x10, R9
- IMULQ R8, R9
- SHRQ $0x32, R9
- MOVL CX, R8
- SUBL 16(SP), R8
- MOVL 1(DX)(R8*1), R10
- MOVQ SI, R8
- SHRQ $0x08, R8
- CMPL R8, R10
+ SHLQ $0x10, R11
+ IMULQ R9, R11
+ SHRQ $0x32, R11
+ MOVL (AX)(R10*4), SI
+ MOVL (AX)(R11*4), R8
+ MOVL DX, (AX)(R10*4)
+ LEAL 1(DX), R10
+ MOVL R10, (AX)(R11*4)
+ MOVQ DI, R10
+ SHRQ $0x10, R10
+ SHLQ $0x10, R10
+ IMULQ R9, R10
+ SHRQ $0x32, R10
+ MOVL DX, R9
+ SUBL 16(SP), R9
+ MOVL 1(BX)(R9*1), R11
+ MOVQ DI, R9
+ SHRQ $0x08, R9
+ CMPL R9, R11
JNE no_repeat_found_encodeSnappyBlockAsm
- LEAL 1(CX), SI
- MOVL 12(SP), BX
- MOVL SI, DI
- SUBL 16(SP), DI
+ LEAL 1(DX), DI
+ MOVL 12(SP), SI
+ MOVL DI, R8
+ SUBL 16(SP), R8
JZ repeat_extend_back_end_encodeSnappyBlockAsm
repeat_extend_back_loop_encodeSnappyBlockAsm:
- CMPL SI, BX
+ CMPL DI, SI
JBE repeat_extend_back_end_encodeSnappyBlockAsm
- MOVB -1(DX)(DI*1), R8
- MOVB -1(DX)(SI*1), R9
- CMPB R8, R9
+ MOVB -1(BX)(R8*1), R9
+ MOVB -1(BX)(DI*1), R10
+ CMPB R9, R10
JNE repeat_extend_back_end_encodeSnappyBlockAsm
- LEAL -1(SI), SI
- DECL DI
+ LEAL -1(DI), DI
+ DECL R8
JNZ repeat_extend_back_loop_encodeSnappyBlockAsm
repeat_extend_back_end_encodeSnappyBlockAsm:
- MOVL SI, BX
- SUBL 12(SP), BX
- LEAQ 5(AX)(BX*1), BX
- CMPQ BX, (SP)
+ MOVL DI, SI
+ SUBL 12(SP), SI
+ LEAQ 5(CX)(SI*1), SI
+ CMPQ SI, (SP)
JB repeat_dst_size_check_encodeSnappyBlockAsm
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
repeat_dst_size_check_encodeSnappyBlockAsm:
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm
- MOVL SI, DI
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R8
- SUBL BX, DI
- LEAL -1(DI), BX
- CMPL BX, $0x3c
+ MOVL DI, R8
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R9
+ SUBL SI, R8
+ LEAL -1(R8), SI
+ CMPL SI, $0x3c
JB one_byte_repeat_emit_encodeSnappyBlockAsm
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_repeat_emit_encodeSnappyBlockAsm
- CMPL BX, $0x00010000
+ CMPL SI, $0x00010000
JB three_bytes_repeat_emit_encodeSnappyBlockAsm
- CMPL BX, $0x01000000
+ CMPL SI, $0x01000000
JB four_bytes_repeat_emit_encodeSnappyBlockAsm
- MOVB $0xfc, (AX)
- MOVL BX, 1(AX)
- ADDQ $0x05, AX
+ MOVB $0xfc, (CX)
+ MOVL SI, 1(CX)
+ ADDQ $0x05, CX
JMP memmove_long_repeat_emit_encodeSnappyBlockAsm
four_bytes_repeat_emit_encodeSnappyBlockAsm:
- MOVL BX, R9
- SHRL $0x10, R9
- MOVB $0xf8, (AX)
- MOVW BX, 1(AX)
- MOVB R9, 3(AX)
- ADDQ $0x04, AX
+ MOVL SI, R10
+ SHRL $0x10, R10
+ MOVB $0xf8, (CX)
+ MOVW SI, 1(CX)
+ MOVB R10, 3(CX)
+ ADDQ $0x04, CX
JMP memmove_long_repeat_emit_encodeSnappyBlockAsm
three_bytes_repeat_emit_encodeSnappyBlockAsm:
- MOVB $0xf4, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_repeat_emit_encodeSnappyBlockAsm
two_bytes_repeat_emit_encodeSnappyBlockAsm:
- MOVB $0xf0, (AX)
- MOVB BL, 1(AX)
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_repeat_emit_encodeSnappyBlockAsm
JMP memmove_long_repeat_emit_encodeSnappyBlockAsm
one_byte_repeat_emit_encodeSnappyBlockAsm:
- SHLB $0x02, BL
- MOVB BL, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
memmove_repeat_emit_encodeSnappyBlockAsm:
- LEAQ (AX)(DI*1), BX
+ LEAQ (CX)(R8*1), SI
// genMemMoveShort
- CMPQ DI, $0x08
+ CMPQ R8, $0x08
JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8
- CMPQ DI, $0x10
+ CMPQ R8, $0x10
JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8through16
- CMPQ DI, $0x20
+ CMPQ R8, $0x20
JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_17through32
JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_33through64
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8:
- MOVQ (R8), R9
- MOVQ R9, (AX)
+ MOVQ (R9), R10
+ MOVQ R10, (CX)
JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8through16:
- MOVQ (R8), R9
- MOVQ -8(R8)(DI*1), R8
- MOVQ R9, (AX)
- MOVQ R8, -8(AX)(DI*1)
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (CX)
+ MOVQ R9, -8(CX)(R8*1)
JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_17through32:
- MOVOU (R8), X0
- MOVOU -16(R8)(DI*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(DI*1)
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R8*1)
JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_33through64:
- MOVOU (R8), X0
- MOVOU 16(R8), X1
- MOVOU -32(R8)(DI*1), X2
- MOVOU -16(R8)(DI*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(DI*1)
- MOVOU X3, -16(AX)(DI*1)
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R8*1)
+ MOVOU X3, -16(CX)(R8*1)
memmove_end_copy_repeat_emit_encodeSnappyBlockAsm:
- MOVQ BX, AX
+ MOVQ SI, CX
JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm
memmove_long_repeat_emit_encodeSnappyBlockAsm:
- LEAQ (AX)(DI*1), BX
+ LEAQ (CX)(R8*1), SI
// genMemMoveLong
- MOVOU (R8), X0
- MOVOU 16(R8), X1
- MOVOU -32(R8)(DI*1), X2
- MOVOU -16(R8)(DI*1), X3
- MOVQ DI, R10
- SHRQ $0x05, R10
- MOVQ AX, R9
- ANDL $0x0000001f, R9
- MOVQ $0x00000040, R11
- SUBQ R9, R11
- DECQ R10
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R11
+ SHRQ $0x05, R11
+ MOVQ CX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R12
+ SUBQ R10, R12
+ DECQ R11
JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32
- LEAQ -32(R8)(R11*1), R9
- LEAQ -32(AX)(R11*1), R12
+ LEAQ -32(R9)(R12*1), R10
+ LEAQ -32(CX)(R12*1), R13
emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_big_loop_back:
- MOVOU (R9), X4
- MOVOU 16(R9), X5
- MOVOA X4, (R12)
- MOVOA X5, 16(R12)
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R13)
+ MOVOA X5, 16(R13)
+ ADDQ $0x20, R13
+ ADDQ $0x20, R10
ADDQ $0x20, R12
- ADDQ $0x20, R9
- ADDQ $0x20, R11
- DECQ R10
+ DECQ R11
JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_big_loop_back
emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32:
- MOVOU -32(R8)(R11*1), X4
- MOVOU -16(R8)(R11*1), X5
- MOVOA X4, -32(AX)(R11*1)
- MOVOA X5, -16(AX)(R11*1)
- ADDQ $0x20, R11
- CMPQ DI, R11
+ MOVOU -32(R9)(R12*1), X4
+ MOVOU -16(R9)(R12*1), X5
+ MOVOA X4, -32(CX)(R12*1)
+ MOVOA X5, -16(CX)(R12*1)
+ ADDQ $0x20, R12
+ CMPQ R8, R12
JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(DI*1)
- MOVOU X3, -16(AX)(DI*1)
- MOVQ BX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R8*1)
+ MOVOU X3, -16(CX)(R8*1)
+ MOVQ SI, CX
emit_literal_done_repeat_emit_encodeSnappyBlockAsm:
- ADDL $0x05, CX
- MOVL CX, BX
- SUBL 16(SP), BX
- MOVQ src_len+32(FP), DI
- SUBL CX, DI
- LEAQ (DX)(CX*1), R8
- LEAQ (DX)(BX*1), BX
+ ADDL $0x05, DX
+ MOVL DX, SI
+ SUBL 16(SP), SI
+ MOVQ src_len+32(FP), R8
+ SUBL DX, R8
+ LEAQ (BX)(DX*1), R9
+ LEAQ (BX)(SI*1), SI
// matchLen
- XORL R10, R10
+ XORL R11, R11
matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm:
- CMPL DI, $0x10
+ CMPL R8, $0x10
JB matchlen_match8_repeat_extend_encodeSnappyBlockAsm
- MOVQ (R8)(R10*1), R9
- MOVQ 8(R8)(R10*1), R11
- XORQ (BX)(R10*1), R9
+ MOVQ (R9)(R11*1), R10
+ MOVQ 8(R9)(R11*1), R12
+ XORQ (SI)(R11*1), R10
JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm
- XORQ 8(BX)(R10*1), R11
+ XORQ 8(SI)(R11*1), R12
JNZ matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm
- LEAL -16(DI), DI
- LEAL 16(R10), R10
+ LEAL -16(R8), R8
+ LEAL 16(R11), R11
JMP matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm
matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm:
#ifdef GOAMD64_v3
- TZCNTQ R11, R11
+ TZCNTQ R12, R12
#else
- BSFQ R11, R11
+ BSFQ R12, R12
#endif
- SARQ $0x03, R11
- LEAL 8(R10)(R11*1), R10
+ SARQ $0x03, R12
+ LEAL 8(R11)(R12*1), R11
JMP repeat_extend_forward_end_encodeSnappyBlockAsm
matchlen_match8_repeat_extend_encodeSnappyBlockAsm:
- CMPL DI, $0x08
+ CMPL R8, $0x08
JB matchlen_match4_repeat_extend_encodeSnappyBlockAsm
- MOVQ (R8)(R10*1), R9
- XORQ (BX)(R10*1), R9
+ MOVQ (R9)(R11*1), R10
+ XORQ (SI)(R11*1), R10
JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm
- LEAL -8(DI), DI
- LEAL 8(R10), R10
+ LEAL -8(R8), R8
+ LEAL 8(R11), R11
JMP matchlen_match4_repeat_extend_encodeSnappyBlockAsm
matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm:
#ifdef GOAMD64_v3
- TZCNTQ R9, R9
+ TZCNTQ R10, R10
#else
- BSFQ R9, R9
+ BSFQ R10, R10
#endif
- SARQ $0x03, R9
- LEAL (R10)(R9*1), R10
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
JMP repeat_extend_forward_end_encodeSnappyBlockAsm
matchlen_match4_repeat_extend_encodeSnappyBlockAsm:
- CMPL DI, $0x04
+ CMPL R8, $0x04
JB matchlen_match2_repeat_extend_encodeSnappyBlockAsm
- MOVL (R8)(R10*1), R9
- CMPL (BX)(R10*1), R9
+ MOVL (R9)(R11*1), R10
+ CMPL (SI)(R11*1), R10
JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm
- LEAL -4(DI), DI
- LEAL 4(R10), R10
+ LEAL -4(R8), R8
+ LEAL 4(R11), R11
matchlen_match2_repeat_extend_encodeSnappyBlockAsm:
- CMPL DI, $0x01
+ CMPL R8, $0x01
JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm
JB repeat_extend_forward_end_encodeSnappyBlockAsm
- MOVW (R8)(R10*1), R9
- CMPW (BX)(R10*1), R9
+ MOVW (R9)(R11*1), R10
+ CMPW (SI)(R11*1), R10
JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm
- LEAL 2(R10), R10
- SUBL $0x02, DI
+ LEAL 2(R11), R11
+ SUBL $0x02, R8
JZ repeat_extend_forward_end_encodeSnappyBlockAsm
matchlen_match1_repeat_extend_encodeSnappyBlockAsm:
- MOVB (R8)(R10*1), R9
- CMPB (BX)(R10*1), R9
+ MOVB (R9)(R11*1), R10
+ CMPB (SI)(R11*1), R10
JNE repeat_extend_forward_end_encodeSnappyBlockAsm
- LEAL 1(R10), R10
+ LEAL 1(R11), R11
repeat_extend_forward_end_encodeSnappyBlockAsm:
- ADDL R10, CX
- MOVL CX, BX
- SUBL SI, BX
- MOVL 16(SP), SI
+ ADDL R11, DX
+ MOVL DX, SI
+ SUBL DI, SI
+ MOVL 16(SP), DI
// emitCopy
- CMPL SI, $0x00010000
+ CMPL DI, $0x00010000
JB two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm
four_bytes_loop_back_repeat_as_copy_encodeSnappyBlockAsm:
- CMPL BX, $0x40
+ CMPL SI, $0x40
JBE four_bytes_remain_repeat_as_copy_encodeSnappyBlockAsm
- MOVB $0xff, (AX)
- MOVL SI, 1(AX)
- LEAL -64(BX), BX
- ADDQ $0x05, AX
- CMPL BX, $0x04
+ MOVB $0xff, (CX)
+ MOVL DI, 1(CX)
+ LEAL -64(SI), SI
+ ADDQ $0x05, CX
+ CMPL SI, $0x04
JB four_bytes_remain_repeat_as_copy_encodeSnappyBlockAsm
JMP four_bytes_loop_back_repeat_as_copy_encodeSnappyBlockAsm
four_bytes_remain_repeat_as_copy_encodeSnappyBlockAsm:
- TESTL BX, BX
+ TESTL SI, SI
JZ repeat_end_emit_encodeSnappyBlockAsm
- XORL DI, DI
- LEAL -1(DI)(BX*4), BX
- MOVB BL, (AX)
- MOVL SI, 1(AX)
- ADDQ $0x05, AX
+ XORL R8, R8
+ LEAL -1(R8)(SI*4), SI
+ MOVB SI, (CX)
+ MOVL DI, 1(CX)
+ ADDQ $0x05, CX
JMP repeat_end_emit_encodeSnappyBlockAsm
two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm:
- CMPL BX, $0x40
+ CMPL SI, $0x40
JBE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm
- MOVB $0xee, (AX)
- MOVW SI, 1(AX)
- LEAL -60(BX), BX
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW DI, 1(CX)
+ LEAL -60(SI), SI
+ ADDQ $0x03, CX
JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm
two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm:
- MOVL BX, DI
- SHLL $0x02, DI
- CMPL BX, $0x0c
+ MOVL SI, R8
+ SHLL $0x02, R8
+ CMPL SI, $0x0c
JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm
- LEAL -15(DI), DI
- MOVB SI, 1(AX)
- SHRL $0x08, SI
- SHLL $0x05, SI
- ORL SI, DI
- MOVB DI, (AX)
- ADDQ $0x02, AX
+ LEAL -15(R8), R8
+ MOVB DI, 1(CX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R8
+ MOVB R8, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeSnappyBlockAsm
emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm:
- LEAL -2(DI), DI
- MOVB DI, (AX)
- MOVW SI, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(R8), R8
+ MOVB R8, (CX)
+ MOVW DI, 1(CX)
+ ADDQ $0x03, CX
repeat_end_emit_encodeSnappyBlockAsm:
- MOVL CX, 12(SP)
+ MOVL DX, 12(SP)
JMP search_loop_encodeSnappyBlockAsm
no_repeat_found_encodeSnappyBlockAsm:
- CMPL (DX)(BX*1), SI
+ CMPL (BX)(SI*1), DI
JEQ candidate_match_encodeSnappyBlockAsm
- SHRQ $0x08, SI
- MOVL 24(SP)(R9*4), BX
- LEAL 2(CX), R8
- CMPL (DX)(DI*1), SI
+ SHRQ $0x08, DI
+ MOVL (AX)(R10*4), SI
+ LEAL 2(DX), R9
+ CMPL (BX)(R8*1), DI
JEQ candidate2_match_encodeSnappyBlockAsm
- MOVL R8, 24(SP)(R9*4)
- SHRQ $0x08, SI
- CMPL (DX)(BX*1), SI
+ MOVL R9, (AX)(R10*4)
+ SHRQ $0x08, DI
+ CMPL (BX)(SI*1), DI
JEQ candidate3_match_encodeSnappyBlockAsm
- MOVL 20(SP), CX
+ MOVL 20(SP), DX
JMP search_loop_encodeSnappyBlockAsm
candidate3_match_encodeSnappyBlockAsm:
- ADDL $0x02, CX
+ ADDL $0x02, DX
JMP candidate_match_encodeSnappyBlockAsm
candidate2_match_encodeSnappyBlockAsm:
- MOVL R8, 24(SP)(R9*4)
- INCL CX
- MOVL DI, BX
+ MOVL R9, (AX)(R10*4)
+ INCL DX
+ MOVL R8, SI
candidate_match_encodeSnappyBlockAsm:
- MOVL 12(SP), SI
- TESTL BX, BX
+ MOVL 12(SP), DI
+ TESTL SI, SI
JZ match_extend_back_end_encodeSnappyBlockAsm
match_extend_back_loop_encodeSnappyBlockAsm:
- CMPL CX, SI
+ CMPL DX, DI
JBE match_extend_back_end_encodeSnappyBlockAsm
- MOVB -1(DX)(BX*1), DI
- MOVB -1(DX)(CX*1), R8
- CMPB DI, R8
+ MOVB -1(BX)(SI*1), R8
+ MOVB -1(BX)(DX*1), R9
+ CMPB R8, R9
JNE match_extend_back_end_encodeSnappyBlockAsm
- LEAL -1(CX), CX
- DECL BX
+ LEAL -1(DX), DX
+ DECL SI
JZ match_extend_back_end_encodeSnappyBlockAsm
JMP match_extend_back_loop_encodeSnappyBlockAsm
match_extend_back_end_encodeSnappyBlockAsm:
- MOVL CX, SI
- SUBL 12(SP), SI
- LEAQ 5(AX)(SI*1), SI
- CMPQ SI, (SP)
+ MOVL DX, DI
+ SUBL 12(SP), DI
+ LEAQ 5(CX)(DI*1), DI
+ CMPQ DI, (SP)
JB match_dst_size_check_encodeSnappyBlockAsm
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_dst_size_check_encodeSnappyBlockAsm:
- MOVL CX, SI
- MOVL 12(SP), DI
- CMPL DI, SI
+ MOVL DX, DI
+ MOVL 12(SP), R8
+ CMPL R8, DI
JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(DI*1), SI
- SUBL DI, R8
- LEAL -1(R8), DI
- CMPL DI, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(R8*1), DI
+ SUBL R8, R9
+ LEAL -1(R9), R8
+ CMPL R8, $0x3c
JB one_byte_match_emit_encodeSnappyBlockAsm
- CMPL DI, $0x00000100
+ CMPL R8, $0x00000100
JB two_bytes_match_emit_encodeSnappyBlockAsm
- CMPL DI, $0x00010000
+ CMPL R8, $0x00010000
JB three_bytes_match_emit_encodeSnappyBlockAsm
- CMPL DI, $0x01000000
+ CMPL R8, $0x01000000
JB four_bytes_match_emit_encodeSnappyBlockAsm
- MOVB $0xfc, (AX)
- MOVL DI, 1(AX)
- ADDQ $0x05, AX
+ MOVB $0xfc, (CX)
+ MOVL R8, 1(CX)
+ ADDQ $0x05, CX
JMP memmove_long_match_emit_encodeSnappyBlockAsm
four_bytes_match_emit_encodeSnappyBlockAsm:
- MOVL DI, R9
- SHRL $0x10, R9
- MOVB $0xf8, (AX)
- MOVW DI, 1(AX)
- MOVB R9, 3(AX)
- ADDQ $0x04, AX
+ MOVL R8, R10
+ SHRL $0x10, R10
+ MOVB $0xf8, (CX)
+ MOVW R8, 1(CX)
+ MOVB R10, 3(CX)
+ ADDQ $0x04, CX
JMP memmove_long_match_emit_encodeSnappyBlockAsm
three_bytes_match_emit_encodeSnappyBlockAsm:
- MOVB $0xf4, (AX)
- MOVW DI, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW R8, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_encodeSnappyBlockAsm
two_bytes_match_emit_encodeSnappyBlockAsm:
- MOVB $0xf0, (AX)
- MOVB DI, 1(AX)
- ADDQ $0x02, AX
- CMPL DI, $0x40
+ MOVB $0xf0, (CX)
+ MOVB R8, 1(CX)
+ ADDQ $0x02, CX
+ CMPL R8, $0x40
JB memmove_match_emit_encodeSnappyBlockAsm
JMP memmove_long_match_emit_encodeSnappyBlockAsm
one_byte_match_emit_encodeSnappyBlockAsm:
- SHLB $0x02, DI
- MOVB DI, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, R8
+ MOVB R8, (CX)
+ ADDQ $0x01, CX
memmove_match_emit_encodeSnappyBlockAsm:
- LEAQ (AX)(R8*1), DI
+ LEAQ (CX)(R9*1), R8
// genMemMoveShort
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_33through64
emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8:
- MOVQ (SI), R9
- MOVQ R9, (AX)
+ MOVQ (DI), R10
+ MOVQ R10, (CX)
JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm
emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8through16:
- MOVQ (SI), R9
- MOVQ -8(SI)(R8*1), SI
- MOVQ R9, (AX)
- MOVQ SI, -8(AX)(R8*1)
+ MOVQ (DI), R10
+ MOVQ -8(DI)(R9*1), DI
+ MOVQ R10, (CX)
+ MOVQ DI, -8(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm
emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_17through32:
- MOVOU (SI), X0
- MOVOU -16(SI)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (DI), X0
+ MOVOU -16(DI)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm
emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_33through64:
- MOVOU (SI), X0
- MOVOU 16(SI), X1
- MOVOU -32(SI)(R8*1), X2
- MOVOU -16(SI)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (DI), X0
+ MOVOU 16(DI), X1
+ MOVOU -32(DI)(R9*1), X2
+ MOVOU -16(DI)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_match_emit_encodeSnappyBlockAsm:
- MOVQ DI, AX
+ MOVQ R8, CX
JMP emit_literal_done_match_emit_encodeSnappyBlockAsm
memmove_long_match_emit_encodeSnappyBlockAsm:
- LEAQ (AX)(R8*1), DI
+ LEAQ (CX)(R9*1), R8
// genMemMoveLong
- MOVOU (SI), X0
- MOVOU 16(SI), X1
- MOVOU -32(SI)(R8*1), X2
- MOVOU -16(SI)(R8*1), X3
- MOVQ R8, R10
- SHRQ $0x05, R10
- MOVQ AX, R9
- ANDL $0x0000001f, R9
- MOVQ $0x00000040, R11
- SUBQ R9, R11
- DECQ R10
+ MOVOU (DI), X0
+ MOVOU 16(DI), X1
+ MOVOU -32(DI)(R9*1), X2
+ MOVOU -16(DI)(R9*1), X3
+ MOVQ R9, R11
+ SHRQ $0x05, R11
+ MOVQ CX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R12
+ SUBQ R10, R12
+ DECQ R11
JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32
- LEAQ -32(SI)(R11*1), R9
- LEAQ -32(AX)(R11*1), R12
+ LEAQ -32(DI)(R12*1), R10
+ LEAQ -32(CX)(R12*1), R13
emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_big_loop_back:
- MOVOU (R9), X4
- MOVOU 16(R9), X5
- MOVOA X4, (R12)
- MOVOA X5, 16(R12)
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R13)
+ MOVOA X5, 16(R13)
+ ADDQ $0x20, R13
+ ADDQ $0x20, R10
ADDQ $0x20, R12
- ADDQ $0x20, R9
- ADDQ $0x20, R11
- DECQ R10
+ DECQ R11
JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32:
- MOVOU -32(SI)(R11*1), X4
- MOVOU -16(SI)(R11*1), X5
- MOVOA X4, -32(AX)(R11*1)
- MOVOA X5, -16(AX)(R11*1)
- ADDQ $0x20, R11
- CMPQ R8, R11
+ MOVOU -32(DI)(R12*1), X4
+ MOVOU -16(DI)(R12*1), X5
+ MOVOA X4, -32(CX)(R12*1)
+ MOVOA X5, -16(CX)(R12*1)
+ ADDQ $0x20, R12
+ CMPQ R9, R12
JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ DI, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ R8, CX
emit_literal_done_match_emit_encodeSnappyBlockAsm:
match_nolit_loop_encodeSnappyBlockAsm:
- MOVL CX, SI
- SUBL BX, SI
- MOVL SI, 16(SP)
- ADDL $0x04, CX
- ADDL $0x04, BX
- MOVQ src_len+32(FP), SI
- SUBL CX, SI
- LEAQ (DX)(CX*1), DI
- LEAQ (DX)(BX*1), BX
+ MOVL DX, DI
+ SUBL SI, DI
+ MOVL DI, 16(SP)
+ ADDL $0x04, DX
+ ADDL $0x04, SI
+ MOVQ src_len+32(FP), DI
+ SUBL DX, DI
+ LEAQ (BX)(DX*1), R8
+ LEAQ (BX)(SI*1), SI
// matchLen
- XORL R9, R9
+ XORL R10, R10
matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm:
- CMPL SI, $0x10
+ CMPL DI, $0x10
JB matchlen_match8_match_nolit_encodeSnappyBlockAsm
- MOVQ (DI)(R9*1), R8
- MOVQ 8(DI)(R9*1), R10
- XORQ (BX)(R9*1), R8
+ MOVQ (R8)(R10*1), R9
+ MOVQ 8(R8)(R10*1), R11
+ XORQ (SI)(R10*1), R9
JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm
- XORQ 8(BX)(R9*1), R10
+ XORQ 8(SI)(R10*1), R11
JNZ matchlen_bsf_16match_nolit_encodeSnappyBlockAsm
- LEAL -16(SI), SI
- LEAL 16(R9), R9
+ LEAL -16(DI), DI
+ LEAL 16(R10), R10
JMP matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm
matchlen_bsf_16match_nolit_encodeSnappyBlockAsm:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL 8(R9)(R10*1), R9
+ SARQ $0x03, R11
+ LEAL 8(R10)(R11*1), R10
JMP match_nolit_end_encodeSnappyBlockAsm
matchlen_match8_match_nolit_encodeSnappyBlockAsm:
- CMPL SI, $0x08
+ CMPL DI, $0x08
JB matchlen_match4_match_nolit_encodeSnappyBlockAsm
- MOVQ (DI)(R9*1), R8
- XORQ (BX)(R9*1), R8
+ MOVQ (R8)(R10*1), R9
+ XORQ (SI)(R10*1), R9
JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm
- LEAL -8(SI), SI
- LEAL 8(R9), R9
+ LEAL -8(DI), DI
+ LEAL 8(R10), R10
JMP matchlen_match4_match_nolit_encodeSnappyBlockAsm
matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm:
#ifdef GOAMD64_v3
- TZCNTQ R8, R8
+ TZCNTQ R9, R9
#else
- BSFQ R8, R8
+ BSFQ R9, R9
#endif
- SARQ $0x03, R8
- LEAL (R9)(R8*1), R9
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
JMP match_nolit_end_encodeSnappyBlockAsm
matchlen_match4_match_nolit_encodeSnappyBlockAsm:
- CMPL SI, $0x04
+ CMPL DI, $0x04
JB matchlen_match2_match_nolit_encodeSnappyBlockAsm
- MOVL (DI)(R9*1), R8
- CMPL (BX)(R9*1), R8
+ MOVL (R8)(R10*1), R9
+ CMPL (SI)(R10*1), R9
JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm
- LEAL -4(SI), SI
- LEAL 4(R9), R9
+ LEAL -4(DI), DI
+ LEAL 4(R10), R10
matchlen_match2_match_nolit_encodeSnappyBlockAsm:
- CMPL SI, $0x01
+ CMPL DI, $0x01
JE matchlen_match1_match_nolit_encodeSnappyBlockAsm
JB match_nolit_end_encodeSnappyBlockAsm
- MOVW (DI)(R9*1), R8
- CMPW (BX)(R9*1), R8
+ MOVW (R8)(R10*1), R9
+ CMPW (SI)(R10*1), R9
JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm
- LEAL 2(R9), R9
- SUBL $0x02, SI
+ LEAL 2(R10), R10
+ SUBL $0x02, DI
JZ match_nolit_end_encodeSnappyBlockAsm
matchlen_match1_match_nolit_encodeSnappyBlockAsm:
- MOVB (DI)(R9*1), R8
- CMPB (BX)(R9*1), R8
+ MOVB (R8)(R10*1), R9
+ CMPB (SI)(R10*1), R9
JNE match_nolit_end_encodeSnappyBlockAsm
- LEAL 1(R9), R9
+ LEAL 1(R10), R10
match_nolit_end_encodeSnappyBlockAsm:
- ADDL R9, CX
- MOVL 16(SP), BX
- ADDL $0x04, R9
- MOVL CX, 12(SP)
+ ADDL R10, DX
+ MOVL 16(SP), SI
+ ADDL $0x04, R10
+ MOVL DX, 12(SP)
// emitCopy
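	// Snappy copy tags: offsets >= 64 KiB emit 5-byte copy-4 elements
	// (0xff = length 64) in 64-byte chunks; smaller offsets emit 3-byte
	// copy-2 elements (0xee = length 60); a match shorter than 12 with an
	// offset below 2048 packs into the 2-byte copy-1 form,
	// (len-4)<<2 | 1 with the high offset bits in bits 5-7.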
- CMPL BX, $0x00010000
+ CMPL SI, $0x00010000
JB two_byte_offset_match_nolit_encodeSnappyBlockAsm
four_bytes_loop_back_match_nolit_encodeSnappyBlockAsm:
- CMPL R9, $0x40
+ CMPL R10, $0x40
JBE four_bytes_remain_match_nolit_encodeSnappyBlockAsm
- MOVB $0xff, (AX)
- MOVL BX, 1(AX)
- LEAL -64(R9), R9
- ADDQ $0x05, AX
- CMPL R9, $0x04
+ MOVB $0xff, (CX)
+ MOVL SI, 1(CX)
+ LEAL -64(R10), R10
+ ADDQ $0x05, CX
+ CMPL R10, $0x04
JB four_bytes_remain_match_nolit_encodeSnappyBlockAsm
JMP four_bytes_loop_back_match_nolit_encodeSnappyBlockAsm
four_bytes_remain_match_nolit_encodeSnappyBlockAsm:
- TESTL R9, R9
+ TESTL R10, R10
JZ match_nolit_emitcopy_end_encodeSnappyBlockAsm
- XORL SI, SI
- LEAL -1(SI)(R9*4), R9
- MOVB R9, (AX)
- MOVL BX, 1(AX)
- ADDQ $0x05, AX
+ XORL DI, DI
+ LEAL -1(DI)(R10*4), R10
+ MOVB R10, (CX)
+ MOVL SI, 1(CX)
+ ADDQ $0x05, CX
JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm
two_byte_offset_match_nolit_encodeSnappyBlockAsm:
- CMPL R9, $0x40
+ CMPL R10, $0x40
JBE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm
- MOVB $0xee, (AX)
- MOVW BX, 1(AX)
- LEAL -60(R9), R9
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW SI, 1(CX)
+ LEAL -60(R10), R10
+ ADDQ $0x03, CX
JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm
two_byte_offset_short_match_nolit_encodeSnappyBlockAsm:
- MOVL R9, SI
- SHLL $0x02, SI
- CMPL R9, $0x0c
+ MOVL R10, DI
+ SHLL $0x02, DI
+ CMPL R10, $0x0c
JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm
- CMPL BX, $0x00000800
+ CMPL SI, $0x00000800
JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm
- LEAL -15(SI), SI
- MOVB BL, 1(AX)
- SHRL $0x08, BX
- SHLL $0x05, BX
- ORL BX, SI
- MOVB SI, (AX)
- ADDQ $0x02, AX
+ LEAL -15(DI), DI
+ MOVB SI, 1(CX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm
emit_copy_three_match_nolit_encodeSnappyBlockAsm:
- LEAL -2(SI), SI
- MOVB SI, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(DI), DI
+ MOVB DI, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
match_nolit_emitcopy_end_encodeSnappyBlockAsm:
- CMPL CX, 8(SP)
+ CMPL DX, 8(SP)
JAE emit_remainder_encodeSnappyBlockAsm
- MOVQ -2(DX)(CX*1), SI
- CMPQ AX, (SP)
+ MOVQ -2(BX)(DX*1), DI
+ CMPQ CX, (SP)
JB match_nolit_dst_ok_encodeSnappyBlockAsm
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_nolit_dst_ok_encodeSnappyBlockAsm:
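	// Refresh the table after a match: hash the bytes at position-2 and at
	// the current position, store both indexes, and immediately test the
	// evicted candidate for a back-to-back match before resuming the
	// search loop.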
- MOVQ $0x0000cf1bbcdcbf9b, R8
- MOVQ SI, DI
- SHRQ $0x10, SI
- MOVQ SI, BX
- SHLQ $0x10, DI
- IMULQ R8, DI
- SHRQ $0x32, DI
- SHLQ $0x10, BX
- IMULQ R8, BX
- SHRQ $0x32, BX
- LEAL -2(CX), R8
- LEAQ 24(SP)(BX*4), R9
- MOVL (R9), BX
- MOVL R8, 24(SP)(DI*4)
- MOVL CX, (R9)
- CMPL (DX)(BX*1), SI
+ MOVQ $0x0000cf1bbcdcbf9b, R9
+ MOVQ DI, R8
+ SHRQ $0x10, DI
+ MOVQ DI, SI
+ SHLQ $0x10, R8
+ IMULQ R9, R8
+ SHRQ $0x32, R8
+ SHLQ $0x10, SI
+ IMULQ R9, SI
+ SHRQ $0x32, SI
+ LEAL -2(DX), R9
+ LEAQ (AX)(SI*4), R10
+ MOVL (R10), SI
+ MOVL R9, (AX)(R8*4)
+ MOVL DX, (R10)
+ CMPL (BX)(SI*1), DI
JEQ match_nolit_loop_encodeSnappyBlockAsm
- INCL CX
+ INCL DX
JMP search_loop_encodeSnappyBlockAsm
emit_remainder_encodeSnappyBlockAsm:
- MOVQ src_len+32(FP), CX
- SUBL 12(SP), CX
- LEAQ 5(AX)(CX*1), CX
- CMPQ CX, (SP)
+ MOVQ src_len+32(FP), AX
+ SUBL 12(SP), AX
+ LEAQ 5(CX)(AX*1), AX
+ CMPQ AX, (SP)
JB emit_remainder_ok_encodeSnappyBlockAsm
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
emit_remainder_ok_encodeSnappyBlockAsm:
- MOVQ src_len+32(FP), CX
- MOVL 12(SP), BX
- CMPL BX, CX
+ MOVQ src_len+32(FP), AX
+ MOVL 12(SP), DX
+ CMPL DX, AX
JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm
- MOVL CX, SI
- MOVL CX, 12(SP)
- LEAQ (DX)(BX*1), CX
- SUBL BX, SI
+ MOVL AX, SI
+ MOVL AX, 12(SP)
+ LEAQ (BX)(DX*1), AX
+ SUBL DX, SI
LEAL -1(SI), DX
CMPL DX, $0x3c
JB one_byte_emit_remainder_encodeSnappyBlockAsm
@@ -11496,41 +11507,41 @@ emit_remainder_ok_encodeSnappyBlockAsm:
JB three_bytes_emit_remainder_encodeSnappyBlockAsm
CMPL DX, $0x01000000
JB four_bytes_emit_remainder_encodeSnappyBlockAsm
- MOVB $0xfc, (AX)
- MOVL DX, 1(AX)
- ADDQ $0x05, AX
+ MOVB $0xfc, (CX)
+ MOVL DX, 1(CX)
+ ADDQ $0x05, CX
JMP memmove_long_emit_remainder_encodeSnappyBlockAsm
four_bytes_emit_remainder_encodeSnappyBlockAsm:
MOVL DX, BX
SHRL $0x10, BX
- MOVB $0xf8, (AX)
- MOVW DX, 1(AX)
- MOVB BL, 3(AX)
- ADDQ $0x04, AX
+ MOVB $0xf8, (CX)
+ MOVW DX, 1(CX)
+ MOVB BL, 3(CX)
+ ADDQ $0x04, CX
JMP memmove_long_emit_remainder_encodeSnappyBlockAsm
three_bytes_emit_remainder_encodeSnappyBlockAsm:
- MOVB $0xf4, (AX)
- MOVW DX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW DX, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_emit_remainder_encodeSnappyBlockAsm
two_bytes_emit_remainder_encodeSnappyBlockAsm:
- MOVB $0xf0, (AX)
- MOVB DL, 1(AX)
- ADDQ $0x02, AX
+ MOVB $0xf0, (CX)
+ MOVB DL, 1(CX)
+ ADDQ $0x02, CX
CMPL DX, $0x40
JB memmove_emit_remainder_encodeSnappyBlockAsm
JMP memmove_long_emit_remainder_encodeSnappyBlockAsm
one_byte_emit_remainder_encodeSnappyBlockAsm:
SHLB $0x02, DL
- MOVB DL, (AX)
- ADDQ $0x01, AX
+ MOVB DL, (CX)
+ ADDQ $0x01, CX
memmove_emit_remainder_encodeSnappyBlockAsm:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveShort
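	// Literal copy for 1-64 bytes: dispatch on size class and copy with a
	// pair of overlapping head/tail loads and stores, avoiding a byte loop.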
@@ -11546,73 +11557,73 @@ memmove_emit_remainder_encodeSnappyBlockAsm:
JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_33through64
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_1or2:
- MOVB (CX), SI
- MOVB -1(CX)(BX*1), CL
- MOVB SI, (AX)
- MOVB CL, -1(AX)(BX*1)
+ MOVB (AX), SI
+ MOVB -1(AX)(BX*1), AL
+ MOVB SI, (CX)
+ MOVB AL, -1(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_3:
- MOVW (CX), SI
- MOVB 2(CX), CL
- MOVW SI, (AX)
- MOVB CL, 2(AX)
+ MOVW (AX), SI
+ MOVB 2(AX), AL
+ MOVW SI, (CX)
+ MOVB AL, 2(CX)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_4through7:
- MOVL (CX), SI
- MOVL -4(CX)(BX*1), CX
- MOVL SI, (AX)
- MOVL CX, -4(AX)(BX*1)
+ MOVL (AX), SI
+ MOVL -4(AX)(BX*1), AX
+ MOVL SI, (CX)
+ MOVL AX, -4(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_8through16:
- MOVQ (CX), SI
- MOVQ -8(CX)(BX*1), CX
- MOVQ SI, (AX)
- MOVQ CX, -8(AX)(BX*1)
+ MOVQ (AX), SI
+ MOVQ -8(AX)(BX*1), AX
+ MOVQ SI, (CX)
+ MOVQ AX, -8(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_17through32:
- MOVOU (CX), X0
- MOVOU -16(CX)(BX*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU -16(AX)(BX*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_33through64:
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
memmove_end_copy_emit_remainder_encodeSnappyBlockAsm:
- MOVQ DX, AX
+ MOVQ DX, CX
JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm
memmove_long_emit_remainder_encodeSnappyBlockAsm:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveLong
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
MOVQ BX, DI
SHRQ $0x05, DI
- MOVQ AX, SI
+ MOVQ CX, SI
ANDL $0x0000001f, SI
MOVQ $0x00000040, R8
SUBQ SI, R8
DECQ DI
JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_forward_sse_loop_32
- LEAQ -32(CX)(R8*1), SI
- LEAQ -32(AX)(R8*1), R9
+ LEAQ -32(AX)(R8*1), SI
+ LEAQ -32(CX)(R8*1), R9
emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_big_loop_back:
MOVOU (SI), X4
@@ -11626,718 +11637,719 @@ emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_big_loop_back:
JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_big_loop_back
emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_forward_sse_loop_32:
- MOVOU -32(CX)(R8*1), X4
- MOVOU -16(CX)(R8*1), X5
- MOVOA X4, -32(AX)(R8*1)
- MOVOA X5, -16(AX)(R8*1)
+ MOVOU -32(AX)(R8*1), X4
+ MOVOU -16(AX)(R8*1), X5
+ MOVOA X4, -32(CX)(R8*1)
+ MOVOA X5, -16(CX)(R8*1)
ADDQ $0x20, R8
CMPQ BX, R8
JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
- MOVQ DX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
+ MOVQ DX, CX
emit_literal_done_emit_remainder_encodeSnappyBlockAsm:
- MOVQ dst_base+0(FP), CX
- SUBQ CX, AX
- MOVQ AX, ret+48(FP)
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, CX
+ MOVQ CX, ret+56(FP)
RET
-// func encodeSnappyBlockAsm64K(dst []byte, src []byte) int
+// func encodeSnappyBlockAsm64K(dst []byte, src []byte, tmp *[65536]byte) int
// Requires: BMI, SSE2
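	// The hash table now arrives through the extra tmp argument (64 KiB,
	// 16384 4-byte entries) rather than living in the assembly stack
	// frame, presumably so the caller can reuse or pool the buffer: the
	// frame shrinks from $65560-56 to $24-64, AX keeps the table base
	// loaded from tmp+48(FP) so entries are addressed as (AX)(idx*4)
	// instead of 24(SP)(idx*4), and the result moves from ret+48(FP) to
	// ret+56(FP) to account for the extra 8-byte pointer argument. The
	// zero loop below still clears the full table, 128 bytes per
	// iteration (0x200 rounds).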
-TEXT ·encodeSnappyBlockAsm64K(SB), $65560-56
- MOVQ dst_base+0(FP), AX
- MOVQ $0x00000200, CX
- LEAQ 24(SP), DX
+TEXT ·encodeSnappyBlockAsm64K(SB), $24-64
+ MOVQ tmp+48(FP), AX
+ MOVQ dst_base+0(FP), CX
+ MOVQ $0x00000200, DX
+ MOVQ AX, BX
PXOR X0, X0
zero_loop_encodeSnappyBlockAsm64K:
- MOVOU X0, (DX)
- MOVOU X0, 16(DX)
- MOVOU X0, 32(DX)
- MOVOU X0, 48(DX)
- MOVOU X0, 64(DX)
- MOVOU X0, 80(DX)
- MOVOU X0, 96(DX)
- MOVOU X0, 112(DX)
- ADDQ $0x80, DX
- DECQ CX
+ MOVOU X0, (BX)
+ MOVOU X0, 16(BX)
+ MOVOU X0, 32(BX)
+ MOVOU X0, 48(BX)
+ MOVOU X0, 64(BX)
+ MOVOU X0, 80(BX)
+ MOVOU X0, 96(BX)
+ MOVOU X0, 112(BX)
+ ADDQ $0x80, BX
+ DECQ DX
JNZ zero_loop_encodeSnappyBlockAsm64K
MOVL $0x00000000, 12(SP)
- MOVQ src_len+32(FP), CX
- LEAQ -9(CX), DX
- LEAQ -8(CX), BX
- MOVL BX, 8(SP)
- SHRQ $0x05, CX
- SUBL CX, DX
- LEAQ (AX)(DX*1), DX
- MOVQ DX, (SP)
- MOVL $0x00000001, CX
- MOVL CX, 16(SP)
- MOVQ src_base+24(FP), DX
+ MOVQ src_len+32(FP), DX
+ LEAQ -9(DX), BX
+ LEAQ -8(DX), SI
+ MOVL SI, 8(SP)
+ SHRQ $0x05, DX
+ SUBL DX, BX
+ LEAQ (CX)(BX*1), BX
+ MOVQ BX, (SP)
+ MOVL $0x00000001, DX
+ MOVL DX, 16(SP)
+ MOVQ src_base+24(FP), BX
search_loop_encodeSnappyBlockAsm64K:
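	// Each round loads 8 source bytes and hashes two overlapping 6-byte
	// windows with (x << 0x10) * 0x0000cf1bbcdcbf9b >> 0x32, keeping the
	// top 14 bits as indexes into the 16384-entry table in AX; the skip
	// grows with the distance since the last emit (SHRL $0x06), and a
	// 4-byte compare against the position offset by the last match
	// distance (kept at 16(SP)) catches immediate repeats first.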
- MOVL CX, BX
- SUBL 12(SP), BX
- SHRL $0x06, BX
- LEAL 4(CX)(BX*1), BX
- CMPL BX, 8(SP)
+ MOVL DX, SI
+ SUBL 12(SP), SI
+ SHRL $0x06, SI
+ LEAL 4(DX)(SI*1), SI
+ CMPL SI, 8(SP)
JAE emit_remainder_encodeSnappyBlockAsm64K
- MOVQ (DX)(CX*1), SI
- MOVL BX, 20(SP)
- MOVQ $0x0000cf1bbcdcbf9b, R8
- MOVQ SI, R9
- MOVQ SI, R10
- SHRQ $0x08, R10
- SHLQ $0x10, R9
- IMULQ R8, R9
- SHRQ $0x32, R9
+ MOVQ (BX)(DX*1), DI
+ MOVL SI, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R9
+ MOVQ DI, R10
+ MOVQ DI, R11
+ SHRQ $0x08, R11
SHLQ $0x10, R10
- IMULQ R8, R10
+ IMULQ R9, R10
SHRQ $0x32, R10
- MOVL 24(SP)(R9*4), BX
- MOVL 24(SP)(R10*4), DI
- MOVL CX, 24(SP)(R9*4)
- LEAL 1(CX), R9
- MOVL R9, 24(SP)(R10*4)
- MOVQ SI, R9
- SHRQ $0x10, R9
- SHLQ $0x10, R9
- IMULQ R8, R9
- SHRQ $0x32, R9
- MOVL CX, R8
- SUBL 16(SP), R8
- MOVL 1(DX)(R8*1), R10
- MOVQ SI, R8
- SHRQ $0x08, R8
- CMPL R8, R10
+ SHLQ $0x10, R11
+ IMULQ R9, R11
+ SHRQ $0x32, R11
+ MOVL (AX)(R10*4), SI
+ MOVL (AX)(R11*4), R8
+ MOVL DX, (AX)(R10*4)
+ LEAL 1(DX), R10
+ MOVL R10, (AX)(R11*4)
+ MOVQ DI, R10
+ SHRQ $0x10, R10
+ SHLQ $0x10, R10
+ IMULQ R9, R10
+ SHRQ $0x32, R10
+ MOVL DX, R9
+ SUBL 16(SP), R9
+ MOVL 1(BX)(R9*1), R11
+ MOVQ DI, R9
+ SHRQ $0x08, R9
+ CMPL R9, R11
JNE no_repeat_found_encodeSnappyBlockAsm64K
- LEAL 1(CX), SI
- MOVL 12(SP), BX
- MOVL SI, DI
- SUBL 16(SP), DI
+ LEAL 1(DX), DI
+ MOVL 12(SP), SI
+ MOVL DI, R8
+ SUBL 16(SP), R8
JZ repeat_extend_back_end_encodeSnappyBlockAsm64K
repeat_extend_back_loop_encodeSnappyBlockAsm64K:
- CMPL SI, BX
+ CMPL DI, SI
JBE repeat_extend_back_end_encodeSnappyBlockAsm64K
- MOVB -1(DX)(DI*1), R8
- MOVB -1(DX)(SI*1), R9
- CMPB R8, R9
+ MOVB -1(BX)(R8*1), R9
+ MOVB -1(BX)(DI*1), R10
+ CMPB R9, R10
JNE repeat_extend_back_end_encodeSnappyBlockAsm64K
- LEAL -1(SI), SI
- DECL DI
+ LEAL -1(DI), DI
+ DECL R8
JNZ repeat_extend_back_loop_encodeSnappyBlockAsm64K
repeat_extend_back_end_encodeSnappyBlockAsm64K:
- MOVL SI, BX
- SUBL 12(SP), BX
- LEAQ 3(AX)(BX*1), BX
- CMPQ BX, (SP)
+ MOVL DI, SI
+ SUBL 12(SP), SI
+ LEAQ 3(CX)(SI*1), SI
+ CMPQ SI, (SP)
JB repeat_dst_size_check_encodeSnappyBlockAsm64K
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
repeat_dst_size_check_encodeSnappyBlockAsm64K:
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm64K
- MOVL SI, DI
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R8
- SUBL BX, DI
- LEAL -1(DI), BX
- CMPL BX, $0x3c
+ MOVL DI, R8
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R9
+ SUBL SI, R8
+ LEAL -1(R8), SI
+ CMPL SI, $0x3c
JB one_byte_repeat_emit_encodeSnappyBlockAsm64K
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_repeat_emit_encodeSnappyBlockAsm64K
JB three_bytes_repeat_emit_encodeSnappyBlockAsm64K
three_bytes_repeat_emit_encodeSnappyBlockAsm64K:
- MOVB $0xf4, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_repeat_emit_encodeSnappyBlockAsm64K
two_bytes_repeat_emit_encodeSnappyBlockAsm64K:
- MOVB $0xf0, (AX)
- MOVB BL, 1(AX)
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_repeat_emit_encodeSnappyBlockAsm64K
JMP memmove_long_repeat_emit_encodeSnappyBlockAsm64K
one_byte_repeat_emit_encodeSnappyBlockAsm64K:
- SHLB $0x02, BL
- MOVB BL, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
memmove_repeat_emit_encodeSnappyBlockAsm64K:
- LEAQ (AX)(DI*1), BX
+ LEAQ (CX)(R8*1), SI
// genMemMoveShort
- CMPQ DI, $0x08
+ CMPQ R8, $0x08
JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8
- CMPQ DI, $0x10
+ CMPQ R8, $0x10
JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8through16
- CMPQ DI, $0x20
+ CMPQ R8, $0x20
JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_17through32
JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_33through64
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8:
- MOVQ (R8), R9
- MOVQ R9, (AX)
+ MOVQ (R9), R10
+ MOVQ R10, (CX)
JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8through16:
- MOVQ (R8), R9
- MOVQ -8(R8)(DI*1), R8
- MOVQ R9, (AX)
- MOVQ R8, -8(AX)(DI*1)
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (CX)
+ MOVQ R9, -8(CX)(R8*1)
JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_17through32:
- MOVOU (R8), X0
- MOVOU -16(R8)(DI*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(DI*1)
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R8*1)
JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_33through64:
- MOVOU (R8), X0
- MOVOU 16(R8), X1
- MOVOU -32(R8)(DI*1), X2
- MOVOU -16(R8)(DI*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(DI*1)
- MOVOU X3, -16(AX)(DI*1)
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R8*1)
+ MOVOU X3, -16(CX)(R8*1)
memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K:
- MOVQ BX, AX
+ MOVQ SI, CX
JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm64K
memmove_long_repeat_emit_encodeSnappyBlockAsm64K:
- LEAQ (AX)(DI*1), BX
+ LEAQ (CX)(R8*1), SI
// genMemMoveLong
- MOVOU (R8), X0
- MOVOU 16(R8), X1
- MOVOU -32(R8)(DI*1), X2
- MOVOU -16(R8)(DI*1), X3
- MOVQ DI, R10
- SHRQ $0x05, R10
- MOVQ AX, R9
- ANDL $0x0000001f, R9
- MOVQ $0x00000040, R11
- SUBQ R9, R11
- DECQ R10
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R11
+ SHRQ $0x05, R11
+ MOVQ CX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R12
+ SUBQ R10, R12
+ DECQ R11
JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32
- LEAQ -32(R8)(R11*1), R9
- LEAQ -32(AX)(R11*1), R12
+ LEAQ -32(R9)(R12*1), R10
+ LEAQ -32(CX)(R12*1), R13
emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_big_loop_back:
- MOVOU (R9), X4
- MOVOU 16(R9), X5
- MOVOA X4, (R12)
- MOVOA X5, 16(R12)
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R13)
+ MOVOA X5, 16(R13)
+ ADDQ $0x20, R13
+ ADDQ $0x20, R10
ADDQ $0x20, R12
- ADDQ $0x20, R9
- ADDQ $0x20, R11
- DECQ R10
+ DECQ R11
JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_big_loop_back
emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32:
- MOVOU -32(R8)(R11*1), X4
- MOVOU -16(R8)(R11*1), X5
- MOVOA X4, -32(AX)(R11*1)
- MOVOA X5, -16(AX)(R11*1)
- ADDQ $0x20, R11
- CMPQ DI, R11
+ MOVOU -32(R9)(R12*1), X4
+ MOVOU -16(R9)(R12*1), X5
+ MOVOA X4, -32(CX)(R12*1)
+ MOVOA X5, -16(CX)(R12*1)
+ ADDQ $0x20, R12
+ CMPQ R8, R12
JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(DI*1)
- MOVOU X3, -16(AX)(DI*1)
- MOVQ BX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R8*1)
+ MOVOU X3, -16(CX)(R8*1)
+ MOVQ SI, CX
emit_literal_done_repeat_emit_encodeSnappyBlockAsm64K:
- ADDL $0x05, CX
- MOVL CX, BX
- SUBL 16(SP), BX
- MOVQ src_len+32(FP), DI
- SUBL CX, DI
- LEAQ (DX)(CX*1), R8
- LEAQ (DX)(BX*1), BX
+ ADDL $0x05, DX
+ MOVL DX, SI
+ SUBL 16(SP), SI
+ MOVQ src_len+32(FP), R8
+ SUBL DX, R8
+ LEAQ (BX)(DX*1), R9
+ LEAQ (BX)(SI*1), SI
// matchLen
- XORL R10, R10
+ XORL R11, R11
matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm64K:
- CMPL DI, $0x10
+ CMPL R8, $0x10
JB matchlen_match8_repeat_extend_encodeSnappyBlockAsm64K
- MOVQ (R8)(R10*1), R9
- MOVQ 8(R8)(R10*1), R11
- XORQ (BX)(R10*1), R9
+ MOVQ (R9)(R11*1), R10
+ MOVQ 8(R9)(R11*1), R12
+ XORQ (SI)(R11*1), R10
JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm64K
- XORQ 8(BX)(R10*1), R11
+ XORQ 8(SI)(R11*1), R12
JNZ matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm64K
- LEAL -16(DI), DI
- LEAL 16(R10), R10
+ LEAL -16(R8), R8
+ LEAL 16(R11), R11
JMP matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm64K
matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm64K:
#ifdef GOAMD64_v3
- TZCNTQ R11, R11
+ TZCNTQ R12, R12
#else
- BSFQ R11, R11
+ BSFQ R12, R12
#endif
- SARQ $0x03, R11
- LEAL 8(R10)(R11*1), R10
+ SARQ $0x03, R12
+ LEAL 8(R11)(R12*1), R11
JMP repeat_extend_forward_end_encodeSnappyBlockAsm64K
matchlen_match8_repeat_extend_encodeSnappyBlockAsm64K:
- CMPL DI, $0x08
+ CMPL R8, $0x08
JB matchlen_match4_repeat_extend_encodeSnappyBlockAsm64K
- MOVQ (R8)(R10*1), R9
- XORQ (BX)(R10*1), R9
+ MOVQ (R9)(R11*1), R10
+ XORQ (SI)(R11*1), R10
JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm64K
- LEAL -8(DI), DI
- LEAL 8(R10), R10
+ LEAL -8(R8), R8
+ LEAL 8(R11), R11
JMP matchlen_match4_repeat_extend_encodeSnappyBlockAsm64K
matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm64K:
#ifdef GOAMD64_v3
- TZCNTQ R9, R9
+ TZCNTQ R10, R10
#else
- BSFQ R9, R9
+ BSFQ R10, R10
#endif
- SARQ $0x03, R9
- LEAL (R10)(R9*1), R10
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
JMP repeat_extend_forward_end_encodeSnappyBlockAsm64K
matchlen_match4_repeat_extend_encodeSnappyBlockAsm64K:
- CMPL DI, $0x04
+ CMPL R8, $0x04
JB matchlen_match2_repeat_extend_encodeSnappyBlockAsm64K
- MOVL (R8)(R10*1), R9
- CMPL (BX)(R10*1), R9
+ MOVL (R9)(R11*1), R10
+ CMPL (SI)(R11*1), R10
JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm64K
- LEAL -4(DI), DI
- LEAL 4(R10), R10
+ LEAL -4(R8), R8
+ LEAL 4(R11), R11
matchlen_match2_repeat_extend_encodeSnappyBlockAsm64K:
- CMPL DI, $0x01
+ CMPL R8, $0x01
JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm64K
JB repeat_extend_forward_end_encodeSnappyBlockAsm64K
- MOVW (R8)(R10*1), R9
- CMPW (BX)(R10*1), R9
+ MOVW (R9)(R11*1), R10
+ CMPW (SI)(R11*1), R10
JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm64K
- LEAL 2(R10), R10
- SUBL $0x02, DI
+ LEAL 2(R11), R11
+ SUBL $0x02, R8
JZ repeat_extend_forward_end_encodeSnappyBlockAsm64K
matchlen_match1_repeat_extend_encodeSnappyBlockAsm64K:
- MOVB (R8)(R10*1), R9
- CMPB (BX)(R10*1), R9
+ MOVB (R9)(R11*1), R10
+ CMPB (SI)(R11*1), R10
JNE repeat_extend_forward_end_encodeSnappyBlockAsm64K
- LEAL 1(R10), R10
+ LEAL 1(R11), R11
repeat_extend_forward_end_encodeSnappyBlockAsm64K:
- ADDL R10, CX
- MOVL CX, BX
- SUBL SI, BX
- MOVL 16(SP), SI
+ ADDL R11, DX
+ MOVL DX, SI
+ SUBL DI, SI
+ MOVL 16(SP), DI
// emitCopy
two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm64K:
- CMPL BX, $0x40
+ CMPL SI, $0x40
JBE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm64K
- MOVB $0xee, (AX)
- MOVW SI, 1(AX)
- LEAL -60(BX), BX
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW DI, 1(CX)
+ LEAL -60(SI), SI
+ ADDQ $0x03, CX
JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm64K
two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm64K:
- MOVL BX, DI
- SHLL $0x02, DI
- CMPL BX, $0x0c
+ MOVL SI, R8
+ SHLL $0x02, R8
+ CMPL SI, $0x0c
JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm64K
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm64K
- LEAL -15(DI), DI
- MOVB SI, 1(AX)
- SHRL $0x08, SI
- SHLL $0x05, SI
- ORL SI, DI
- MOVB DI, (AX)
- ADDQ $0x02, AX
+ LEAL -15(R8), R8
+ MOVB DI, 1(CX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R8
+ MOVB R8, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeSnappyBlockAsm64K
emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm64K:
- LEAL -2(DI), DI
- MOVB DI, (AX)
- MOVW SI, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(R8), R8
+ MOVB R8, (CX)
+ MOVW DI, 1(CX)
+ ADDQ $0x03, CX
repeat_end_emit_encodeSnappyBlockAsm64K:
- MOVL CX, 12(SP)
+ MOVL DX, 12(SP)
JMP search_loop_encodeSnappyBlockAsm64K
no_repeat_found_encodeSnappyBlockAsm64K:
- CMPL (DX)(BX*1), SI
+ CMPL (BX)(SI*1), DI
JEQ candidate_match_encodeSnappyBlockAsm64K
- SHRQ $0x08, SI
- MOVL 24(SP)(R9*4), BX
- LEAL 2(CX), R8
- CMPL (DX)(DI*1), SI
+ SHRQ $0x08, DI
+ MOVL (AX)(R10*4), SI
+ LEAL 2(DX), R9
+ CMPL (BX)(R8*1), DI
JEQ candidate2_match_encodeSnappyBlockAsm64K
- MOVL R8, 24(SP)(R9*4)
- SHRQ $0x08, SI
- CMPL (DX)(BX*1), SI
+ MOVL R9, (AX)(R10*4)
+ SHRQ $0x08, DI
+ CMPL (BX)(SI*1), DI
JEQ candidate3_match_encodeSnappyBlockAsm64K
- MOVL 20(SP), CX
+ MOVL 20(SP), DX
JMP search_loop_encodeSnappyBlockAsm64K
candidate3_match_encodeSnappyBlockAsm64K:
- ADDL $0x02, CX
+ ADDL $0x02, DX
JMP candidate_match_encodeSnappyBlockAsm64K
candidate2_match_encodeSnappyBlockAsm64K:
- MOVL R8, 24(SP)(R9*4)
- INCL CX
- MOVL DI, BX
+ MOVL R9, (AX)(R10*4)
+ INCL DX
+ MOVL R8, SI
candidate_match_encodeSnappyBlockAsm64K:
- MOVL 12(SP), SI
- TESTL BX, BX
+ MOVL 12(SP), DI
+ TESTL SI, SI
JZ match_extend_back_end_encodeSnappyBlockAsm64K
match_extend_back_loop_encodeSnappyBlockAsm64K:
- CMPL CX, SI
+ CMPL DX, DI
JBE match_extend_back_end_encodeSnappyBlockAsm64K
- MOVB -1(DX)(BX*1), DI
- MOVB -1(DX)(CX*1), R8
- CMPB DI, R8
+ MOVB -1(BX)(SI*1), R8
+ MOVB -1(BX)(DX*1), R9
+ CMPB R8, R9
JNE match_extend_back_end_encodeSnappyBlockAsm64K
- LEAL -1(CX), CX
- DECL BX
+ LEAL -1(DX), DX
+ DECL SI
JZ match_extend_back_end_encodeSnappyBlockAsm64K
JMP match_extend_back_loop_encodeSnappyBlockAsm64K
match_extend_back_end_encodeSnappyBlockAsm64K:
- MOVL CX, SI
- SUBL 12(SP), SI
- LEAQ 3(AX)(SI*1), SI
- CMPQ SI, (SP)
+ MOVL DX, DI
+ SUBL 12(SP), DI
+ LEAQ 3(CX)(DI*1), DI
+ CMPQ DI, (SP)
JB match_dst_size_check_encodeSnappyBlockAsm64K
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_dst_size_check_encodeSnappyBlockAsm64K:
- MOVL CX, SI
- MOVL 12(SP), DI
- CMPL DI, SI
+ MOVL DX, DI
+ MOVL 12(SP), R8
+ CMPL R8, DI
JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm64K
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(DI*1), SI
- SUBL DI, R8
- LEAL -1(R8), DI
- CMPL DI, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(R8*1), DI
+ SUBL R8, R9
+ LEAL -1(R9), R8
+ CMPL R8, $0x3c
JB one_byte_match_emit_encodeSnappyBlockAsm64K
- CMPL DI, $0x00000100
+ CMPL R8, $0x00000100
JB two_bytes_match_emit_encodeSnappyBlockAsm64K
JB three_bytes_match_emit_encodeSnappyBlockAsm64K
three_bytes_match_emit_encodeSnappyBlockAsm64K:
- MOVB $0xf4, (AX)
- MOVW DI, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW R8, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_encodeSnappyBlockAsm64K
two_bytes_match_emit_encodeSnappyBlockAsm64K:
- MOVB $0xf0, (AX)
- MOVB DI, 1(AX)
- ADDQ $0x02, AX
- CMPL DI, $0x40
+ MOVB $0xf0, (CX)
+ MOVB R8, 1(CX)
+ ADDQ $0x02, CX
+ CMPL R8, $0x40
JB memmove_match_emit_encodeSnappyBlockAsm64K
JMP memmove_long_match_emit_encodeSnappyBlockAsm64K
one_byte_match_emit_encodeSnappyBlockAsm64K:
- SHLB $0x02, DI
- MOVB DI, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, R8
+ MOVB R8, (CX)
+ ADDQ $0x01, CX
memmove_match_emit_encodeSnappyBlockAsm64K:
- LEAQ (AX)(R8*1), DI
+ LEAQ (CX)(R9*1), R8
// genMemMoveShort
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_33through64
emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8:
- MOVQ (SI), R9
- MOVQ R9, (AX)
+ MOVQ (DI), R10
+ MOVQ R10, (CX)
JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm64K
emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8through16:
- MOVQ (SI), R9
- MOVQ -8(SI)(R8*1), SI
- MOVQ R9, (AX)
- MOVQ SI, -8(AX)(R8*1)
+ MOVQ (DI), R10
+ MOVQ -8(DI)(R9*1), DI
+ MOVQ R10, (CX)
+ MOVQ DI, -8(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm64K
emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_17through32:
- MOVOU (SI), X0
- MOVOU -16(SI)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (DI), X0
+ MOVOU -16(DI)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm64K
emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_33through64:
- MOVOU (SI), X0
- MOVOU 16(SI), X1
- MOVOU -32(SI)(R8*1), X2
- MOVOU -16(SI)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (DI), X0
+ MOVOU 16(DI), X1
+ MOVOU -32(DI)(R9*1), X2
+ MOVOU -16(DI)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_match_emit_encodeSnappyBlockAsm64K:
- MOVQ DI, AX
+ MOVQ R8, CX
JMP emit_literal_done_match_emit_encodeSnappyBlockAsm64K
memmove_long_match_emit_encodeSnappyBlockAsm64K:
- LEAQ (AX)(R8*1), DI
+ LEAQ (CX)(R9*1), R8
// genMemMoveLong
- MOVOU (SI), X0
- MOVOU 16(SI), X1
- MOVOU -32(SI)(R8*1), X2
- MOVOU -16(SI)(R8*1), X3
- MOVQ R8, R10
- SHRQ $0x05, R10
- MOVQ AX, R9
- ANDL $0x0000001f, R9
- MOVQ $0x00000040, R11
- SUBQ R9, R11
- DECQ R10
+ MOVOU (DI), X0
+ MOVOU 16(DI), X1
+ MOVOU -32(DI)(R9*1), X2
+ MOVOU -16(DI)(R9*1), X3
+ MOVQ R9, R11
+ SHRQ $0x05, R11
+ MOVQ CX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R12
+ SUBQ R10, R12
+ DECQ R11
JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32
- LEAQ -32(SI)(R11*1), R9
- LEAQ -32(AX)(R11*1), R12
+ LEAQ -32(DI)(R12*1), R10
+ LEAQ -32(CX)(R12*1), R13
emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_big_loop_back:
- MOVOU (R9), X4
- MOVOU 16(R9), X5
- MOVOA X4, (R12)
- MOVOA X5, 16(R12)
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R13)
+ MOVOA X5, 16(R13)
+ ADDQ $0x20, R13
+ ADDQ $0x20, R10
ADDQ $0x20, R12
- ADDQ $0x20, R9
- ADDQ $0x20, R11
- DECQ R10
+ DECQ R11
JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32:
- MOVOU -32(SI)(R11*1), X4
- MOVOU -16(SI)(R11*1), X5
- MOVOA X4, -32(AX)(R11*1)
- MOVOA X5, -16(AX)(R11*1)
- ADDQ $0x20, R11
- CMPQ R8, R11
+ MOVOU -32(DI)(R12*1), X4
+ MOVOU -16(DI)(R12*1), X5
+ MOVOA X4, -32(CX)(R12*1)
+ MOVOA X5, -16(CX)(R12*1)
+ ADDQ $0x20, R12
+ CMPQ R9, R12
JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ DI, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ R8, CX
emit_literal_done_match_emit_encodeSnappyBlockAsm64K:
match_nolit_loop_encodeSnappyBlockAsm64K:
- MOVL CX, SI
- SUBL BX, SI
- MOVL SI, 16(SP)
- ADDL $0x04, CX
- ADDL $0x04, BX
- MOVQ src_len+32(FP), SI
- SUBL CX, SI
- LEAQ (DX)(CX*1), DI
- LEAQ (DX)(BX*1), BX
+ MOVL DX, DI
+ SUBL SI, DI
+ MOVL DI, 16(SP)
+ ADDL $0x04, DX
+ ADDL $0x04, SI
+ MOVQ src_len+32(FP), DI
+ SUBL DX, DI
+ LEAQ (BX)(DX*1), R8
+ LEAQ (BX)(SI*1), SI
// matchLen
- XORL R9, R9
+ XORL R10, R10
matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm64K:
- CMPL SI, $0x10
+ CMPL DI, $0x10
JB matchlen_match8_match_nolit_encodeSnappyBlockAsm64K
- MOVQ (DI)(R9*1), R8
- MOVQ 8(DI)(R9*1), R10
- XORQ (BX)(R9*1), R8
+ MOVQ (R8)(R10*1), R9
+ MOVQ 8(R8)(R10*1), R11
+ XORQ (SI)(R10*1), R9
JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm64K
- XORQ 8(BX)(R9*1), R10
+ XORQ 8(SI)(R10*1), R11
JNZ matchlen_bsf_16match_nolit_encodeSnappyBlockAsm64K
- LEAL -16(SI), SI
- LEAL 16(R9), R9
+ LEAL -16(DI), DI
+ LEAL 16(R10), R10
JMP matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm64K
matchlen_bsf_16match_nolit_encodeSnappyBlockAsm64K:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL 8(R9)(R10*1), R9
+ SARQ $0x03, R11
+ LEAL 8(R10)(R11*1), R10
JMP match_nolit_end_encodeSnappyBlockAsm64K
matchlen_match8_match_nolit_encodeSnappyBlockAsm64K:
- CMPL SI, $0x08
+ CMPL DI, $0x08
JB matchlen_match4_match_nolit_encodeSnappyBlockAsm64K
- MOVQ (DI)(R9*1), R8
- XORQ (BX)(R9*1), R8
+ MOVQ (R8)(R10*1), R9
+ XORQ (SI)(R10*1), R9
JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm64K
- LEAL -8(SI), SI
- LEAL 8(R9), R9
+ LEAL -8(DI), DI
+ LEAL 8(R10), R10
JMP matchlen_match4_match_nolit_encodeSnappyBlockAsm64K
matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm64K:
#ifdef GOAMD64_v3
- TZCNTQ R8, R8
+ TZCNTQ R9, R9
#else
- BSFQ R8, R8
+ BSFQ R9, R9
#endif
- SARQ $0x03, R8
- LEAL (R9)(R8*1), R9
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
JMP match_nolit_end_encodeSnappyBlockAsm64K
matchlen_match4_match_nolit_encodeSnappyBlockAsm64K:
- CMPL SI, $0x04
+ CMPL DI, $0x04
JB matchlen_match2_match_nolit_encodeSnappyBlockAsm64K
- MOVL (DI)(R9*1), R8
- CMPL (BX)(R9*1), R8
+ MOVL (R8)(R10*1), R9
+ CMPL (SI)(R10*1), R9
JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm64K
- LEAL -4(SI), SI
- LEAL 4(R9), R9
+ LEAL -4(DI), DI
+ LEAL 4(R10), R10
matchlen_match2_match_nolit_encodeSnappyBlockAsm64K:
- CMPL SI, $0x01
+ CMPL DI, $0x01
JE matchlen_match1_match_nolit_encodeSnappyBlockAsm64K
JB match_nolit_end_encodeSnappyBlockAsm64K
- MOVW (DI)(R9*1), R8
- CMPW (BX)(R9*1), R8
+ MOVW (R8)(R10*1), R9
+ CMPW (SI)(R10*1), R9
JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm64K
- LEAL 2(R9), R9
- SUBL $0x02, SI
+ LEAL 2(R10), R10
+ SUBL $0x02, DI
JZ match_nolit_end_encodeSnappyBlockAsm64K
matchlen_match1_match_nolit_encodeSnappyBlockAsm64K:
- MOVB (DI)(R9*1), R8
- CMPB (BX)(R9*1), R8
+ MOVB (R8)(R10*1), R9
+ CMPB (SI)(R10*1), R9
JNE match_nolit_end_encodeSnappyBlockAsm64K
- LEAL 1(R9), R9
+ LEAL 1(R10), R10
match_nolit_end_encodeSnappyBlockAsm64K:
- ADDL R9, CX
- MOVL 16(SP), BX
- ADDL $0x04, R9
- MOVL CX, 12(SP)
+ ADDL R10, DX
+ MOVL 16(SP), SI
+ ADDL $0x04, R10
+ MOVL DX, 12(SP)
// emitCopy
two_byte_offset_match_nolit_encodeSnappyBlockAsm64K:
- CMPL R9, $0x40
+ CMPL R10, $0x40
JBE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm64K
- MOVB $0xee, (AX)
- MOVW BX, 1(AX)
- LEAL -60(R9), R9
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW SI, 1(CX)
+ LEAL -60(R10), R10
+ ADDQ $0x03, CX
JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm64K
two_byte_offset_short_match_nolit_encodeSnappyBlockAsm64K:
- MOVL R9, SI
- SHLL $0x02, SI
- CMPL R9, $0x0c
+ MOVL R10, DI
+ SHLL $0x02, DI
+ CMPL R10, $0x0c
JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm64K
- CMPL BX, $0x00000800
+ CMPL SI, $0x00000800
JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm64K
- LEAL -15(SI), SI
- MOVB BL, 1(AX)
- SHRL $0x08, BX
- SHLL $0x05, BX
- ORL BX, SI
- MOVB SI, (AX)
- ADDQ $0x02, AX
+ LEAL -15(DI), DI
+ MOVB SI, 1(CX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm64K
emit_copy_three_match_nolit_encodeSnappyBlockAsm64K:
- LEAL -2(SI), SI
- MOVB SI, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(DI), DI
+ MOVB DI, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
match_nolit_emitcopy_end_encodeSnappyBlockAsm64K:
- CMPL CX, 8(SP)
+ CMPL DX, 8(SP)
JAE emit_remainder_encodeSnappyBlockAsm64K
- MOVQ -2(DX)(CX*1), SI
- CMPQ AX, (SP)
+ MOVQ -2(BX)(DX*1), DI
+ CMPQ CX, (SP)
JB match_nolit_dst_ok_encodeSnappyBlockAsm64K
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_nolit_dst_ok_encodeSnappyBlockAsm64K:
- MOVQ $0x0000cf1bbcdcbf9b, R8
- MOVQ SI, DI
- SHRQ $0x10, SI
- MOVQ SI, BX
- SHLQ $0x10, DI
- IMULQ R8, DI
- SHRQ $0x32, DI
- SHLQ $0x10, BX
- IMULQ R8, BX
- SHRQ $0x32, BX
- LEAL -2(CX), R8
- LEAQ 24(SP)(BX*4), R9
- MOVL (R9), BX
- MOVL R8, 24(SP)(DI*4)
- MOVL CX, (R9)
- CMPL (DX)(BX*1), SI
+ MOVQ $0x0000cf1bbcdcbf9b, R9
+ MOVQ DI, R8
+ SHRQ $0x10, DI
+ MOVQ DI, SI
+ SHLQ $0x10, R8
+ IMULQ R9, R8
+ SHRQ $0x32, R8
+ SHLQ $0x10, SI
+ IMULQ R9, SI
+ SHRQ $0x32, SI
+ LEAL -2(DX), R9
+ LEAQ (AX)(SI*4), R10
+ MOVL (R10), SI
+ MOVL R9, (AX)(R8*4)
+ MOVL DX, (R10)
+ CMPL (BX)(SI*1), DI
JEQ match_nolit_loop_encodeSnappyBlockAsm64K
- INCL CX
+ INCL DX
JMP search_loop_encodeSnappyBlockAsm64K
emit_remainder_encodeSnappyBlockAsm64K:
- MOVQ src_len+32(FP), CX
- SUBL 12(SP), CX
- LEAQ 3(AX)(CX*1), CX
- CMPQ CX, (SP)
+ MOVQ src_len+32(FP), AX
+ SUBL 12(SP), AX
+ LEAQ 3(CX)(AX*1), AX
+ CMPQ AX, (SP)
JB emit_remainder_ok_encodeSnappyBlockAsm64K
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
emit_remainder_ok_encodeSnappyBlockAsm64K:
- MOVQ src_len+32(FP), CX
- MOVL 12(SP), BX
- CMPL BX, CX
+ MOVQ src_len+32(FP), AX
+ MOVL 12(SP), DX
+ CMPL DX, AX
JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm64K
- MOVL CX, SI
- MOVL CX, 12(SP)
- LEAQ (DX)(BX*1), CX
- SUBL BX, SI
+ MOVL AX, SI
+ MOVL AX, 12(SP)
+ LEAQ (BX)(DX*1), AX
+ SUBL DX, SI
LEAL -1(SI), DX
CMPL DX, $0x3c
JB one_byte_emit_remainder_encodeSnappyBlockAsm64K
@@ -12346,26 +12358,26 @@ emit_remainder_ok_encodeSnappyBlockAsm64K:
JB three_bytes_emit_remainder_encodeSnappyBlockAsm64K
three_bytes_emit_remainder_encodeSnappyBlockAsm64K:
- MOVB $0xf4, (AX)
- MOVW DX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW DX, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_emit_remainder_encodeSnappyBlockAsm64K
two_bytes_emit_remainder_encodeSnappyBlockAsm64K:
- MOVB $0xf0, (AX)
- MOVB DL, 1(AX)
- ADDQ $0x02, AX
+ MOVB $0xf0, (CX)
+ MOVB DL, 1(CX)
+ ADDQ $0x02, CX
CMPL DX, $0x40
JB memmove_emit_remainder_encodeSnappyBlockAsm64K
JMP memmove_long_emit_remainder_encodeSnappyBlockAsm64K
one_byte_emit_remainder_encodeSnappyBlockAsm64K:
SHLB $0x02, DL
- MOVB DL, (AX)
- ADDQ $0x01, AX
+ MOVB DL, (CX)
+ ADDQ $0x01, CX
memmove_emit_remainder_encodeSnappyBlockAsm64K:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveShort
@@ -12381,73 +12393,73 @@ memmove_emit_remainder_encodeSnappyBlockAsm64K:
JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_33through64
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_1or2:
- MOVB (CX), SI
- MOVB -1(CX)(BX*1), CL
- MOVB SI, (AX)
- MOVB CL, -1(AX)(BX*1)
+ MOVB (AX), SI
+ MOVB -1(AX)(BX*1), AL
+ MOVB SI, (CX)
+ MOVB AL, -1(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_3:
- MOVW (CX), SI
- MOVB 2(CX), CL
- MOVW SI, (AX)
- MOVB CL, 2(AX)
+ MOVW (AX), SI
+ MOVB 2(AX), AL
+ MOVW SI, (CX)
+ MOVB AL, 2(CX)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_4through7:
-	MOVL (CX), SI
-	MOVL -4(CX)(BX*1), CX
-	MOVL SI, (AX)
-	MOVL CX, -4(AX)(BX*1)
+ MOVL (AX), SI
+ MOVL -4(AX)(BX*1), AX
+ MOVL SI, (CX)
+ MOVL AX, -4(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_8through16:
- MOVQ (CX), SI
- MOVQ -8(CX)(BX*1), CX
- MOVQ SI, (AX)
- MOVQ CX, -8(AX)(BX*1)
+ MOVQ (AX), SI
+ MOVQ -8(AX)(BX*1), AX
+ MOVQ SI, (CX)
+ MOVQ AX, -8(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_17through32:
- MOVOU (CX), X0
- MOVOU -16(CX)(BX*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU -16(AX)(BX*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_33through64:
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K:
- MOVQ DX, AX
+ MOVQ DX, CX
JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm64K
memmove_long_emit_remainder_encodeSnappyBlockAsm64K:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveLong
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
MOVQ BX, DI
SHRQ $0x05, DI
- MOVQ AX, SI
+ MOVQ CX, SI
ANDL $0x0000001f, SI
MOVQ $0x00000040, R8
SUBQ SI, R8
DECQ DI
JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32
- LEAQ -32(CX)(R8*1), SI
- LEAQ -32(AX)(R8*1), R9
+ LEAQ -32(AX)(R8*1), SI
+ LEAQ -32(CX)(R8*1), R9
emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_big_loop_back:
MOVOU (SI), X4
@@ -12461,718 +12473,719 @@ emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_big_loop_back:
JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_big_loop_back
emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32:
- MOVOU -32(CX)(R8*1), X4
- MOVOU -16(CX)(R8*1), X5
- MOVOA X4, -32(AX)(R8*1)
- MOVOA X5, -16(AX)(R8*1)
+ MOVOU -32(AX)(R8*1), X4
+ MOVOU -16(AX)(R8*1), X5
+ MOVOA X4, -32(CX)(R8*1)
+ MOVOA X5, -16(CX)(R8*1)
ADDQ $0x20, R8
CMPQ BX, R8
JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
- MOVQ DX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
+ MOVQ DX, CX
emit_literal_done_emit_remainder_encodeSnappyBlockAsm64K:
- MOVQ dst_base+0(FP), CX
- SUBQ CX, AX
- MOVQ AX, ret+48(FP)
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, CX
+ MOVQ CX, ret+56(FP)
RET
-// func encodeSnappyBlockAsm12B(dst []byte, src []byte) int
+// func encodeSnappyBlockAsm12B(dst []byte, src []byte, tmp *[16384]byte) int
// Requires: BMI, SSE2
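	// Same table-in-tmp refactor as the variants above, sized for this
	// version's 12-bit hash: tmp holds 16 KiB (4096 entries,
	// hash >> 0x34), the frame drops from $16408-56 to $24-64, and the
	// result moves to ret+56(FP).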
-TEXT ·encodeSnappyBlockAsm12B(SB), $16408-56
- MOVQ dst_base+0(FP), AX
- MOVQ $0x00000080, CX
- LEAQ 24(SP), DX
+TEXT ·encodeSnappyBlockAsm12B(SB), $24-64
+ MOVQ tmp+48(FP), AX
+ MOVQ dst_base+0(FP), CX
+ MOVQ $0x00000080, DX
+ MOVQ AX, BX
PXOR X0, X0
zero_loop_encodeSnappyBlockAsm12B:
- MOVOU X0, (DX)
- MOVOU X0, 16(DX)
- MOVOU X0, 32(DX)
- MOVOU X0, 48(DX)
- MOVOU X0, 64(DX)
- MOVOU X0, 80(DX)
- MOVOU X0, 96(DX)
- MOVOU X0, 112(DX)
- ADDQ $0x80, DX
- DECQ CX
+ MOVOU X0, (BX)
+ MOVOU X0, 16(BX)
+ MOVOU X0, 32(BX)
+ MOVOU X0, 48(BX)
+ MOVOU X0, 64(BX)
+ MOVOU X0, 80(BX)
+ MOVOU X0, 96(BX)
+ MOVOU X0, 112(BX)
+ ADDQ $0x80, BX
+ DECQ DX
JNZ zero_loop_encodeSnappyBlockAsm12B
MOVL $0x00000000, 12(SP)
- MOVQ src_len+32(FP), CX
- LEAQ -9(CX), DX
- LEAQ -8(CX), BX
- MOVL BX, 8(SP)
- SHRQ $0x05, CX
- SUBL CX, DX
- LEAQ (AX)(DX*1), DX
- MOVQ DX, (SP)
- MOVL $0x00000001, CX
- MOVL CX, 16(SP)
- MOVQ src_base+24(FP), DX
+ MOVQ src_len+32(FP), DX
+ LEAQ -9(DX), BX
+ LEAQ -8(DX), SI
+ MOVL SI, 8(SP)
+ SHRQ $0x05, DX
+ SUBL DX, BX
+ LEAQ (CX)(BX*1), BX
+ MOVQ BX, (SP)
+ MOVL $0x00000001, DX
+ MOVL DX, 16(SP)
+ MOVQ src_base+24(FP), BX
search_loop_encodeSnappyBlockAsm12B:
- MOVL CX, BX
- SUBL 12(SP), BX
- SHRL $0x05, BX
- LEAL 4(CX)(BX*1), BX
- CMPL BX, 8(SP)
+ MOVL DX, SI
+ SUBL 12(SP), SI
+ SHRL $0x05, SI
+ LEAL 4(DX)(SI*1), SI
+ CMPL SI, 8(SP)
JAE emit_remainder_encodeSnappyBlockAsm12B
- MOVQ (DX)(CX*1), SI
- MOVL BX, 20(SP)
- MOVQ $0x000000cf1bbcdcbb, R8
- MOVQ SI, R9
- MOVQ SI, R10
- SHRQ $0x08, R10
- SHLQ $0x18, R9
- IMULQ R8, R9
- SHRQ $0x34, R9
+ MOVQ (BX)(DX*1), DI
+ MOVL SI, 20(SP)
+ MOVQ $0x000000cf1bbcdcbb, R9
+ MOVQ DI, R10
+ MOVQ DI, R11
+ SHRQ $0x08, R11
SHLQ $0x18, R10
- IMULQ R8, R10
+ IMULQ R9, R10
SHRQ $0x34, R10
- MOVL 24(SP)(R9*4), BX
- MOVL 24(SP)(R10*4), DI
- MOVL CX, 24(SP)(R9*4)
- LEAL 1(CX), R9
- MOVL R9, 24(SP)(R10*4)
- MOVQ SI, R9
- SHRQ $0x10, R9
- SHLQ $0x18, R9
- IMULQ R8, R9
- SHRQ $0x34, R9
- MOVL CX, R8
- SUBL 16(SP), R8
- MOVL 1(DX)(R8*1), R10
- MOVQ SI, R8
- SHRQ $0x08, R8
- CMPL R8, R10
+ SHLQ $0x18, R11
+ IMULQ R9, R11
+ SHRQ $0x34, R11
+ MOVL (AX)(R10*4), SI
+ MOVL (AX)(R11*4), R8
+ MOVL DX, (AX)(R10*4)
+ LEAL 1(DX), R10
+ MOVL R10, (AX)(R11*4)
+ MOVQ DI, R10
+ SHRQ $0x10, R10
+ SHLQ $0x18, R10
+ IMULQ R9, R10
+ SHRQ $0x34, R10
+ MOVL DX, R9
+ SUBL 16(SP), R9
+ MOVL 1(BX)(R9*1), R11
+ MOVQ DI, R9
+ SHRQ $0x08, R9
+ CMPL R9, R11
JNE no_repeat_found_encodeSnappyBlockAsm12B
- LEAL 1(CX), SI
- MOVL 12(SP), BX
- MOVL SI, DI
- SUBL 16(SP), DI
+ LEAL 1(DX), DI
+ MOVL 12(SP), SI
+ MOVL DI, R8
+ SUBL 16(SP), R8
JZ repeat_extend_back_end_encodeSnappyBlockAsm12B
repeat_extend_back_loop_encodeSnappyBlockAsm12B:
- CMPL SI, BX
+ CMPL DI, SI
JBE repeat_extend_back_end_encodeSnappyBlockAsm12B
- MOVB -1(DX)(DI*1), R8
- MOVB -1(DX)(SI*1), R9
- CMPB R8, R9
+ MOVB -1(BX)(R8*1), R9
+ MOVB -1(BX)(DI*1), R10
+ CMPB R9, R10
JNE repeat_extend_back_end_encodeSnappyBlockAsm12B
- LEAL -1(SI), SI
- DECL DI
+ LEAL -1(DI), DI
+ DECL R8
JNZ repeat_extend_back_loop_encodeSnappyBlockAsm12B
repeat_extend_back_end_encodeSnappyBlockAsm12B:
- MOVL SI, BX
- SUBL 12(SP), BX
- LEAQ 3(AX)(BX*1), BX
- CMPQ BX, (SP)
+ MOVL DI, SI
+ SUBL 12(SP), SI
+ LEAQ 3(CX)(SI*1), SI
+ CMPQ SI, (SP)
JB repeat_dst_size_check_encodeSnappyBlockAsm12B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
repeat_dst_size_check_encodeSnappyBlockAsm12B:
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm12B
- MOVL SI, DI
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R8
- SUBL BX, DI
- LEAL -1(DI), BX
- CMPL BX, $0x3c
+ MOVL DI, R8
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R9
+ SUBL SI, R8
+ LEAL -1(R8), SI
+ CMPL SI, $0x3c
JB one_byte_repeat_emit_encodeSnappyBlockAsm12B
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_repeat_emit_encodeSnappyBlockAsm12B
JB three_bytes_repeat_emit_encodeSnappyBlockAsm12B
three_bytes_repeat_emit_encodeSnappyBlockAsm12B:
- MOVB $0xf4, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_repeat_emit_encodeSnappyBlockAsm12B
two_bytes_repeat_emit_encodeSnappyBlockAsm12B:
- MOVB $0xf0, (AX)
- MOVB BL, 1(AX)
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_repeat_emit_encodeSnappyBlockAsm12B
JMP memmove_long_repeat_emit_encodeSnappyBlockAsm12B
one_byte_repeat_emit_encodeSnappyBlockAsm12B:
- SHLB $0x02, BL
- MOVB BL, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
memmove_repeat_emit_encodeSnappyBlockAsm12B:
- LEAQ (AX)(DI*1), BX
+ LEAQ (CX)(R8*1), SI
// genMemMoveShort
- CMPQ DI, $0x08
+ CMPQ R8, $0x08
JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8
- CMPQ DI, $0x10
+ CMPQ R8, $0x10
JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8through16
- CMPQ DI, $0x20
+ CMPQ R8, $0x20
JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_17through32
JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_33through64
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8:
- MOVQ (R8), R9
- MOVQ R9, (AX)
+ MOVQ (R9), R10
+ MOVQ R10, (CX)
JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8through16:
- MOVQ (R8), R9
- MOVQ -8(R8)(DI*1), R8
- MOVQ R9, (AX)
- MOVQ R8, -8(AX)(DI*1)
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (CX)
+ MOVQ R9, -8(CX)(R8*1)
JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_17through32:
- MOVOU (R8), X0
- MOVOU -16(R8)(DI*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(DI*1)
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R8*1)
JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_33through64:
- MOVOU (R8), X0
- MOVOU 16(R8), X1
- MOVOU -32(R8)(DI*1), X2
- MOVOU -16(R8)(DI*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(DI*1)
- MOVOU X3, -16(AX)(DI*1)
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R8*1)
+ MOVOU X3, -16(CX)(R8*1)
memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B:
- MOVQ BX, AX
+ MOVQ SI, CX
JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm12B
memmove_long_repeat_emit_encodeSnappyBlockAsm12B:
- LEAQ (AX)(DI*1), BX
+ LEAQ (CX)(R8*1), SI
// genMemMoveLong
- MOVOU (R8), X0
- MOVOU 16(R8), X1
- MOVOU -32(R8)(DI*1), X2
- MOVOU -16(R8)(DI*1), X3
- MOVQ DI, R10
- SHRQ $0x05, R10
- MOVQ AX, R9
- ANDL $0x0000001f, R9
- MOVQ $0x00000040, R11
- SUBQ R9, R11
- DECQ R10
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R11
+ SHRQ $0x05, R11
+ MOVQ CX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R12
+ SUBQ R10, R12
+ DECQ R11
JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32
- LEAQ -32(R8)(R11*1), R9
- LEAQ -32(AX)(R11*1), R12
+ LEAQ -32(R9)(R12*1), R10
+ LEAQ -32(CX)(R12*1), R13
emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_big_loop_back:
- MOVOU (R9), X4
- MOVOU 16(R9), X5
- MOVOA X4, (R12)
- MOVOA X5, 16(R12)
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R13)
+ MOVOA X5, 16(R13)
+ ADDQ $0x20, R13
+ ADDQ $0x20, R10
ADDQ $0x20, R12
- ADDQ $0x20, R9
- ADDQ $0x20, R11
- DECQ R10
+ DECQ R11
JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_big_loop_back
emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32:
- MOVOU -32(R8)(R11*1), X4
- MOVOU -16(R8)(R11*1), X5
- MOVOA X4, -32(AX)(R11*1)
- MOVOA X5, -16(AX)(R11*1)
- ADDQ $0x20, R11
- CMPQ DI, R11
+ MOVOU -32(R9)(R12*1), X4
+ MOVOU -16(R9)(R12*1), X5
+ MOVOA X4, -32(CX)(R12*1)
+ MOVOA X5, -16(CX)(R12*1)
+ ADDQ $0x20, R12
+ CMPQ R8, R12
JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(DI*1)
- MOVOU X3, -16(AX)(DI*1)
- MOVQ BX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R8*1)
+ MOVOU X3, -16(CX)(R8*1)
+ MOVQ SI, CX
emit_literal_done_repeat_emit_encodeSnappyBlockAsm12B:
- ADDL $0x05, CX
- MOVL CX, BX
- SUBL 16(SP), BX
- MOVQ src_len+32(FP), DI
- SUBL CX, DI
- LEAQ (DX)(CX*1), R8
- LEAQ (DX)(BX*1), BX
+ ADDL $0x05, DX
+ MOVL DX, SI
+ SUBL 16(SP), SI
+ MOVQ src_len+32(FP), R8
+ SUBL DX, R8
+ LEAQ (BX)(DX*1), R9
+ LEAQ (BX)(SI*1), SI
// matchLen
- XORL R10, R10
+ XORL R11, R11
matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm12B:
- CMPL DI, $0x10
+ CMPL R8, $0x10
JB matchlen_match8_repeat_extend_encodeSnappyBlockAsm12B
- MOVQ (R8)(R10*1), R9
- MOVQ 8(R8)(R10*1), R11
- XORQ (BX)(R10*1), R9
+ MOVQ (R9)(R11*1), R10
+ MOVQ 8(R9)(R11*1), R12
+ XORQ (SI)(R11*1), R10
JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm12B
- XORQ 8(BX)(R10*1), R11
+ XORQ 8(SI)(R11*1), R12
JNZ matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm12B
- LEAL -16(DI), DI
- LEAL 16(R10), R10
+ LEAL -16(R8), R8
+ LEAL 16(R11), R11
JMP matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm12B
matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm12B:
#ifdef GOAMD64_v3
- TZCNTQ R11, R11
+ TZCNTQ R12, R12
#else
- BSFQ R11, R11
+ BSFQ R12, R12
#endif
- SARQ $0x03, R11
- LEAL 8(R10)(R11*1), R10
+ SARQ $0x03, R12
+ LEAL 8(R11)(R12*1), R11
JMP repeat_extend_forward_end_encodeSnappyBlockAsm12B
matchlen_match8_repeat_extend_encodeSnappyBlockAsm12B:
- CMPL DI, $0x08
+ CMPL R8, $0x08
JB matchlen_match4_repeat_extend_encodeSnappyBlockAsm12B
- MOVQ (R8)(R10*1), R9
- XORQ (BX)(R10*1), R9
+ MOVQ (R9)(R11*1), R10
+ XORQ (SI)(R11*1), R10
JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm12B
- LEAL -8(DI), DI
- LEAL 8(R10), R10
+ LEAL -8(R8), R8
+ LEAL 8(R11), R11
JMP matchlen_match4_repeat_extend_encodeSnappyBlockAsm12B
matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm12B:
#ifdef GOAMD64_v3
- TZCNTQ R9, R9
+ TZCNTQ R10, R10
#else
- BSFQ R9, R9
+ BSFQ R10, R10
#endif
- SARQ $0x03, R9
- LEAL (R10)(R9*1), R10
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
JMP repeat_extend_forward_end_encodeSnappyBlockAsm12B
matchlen_match4_repeat_extend_encodeSnappyBlockAsm12B:
- CMPL DI, $0x04
+ CMPL R8, $0x04
JB matchlen_match2_repeat_extend_encodeSnappyBlockAsm12B
- MOVL (R8)(R10*1), R9
- CMPL (BX)(R10*1), R9
+ MOVL (R9)(R11*1), R10
+ CMPL (SI)(R11*1), R10
JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm12B
- LEAL -4(DI), DI
- LEAL 4(R10), R10
+ LEAL -4(R8), R8
+ LEAL 4(R11), R11
matchlen_match2_repeat_extend_encodeSnappyBlockAsm12B:
- CMPL DI, $0x01
+ CMPL R8, $0x01
JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm12B
JB repeat_extend_forward_end_encodeSnappyBlockAsm12B
- MOVW (R8)(R10*1), R9
- CMPW (BX)(R10*1), R9
+ MOVW (R9)(R11*1), R10
+ CMPW (SI)(R11*1), R10
JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm12B
- LEAL 2(R10), R10
- SUBL $0x02, DI
+ LEAL 2(R11), R11
+ SUBL $0x02, R8
JZ repeat_extend_forward_end_encodeSnappyBlockAsm12B
matchlen_match1_repeat_extend_encodeSnappyBlockAsm12B:
- MOVB (R8)(R10*1), R9
- CMPB (BX)(R10*1), R9
+ MOVB (R9)(R11*1), R10
+ CMPB (SI)(R11*1), R10
JNE repeat_extend_forward_end_encodeSnappyBlockAsm12B
- LEAL 1(R10), R10
+ LEAL 1(R11), R11
repeat_extend_forward_end_encodeSnappyBlockAsm12B:
- ADDL R10, CX
- MOVL CX, BX
- SUBL SI, BX
- MOVL 16(SP), SI
+ ADDL R11, DX
+ MOVL DX, SI
+ SUBL DI, SI
+ MOVL 16(SP), DI
// emitCopy
two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm12B:
- CMPL BX, $0x40
+ CMPL SI, $0x40
JBE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm12B
- MOVB $0xee, (AX)
- MOVW SI, 1(AX)
- LEAL -60(BX), BX
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW DI, 1(CX)
+ LEAL -60(SI), SI
+ ADDQ $0x03, CX
JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm12B
two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm12B:
- MOVL BX, DI
- SHLL $0x02, DI
- CMPL BX, $0x0c
+ MOVL SI, R8
+ SHLL $0x02, R8
+ CMPL SI, $0x0c
JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm12B
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm12B
- LEAL -15(DI), DI
- MOVB SI, 1(AX)
- SHRL $0x08, SI
- SHLL $0x05, SI
- ORL SI, DI
- MOVB DI, (AX)
- ADDQ $0x02, AX
+ LEAL -15(R8), R8
+ MOVB DI, 1(CX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R8
+ MOVB R8, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeSnappyBlockAsm12B
emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm12B:
- LEAL -2(DI), DI
- MOVB DI, (AX)
- MOVW SI, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(R8), R8
+ MOVB R8, (CX)
+ MOVW DI, 1(CX)
+ ADDQ $0x03, CX
repeat_end_emit_encodeSnappyBlockAsm12B:
- MOVL CX, 12(SP)
+ MOVL DX, 12(SP)
JMP search_loop_encodeSnappyBlockAsm12B
no_repeat_found_encodeSnappyBlockAsm12B:
- CMPL (DX)(BX*1), SI
+ CMPL (BX)(SI*1), DI
JEQ candidate_match_encodeSnappyBlockAsm12B
- SHRQ $0x08, SI
- MOVL 24(SP)(R9*4), BX
- LEAL 2(CX), R8
- CMPL (DX)(DI*1), SI
+ SHRQ $0x08, DI
+ MOVL (AX)(R10*4), SI
+ LEAL 2(DX), R9
+ CMPL (BX)(R8*1), DI
JEQ candidate2_match_encodeSnappyBlockAsm12B
- MOVL R8, 24(SP)(R9*4)
- SHRQ $0x08, SI
- CMPL (DX)(BX*1), SI
+ MOVL R9, (AX)(R10*4)
+ SHRQ $0x08, DI
+ CMPL (BX)(SI*1), DI
JEQ candidate3_match_encodeSnappyBlockAsm12B
- MOVL 20(SP), CX
+ MOVL 20(SP), DX
JMP search_loop_encodeSnappyBlockAsm12B
candidate3_match_encodeSnappyBlockAsm12B:
- ADDL $0x02, CX
+ ADDL $0x02, DX
JMP candidate_match_encodeSnappyBlockAsm12B
candidate2_match_encodeSnappyBlockAsm12B:
- MOVL R8, 24(SP)(R9*4)
- INCL CX
- MOVL DI, BX
+ MOVL R9, (AX)(R10*4)
+ INCL DX
+ MOVL R8, SI
candidate_match_encodeSnappyBlockAsm12B:
- MOVL 12(SP), SI
- TESTL BX, BX
+ MOVL 12(SP), DI
+ TESTL SI, SI
JZ match_extend_back_end_encodeSnappyBlockAsm12B
match_extend_back_loop_encodeSnappyBlockAsm12B:
- CMPL CX, SI
+ CMPL DX, DI
JBE match_extend_back_end_encodeSnappyBlockAsm12B
- MOVB -1(DX)(BX*1), DI
- MOVB -1(DX)(CX*1), R8
- CMPB DI, R8
+ MOVB -1(BX)(SI*1), R8
+ MOVB -1(BX)(DX*1), R9
+ CMPB R8, R9
JNE match_extend_back_end_encodeSnappyBlockAsm12B
- LEAL -1(CX), CX
- DECL BX
+ LEAL -1(DX), DX
+ DECL SI
JZ match_extend_back_end_encodeSnappyBlockAsm12B
JMP match_extend_back_loop_encodeSnappyBlockAsm12B
match_extend_back_end_encodeSnappyBlockAsm12B:
- MOVL CX, SI
- SUBL 12(SP), SI
- LEAQ 3(AX)(SI*1), SI
- CMPQ SI, (SP)
+ MOVL DX, DI
+ SUBL 12(SP), DI
+ LEAQ 3(CX)(DI*1), DI
+ CMPQ DI, (SP)
JB match_dst_size_check_encodeSnappyBlockAsm12B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_dst_size_check_encodeSnappyBlockAsm12B:
- MOVL CX, SI
- MOVL 12(SP), DI
- CMPL DI, SI
+ MOVL DX, DI
+ MOVL 12(SP), R8
+ CMPL R8, DI
JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm12B
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(DI*1), SI
- SUBL DI, R8
- LEAL -1(R8), DI
- CMPL DI, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(R8*1), DI
+ SUBL R8, R9
+ LEAL -1(R9), R8
+ CMPL R8, $0x3c
JB one_byte_match_emit_encodeSnappyBlockAsm12B
- CMPL DI, $0x00000100
+ CMPL R8, $0x00000100
JB two_bytes_match_emit_encodeSnappyBlockAsm12B
JB three_bytes_match_emit_encodeSnappyBlockAsm12B
three_bytes_match_emit_encodeSnappyBlockAsm12B:
- MOVB $0xf4, (AX)
- MOVW DI, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW R8, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_encodeSnappyBlockAsm12B
two_bytes_match_emit_encodeSnappyBlockAsm12B:
- MOVB $0xf0, (AX)
- MOVB DI, 1(AX)
- ADDQ $0x02, AX
- CMPL DI, $0x40
+ MOVB $0xf0, (CX)
+ MOVB R8, 1(CX)
+ ADDQ $0x02, CX
+ CMPL R8, $0x40
JB memmove_match_emit_encodeSnappyBlockAsm12B
JMP memmove_long_match_emit_encodeSnappyBlockAsm12B
one_byte_match_emit_encodeSnappyBlockAsm12B:
- SHLB $0x02, DI
- MOVB DI, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, R8
+ MOVB R8, (CX)
+ ADDQ $0x01, CX
memmove_match_emit_encodeSnappyBlockAsm12B:
- LEAQ (AX)(R8*1), DI
+ LEAQ (CX)(R9*1), R8
// genMemMoveShort
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_33through64
emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8:
- MOVQ (SI), R9
- MOVQ R9, (AX)
+ MOVQ (DI), R10
+ MOVQ R10, (CX)
JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm12B
emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8through16:
- MOVQ (SI), R9
- MOVQ -8(SI)(R8*1), SI
- MOVQ R9, (AX)
- MOVQ SI, -8(AX)(R8*1)
+ MOVQ (DI), R10
+ MOVQ -8(DI)(R9*1), DI
+ MOVQ R10, (CX)
+ MOVQ DI, -8(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm12B
emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_17through32:
- MOVOU (SI), X0
- MOVOU -16(SI)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (DI), X0
+ MOVOU -16(DI)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm12B
emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_33through64:
- MOVOU (SI), X0
- MOVOU 16(SI), X1
- MOVOU -32(SI)(R8*1), X2
- MOVOU -16(SI)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (DI), X0
+ MOVOU 16(DI), X1
+ MOVOU -32(DI)(R9*1), X2
+ MOVOU -16(DI)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_match_emit_encodeSnappyBlockAsm12B:
- MOVQ DI, AX
+ MOVQ R8, CX
JMP emit_literal_done_match_emit_encodeSnappyBlockAsm12B
memmove_long_match_emit_encodeSnappyBlockAsm12B:
- LEAQ (AX)(R8*1), DI
+ LEAQ (CX)(R9*1), R8
// genMemMoveLong
- MOVOU (SI), X0
- MOVOU 16(SI), X1
- MOVOU -32(SI)(R8*1), X2
- MOVOU -16(SI)(R8*1), X3
- MOVQ R8, R10
- SHRQ $0x05, R10
- MOVQ AX, R9
- ANDL $0x0000001f, R9
- MOVQ $0x00000040, R11
- SUBQ R9, R11
- DECQ R10
+ MOVOU (DI), X0
+ MOVOU 16(DI), X1
+ MOVOU -32(DI)(R9*1), X2
+ MOVOU -16(DI)(R9*1), X3
+ MOVQ R9, R11
+ SHRQ $0x05, R11
+ MOVQ CX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R12
+ SUBQ R10, R12
+ DECQ R11
JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32
- LEAQ -32(SI)(R11*1), R9
- LEAQ -32(AX)(R11*1), R12
+ LEAQ -32(DI)(R12*1), R10
+ LEAQ -32(CX)(R12*1), R13
emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_big_loop_back:
- MOVOU (R9), X4
- MOVOU 16(R9), X5
- MOVOA X4, (R12)
- MOVOA X5, 16(R12)
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R13)
+ MOVOA X5, 16(R13)
+ ADDQ $0x20, R13
+ ADDQ $0x20, R10
ADDQ $0x20, R12
- ADDQ $0x20, R9
- ADDQ $0x20, R11
- DECQ R10
+ DECQ R11
JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32:
- MOVOU -32(SI)(R11*1), X4
- MOVOU -16(SI)(R11*1), X5
- MOVOA X4, -32(AX)(R11*1)
- MOVOA X5, -16(AX)(R11*1)
- ADDQ $0x20, R11
- CMPQ R8, R11
+ MOVOU -32(DI)(R12*1), X4
+ MOVOU -16(DI)(R12*1), X5
+ MOVOA X4, -32(CX)(R12*1)
+ MOVOA X5, -16(CX)(R12*1)
+ ADDQ $0x20, R12
+ CMPQ R9, R12
JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ DI, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ R8, CX
emit_literal_done_match_emit_encodeSnappyBlockAsm12B:
match_nolit_loop_encodeSnappyBlockAsm12B:
- MOVL CX, SI
- SUBL BX, SI
- MOVL SI, 16(SP)
- ADDL $0x04, CX
- ADDL $0x04, BX
- MOVQ src_len+32(FP), SI
- SUBL CX, SI
- LEAQ (DX)(CX*1), DI
- LEAQ (DX)(BX*1), BX
+ MOVL DX, DI
+ SUBL SI, DI
+ MOVL DI, 16(SP)
+ ADDL $0x04, DX
+ ADDL $0x04, SI
+ MOVQ src_len+32(FP), DI
+ SUBL DX, DI
+ LEAQ (BX)(DX*1), R8
+ LEAQ (BX)(SI*1), SI
// matchLen
- XORL R9, R9
+ XORL R10, R10
matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm12B:
- CMPL SI, $0x10
+ CMPL DI, $0x10
JB matchlen_match8_match_nolit_encodeSnappyBlockAsm12B
- MOVQ (DI)(R9*1), R8
- MOVQ 8(DI)(R9*1), R10
- XORQ (BX)(R9*1), R8
+ MOVQ (R8)(R10*1), R9
+ MOVQ 8(R8)(R10*1), R11
+ XORQ (SI)(R10*1), R9
JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm12B
- XORQ 8(BX)(R9*1), R10
+ XORQ 8(SI)(R10*1), R11
JNZ matchlen_bsf_16match_nolit_encodeSnappyBlockAsm12B
- LEAL -16(SI), SI
- LEAL 16(R9), R9
+ LEAL -16(DI), DI
+ LEAL 16(R10), R10
JMP matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm12B
matchlen_bsf_16match_nolit_encodeSnappyBlockAsm12B:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL 8(R9)(R10*1), R9
+ SARQ $0x03, R11
+ LEAL 8(R10)(R11*1), R10
JMP match_nolit_end_encodeSnappyBlockAsm12B
matchlen_match8_match_nolit_encodeSnappyBlockAsm12B:
- CMPL SI, $0x08
+ CMPL DI, $0x08
JB matchlen_match4_match_nolit_encodeSnappyBlockAsm12B
- MOVQ (DI)(R9*1), R8
- XORQ (BX)(R9*1), R8
+ MOVQ (R8)(R10*1), R9
+ XORQ (SI)(R10*1), R9
JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm12B
- LEAL -8(SI), SI
- LEAL 8(R9), R9
+ LEAL -8(DI), DI
+ LEAL 8(R10), R10
JMP matchlen_match4_match_nolit_encodeSnappyBlockAsm12B
matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm12B:
#ifdef GOAMD64_v3
- TZCNTQ R8, R8
+ TZCNTQ R9, R9
#else
- BSFQ R8, R8
+ BSFQ R9, R9
#endif
- SARQ $0x03, R8
- LEAL (R9)(R8*1), R9
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
JMP match_nolit_end_encodeSnappyBlockAsm12B
matchlen_match4_match_nolit_encodeSnappyBlockAsm12B:
- CMPL SI, $0x04
+ CMPL DI, $0x04
JB matchlen_match2_match_nolit_encodeSnappyBlockAsm12B
- MOVL (DI)(R9*1), R8
- CMPL (BX)(R9*1), R8
+ MOVL (R8)(R10*1), R9
+ CMPL (SI)(R10*1), R9
JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm12B
- LEAL -4(SI), SI
- LEAL 4(R9), R9
+ LEAL -4(DI), DI
+ LEAL 4(R10), R10
matchlen_match2_match_nolit_encodeSnappyBlockAsm12B:
- CMPL SI, $0x01
+ CMPL DI, $0x01
JE matchlen_match1_match_nolit_encodeSnappyBlockAsm12B
JB match_nolit_end_encodeSnappyBlockAsm12B
- MOVW (DI)(R9*1), R8
- CMPW (BX)(R9*1), R8
+ MOVW (R8)(R10*1), R9
+ CMPW (SI)(R10*1), R9
JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm12B
- LEAL 2(R9), R9
- SUBL $0x02, SI
+ LEAL 2(R10), R10
+ SUBL $0x02, DI
JZ match_nolit_end_encodeSnappyBlockAsm12B
matchlen_match1_match_nolit_encodeSnappyBlockAsm12B:
- MOVB (DI)(R9*1), R8
- CMPB (BX)(R9*1), R8
+ MOVB (R8)(R10*1), R9
+ CMPB (SI)(R10*1), R9
JNE match_nolit_end_encodeSnappyBlockAsm12B
- LEAL 1(R9), R9
+ LEAL 1(R10), R10
match_nolit_end_encodeSnappyBlockAsm12B:
- ADDL R9, CX
- MOVL 16(SP), BX
- ADDL $0x04, R9
- MOVL CX, 12(SP)
+ ADDL R10, DX
+ MOVL 16(SP), SI
+ ADDL $0x04, R10
+ MOVL DX, 12(SP)
// emitCopy
two_byte_offset_match_nolit_encodeSnappyBlockAsm12B:
- CMPL R9, $0x40
+ CMPL R10, $0x40
JBE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm12B
- MOVB $0xee, (AX)
- MOVW BX, 1(AX)
- LEAL -60(R9), R9
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW SI, 1(CX)
+ LEAL -60(R10), R10
+ ADDQ $0x03, CX
JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm12B
two_byte_offset_short_match_nolit_encodeSnappyBlockAsm12B:
- MOVL R9, SI
- SHLL $0x02, SI
- CMPL R9, $0x0c
+ MOVL R10, DI
+ SHLL $0x02, DI
+ CMPL R10, $0x0c
JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm12B
- CMPL BX, $0x00000800
+ CMPL SI, $0x00000800
JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm12B
- LEAL -15(SI), SI
- MOVB BL, 1(AX)
- SHRL $0x08, BX
- SHLL $0x05, BX
- ORL BX, SI
- MOVB SI, (AX)
- ADDQ $0x02, AX
+ LEAL -15(DI), DI
+ MOVB SI, 1(CX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm12B
emit_copy_three_match_nolit_encodeSnappyBlockAsm12B:
- LEAL -2(SI), SI
- MOVB SI, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(DI), DI
+ MOVB DI, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
match_nolit_emitcopy_end_encodeSnappyBlockAsm12B:
- CMPL CX, 8(SP)
+ CMPL DX, 8(SP)
JAE emit_remainder_encodeSnappyBlockAsm12B
- MOVQ -2(DX)(CX*1), SI
- CMPQ AX, (SP)
+ MOVQ -2(BX)(DX*1), DI
+ CMPQ CX, (SP)
JB match_nolit_dst_ok_encodeSnappyBlockAsm12B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_nolit_dst_ok_encodeSnappyBlockAsm12B:
- MOVQ $0x000000cf1bbcdcbb, R8
- MOVQ SI, DI
- SHRQ $0x10, SI
- MOVQ SI, BX
- SHLQ $0x18, DI
- IMULQ R8, DI
- SHRQ $0x34, DI
- SHLQ $0x18, BX
- IMULQ R8, BX
- SHRQ $0x34, BX
- LEAL -2(CX), R8
- LEAQ 24(SP)(BX*4), R9
- MOVL (R9), BX
- MOVL R8, 24(SP)(DI*4)
- MOVL CX, (R9)
- CMPL (DX)(BX*1), SI
+ MOVQ $0x000000cf1bbcdcbb, R9
+ MOVQ DI, R8
+ SHRQ $0x10, DI
+ MOVQ DI, SI
+ SHLQ $0x18, R8
+ IMULQ R9, R8
+ SHRQ $0x34, R8
+ SHLQ $0x18, SI
+ IMULQ R9, SI
+ SHRQ $0x34, SI
+ LEAL -2(DX), R9
+ LEAQ (AX)(SI*4), R10
+ MOVL (R10), SI
+ MOVL R9, (AX)(R8*4)
+ MOVL DX, (R10)
+ CMPL (BX)(SI*1), DI
JEQ match_nolit_loop_encodeSnappyBlockAsm12B
- INCL CX
+ INCL DX
JMP search_loop_encodeSnappyBlockAsm12B
emit_remainder_encodeSnappyBlockAsm12B:
- MOVQ src_len+32(FP), CX
- SUBL 12(SP), CX
- LEAQ 3(AX)(CX*1), CX
- CMPQ CX, (SP)
+ MOVQ src_len+32(FP), AX
+ SUBL 12(SP), AX
+ LEAQ 3(CX)(AX*1), AX
+ CMPQ AX, (SP)
JB emit_remainder_ok_encodeSnappyBlockAsm12B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
emit_remainder_ok_encodeSnappyBlockAsm12B:
- MOVQ src_len+32(FP), CX
- MOVL 12(SP), BX
- CMPL BX, CX
+ MOVQ src_len+32(FP), AX
+ MOVL 12(SP), DX
+ CMPL DX, AX
JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm12B
- MOVL CX, SI
- MOVL CX, 12(SP)
- LEAQ (DX)(BX*1), CX
- SUBL BX, SI
+ MOVL AX, SI
+ MOVL AX, 12(SP)
+ LEAQ (BX)(DX*1), AX
+ SUBL DX, SI
LEAL -1(SI), DX
CMPL DX, $0x3c
JB one_byte_emit_remainder_encodeSnappyBlockAsm12B
@@ -13181,26 +13194,26 @@ emit_remainder_ok_encodeSnappyBlockAsm12B:
JB three_bytes_emit_remainder_encodeSnappyBlockAsm12B
three_bytes_emit_remainder_encodeSnappyBlockAsm12B:
- MOVB $0xf4, (AX)
- MOVW DX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW DX, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_emit_remainder_encodeSnappyBlockAsm12B
two_bytes_emit_remainder_encodeSnappyBlockAsm12B:
- MOVB $0xf0, (AX)
- MOVB DL, 1(AX)
- ADDQ $0x02, AX
+ MOVB $0xf0, (CX)
+ MOVB DL, 1(CX)
+ ADDQ $0x02, CX
CMPL DX, $0x40
JB memmove_emit_remainder_encodeSnappyBlockAsm12B
JMP memmove_long_emit_remainder_encodeSnappyBlockAsm12B
one_byte_emit_remainder_encodeSnappyBlockAsm12B:
SHLB $0x02, DL
- MOVB DL, (AX)
- ADDQ $0x01, AX
+ MOVB DL, (CX)
+ ADDQ $0x01, CX
memmove_emit_remainder_encodeSnappyBlockAsm12B:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveShort
@@ -13216,73 +13229,73 @@ memmove_emit_remainder_encodeSnappyBlockAsm12B:
JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_33through64
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_1or2:
- MOVB (CX), SI
- MOVB -1(CX)(BX*1), CL
- MOVB SI, (AX)
- MOVB CL, -1(AX)(BX*1)
+ MOVB (AX), SI
+ MOVB -1(AX)(BX*1), AL
+ MOVB SI, (CX)
+ MOVB AL, -1(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_3:
- MOVW (CX), SI
- MOVB 2(CX), CL
- MOVW SI, (AX)
- MOVB CL, 2(AX)
+ MOVW (AX), SI
+ MOVB 2(AX), AL
+ MOVW SI, (CX)
+ MOVB AL, 2(CX)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_4through7:
- MOVL (CX), SI
- MOVL -4(CX)(BX*1), CX
- MOVL SI, (AX)
- MOVL CX, -4(AX)(BX*1)
+ MOVL (AX), SI
+ MOVL -4(AX)(BX*1), AX
+ MOVL SI, (CX)
+ MOVL AX, -4(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_8through16:
- MOVQ (CX), SI
- MOVQ -8(CX)(BX*1), CX
- MOVQ SI, (AX)
- MOVQ CX, -8(AX)(BX*1)
+ MOVQ (AX), SI
+ MOVQ -8(AX)(BX*1), AX
+ MOVQ SI, (CX)
+ MOVQ AX, -8(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_17through32:
- MOVOU (CX), X0
- MOVOU -16(CX)(BX*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU -16(AX)(BX*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_33through64:
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B:
- MOVQ DX, AX
+ MOVQ DX, CX
JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm12B
memmove_long_emit_remainder_encodeSnappyBlockAsm12B:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveLong
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
MOVQ BX, DI
SHRQ $0x05, DI
- MOVQ AX, SI
+ MOVQ CX, SI
ANDL $0x0000001f, SI
MOVQ $0x00000040, R8
SUBQ SI, R8
DECQ DI
JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32
- LEAQ -32(CX)(R8*1), SI
- LEAQ -32(AX)(R8*1), R9
+ LEAQ -32(AX)(R8*1), SI
+ LEAQ -32(CX)(R8*1), R9
emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_big_loop_back:
MOVOU (SI), X4
@@ -13296,718 +13309,719 @@ emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_big_loop_back:
JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_big_loop_back
emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32:
- MOVOU -32(CX)(R8*1), X4
- MOVOU -16(CX)(R8*1), X5
- MOVOA X4, -32(AX)(R8*1)
- MOVOA X5, -16(AX)(R8*1)
+ MOVOU -32(AX)(R8*1), X4
+ MOVOU -16(AX)(R8*1), X5
+ MOVOA X4, -32(CX)(R8*1)
+ MOVOA X5, -16(CX)(R8*1)
ADDQ $0x20, R8
CMPQ BX, R8
JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
- MOVQ DX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
+ MOVQ DX, CX
emit_literal_done_emit_remainder_encodeSnappyBlockAsm12B:
- MOVQ dst_base+0(FP), CX
- SUBQ CX, AX
- MOVQ AX, ret+48(FP)
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, CX
+ MOVQ CX, ret+56(FP)
RET
-// func encodeSnappyBlockAsm10B(dst []byte, src []byte) int
+// func encodeSnappyBlockAsm10B(dst []byte, src []byte, tmp *[4096]byte) int
// Requires: BMI, SSE2
-TEXT ·encodeSnappyBlockAsm10B(SB), $4120-56
- MOVQ dst_base+0(FP), AX
- MOVQ $0x00000020, CX
- LEAQ 24(SP), DX
+TEXT ·encodeSnappyBlockAsm10B(SB), $24-64
+ MOVQ tmp+48(FP), AX
+ MOVQ dst_base+0(FP), CX
+ MOVQ $0x00000020, DX
+ MOVQ AX, BX
PXOR X0, X0
zero_loop_encodeSnappyBlockAsm10B:
- MOVOU X0, (DX)
- MOVOU X0, 16(DX)
- MOVOU X0, 32(DX)
- MOVOU X0, 48(DX)
- MOVOU X0, 64(DX)
- MOVOU X0, 80(DX)
- MOVOU X0, 96(DX)
- MOVOU X0, 112(DX)
- ADDQ $0x80, DX
- DECQ CX
+ MOVOU X0, (BX)
+ MOVOU X0, 16(BX)
+ MOVOU X0, 32(BX)
+ MOVOU X0, 48(BX)
+ MOVOU X0, 64(BX)
+ MOVOU X0, 80(BX)
+ MOVOU X0, 96(BX)
+ MOVOU X0, 112(BX)
+ ADDQ $0x80, BX
+ DECQ DX
JNZ zero_loop_encodeSnappyBlockAsm10B
MOVL $0x00000000, 12(SP)
- MOVQ src_len+32(FP), CX
- LEAQ -9(CX), DX
- LEAQ -8(CX), BX
- MOVL BX, 8(SP)
- SHRQ $0x05, CX
- SUBL CX, DX
- LEAQ (AX)(DX*1), DX
- MOVQ DX, (SP)
- MOVL $0x00000001, CX
- MOVL CX, 16(SP)
- MOVQ src_base+24(FP), DX
+ MOVQ src_len+32(FP), DX
+ LEAQ -9(DX), BX
+ LEAQ -8(DX), SI
+ MOVL SI, 8(SP)
+ SHRQ $0x05, DX
+ SUBL DX, BX
+ LEAQ (CX)(BX*1), BX
+ MOVQ BX, (SP)
+ MOVL $0x00000001, DX
+ MOVL DX, 16(SP)
+ MOVQ src_base+24(FP), BX
search_loop_encodeSnappyBlockAsm10B:
- MOVL CX, BX
- SUBL 12(SP), BX
- SHRL $0x05, BX
- LEAL 4(CX)(BX*1), BX
- CMPL BX, 8(SP)
+ MOVL DX, SI
+ SUBL 12(SP), SI
+ SHRL $0x05, SI
+ LEAL 4(DX)(SI*1), SI
+ CMPL SI, 8(SP)
JAE emit_remainder_encodeSnappyBlockAsm10B
- MOVQ (DX)(CX*1), SI
- MOVL BX, 20(SP)
- MOVQ $0x9e3779b1, R8
- MOVQ SI, R9
- MOVQ SI, R10
- SHRQ $0x08, R10
- SHLQ $0x20, R9
- IMULQ R8, R9
- SHRQ $0x36, R9
+ MOVQ (BX)(DX*1), DI
+ MOVL SI, 20(SP)
+ MOVQ $0x9e3779b1, R9
+ MOVQ DI, R10
+ MOVQ DI, R11
+ SHRQ $0x08, R11
SHLQ $0x20, R10
- IMULQ R8, R10
+ IMULQ R9, R10
SHRQ $0x36, R10
- MOVL 24(SP)(R9*4), BX
- MOVL 24(SP)(R10*4), DI
- MOVL CX, 24(SP)(R9*4)
- LEAL 1(CX), R9
- MOVL R9, 24(SP)(R10*4)
- MOVQ SI, R9
- SHRQ $0x10, R9
- SHLQ $0x20, R9
- IMULQ R8, R9
- SHRQ $0x36, R9
- MOVL CX, R8
- SUBL 16(SP), R8
- MOVL 1(DX)(R8*1), R10
- MOVQ SI, R8
- SHRQ $0x08, R8
- CMPL R8, R10
+ SHLQ $0x20, R11
+ IMULQ R9, R11
+ SHRQ $0x36, R11
+ MOVL (AX)(R10*4), SI
+ MOVL (AX)(R11*4), R8
+ MOVL DX, (AX)(R10*4)
+ LEAL 1(DX), R10
+ MOVL R10, (AX)(R11*4)
+ MOVQ DI, R10
+ SHRQ $0x10, R10
+ SHLQ $0x20, R10
+ IMULQ R9, R10
+ SHRQ $0x36, R10
+ MOVL DX, R9
+ SUBL 16(SP), R9
+ MOVL 1(BX)(R9*1), R11
+ MOVQ DI, R9
+ SHRQ $0x08, R9
+ CMPL R9, R11
JNE no_repeat_found_encodeSnappyBlockAsm10B
- LEAL 1(CX), SI
- MOVL 12(SP), BX
- MOVL SI, DI
- SUBL 16(SP), DI
+ LEAL 1(DX), DI
+ MOVL 12(SP), SI
+ MOVL DI, R8
+ SUBL 16(SP), R8
JZ repeat_extend_back_end_encodeSnappyBlockAsm10B
repeat_extend_back_loop_encodeSnappyBlockAsm10B:
- CMPL SI, BX
+ CMPL DI, SI
JBE repeat_extend_back_end_encodeSnappyBlockAsm10B
- MOVB -1(DX)(DI*1), R8
- MOVB -1(DX)(SI*1), R9
- CMPB R8, R9
+ MOVB -1(BX)(R8*1), R9
+ MOVB -1(BX)(DI*1), R10
+ CMPB R9, R10
JNE repeat_extend_back_end_encodeSnappyBlockAsm10B
- LEAL -1(SI), SI
- DECL DI
+ LEAL -1(DI), DI
+ DECL R8
JNZ repeat_extend_back_loop_encodeSnappyBlockAsm10B
repeat_extend_back_end_encodeSnappyBlockAsm10B:
- MOVL SI, BX
- SUBL 12(SP), BX
- LEAQ 3(AX)(BX*1), BX
- CMPQ BX, (SP)
+ MOVL DI, SI
+ SUBL 12(SP), SI
+ LEAQ 3(CX)(SI*1), SI
+ CMPQ SI, (SP)
JB repeat_dst_size_check_encodeSnappyBlockAsm10B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
repeat_dst_size_check_encodeSnappyBlockAsm10B:
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm10B
- MOVL SI, DI
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R8
- SUBL BX, DI
- LEAL -1(DI), BX
- CMPL BX, $0x3c
+ MOVL DI, R8
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R9
+ SUBL SI, R8
+ LEAL -1(R8), SI
+ CMPL SI, $0x3c
JB one_byte_repeat_emit_encodeSnappyBlockAsm10B
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_repeat_emit_encodeSnappyBlockAsm10B
JB three_bytes_repeat_emit_encodeSnappyBlockAsm10B
three_bytes_repeat_emit_encodeSnappyBlockAsm10B:
- MOVB $0xf4, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_repeat_emit_encodeSnappyBlockAsm10B
two_bytes_repeat_emit_encodeSnappyBlockAsm10B:
- MOVB $0xf0, (AX)
- MOVB BL, 1(AX)
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_repeat_emit_encodeSnappyBlockAsm10B
JMP memmove_long_repeat_emit_encodeSnappyBlockAsm10B
one_byte_repeat_emit_encodeSnappyBlockAsm10B:
- SHLB $0x02, BL
- MOVB BL, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
memmove_repeat_emit_encodeSnappyBlockAsm10B:
- LEAQ (AX)(DI*1), BX
+ LEAQ (CX)(R8*1), SI
// genMemMoveShort
- CMPQ DI, $0x08
+ CMPQ R8, $0x08
JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8
- CMPQ DI, $0x10
+ CMPQ R8, $0x10
JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8through16
- CMPQ DI, $0x20
+ CMPQ R8, $0x20
JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_17through32
JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_33through64
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8:
- MOVQ (R8), R9
- MOVQ R9, (AX)
+ MOVQ (R9), R10
+ MOVQ R10, (CX)
JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8through16:
- MOVQ (R8), R9
- MOVQ -8(R8)(DI*1), R8
- MOVQ R9, (AX)
- MOVQ R8, -8(AX)(DI*1)
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (CX)
+ MOVQ R9, -8(CX)(R8*1)
JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_17through32:
- MOVOU (R8), X0
- MOVOU -16(R8)(DI*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(DI*1)
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R8*1)
JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_33through64:
- MOVOU (R8), X0
- MOVOU 16(R8), X1
- MOVOU -32(R8)(DI*1), X2
- MOVOU -16(R8)(DI*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(DI*1)
- MOVOU X3, -16(AX)(DI*1)
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R8*1)
+ MOVOU X3, -16(CX)(R8*1)
memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B:
- MOVQ BX, AX
+ MOVQ SI, CX
JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm10B
memmove_long_repeat_emit_encodeSnappyBlockAsm10B:
- LEAQ (AX)(DI*1), BX
+ LEAQ (CX)(R8*1), SI
// genMemMoveLong
- MOVOU (R8), X0
- MOVOU 16(R8), X1
- MOVOU -32(R8)(DI*1), X2
- MOVOU -16(R8)(DI*1), X3
- MOVQ DI, R10
- SHRQ $0x05, R10
- MOVQ AX, R9
- ANDL $0x0000001f, R9
- MOVQ $0x00000040, R11
- SUBQ R9, R11
- DECQ R10
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R11
+ SHRQ $0x05, R11
+ MOVQ CX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R12
+ SUBQ R10, R12
+ DECQ R11
JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32
- LEAQ -32(R8)(R11*1), R9
- LEAQ -32(AX)(R11*1), R12
+ LEAQ -32(R9)(R12*1), R10
+ LEAQ -32(CX)(R12*1), R13
emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_big_loop_back:
- MOVOU (R9), X4
- MOVOU 16(R9), X5
- MOVOA X4, (R12)
- MOVOA X5, 16(R12)
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R13)
+ MOVOA X5, 16(R13)
+ ADDQ $0x20, R13
+ ADDQ $0x20, R10
ADDQ $0x20, R12
- ADDQ $0x20, R9
- ADDQ $0x20, R11
- DECQ R10
+ DECQ R11
JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_big_loop_back
emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32:
- MOVOU -32(R8)(R11*1), X4
- MOVOU -16(R8)(R11*1), X5
- MOVOA X4, -32(AX)(R11*1)
- MOVOA X5, -16(AX)(R11*1)
- ADDQ $0x20, R11
- CMPQ DI, R11
+ MOVOU -32(R9)(R12*1), X4
+ MOVOU -16(R9)(R12*1), X5
+ MOVOA X4, -32(CX)(R12*1)
+ MOVOA X5, -16(CX)(R12*1)
+ ADDQ $0x20, R12
+ CMPQ R8, R12
JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(DI*1)
- MOVOU X3, -16(AX)(DI*1)
- MOVQ BX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R8*1)
+ MOVOU X3, -16(CX)(R8*1)
+ MOVQ SI, CX
emit_literal_done_repeat_emit_encodeSnappyBlockAsm10B:
- ADDL $0x05, CX
- MOVL CX, BX
- SUBL 16(SP), BX
- MOVQ src_len+32(FP), DI
- SUBL CX, DI
- LEAQ (DX)(CX*1), R8
- LEAQ (DX)(BX*1), BX
+ ADDL $0x05, DX
+ MOVL DX, SI
+ SUBL 16(SP), SI
+ MOVQ src_len+32(FP), R8
+ SUBL DX, R8
+ LEAQ (BX)(DX*1), R9
+ LEAQ (BX)(SI*1), SI
// matchLen
- XORL R10, R10
+ XORL R11, R11
matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm10B:
- CMPL DI, $0x10
+ CMPL R8, $0x10
JB matchlen_match8_repeat_extend_encodeSnappyBlockAsm10B
- MOVQ (R8)(R10*1), R9
- MOVQ 8(R8)(R10*1), R11
- XORQ (BX)(R10*1), R9
+ MOVQ (R9)(R11*1), R10
+ MOVQ 8(R9)(R11*1), R12
+ XORQ (SI)(R11*1), R10
JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm10B
- XORQ 8(BX)(R10*1), R11
+ XORQ 8(SI)(R11*1), R12
JNZ matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm10B
- LEAL -16(DI), DI
- LEAL 16(R10), R10
+ LEAL -16(R8), R8
+ LEAL 16(R11), R11
JMP matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm10B
matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm10B:
#ifdef GOAMD64_v3
- TZCNTQ R11, R11
+ TZCNTQ R12, R12
#else
- BSFQ R11, R11
+ BSFQ R12, R12
#endif
- SARQ $0x03, R11
- LEAL 8(R10)(R11*1), R10
+ SARQ $0x03, R12
+ LEAL 8(R11)(R12*1), R11
JMP repeat_extend_forward_end_encodeSnappyBlockAsm10B
matchlen_match8_repeat_extend_encodeSnappyBlockAsm10B:
- CMPL DI, $0x08
+ CMPL R8, $0x08
JB matchlen_match4_repeat_extend_encodeSnappyBlockAsm10B
- MOVQ (R8)(R10*1), R9
- XORQ (BX)(R10*1), R9
+ MOVQ (R9)(R11*1), R10
+ XORQ (SI)(R11*1), R10
JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm10B
- LEAL -8(DI), DI
- LEAL 8(R10), R10
+ LEAL -8(R8), R8
+ LEAL 8(R11), R11
JMP matchlen_match4_repeat_extend_encodeSnappyBlockAsm10B
matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm10B:
#ifdef GOAMD64_v3
- TZCNTQ R9, R9
+ TZCNTQ R10, R10
#else
- BSFQ R9, R9
+ BSFQ R10, R10
#endif
- SARQ $0x03, R9
- LEAL (R10)(R9*1), R10
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
JMP repeat_extend_forward_end_encodeSnappyBlockAsm10B
matchlen_match4_repeat_extend_encodeSnappyBlockAsm10B:
- CMPL DI, $0x04
+ CMPL R8, $0x04
JB matchlen_match2_repeat_extend_encodeSnappyBlockAsm10B
- MOVL (R8)(R10*1), R9
- CMPL (BX)(R10*1), R9
+ MOVL (R9)(R11*1), R10
+ CMPL (SI)(R11*1), R10
JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm10B
- LEAL -4(DI), DI
- LEAL 4(R10), R10
+ LEAL -4(R8), R8
+ LEAL 4(R11), R11
matchlen_match2_repeat_extend_encodeSnappyBlockAsm10B:
- CMPL DI, $0x01
+ CMPL R8, $0x01
JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm10B
JB repeat_extend_forward_end_encodeSnappyBlockAsm10B
- MOVW (R8)(R10*1), R9
- CMPW (BX)(R10*1), R9
+ MOVW (R9)(R11*1), R10
+ CMPW (SI)(R11*1), R10
JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm10B
- LEAL 2(R10), R10
- SUBL $0x02, DI
+ LEAL 2(R11), R11
+ SUBL $0x02, R8
JZ repeat_extend_forward_end_encodeSnappyBlockAsm10B
matchlen_match1_repeat_extend_encodeSnappyBlockAsm10B:
- MOVB (R8)(R10*1), R9
- CMPB (BX)(R10*1), R9
+ MOVB (R9)(R11*1), R10
+ CMPB (SI)(R11*1), R10
JNE repeat_extend_forward_end_encodeSnappyBlockAsm10B
- LEAL 1(R10), R10
+ LEAL 1(R11), R11
repeat_extend_forward_end_encodeSnappyBlockAsm10B:
- ADDL R10, CX
- MOVL CX, BX
- SUBL SI, BX
- MOVL 16(SP), SI
+ ADDL R11, DX
+ MOVL DX, SI
+ SUBL DI, SI
+ MOVL 16(SP), DI
// emitCopy
two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm10B:
- CMPL BX, $0x40
+ CMPL SI, $0x40
JBE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm10B
- MOVB $0xee, (AX)
- MOVW SI, 1(AX)
- LEAL -60(BX), BX
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW DI, 1(CX)
+ LEAL -60(SI), SI
+ ADDQ $0x03, CX
JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm10B
two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm10B:
- MOVL BX, DI
- SHLL $0x02, DI
- CMPL BX, $0x0c
+ MOVL SI, R8
+ SHLL $0x02, R8
+ CMPL SI, $0x0c
JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm10B
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm10B
- LEAL -15(DI), DI
- MOVB SI, 1(AX)
- SHRL $0x08, SI
- SHLL $0x05, SI
- ORL SI, DI
- MOVB DI, (AX)
- ADDQ $0x02, AX
+ LEAL -15(R8), R8
+ MOVB DI, 1(CX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R8
+ MOVB R8, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeSnappyBlockAsm10B
emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm10B:
- LEAL -2(DI), DI
- MOVB DI, (AX)
- MOVW SI, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(R8), R8
+ MOVB R8, (CX)
+ MOVW DI, 1(CX)
+ ADDQ $0x03, CX
repeat_end_emit_encodeSnappyBlockAsm10B:
- MOVL CX, 12(SP)
+ MOVL DX, 12(SP)
JMP search_loop_encodeSnappyBlockAsm10B
no_repeat_found_encodeSnappyBlockAsm10B:
- CMPL (DX)(BX*1), SI
+ CMPL (BX)(SI*1), DI
JEQ candidate_match_encodeSnappyBlockAsm10B
- SHRQ $0x08, SI
- MOVL 24(SP)(R9*4), BX
- LEAL 2(CX), R8
- CMPL (DX)(DI*1), SI
+ SHRQ $0x08, DI
+ MOVL (AX)(R10*4), SI
+ LEAL 2(DX), R9
+ CMPL (BX)(R8*1), DI
JEQ candidate2_match_encodeSnappyBlockAsm10B
- MOVL R8, 24(SP)(R9*4)
- SHRQ $0x08, SI
- CMPL (DX)(BX*1), SI
+ MOVL R9, (AX)(R10*4)
+ SHRQ $0x08, DI
+ CMPL (BX)(SI*1), DI
JEQ candidate3_match_encodeSnappyBlockAsm10B
- MOVL 20(SP), CX
+ MOVL 20(SP), DX
JMP search_loop_encodeSnappyBlockAsm10B
candidate3_match_encodeSnappyBlockAsm10B:
- ADDL $0x02, CX
+ ADDL $0x02, DX
JMP candidate_match_encodeSnappyBlockAsm10B
candidate2_match_encodeSnappyBlockAsm10B:
- MOVL R8, 24(SP)(R9*4)
- INCL CX
- MOVL DI, BX
+ MOVL R9, (AX)(R10*4)
+ INCL DX
+ MOVL R8, SI
candidate_match_encodeSnappyBlockAsm10B:
- MOVL 12(SP), SI
- TESTL BX, BX
+ MOVL 12(SP), DI
+ TESTL SI, SI
JZ match_extend_back_end_encodeSnappyBlockAsm10B
match_extend_back_loop_encodeSnappyBlockAsm10B:
- CMPL CX, SI
+ CMPL DX, DI
JBE match_extend_back_end_encodeSnappyBlockAsm10B
- MOVB -1(DX)(BX*1), DI
- MOVB -1(DX)(CX*1), R8
- CMPB DI, R8
+ MOVB -1(BX)(SI*1), R8
+ MOVB -1(BX)(DX*1), R9
+ CMPB R8, R9
JNE match_extend_back_end_encodeSnappyBlockAsm10B
- LEAL -1(CX), CX
- DECL BX
+ LEAL -1(DX), DX
+ DECL SI
JZ match_extend_back_end_encodeSnappyBlockAsm10B
JMP match_extend_back_loop_encodeSnappyBlockAsm10B
match_extend_back_end_encodeSnappyBlockAsm10B:
- MOVL CX, SI
- SUBL 12(SP), SI
- LEAQ 3(AX)(SI*1), SI
- CMPQ SI, (SP)
+ MOVL DX, DI
+ SUBL 12(SP), DI
+ LEAQ 3(CX)(DI*1), DI
+ CMPQ DI, (SP)
JB match_dst_size_check_encodeSnappyBlockAsm10B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_dst_size_check_encodeSnappyBlockAsm10B:
- MOVL CX, SI
- MOVL 12(SP), DI
- CMPL DI, SI
+ MOVL DX, DI
+ MOVL 12(SP), R8
+ CMPL R8, DI
JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm10B
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(DI*1), SI
- SUBL DI, R8
- LEAL -1(R8), DI
- CMPL DI, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(R8*1), DI
+ SUBL R8, R9
+ LEAL -1(R9), R8
+ CMPL R8, $0x3c
JB one_byte_match_emit_encodeSnappyBlockAsm10B
- CMPL DI, $0x00000100
+ CMPL R8, $0x00000100
JB two_bytes_match_emit_encodeSnappyBlockAsm10B
JB three_bytes_match_emit_encodeSnappyBlockAsm10B
three_bytes_match_emit_encodeSnappyBlockAsm10B:
- MOVB $0xf4, (AX)
- MOVW DI, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW R8, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_encodeSnappyBlockAsm10B
two_bytes_match_emit_encodeSnappyBlockAsm10B:
- MOVB $0xf0, (AX)
- MOVB DI, 1(AX)
- ADDQ $0x02, AX
- CMPL DI, $0x40
+ MOVB $0xf0, (CX)
+ MOVB R8, 1(CX)
+ ADDQ $0x02, CX
+ CMPL R8, $0x40
JB memmove_match_emit_encodeSnappyBlockAsm10B
JMP memmove_long_match_emit_encodeSnappyBlockAsm10B
one_byte_match_emit_encodeSnappyBlockAsm10B:
- SHLB $0x02, DI
- MOVB DI, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, R8
+ MOVB R8, (CX)
+ ADDQ $0x01, CX
memmove_match_emit_encodeSnappyBlockAsm10B:
- LEAQ (AX)(R8*1), DI
+ LEAQ (CX)(R9*1), R8
// genMemMoveShort
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_33through64
emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8:
- MOVQ (SI), R9
- MOVQ R9, (AX)
+ MOVQ (DI), R10
+ MOVQ R10, (CX)
JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm10B
emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8through16:
- MOVQ (SI), R9
- MOVQ -8(SI)(R8*1), SI
- MOVQ R9, (AX)
- MOVQ SI, -8(AX)(R8*1)
+ MOVQ (DI), R10
+ MOVQ -8(DI)(R9*1), DI
+ MOVQ R10, (CX)
+ MOVQ DI, -8(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm10B
emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_17through32:
- MOVOU (SI), X0
- MOVOU -16(SI)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (DI), X0
+ MOVOU -16(DI)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm10B
emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_33through64:
- MOVOU (SI), X0
- MOVOU 16(SI), X1
- MOVOU -32(SI)(R8*1), X2
- MOVOU -16(SI)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (DI), X0
+ MOVOU 16(DI), X1
+ MOVOU -32(DI)(R9*1), X2
+ MOVOU -16(DI)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_match_emit_encodeSnappyBlockAsm10B:
- MOVQ DI, AX
+ MOVQ R8, CX
JMP emit_literal_done_match_emit_encodeSnappyBlockAsm10B
memmove_long_match_emit_encodeSnappyBlockAsm10B:
- LEAQ (AX)(R8*1), DI
+ LEAQ (CX)(R9*1), R8
// genMemMoveLong
- MOVOU (SI), X0
- MOVOU 16(SI), X1
- MOVOU -32(SI)(R8*1), X2
- MOVOU -16(SI)(R8*1), X3
- MOVQ R8, R10
- SHRQ $0x05, R10
- MOVQ AX, R9
- ANDL $0x0000001f, R9
- MOVQ $0x00000040, R11
- SUBQ R9, R11
- DECQ R10
+ MOVOU (DI), X0
+ MOVOU 16(DI), X1
+ MOVOU -32(DI)(R9*1), X2
+ MOVOU -16(DI)(R9*1), X3
+ MOVQ R9, R11
+ SHRQ $0x05, R11
+ MOVQ CX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R12
+ SUBQ R10, R12
+ DECQ R11
JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32
- LEAQ -32(SI)(R11*1), R9
- LEAQ -32(AX)(R11*1), R12
+ LEAQ -32(DI)(R12*1), R10
+ LEAQ -32(CX)(R12*1), R13
emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_big_loop_back:
- MOVOU (R9), X4
- MOVOU 16(R9), X5
- MOVOA X4, (R12)
- MOVOA X5, 16(R12)
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R13)
+ MOVOA X5, 16(R13)
+ ADDQ $0x20, R13
+ ADDQ $0x20, R10
ADDQ $0x20, R12
- ADDQ $0x20, R9
- ADDQ $0x20, R11
- DECQ R10
+ DECQ R11
JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32:
- MOVOU -32(SI)(R11*1), X4
- MOVOU -16(SI)(R11*1), X5
- MOVOA X4, -32(AX)(R11*1)
- MOVOA X5, -16(AX)(R11*1)
- ADDQ $0x20, R11
- CMPQ R8, R11
+ MOVOU -32(DI)(R12*1), X4
+ MOVOU -16(DI)(R12*1), X5
+ MOVOA X4, -32(CX)(R12*1)
+ MOVOA X5, -16(CX)(R12*1)
+ ADDQ $0x20, R12
+ CMPQ R9, R12
JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ DI, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ R8, CX
emit_literal_done_match_emit_encodeSnappyBlockAsm10B:
match_nolit_loop_encodeSnappyBlockAsm10B:
- MOVL CX, SI
- SUBL BX, SI
- MOVL SI, 16(SP)
- ADDL $0x04, CX
- ADDL $0x04, BX
- MOVQ src_len+32(FP), SI
- SUBL CX, SI
- LEAQ (DX)(CX*1), DI
- LEAQ (DX)(BX*1), BX
+ MOVL DX, DI
+ SUBL SI, DI
+ MOVL DI, 16(SP)
+ ADDL $0x04, DX
+ ADDL $0x04, SI
+ MOVQ src_len+32(FP), DI
+ SUBL DX, DI
+ LEAQ (BX)(DX*1), R8
+ LEAQ (BX)(SI*1), SI
// matchLen
- XORL R9, R9
+ XORL R10, R10
matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm10B:
- CMPL SI, $0x10
+ CMPL DI, $0x10
JB matchlen_match8_match_nolit_encodeSnappyBlockAsm10B
- MOVQ (DI)(R9*1), R8
- MOVQ 8(DI)(R9*1), R10
- XORQ (BX)(R9*1), R8
+ MOVQ (R8)(R10*1), R9
+ MOVQ 8(R8)(R10*1), R11
+ XORQ (SI)(R10*1), R9
JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm10B
- XORQ 8(BX)(R9*1), R10
+ XORQ 8(SI)(R10*1), R11
JNZ matchlen_bsf_16match_nolit_encodeSnappyBlockAsm10B
- LEAL -16(SI), SI
- LEAL 16(R9), R9
+ LEAL -16(DI), DI
+ LEAL 16(R10), R10
JMP matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm10B
matchlen_bsf_16match_nolit_encodeSnappyBlockAsm10B:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL 8(R9)(R10*1), R9
+ SARQ $0x03, R11
+ LEAL 8(R10)(R11*1), R10
JMP match_nolit_end_encodeSnappyBlockAsm10B
matchlen_match8_match_nolit_encodeSnappyBlockAsm10B:
- CMPL SI, $0x08
+ CMPL DI, $0x08
JB matchlen_match4_match_nolit_encodeSnappyBlockAsm10B
- MOVQ (DI)(R9*1), R8
- XORQ (BX)(R9*1), R8
+ MOVQ (R8)(R10*1), R9
+ XORQ (SI)(R10*1), R9
JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm10B
- LEAL -8(SI), SI
- LEAL 8(R9), R9
+ LEAL -8(DI), DI
+ LEAL 8(R10), R10
JMP matchlen_match4_match_nolit_encodeSnappyBlockAsm10B
matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm10B:
#ifdef GOAMD64_v3
- TZCNTQ R8, R8
+ TZCNTQ R9, R9
#else
- BSFQ R8, R8
+ BSFQ R9, R9
#endif
- SARQ $0x03, R8
- LEAL (R9)(R8*1), R9
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
JMP match_nolit_end_encodeSnappyBlockAsm10B
matchlen_match4_match_nolit_encodeSnappyBlockAsm10B:
- CMPL SI, $0x04
+ CMPL DI, $0x04
JB matchlen_match2_match_nolit_encodeSnappyBlockAsm10B
- MOVL (DI)(R9*1), R8
- CMPL (BX)(R9*1), R8
+ MOVL (R8)(R10*1), R9
+ CMPL (SI)(R10*1), R9
JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm10B
- LEAL -4(SI), SI
- LEAL 4(R9), R9
+ LEAL -4(DI), DI
+ LEAL 4(R10), R10
matchlen_match2_match_nolit_encodeSnappyBlockAsm10B:
- CMPL SI, $0x01
+ CMPL DI, $0x01
JE matchlen_match1_match_nolit_encodeSnappyBlockAsm10B
JB match_nolit_end_encodeSnappyBlockAsm10B
- MOVW (DI)(R9*1), R8
- CMPW (BX)(R9*1), R8
+ MOVW (R8)(R10*1), R9
+ CMPW (SI)(R10*1), R9
JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm10B
- LEAL 2(R9), R9
- SUBL $0x02, SI
+ LEAL 2(R10), R10
+ SUBL $0x02, DI
JZ match_nolit_end_encodeSnappyBlockAsm10B
matchlen_match1_match_nolit_encodeSnappyBlockAsm10B:
- MOVB (DI)(R9*1), R8
- CMPB (BX)(R9*1), R8
+ MOVB (R8)(R10*1), R9
+ CMPB (SI)(R10*1), R9
JNE match_nolit_end_encodeSnappyBlockAsm10B
- LEAL 1(R9), R9
+ LEAL 1(R10), R10
match_nolit_end_encodeSnappyBlockAsm10B:
- ADDL R9, CX
- MOVL 16(SP), BX
- ADDL $0x04, R9
- MOVL CX, 12(SP)
+ ADDL R10, DX
+ MOVL 16(SP), SI
+ ADDL $0x04, R10
+ MOVL DX, 12(SP)
// emitCopy
two_byte_offset_match_nolit_encodeSnappyBlockAsm10B:
- CMPL R9, $0x40
+ CMPL R10, $0x40
JBE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm10B
- MOVB $0xee, (AX)
- MOVW BX, 1(AX)
- LEAL -60(R9), R9
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW SI, 1(CX)
+ LEAL -60(R10), R10
+ ADDQ $0x03, CX
JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm10B
two_byte_offset_short_match_nolit_encodeSnappyBlockAsm10B:
- MOVL R9, SI
- SHLL $0x02, SI
- CMPL R9, $0x0c
+ MOVL R10, DI
+ SHLL $0x02, DI
+ CMPL R10, $0x0c
JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm10B
- CMPL BX, $0x00000800
+ CMPL SI, $0x00000800
JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm10B
- LEAL -15(SI), SI
- MOVB BL, 1(AX)
- SHRL $0x08, BX
- SHLL $0x05, BX
- ORL BX, SI
- MOVB SI, (AX)
- ADDQ $0x02, AX
+ LEAL -15(DI), DI
+ MOVB SI, 1(CX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm10B
emit_copy_three_match_nolit_encodeSnappyBlockAsm10B:
- LEAL -2(SI), SI
- MOVB SI, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(DI), DI
+ MOVB DI, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
match_nolit_emitcopy_end_encodeSnappyBlockAsm10B:
- CMPL CX, 8(SP)
+ CMPL DX, 8(SP)
JAE emit_remainder_encodeSnappyBlockAsm10B
- MOVQ -2(DX)(CX*1), SI
- CMPQ AX, (SP)
+ MOVQ -2(BX)(DX*1), DI
+ CMPQ CX, (SP)
JB match_nolit_dst_ok_encodeSnappyBlockAsm10B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_nolit_dst_ok_encodeSnappyBlockAsm10B:
- MOVQ $0x9e3779b1, R8
- MOVQ SI, DI
- SHRQ $0x10, SI
- MOVQ SI, BX
- SHLQ $0x20, DI
- IMULQ R8, DI
- SHRQ $0x36, DI
- SHLQ $0x20, BX
- IMULQ R8, BX
- SHRQ $0x36, BX
- LEAL -2(CX), R8
- LEAQ 24(SP)(BX*4), R9
- MOVL (R9), BX
- MOVL R8, 24(SP)(DI*4)
- MOVL CX, (R9)
- CMPL (DX)(BX*1), SI
+ MOVQ $0x9e3779b1, R9
+ MOVQ DI, R8
+ SHRQ $0x10, DI
+ MOVQ DI, SI
+ SHLQ $0x20, R8
+ IMULQ R9, R8
+ SHRQ $0x36, R8
+ SHLQ $0x20, SI
+ IMULQ R9, SI
+ SHRQ $0x36, SI
+ LEAL -2(DX), R9
+ LEAQ (AX)(SI*4), R10
+ MOVL (R10), SI
+ MOVL R9, (AX)(R8*4)
+ MOVL DX, (R10)
+ CMPL (BX)(SI*1), DI
JEQ match_nolit_loop_encodeSnappyBlockAsm10B
- INCL CX
+ INCL DX
JMP search_loop_encodeSnappyBlockAsm10B
emit_remainder_encodeSnappyBlockAsm10B:
- MOVQ src_len+32(FP), CX
- SUBL 12(SP), CX
- LEAQ 3(AX)(CX*1), CX
- CMPQ CX, (SP)
+ MOVQ src_len+32(FP), AX
+ SUBL 12(SP), AX
+ LEAQ 3(CX)(AX*1), AX
+ CMPQ AX, (SP)
JB emit_remainder_ok_encodeSnappyBlockAsm10B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
emit_remainder_ok_encodeSnappyBlockAsm10B:
- MOVQ src_len+32(FP), CX
- MOVL 12(SP), BX
- CMPL BX, CX
+ MOVQ src_len+32(FP), AX
+ MOVL 12(SP), DX
+ CMPL DX, AX
JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm10B
- MOVL CX, SI
- MOVL CX, 12(SP)
- LEAQ (DX)(BX*1), CX
- SUBL BX, SI
+ MOVL AX, SI
+ MOVL AX, 12(SP)
+ LEAQ (BX)(DX*1), AX
+ SUBL DX, SI
LEAL -1(SI), DX
CMPL DX, $0x3c
JB one_byte_emit_remainder_encodeSnappyBlockAsm10B
@@ -14016,26 +14030,26 @@ emit_remainder_ok_encodeSnappyBlockAsm10B:
JB three_bytes_emit_remainder_encodeSnappyBlockAsm10B
three_bytes_emit_remainder_encodeSnappyBlockAsm10B:
- MOVB $0xf4, (AX)
- MOVW DX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW DX, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_emit_remainder_encodeSnappyBlockAsm10B
two_bytes_emit_remainder_encodeSnappyBlockAsm10B:
- MOVB $0xf0, (AX)
- MOVB DL, 1(AX)
- ADDQ $0x02, AX
+ MOVB $0xf0, (CX)
+ MOVB DL, 1(CX)
+ ADDQ $0x02, CX
CMPL DX, $0x40
JB memmove_emit_remainder_encodeSnappyBlockAsm10B
JMP memmove_long_emit_remainder_encodeSnappyBlockAsm10B
one_byte_emit_remainder_encodeSnappyBlockAsm10B:
SHLB $0x02, DL
- MOVB DL, (AX)
- ADDQ $0x01, AX
+ MOVB DL, (CX)
+ ADDQ $0x01, CX
memmove_emit_remainder_encodeSnappyBlockAsm10B:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveShort
@@ -14051,73 +14065,73 @@ memmove_emit_remainder_encodeSnappyBlockAsm10B:
JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_33through64
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_1or2:
- MOVB (CX), SI
- MOVB -1(CX)(BX*1), CL
- MOVB SI, (AX)
- MOVB CL, -1(AX)(BX*1)
+ MOVB (AX), SI
+ MOVB -1(AX)(BX*1), AL
+ MOVB SI, (CX)
+ MOVB AL, -1(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_3:
- MOVW (CX), SI
- MOVB 2(CX), CL
- MOVW SI, (AX)
- MOVB CL, 2(AX)
+ MOVW (AX), SI
+ MOVB 2(AX), AL
+ MOVW SI, (CX)
+ MOVB AL, 2(CX)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_4through7:
- MOVL (CX), SI
- MOVL -4(CX)(BX*1), CX
- MOVL SI, (AX)
- MOVL CX, -4(AX)(BX*1)
+ MOVL (AX), SI
+ MOVL -4(AX)(BX*1), AX
+ MOVL SI, (CX)
+ MOVL AX, -4(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_8through16:
- MOVQ (CX), SI
- MOVQ -8(CX)(BX*1), CX
- MOVQ SI, (AX)
- MOVQ CX, -8(AX)(BX*1)
+ MOVQ (AX), SI
+ MOVQ -8(AX)(BX*1), AX
+ MOVQ SI, (CX)
+ MOVQ AX, -8(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_17through32:
- MOVOU (CX), X0
- MOVOU -16(CX)(BX*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU -16(AX)(BX*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_33through64:
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B:
- MOVQ DX, AX
+ MOVQ DX, CX
JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm10B
memmove_long_emit_remainder_encodeSnappyBlockAsm10B:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveLong
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
MOVQ BX, DI
SHRQ $0x05, DI
- MOVQ AX, SI
+ MOVQ CX, SI
ANDL $0x0000001f, SI
MOVQ $0x00000040, R8
SUBQ SI, R8
DECQ DI
JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32
- LEAQ -32(CX)(R8*1), SI
- LEAQ -32(AX)(R8*1), R9
+ LEAQ -32(AX)(R8*1), SI
+ LEAQ -32(CX)(R8*1), R9
emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_big_loop_back:
MOVOU (SI), X4
@@ -14131,714 +14145,715 @@ emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_big_loop_back:
JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_big_loop_back
emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32:
- MOVOU -32(CX)(R8*1), X4
- MOVOU -16(CX)(R8*1), X5
- MOVOA X4, -32(AX)(R8*1)
- MOVOA X5, -16(AX)(R8*1)
+ MOVOU -32(AX)(R8*1), X4
+ MOVOU -16(AX)(R8*1), X5
+ MOVOA X4, -32(CX)(R8*1)
+ MOVOA X5, -16(CX)(R8*1)
ADDQ $0x20, R8
CMPQ BX, R8
JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
- MOVQ DX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
+ MOVQ DX, CX
emit_literal_done_emit_remainder_encodeSnappyBlockAsm10B:
- MOVQ dst_base+0(FP), CX
- SUBQ CX, AX
- MOVQ AX, ret+48(FP)
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, CX
+ MOVQ CX, ret+56(FP)
RET
-// func encodeSnappyBlockAsm8B(dst []byte, src []byte) int
+// func encodeSnappyBlockAsm8B(dst []byte, src []byte, tmp *[1024]byte) int
// Requires: BMI, SSE2
-TEXT ·encodeSnappyBlockAsm8B(SB), $1048-56
- MOVQ dst_base+0(FP), AX
- MOVQ $0x00000008, CX
- LEAQ 24(SP), DX
+TEXT ·encodeSnappyBlockAsm8B(SB), $24-64
+ MOVQ tmp+48(FP), AX
+ MOVQ dst_base+0(FP), CX
+ MOVQ $0x00000008, DX
+ MOVQ AX, BX
PXOR X0, X0
zero_loop_encodeSnappyBlockAsm8B:
- MOVOU X0, (DX)
- MOVOU X0, 16(DX)
- MOVOU X0, 32(DX)
- MOVOU X0, 48(DX)
- MOVOU X0, 64(DX)
- MOVOU X0, 80(DX)
- MOVOU X0, 96(DX)
- MOVOU X0, 112(DX)
- ADDQ $0x80, DX
- DECQ CX
+ MOVOU X0, (BX)
+ MOVOU X0, 16(BX)
+ MOVOU X0, 32(BX)
+ MOVOU X0, 48(BX)
+ MOVOU X0, 64(BX)
+ MOVOU X0, 80(BX)
+ MOVOU X0, 96(BX)
+ MOVOU X0, 112(BX)
+ ADDQ $0x80, BX
+ DECQ DX
JNZ zero_loop_encodeSnappyBlockAsm8B
MOVL $0x00000000, 12(SP)
- MOVQ src_len+32(FP), CX
- LEAQ -9(CX), DX
- LEAQ -8(CX), BX
- MOVL BX, 8(SP)
- SHRQ $0x05, CX
- SUBL CX, DX
- LEAQ (AX)(DX*1), DX
- MOVQ DX, (SP)
- MOVL $0x00000001, CX
- MOVL CX, 16(SP)
- MOVQ src_base+24(FP), DX
+ MOVQ src_len+32(FP), DX
+ LEAQ -9(DX), BX
+ LEAQ -8(DX), SI
+ MOVL SI, 8(SP)
+ SHRQ $0x05, DX
+ SUBL DX, BX
+ LEAQ (CX)(BX*1), BX
+ MOVQ BX, (SP)
+ MOVL $0x00000001, DX
+ MOVL DX, 16(SP)
+ MOVQ src_base+24(FP), BX
search_loop_encodeSnappyBlockAsm8B:
- MOVL CX, BX
- SUBL 12(SP), BX
- SHRL $0x04, BX
- LEAL 4(CX)(BX*1), BX
- CMPL BX, 8(SP)
+ MOVL DX, SI
+ SUBL 12(SP), SI
+ SHRL $0x04, SI
+ LEAL 4(DX)(SI*1), SI
+ CMPL SI, 8(SP)
JAE emit_remainder_encodeSnappyBlockAsm8B
- MOVQ (DX)(CX*1), SI
- MOVL BX, 20(SP)
- MOVQ $0x9e3779b1, R8
- MOVQ SI, R9
- MOVQ SI, R10
- SHRQ $0x08, R10
- SHLQ $0x20, R9
- IMULQ R8, R9
- SHRQ $0x38, R9
+ MOVQ (BX)(DX*1), DI
+ MOVL SI, 20(SP)
+ MOVQ $0x9e3779b1, R9
+ MOVQ DI, R10
+ MOVQ DI, R11
+ SHRQ $0x08, R11
SHLQ $0x20, R10
- IMULQ R8, R10
+ IMULQ R9, R10
SHRQ $0x38, R10
- MOVL 24(SP)(R9*4), BX
- MOVL 24(SP)(R10*4), DI
- MOVL CX, 24(SP)(R9*4)
- LEAL 1(CX), R9
- MOVL R9, 24(SP)(R10*4)
- MOVQ SI, R9
- SHRQ $0x10, R9
- SHLQ $0x20, R9
- IMULQ R8, R9
- SHRQ $0x38, R9
- MOVL CX, R8
- SUBL 16(SP), R8
- MOVL 1(DX)(R8*1), R10
- MOVQ SI, R8
- SHRQ $0x08, R8
- CMPL R8, R10
+ SHLQ $0x20, R11
+ IMULQ R9, R11
+ SHRQ $0x38, R11
+ MOVL (AX)(R10*4), SI
+ MOVL (AX)(R11*4), R8
+ MOVL DX, (AX)(R10*4)
+ LEAL 1(DX), R10
+ MOVL R10, (AX)(R11*4)
+ MOVQ DI, R10
+ SHRQ $0x10, R10
+ SHLQ $0x20, R10
+ IMULQ R9, R10
+ SHRQ $0x38, R10
+ MOVL DX, R9
+ SUBL 16(SP), R9
+ MOVL 1(BX)(R9*1), R11
+ MOVQ DI, R9
+ SHRQ $0x08, R9
+ CMPL R9, R11
JNE no_repeat_found_encodeSnappyBlockAsm8B
- LEAL 1(CX), SI
- MOVL 12(SP), BX
- MOVL SI, DI
- SUBL 16(SP), DI
+ LEAL 1(DX), DI
+ MOVL 12(SP), SI
+ MOVL DI, R8
+ SUBL 16(SP), R8
JZ repeat_extend_back_end_encodeSnappyBlockAsm8B
repeat_extend_back_loop_encodeSnappyBlockAsm8B:
- CMPL SI, BX
+ CMPL DI, SI
JBE repeat_extend_back_end_encodeSnappyBlockAsm8B
- MOVB -1(DX)(DI*1), R8
- MOVB -1(DX)(SI*1), R9
- CMPB R8, R9
+ MOVB -1(BX)(R8*1), R9
+ MOVB -1(BX)(DI*1), R10
+ CMPB R9, R10
JNE repeat_extend_back_end_encodeSnappyBlockAsm8B
- LEAL -1(SI), SI
- DECL DI
+ LEAL -1(DI), DI
+ DECL R8
JNZ repeat_extend_back_loop_encodeSnappyBlockAsm8B
repeat_extend_back_end_encodeSnappyBlockAsm8B:
- MOVL SI, BX
- SUBL 12(SP), BX
- LEAQ 3(AX)(BX*1), BX
- CMPQ BX, (SP)
+ MOVL DI, SI
+ SUBL 12(SP), SI
+ LEAQ 3(CX)(SI*1), SI
+ CMPQ SI, (SP)
JB repeat_dst_size_check_encodeSnappyBlockAsm8B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
repeat_dst_size_check_encodeSnappyBlockAsm8B:
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm8B
- MOVL SI, DI
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R8
- SUBL BX, DI
- LEAL -1(DI), BX
- CMPL BX, $0x3c
+ MOVL DI, R8
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R9
+ SUBL SI, R8
+ LEAL -1(R8), SI
+ CMPL SI, $0x3c
JB one_byte_repeat_emit_encodeSnappyBlockAsm8B
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_repeat_emit_encodeSnappyBlockAsm8B
JB three_bytes_repeat_emit_encodeSnappyBlockAsm8B
three_bytes_repeat_emit_encodeSnappyBlockAsm8B:
- MOVB $0xf4, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_repeat_emit_encodeSnappyBlockAsm8B
two_bytes_repeat_emit_encodeSnappyBlockAsm8B:
- MOVB $0xf0, (AX)
- MOVB BL, 1(AX)
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_repeat_emit_encodeSnappyBlockAsm8B
JMP memmove_long_repeat_emit_encodeSnappyBlockAsm8B
one_byte_repeat_emit_encodeSnappyBlockAsm8B:
- SHLB $0x02, BL
- MOVB BL, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
memmove_repeat_emit_encodeSnappyBlockAsm8B:
- LEAQ (AX)(DI*1), BX
+ LEAQ (CX)(R8*1), SI
// genMemMoveShort
- CMPQ DI, $0x08
+ CMPQ R8, $0x08
JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_8
- CMPQ DI, $0x10
+ CMPQ R8, $0x10
JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_8through16
- CMPQ DI, $0x20
+ CMPQ R8, $0x20
JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_17through32
JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_33through64
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_8:
- MOVQ (R8), R9
- MOVQ R9, (AX)
+ MOVQ (R9), R10
+ MOVQ R10, (CX)
JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm8B
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_8through16:
- MOVQ (R8), R9
- MOVQ -8(R8)(DI*1), R8
- MOVQ R9, (AX)
- MOVQ R8, -8(AX)(DI*1)
+ MOVQ (R9), R10
+ MOVQ -8(R9)(R8*1), R9
+ MOVQ R10, (CX)
+ MOVQ R9, -8(CX)(R8*1)
JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm8B
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_17through32:
- MOVOU (R8), X0
- MOVOU -16(R8)(DI*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(DI*1)
+ MOVOU (R9), X0
+ MOVOU -16(R9)(R8*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R8*1)
JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm8B
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_33through64:
- MOVOU (R8), X0
- MOVOU 16(R8), X1
- MOVOU -32(R8)(DI*1), X2
- MOVOU -16(R8)(DI*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(DI*1)
- MOVOU X3, -16(AX)(DI*1)
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R8*1)
+ MOVOU X3, -16(CX)(R8*1)
memmove_end_copy_repeat_emit_encodeSnappyBlockAsm8B:
- MOVQ BX, AX
+ MOVQ SI, CX
JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm8B
memmove_long_repeat_emit_encodeSnappyBlockAsm8B:
- LEAQ (AX)(DI*1), BX
+ LEAQ (CX)(R8*1), SI
// genMemMoveLong
- MOVOU (R8), X0
- MOVOU 16(R8), X1
- MOVOU -32(R8)(DI*1), X2
- MOVOU -16(R8)(DI*1), X3
- MOVQ DI, R10
- SHRQ $0x05, R10
- MOVQ AX, R9
- ANDL $0x0000001f, R9
- MOVQ $0x00000040, R11
- SUBQ R9, R11
- DECQ R10
+ MOVOU (R9), X0
+ MOVOU 16(R9), X1
+ MOVOU -32(R9)(R8*1), X2
+ MOVOU -16(R9)(R8*1), X3
+ MOVQ R8, R11
+ SHRQ $0x05, R11
+ MOVQ CX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R12
+ SUBQ R10, R12
+ DECQ R11
JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32
- LEAQ -32(R8)(R11*1), R9
- LEAQ -32(AX)(R11*1), R12
+ LEAQ -32(R9)(R12*1), R10
+ LEAQ -32(CX)(R12*1), R13
emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_big_loop_back:
- MOVOU (R9), X4
- MOVOU 16(R9), X5
- MOVOA X4, (R12)
- MOVOA X5, 16(R12)
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R13)
+ MOVOA X5, 16(R13)
+ ADDQ $0x20, R13
+ ADDQ $0x20, R10
ADDQ $0x20, R12
- ADDQ $0x20, R9
- ADDQ $0x20, R11
- DECQ R10
+ DECQ R11
JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_big_loop_back
emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32:
- MOVOU -32(R8)(R11*1), X4
- MOVOU -16(R8)(R11*1), X5
- MOVOA X4, -32(AX)(R11*1)
- MOVOA X5, -16(AX)(R11*1)
- ADDQ $0x20, R11
- CMPQ DI, R11
+ MOVOU -32(R9)(R12*1), X4
+ MOVOU -16(R9)(R12*1), X5
+ MOVOA X4, -32(CX)(R12*1)
+ MOVOA X5, -16(CX)(R12*1)
+ ADDQ $0x20, R12
+ CMPQ R8, R12
JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(DI*1)
- MOVOU X3, -16(AX)(DI*1)
- MOVQ BX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R8*1)
+ MOVOU X3, -16(CX)(R8*1)
+ MOVQ SI, CX
emit_literal_done_repeat_emit_encodeSnappyBlockAsm8B:
- ADDL $0x05, CX
- MOVL CX, BX
- SUBL 16(SP), BX
- MOVQ src_len+32(FP), DI
- SUBL CX, DI
- LEAQ (DX)(CX*1), R8
- LEAQ (DX)(BX*1), BX
+ ADDL $0x05, DX
+ MOVL DX, SI
+ SUBL 16(SP), SI
+ MOVQ src_len+32(FP), R8
+ SUBL DX, R8
+ LEAQ (BX)(DX*1), R9
+ LEAQ (BX)(SI*1), SI
// matchLen
- XORL R10, R10
+ XORL R11, R11
matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm8B:
- CMPL DI, $0x10
+ CMPL R8, $0x10
JB matchlen_match8_repeat_extend_encodeSnappyBlockAsm8B
- MOVQ (R8)(R10*1), R9
- MOVQ 8(R8)(R10*1), R11
- XORQ (BX)(R10*1), R9
+ MOVQ (R9)(R11*1), R10
+ MOVQ 8(R9)(R11*1), R12
+ XORQ (SI)(R11*1), R10
JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm8B
- XORQ 8(BX)(R10*1), R11
+ XORQ 8(SI)(R11*1), R12
JNZ matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm8B
- LEAL -16(DI), DI
- LEAL 16(R10), R10
+ LEAL -16(R8), R8
+ LEAL 16(R11), R11
JMP matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm8B
matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm8B:
#ifdef GOAMD64_v3
- TZCNTQ R11, R11
+ TZCNTQ R12, R12
#else
- BSFQ R11, R11
+ BSFQ R12, R12
#endif
- SARQ $0x03, R11
- LEAL 8(R10)(R11*1), R10
+ SARQ $0x03, R12
+ LEAL 8(R11)(R12*1), R11
JMP repeat_extend_forward_end_encodeSnappyBlockAsm8B
matchlen_match8_repeat_extend_encodeSnappyBlockAsm8B:
- CMPL DI, $0x08
+ CMPL R8, $0x08
JB matchlen_match4_repeat_extend_encodeSnappyBlockAsm8B
- MOVQ (R8)(R10*1), R9
- XORQ (BX)(R10*1), R9
+ MOVQ (R9)(R11*1), R10
+ XORQ (SI)(R11*1), R10
JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm8B
- LEAL -8(DI), DI
- LEAL 8(R10), R10
+ LEAL -8(R8), R8
+ LEAL 8(R11), R11
JMP matchlen_match4_repeat_extend_encodeSnappyBlockAsm8B
matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm8B:
#ifdef GOAMD64_v3
- TZCNTQ R9, R9
+ TZCNTQ R10, R10
#else
- BSFQ R9, R9
+ BSFQ R10, R10
#endif
- SARQ $0x03, R9
- LEAL (R10)(R9*1), R10
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
JMP repeat_extend_forward_end_encodeSnappyBlockAsm8B
matchlen_match4_repeat_extend_encodeSnappyBlockAsm8B:
- CMPL DI, $0x04
+ CMPL R8, $0x04
JB matchlen_match2_repeat_extend_encodeSnappyBlockAsm8B
- MOVL (R8)(R10*1), R9
- CMPL (BX)(R10*1), R9
+ MOVL (R9)(R11*1), R10
+ CMPL (SI)(R11*1), R10
JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm8B
- LEAL -4(DI), DI
- LEAL 4(R10), R10
+ LEAL -4(R8), R8
+ LEAL 4(R11), R11
matchlen_match2_repeat_extend_encodeSnappyBlockAsm8B:
- CMPL DI, $0x01
+ CMPL R8, $0x01
JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm8B
JB repeat_extend_forward_end_encodeSnappyBlockAsm8B
- MOVW (R8)(R10*1), R9
- CMPW (BX)(R10*1), R9
+ MOVW (R9)(R11*1), R10
+ CMPW (SI)(R11*1), R10
JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm8B
- LEAL 2(R10), R10
- SUBL $0x02, DI
+ LEAL 2(R11), R11
+ SUBL $0x02, R8
JZ repeat_extend_forward_end_encodeSnappyBlockAsm8B
matchlen_match1_repeat_extend_encodeSnappyBlockAsm8B:
- MOVB (R8)(R10*1), R9
- CMPB (BX)(R10*1), R9
+ MOVB (R9)(R11*1), R10
+ CMPB (SI)(R11*1), R10
JNE repeat_extend_forward_end_encodeSnappyBlockAsm8B
- LEAL 1(R10), R10
+ LEAL 1(R11), R11
repeat_extend_forward_end_encodeSnappyBlockAsm8B:
- ADDL R10, CX
- MOVL CX, BX
- SUBL SI, BX
- MOVL 16(SP), SI
+ ADDL R11, DX
+ MOVL DX, SI
+ SUBL DI, SI
+ MOVL 16(SP), DI
// emitCopy
two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm8B:
- CMPL BX, $0x40
+ CMPL SI, $0x40
JBE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm8B
- MOVB $0xee, (AX)
- MOVW SI, 1(AX)
- LEAL -60(BX), BX
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW DI, 1(CX)
+ LEAL -60(SI), SI
+ ADDQ $0x03, CX
JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm8B
two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm8B:
- MOVL BX, DI
- SHLL $0x02, DI
- CMPL BX, $0x0c
+ MOVL SI, R8
+ SHLL $0x02, R8
+ CMPL SI, $0x0c
JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm8B
- LEAL -15(DI), DI
- MOVB SI, 1(AX)
- SHRL $0x08, SI
- SHLL $0x05, SI
- ORL SI, DI
- MOVB DI, (AX)
- ADDQ $0x02, AX
+ LEAL -15(R8), R8
+ MOVB DI, 1(CX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R8
+ MOVB R8, (CX)
+ ADDQ $0x02, CX
JMP repeat_end_emit_encodeSnappyBlockAsm8B
emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm8B:
- LEAL -2(DI), DI
- MOVB DI, (AX)
- MOVW SI, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(R8), R8
+ MOVB R8, (CX)
+ MOVW DI, 1(CX)
+ ADDQ $0x03, CX
repeat_end_emit_encodeSnappyBlockAsm8B:
- MOVL CX, 12(SP)
+ MOVL DX, 12(SP)
JMP search_loop_encodeSnappyBlockAsm8B
no_repeat_found_encodeSnappyBlockAsm8B:
- CMPL (DX)(BX*1), SI
+ CMPL (BX)(SI*1), DI
JEQ candidate_match_encodeSnappyBlockAsm8B
- SHRQ $0x08, SI
- MOVL 24(SP)(R9*4), BX
- LEAL 2(CX), R8
- CMPL (DX)(DI*1), SI
+ SHRQ $0x08, DI
+ MOVL (AX)(R10*4), SI
+ LEAL 2(DX), R9
+ CMPL (BX)(R8*1), DI
JEQ candidate2_match_encodeSnappyBlockAsm8B
- MOVL R8, 24(SP)(R9*4)
- SHRQ $0x08, SI
- CMPL (DX)(BX*1), SI
+ MOVL R9, (AX)(R10*4)
+ SHRQ $0x08, DI
+ CMPL (BX)(SI*1), DI
JEQ candidate3_match_encodeSnappyBlockAsm8B
- MOVL 20(SP), CX
+ MOVL 20(SP), DX
JMP search_loop_encodeSnappyBlockAsm8B
candidate3_match_encodeSnappyBlockAsm8B:
- ADDL $0x02, CX
+ ADDL $0x02, DX
JMP candidate_match_encodeSnappyBlockAsm8B
candidate2_match_encodeSnappyBlockAsm8B:
- MOVL R8, 24(SP)(R9*4)
- INCL CX
- MOVL DI, BX
+ MOVL R9, (AX)(R10*4)
+ INCL DX
+ MOVL R8, SI
candidate_match_encodeSnappyBlockAsm8B:
- MOVL 12(SP), SI
- TESTL BX, BX
+ MOVL 12(SP), DI
+ TESTL SI, SI
JZ match_extend_back_end_encodeSnappyBlockAsm8B
match_extend_back_loop_encodeSnappyBlockAsm8B:
- CMPL CX, SI
+ CMPL DX, DI
JBE match_extend_back_end_encodeSnappyBlockAsm8B
- MOVB -1(DX)(BX*1), DI
- MOVB -1(DX)(CX*1), R8
- CMPB DI, R8
+ MOVB -1(BX)(SI*1), R8
+ MOVB -1(BX)(DX*1), R9
+ CMPB R8, R9
JNE match_extend_back_end_encodeSnappyBlockAsm8B
- LEAL -1(CX), CX
- DECL BX
+ LEAL -1(DX), DX
+ DECL SI
JZ match_extend_back_end_encodeSnappyBlockAsm8B
JMP match_extend_back_loop_encodeSnappyBlockAsm8B
match_extend_back_end_encodeSnappyBlockAsm8B:
- MOVL CX, SI
- SUBL 12(SP), SI
- LEAQ 3(AX)(SI*1), SI
- CMPQ SI, (SP)
+ MOVL DX, DI
+ SUBL 12(SP), DI
+ LEAQ 3(CX)(DI*1), DI
+ CMPQ DI, (SP)
JB match_dst_size_check_encodeSnappyBlockAsm8B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_dst_size_check_encodeSnappyBlockAsm8B:
- MOVL CX, SI
- MOVL 12(SP), DI
- CMPL DI, SI
+ MOVL DX, DI
+ MOVL 12(SP), R8
+ CMPL R8, DI
JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm8B
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(DI*1), SI
- SUBL DI, R8
- LEAL -1(R8), DI
- CMPL DI, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(R8*1), DI
+ SUBL R8, R9
+ LEAL -1(R9), R8
+ CMPL R8, $0x3c
JB one_byte_match_emit_encodeSnappyBlockAsm8B
- CMPL DI, $0x00000100
+ CMPL R8, $0x00000100
JB two_bytes_match_emit_encodeSnappyBlockAsm8B
JB three_bytes_match_emit_encodeSnappyBlockAsm8B
three_bytes_match_emit_encodeSnappyBlockAsm8B:
- MOVB $0xf4, (AX)
- MOVW DI, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW R8, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_encodeSnappyBlockAsm8B
two_bytes_match_emit_encodeSnappyBlockAsm8B:
- MOVB $0xf0, (AX)
- MOVB DI, 1(AX)
- ADDQ $0x02, AX
- CMPL DI, $0x40
+ MOVB $0xf0, (CX)
+ MOVB R8, 1(CX)
+ ADDQ $0x02, CX
+ CMPL R8, $0x40
JB memmove_match_emit_encodeSnappyBlockAsm8B
JMP memmove_long_match_emit_encodeSnappyBlockAsm8B
one_byte_match_emit_encodeSnappyBlockAsm8B:
- SHLB $0x02, DI
- MOVB DI, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, R8
+ MOVB R8, (CX)
+ ADDQ $0x01, CX
memmove_match_emit_encodeSnappyBlockAsm8B:
- LEAQ (AX)(R8*1), DI
+ LEAQ (CX)(R9*1), R8
// genMemMoveShort
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_8
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_33through64
emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_8:
- MOVQ (SI), R9
- MOVQ R9, (AX)
+ MOVQ (DI), R10
+ MOVQ R10, (CX)
JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm8B
emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_8through16:
- MOVQ (SI), R9
- MOVQ -8(SI)(R8*1), SI
- MOVQ R9, (AX)
- MOVQ SI, -8(AX)(R8*1)
+ MOVQ (DI), R10
+ MOVQ -8(DI)(R9*1), DI
+ MOVQ R10, (CX)
+ MOVQ DI, -8(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm8B
emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_17through32:
- MOVOU (SI), X0
- MOVOU -16(SI)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (DI), X0
+ MOVOU -16(DI)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm8B
emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_33through64:
- MOVOU (SI), X0
- MOVOU 16(SI), X1
- MOVOU -32(SI)(R8*1), X2
- MOVOU -16(SI)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (DI), X0
+ MOVOU 16(DI), X1
+ MOVOU -32(DI)(R9*1), X2
+ MOVOU -16(DI)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_match_emit_encodeSnappyBlockAsm8B:
- MOVQ DI, AX
+ MOVQ R8, CX
JMP emit_literal_done_match_emit_encodeSnappyBlockAsm8B
memmove_long_match_emit_encodeSnappyBlockAsm8B:
- LEAQ (AX)(R8*1), DI
+ LEAQ (CX)(R9*1), R8
// genMemMoveLong
- MOVOU (SI), X0
- MOVOU 16(SI), X1
- MOVOU -32(SI)(R8*1), X2
- MOVOU -16(SI)(R8*1), X3
- MOVQ R8, R10
- SHRQ $0x05, R10
- MOVQ AX, R9
- ANDL $0x0000001f, R9
- MOVQ $0x00000040, R11
- SUBQ R9, R11
- DECQ R10
+ MOVOU (DI), X0
+ MOVOU 16(DI), X1
+ MOVOU -32(DI)(R9*1), X2
+ MOVOU -16(DI)(R9*1), X3
+ MOVQ R9, R11
+ SHRQ $0x05, R11
+ MOVQ CX, R10
+ ANDL $0x0000001f, R10
+ MOVQ $0x00000040, R12
+ SUBQ R10, R12
+ DECQ R11
JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32
- LEAQ -32(SI)(R11*1), R9
- LEAQ -32(AX)(R11*1), R12
+ LEAQ -32(DI)(R12*1), R10
+ LEAQ -32(CX)(R12*1), R13
emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_big_loop_back:
- MOVOU (R9), X4
- MOVOU 16(R9), X5
- MOVOA X4, (R12)
- MOVOA X5, 16(R12)
+ MOVOU (R10), X4
+ MOVOU 16(R10), X5
+ MOVOA X4, (R13)
+ MOVOA X5, 16(R13)
+ ADDQ $0x20, R13
+ ADDQ $0x20, R10
ADDQ $0x20, R12
- ADDQ $0x20, R9
- ADDQ $0x20, R11
- DECQ R10
+ DECQ R11
JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32:
- MOVOU -32(SI)(R11*1), X4
- MOVOU -16(SI)(R11*1), X5
- MOVOA X4, -32(AX)(R11*1)
- MOVOA X5, -16(AX)(R11*1)
- ADDQ $0x20, R11
- CMPQ R8, R11
+ MOVOU -32(DI)(R12*1), X4
+ MOVOU -16(DI)(R12*1), X5
+ MOVOA X4, -32(CX)(R12*1)
+ MOVOA X5, -16(CX)(R12*1)
+ ADDQ $0x20, R12
+ CMPQ R9, R12
JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ DI, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ R8, CX
emit_literal_done_match_emit_encodeSnappyBlockAsm8B:
match_nolit_loop_encodeSnappyBlockAsm8B:
- MOVL CX, SI
- SUBL BX, SI
- MOVL SI, 16(SP)
- ADDL $0x04, CX
- ADDL $0x04, BX
- MOVQ src_len+32(FP), SI
- SUBL CX, SI
- LEAQ (DX)(CX*1), DI
- LEAQ (DX)(BX*1), BX
+ MOVL DX, DI
+ SUBL SI, DI
+ MOVL DI, 16(SP)
+ ADDL $0x04, DX
+ ADDL $0x04, SI
+ MOVQ src_len+32(FP), DI
+ SUBL DX, DI
+ LEAQ (BX)(DX*1), R8
+ LEAQ (BX)(SI*1), SI
// matchLen
- XORL R9, R9
+ XORL R10, R10
matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm8B:
- CMPL SI, $0x10
+ CMPL DI, $0x10
JB matchlen_match8_match_nolit_encodeSnappyBlockAsm8B
- MOVQ (DI)(R9*1), R8
- MOVQ 8(DI)(R9*1), R10
- XORQ (BX)(R9*1), R8
+ MOVQ (R8)(R10*1), R9
+ MOVQ 8(R8)(R10*1), R11
+ XORQ (SI)(R10*1), R9
JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm8B
- XORQ 8(BX)(R9*1), R10
+ XORQ 8(SI)(R10*1), R11
JNZ matchlen_bsf_16match_nolit_encodeSnappyBlockAsm8B
- LEAL -16(SI), SI
- LEAL 16(R9), R9
+ LEAL -16(DI), DI
+ LEAL 16(R10), R10
JMP matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm8B
matchlen_bsf_16match_nolit_encodeSnappyBlockAsm8B:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL 8(R9)(R10*1), R9
+ SARQ $0x03, R11
+ LEAL 8(R10)(R11*1), R10
JMP match_nolit_end_encodeSnappyBlockAsm8B
matchlen_match8_match_nolit_encodeSnappyBlockAsm8B:
- CMPL SI, $0x08
+ CMPL DI, $0x08
JB matchlen_match4_match_nolit_encodeSnappyBlockAsm8B
- MOVQ (DI)(R9*1), R8
- XORQ (BX)(R9*1), R8
+ MOVQ (R8)(R10*1), R9
+ XORQ (SI)(R10*1), R9
JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm8B
- LEAL -8(SI), SI
- LEAL 8(R9), R9
+ LEAL -8(DI), DI
+ LEAL 8(R10), R10
JMP matchlen_match4_match_nolit_encodeSnappyBlockAsm8B
matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm8B:
#ifdef GOAMD64_v3
- TZCNTQ R8, R8
+ TZCNTQ R9, R9
#else
- BSFQ R8, R8
+ BSFQ R9, R9
#endif
- SARQ $0x03, R8
- LEAL (R9)(R8*1), R9
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
JMP match_nolit_end_encodeSnappyBlockAsm8B
matchlen_match4_match_nolit_encodeSnappyBlockAsm8B:
- CMPL SI, $0x04
+ CMPL DI, $0x04
JB matchlen_match2_match_nolit_encodeSnappyBlockAsm8B
- MOVL (DI)(R9*1), R8
- CMPL (BX)(R9*1), R8
+ MOVL (R8)(R10*1), R9
+ CMPL (SI)(R10*1), R9
JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm8B
- LEAL -4(SI), SI
- LEAL 4(R9), R9
+ LEAL -4(DI), DI
+ LEAL 4(R10), R10
matchlen_match2_match_nolit_encodeSnappyBlockAsm8B:
- CMPL SI, $0x01
+ CMPL DI, $0x01
JE matchlen_match1_match_nolit_encodeSnappyBlockAsm8B
JB match_nolit_end_encodeSnappyBlockAsm8B
- MOVW (DI)(R9*1), R8
- CMPW (BX)(R9*1), R8
+ MOVW (R8)(R10*1), R9
+ CMPW (SI)(R10*1), R9
JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm8B
- LEAL 2(R9), R9
- SUBL $0x02, SI
+ LEAL 2(R10), R10
+ SUBL $0x02, DI
JZ match_nolit_end_encodeSnappyBlockAsm8B
matchlen_match1_match_nolit_encodeSnappyBlockAsm8B:
- MOVB (DI)(R9*1), R8
- CMPB (BX)(R9*1), R8
+ MOVB (R8)(R10*1), R9
+ CMPB (SI)(R10*1), R9
JNE match_nolit_end_encodeSnappyBlockAsm8B
- LEAL 1(R9), R9
+ LEAL 1(R10), R10
match_nolit_end_encodeSnappyBlockAsm8B:
- ADDL R9, CX
- MOVL 16(SP), BX
- ADDL $0x04, R9
- MOVL CX, 12(SP)
+ ADDL R10, DX
+ MOVL 16(SP), SI
+ ADDL $0x04, R10
+ MOVL DX, 12(SP)
// emitCopy
two_byte_offset_match_nolit_encodeSnappyBlockAsm8B:
- CMPL R9, $0x40
+ CMPL R10, $0x40
JBE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm8B
- MOVB $0xee, (AX)
- MOVW BX, 1(AX)
- LEAL -60(R9), R9
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW SI, 1(CX)
+ LEAL -60(R10), R10
+ ADDQ $0x03, CX
JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm8B
two_byte_offset_short_match_nolit_encodeSnappyBlockAsm8B:
- MOVL R9, SI
- SHLL $0x02, SI
- CMPL R9, $0x0c
+ MOVL R10, DI
+ SHLL $0x02, DI
+ CMPL R10, $0x0c
JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm8B
- LEAL -15(SI), SI
- MOVB BL, 1(AX)
- SHRL $0x08, BX
- SHLL $0x05, BX
- ORL BX, SI
- MOVB SI, (AX)
- ADDQ $0x02, AX
+ LEAL -15(DI), DI
+ MOVB SI, 1(CX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm8B
emit_copy_three_match_nolit_encodeSnappyBlockAsm8B:
- LEAL -2(SI), SI
- MOVB SI, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(DI), DI
+ MOVB DI, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
match_nolit_emitcopy_end_encodeSnappyBlockAsm8B:
- CMPL CX, 8(SP)
+ CMPL DX, 8(SP)
JAE emit_remainder_encodeSnappyBlockAsm8B
- MOVQ -2(DX)(CX*1), SI
- CMPQ AX, (SP)
+ MOVQ -2(BX)(DX*1), DI
+ CMPQ CX, (SP)
JB match_nolit_dst_ok_encodeSnappyBlockAsm8B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_nolit_dst_ok_encodeSnappyBlockAsm8B:
- MOVQ $0x9e3779b1, R8
- MOVQ SI, DI
- SHRQ $0x10, SI
- MOVQ SI, BX
- SHLQ $0x20, DI
- IMULQ R8, DI
- SHRQ $0x38, DI
- SHLQ $0x20, BX
- IMULQ R8, BX
- SHRQ $0x38, BX
- LEAL -2(CX), R8
- LEAQ 24(SP)(BX*4), R9
- MOVL (R9), BX
- MOVL R8, 24(SP)(DI*4)
- MOVL CX, (R9)
- CMPL (DX)(BX*1), SI
+ MOVQ $0x9e3779b1, R9
+ MOVQ DI, R8
+ SHRQ $0x10, DI
+ MOVQ DI, SI
+ SHLQ $0x20, R8
+ IMULQ R9, R8
+ SHRQ $0x38, R8
+ SHLQ $0x20, SI
+ IMULQ R9, SI
+ SHRQ $0x38, SI
+ LEAL -2(DX), R9
+ LEAQ (AX)(SI*4), R10
+ MOVL (R10), SI
+ MOVL R9, (AX)(R8*4)
+ MOVL DX, (R10)
+ CMPL (BX)(SI*1), DI
JEQ match_nolit_loop_encodeSnappyBlockAsm8B
- INCL CX
+ INCL DX
JMP search_loop_encodeSnappyBlockAsm8B
emit_remainder_encodeSnappyBlockAsm8B:
- MOVQ src_len+32(FP), CX
- SUBL 12(SP), CX
- LEAQ 3(AX)(CX*1), CX
- CMPQ CX, (SP)
+ MOVQ src_len+32(FP), AX
+ SUBL 12(SP), AX
+ LEAQ 3(CX)(AX*1), AX
+ CMPQ AX, (SP)
JB emit_remainder_ok_encodeSnappyBlockAsm8B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
emit_remainder_ok_encodeSnappyBlockAsm8B:
- MOVQ src_len+32(FP), CX
- MOVL 12(SP), BX
- CMPL BX, CX
+ MOVQ src_len+32(FP), AX
+ MOVL 12(SP), DX
+ CMPL DX, AX
JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm8B
- MOVL CX, SI
- MOVL CX, 12(SP)
- LEAQ (DX)(BX*1), CX
- SUBL BX, SI
+ MOVL AX, SI
+ MOVL AX, 12(SP)
+ LEAQ (BX)(DX*1), AX
+ SUBL DX, SI
LEAL -1(SI), DX
CMPL DX, $0x3c
JB one_byte_emit_remainder_encodeSnappyBlockAsm8B
@@ -14847,26 +14862,26 @@ emit_remainder_ok_encodeSnappyBlockAsm8B:
JB three_bytes_emit_remainder_encodeSnappyBlockAsm8B
three_bytes_emit_remainder_encodeSnappyBlockAsm8B:
- MOVB $0xf4, (AX)
- MOVW DX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW DX, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_emit_remainder_encodeSnappyBlockAsm8B
two_bytes_emit_remainder_encodeSnappyBlockAsm8B:
- MOVB $0xf0, (AX)
- MOVB DL, 1(AX)
- ADDQ $0x02, AX
+ MOVB $0xf0, (CX)
+ MOVB DL, 1(CX)
+ ADDQ $0x02, CX
CMPL DX, $0x40
JB memmove_emit_remainder_encodeSnappyBlockAsm8B
JMP memmove_long_emit_remainder_encodeSnappyBlockAsm8B
one_byte_emit_remainder_encodeSnappyBlockAsm8B:
SHLB $0x02, DL
- MOVB DL, (AX)
- ADDQ $0x01, AX
+ MOVB DL, (CX)
+ ADDQ $0x01, CX
memmove_emit_remainder_encodeSnappyBlockAsm8B:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveShort
@@ -14882,73 +14897,73 @@ memmove_emit_remainder_encodeSnappyBlockAsm8B:
JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_33through64
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_1or2:
- MOVB (CX), SI
- MOVB -1(CX)(BX*1), CL
- MOVB SI, (AX)
- MOVB CL, -1(AX)(BX*1)
+ MOVB (AX), SI
+ MOVB -1(AX)(BX*1), AL
+ MOVB SI, (CX)
+ MOVB AL, -1(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_3:
- MOVW (CX), SI
- MOVB 2(CX), CL
- MOVW SI, (AX)
- MOVB CL, 2(AX)
+ MOVW (AX), SI
+ MOVB 2(AX), AL
+ MOVW SI, (CX)
+ MOVB AL, 2(CX)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_4through7:
- MOVL (CX), SI
- MOVL -4(CX)(BX*1), CX
- MOVL SI, (AX)
- MOVL CX, -4(AX)(BX*1)
+ MOVL (AX), SI
+ MOVL -4(AX)(BX*1), AX
+ MOVL SI, (CX)
+ MOVL AX, -4(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_8through16:
- MOVQ (CX), SI
- MOVQ -8(CX)(BX*1), CX
- MOVQ SI, (AX)
- MOVQ CX, -8(AX)(BX*1)
+ MOVQ (AX), SI
+ MOVQ -8(AX)(BX*1), AX
+ MOVQ SI, (CX)
+ MOVQ AX, -8(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_17through32:
- MOVOU (CX), X0
- MOVOU -16(CX)(BX*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU -16(AX)(BX*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_33through64:
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B:
- MOVQ DX, AX
+ MOVQ DX, CX
JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm8B
memmove_long_emit_remainder_encodeSnappyBlockAsm8B:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveLong
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
MOVQ BX, DI
SHRQ $0x05, DI
- MOVQ AX, SI
+ MOVQ CX, SI
ANDL $0x0000001f, SI
MOVQ $0x00000040, R8
SUBQ SI, R8
DECQ DI
JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32
- LEAQ -32(CX)(R8*1), SI
- LEAQ -32(AX)(R8*1), R9
+ LEAQ -32(AX)(R8*1), SI
+ LEAQ -32(CX)(R8*1), R9
emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_big_loop_back:
MOVOU (SI), X4
@@ -14962,520 +14977,521 @@ emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_big_loop_back:
JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_big_loop_back
emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32:
- MOVOU -32(CX)(R8*1), X4
- MOVOU -16(CX)(R8*1), X5
- MOVOA X4, -32(AX)(R8*1)
- MOVOA X5, -16(AX)(R8*1)
+ MOVOU -32(AX)(R8*1), X4
+ MOVOU -16(AX)(R8*1), X5
+ MOVOA X4, -32(CX)(R8*1)
+ MOVOA X5, -16(CX)(R8*1)
ADDQ $0x20, R8
CMPQ BX, R8
JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
- MOVQ DX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
+ MOVQ DX, CX
emit_literal_done_emit_remainder_encodeSnappyBlockAsm8B:
- MOVQ dst_base+0(FP), CX
- SUBQ CX, AX
- MOVQ AX, ret+48(FP)
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, CX
+ MOVQ CX, ret+56(FP)
RET
-// func encodeSnappyBetterBlockAsm(dst []byte, src []byte) int
+// func encodeSnappyBetterBlockAsm(dst []byte, src []byte, tmp *[589824]byte) int
// Requires: BMI, SSE2
-TEXT ·encodeSnappyBetterBlockAsm(SB), $589848-56
- MOVQ dst_base+0(FP), AX
- MOVQ $0x00001200, CX
- LEAQ 24(SP), DX
+TEXT ·encodeSnappyBetterBlockAsm(SB), $24-64
+ MOVQ tmp+48(FP), AX
+ MOVQ dst_base+0(FP), CX
+ MOVQ $0x00001200, DX
+ MOVQ AX, BX
PXOR X0, X0
zero_loop_encodeSnappyBetterBlockAsm:
- MOVOU X0, (DX)
- MOVOU X0, 16(DX)
- MOVOU X0, 32(DX)
- MOVOU X0, 48(DX)
- MOVOU X0, 64(DX)
- MOVOU X0, 80(DX)
- MOVOU X0, 96(DX)
- MOVOU X0, 112(DX)
- ADDQ $0x80, DX
- DECQ CX
+ MOVOU X0, (BX)
+ MOVOU X0, 16(BX)
+ MOVOU X0, 32(BX)
+ MOVOU X0, 48(BX)
+ MOVOU X0, 64(BX)
+ MOVOU X0, 80(BX)
+ MOVOU X0, 96(BX)
+ MOVOU X0, 112(BX)
+ ADDQ $0x80, BX
+ DECQ DX
JNZ zero_loop_encodeSnappyBetterBlockAsm
MOVL $0x00000000, 12(SP)
- MOVQ src_len+32(FP), CX
- LEAQ -9(CX), DX
- LEAQ -8(CX), BX
- MOVL BX, 8(SP)
- SHRQ $0x05, CX
- SUBL CX, DX
- LEAQ (AX)(DX*1), DX
- MOVQ DX, (SP)
- MOVL $0x00000001, CX
+ MOVQ src_len+32(FP), DX
+ LEAQ -9(DX), BX
+ LEAQ -8(DX), SI
+ MOVL SI, 8(SP)
+ SHRQ $0x05, DX
+ SUBL DX, BX
+ LEAQ (CX)(BX*1), BX
+ MOVQ BX, (SP)
+ MOVL $0x00000001, DX
MOVL $0x00000000, 16(SP)
- MOVQ src_base+24(FP), DX
+ MOVQ src_base+24(FP), BX
search_loop_encodeSnappyBetterBlockAsm:
- MOVL CX, BX
- SUBL 12(SP), BX
- SHRL $0x07, BX
- CMPL BX, $0x63
+ MOVL DX, SI
+ SUBL 12(SP), SI
+ SHRL $0x07, SI
+ CMPL SI, $0x63
JBE check_maxskip_ok_encodeSnappyBetterBlockAsm
- LEAL 100(CX), BX
+ LEAL 100(DX), SI
JMP check_maxskip_cont_encodeSnappyBetterBlockAsm
check_maxskip_ok_encodeSnappyBetterBlockAsm:
- LEAL 1(CX)(BX*1), BX
+ LEAL 1(DX)(SI*1), SI
check_maxskip_cont_encodeSnappyBetterBlockAsm:
- CMPL BX, 8(SP)
+ CMPL SI, 8(SP)
JAE emit_remainder_encodeSnappyBetterBlockAsm
- MOVQ (DX)(CX*1), SI
- MOVL BX, 20(SP)
- MOVQ $0x00cf1bbcdcbfa563, R8
- MOVQ $0x9e3779b1, BX
- MOVQ SI, R9
- MOVQ SI, R10
- SHLQ $0x08, R9
- IMULQ R8, R9
- SHRQ $0x2f, R9
- SHLQ $0x20, R10
- IMULQ BX, R10
- SHRQ $0x32, R10
- MOVL 24(SP)(R9*4), BX
- MOVL 524312(SP)(R10*4), DI
- MOVL CX, 24(SP)(R9*4)
- MOVL CX, 524312(SP)(R10*4)
- MOVQ (DX)(BX*1), R9
- MOVQ (DX)(DI*1), R10
- CMPQ R9, SI
+ MOVQ (BX)(DX*1), DI
+ MOVL SI, 20(SP)
+ MOVQ $0x00cf1bbcdcbfa563, R9
+ MOVQ $0x9e3779b1, SI
+ MOVQ DI, R10
+ MOVQ DI, R11
+ SHLQ $0x08, R10
+ IMULQ R9, R10
+ SHRQ $0x2f, R10
+ SHLQ $0x20, R11
+ IMULQ SI, R11
+ SHRQ $0x32, R11
+ MOVL (AX)(R10*4), SI
+ MOVL 524288(AX)(R11*4), R8
+ MOVL DX, (AX)(R10*4)
+ MOVL DX, 524288(AX)(R11*4)
+ MOVQ (BX)(SI*1), R10
+ MOVQ (BX)(R8*1), R11
+ CMPQ R10, DI
JEQ candidate_match_encodeSnappyBetterBlockAsm
- CMPQ R10, SI
+ CMPQ R11, DI
JNE no_short_found_encodeSnappyBetterBlockAsm
- MOVL DI, BX
+ MOVL R8, SI
JMP candidate_match_encodeSnappyBetterBlockAsm
no_short_found_encodeSnappyBetterBlockAsm:
- CMPL R9, SI
+ CMPL R10, DI
JEQ candidate_match_encodeSnappyBetterBlockAsm
- CMPL R10, SI
+ CMPL R11, DI
JEQ candidateS_match_encodeSnappyBetterBlockAsm
- MOVL 20(SP), CX
+ MOVL 20(SP), DX
JMP search_loop_encodeSnappyBetterBlockAsm
candidateS_match_encodeSnappyBetterBlockAsm:
- SHRQ $0x08, SI
- MOVQ SI, R9
- SHLQ $0x08, R9
- IMULQ R8, R9
- SHRQ $0x2f, R9
- MOVL 24(SP)(R9*4), BX
- INCL CX
- MOVL CX, 24(SP)(R9*4)
- CMPL (DX)(BX*1), SI
+ SHRQ $0x08, DI
+ MOVQ DI, R10
+ SHLQ $0x08, R10
+ IMULQ R9, R10
+ SHRQ $0x2f, R10
+ MOVL (AX)(R10*4), SI
+ INCL DX
+ MOVL DX, (AX)(R10*4)
+ CMPL (BX)(SI*1), DI
JEQ candidate_match_encodeSnappyBetterBlockAsm
- DECL CX
- MOVL DI, BX
+ DECL DX
+ MOVL R8, SI
candidate_match_encodeSnappyBetterBlockAsm:
- MOVL 12(SP), SI
- TESTL BX, BX
+ MOVL 12(SP), DI
+ TESTL SI, SI
JZ match_extend_back_end_encodeSnappyBetterBlockAsm
match_extend_back_loop_encodeSnappyBetterBlockAsm:
- CMPL CX, SI
+ CMPL DX, DI
JBE match_extend_back_end_encodeSnappyBetterBlockAsm
- MOVB -1(DX)(BX*1), DI
- MOVB -1(DX)(CX*1), R8
- CMPB DI, R8
+ MOVB -1(BX)(SI*1), R8
+ MOVB -1(BX)(DX*1), R9
+ CMPB R8, R9
JNE match_extend_back_end_encodeSnappyBetterBlockAsm
- LEAL -1(CX), CX
- DECL BX
+ LEAL -1(DX), DX
+ DECL SI
JZ match_extend_back_end_encodeSnappyBetterBlockAsm
JMP match_extend_back_loop_encodeSnappyBetterBlockAsm
match_extend_back_end_encodeSnappyBetterBlockAsm:
- MOVL CX, SI
- SUBL 12(SP), SI
- LEAQ 5(AX)(SI*1), SI
- CMPQ SI, (SP)
+ MOVL DX, DI
+ SUBL 12(SP), DI
+ LEAQ 5(CX)(DI*1), DI
+ CMPQ DI, (SP)
JB match_dst_size_check_encodeSnappyBetterBlockAsm
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_dst_size_check_encodeSnappyBetterBlockAsm:
- MOVL CX, SI
- ADDL $0x04, CX
- ADDL $0x04, BX
- MOVQ src_len+32(FP), DI
- SUBL CX, DI
- LEAQ (DX)(CX*1), R8
- LEAQ (DX)(BX*1), R9
+ MOVL DX, DI
+ ADDL $0x04, DX
+ ADDL $0x04, SI
+ MOVQ src_len+32(FP), R8
+ SUBL DX, R8
+ LEAQ (BX)(DX*1), R9
+ LEAQ (BX)(SI*1), R10
// matchLen
- XORL R11, R11
+ XORL R12, R12
matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm:
- CMPL DI, $0x10
+ CMPL R8, $0x10
JB matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm
- MOVQ (R8)(R11*1), R10
- MOVQ 8(R8)(R11*1), R12
- XORQ (R9)(R11*1), R10
+ MOVQ (R9)(R12*1), R11
+ MOVQ 8(R9)(R12*1), R13
+ XORQ (R10)(R12*1), R11
JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm
- XORQ 8(R9)(R11*1), R12
+ XORQ 8(R10)(R12*1), R13
JNZ matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm
- LEAL -16(DI), DI
- LEAL 16(R11), R11
+ LEAL -16(R8), R8
+ LEAL 16(R12), R12
JMP matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm
matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm:
#ifdef GOAMD64_v3
- TZCNTQ R12, R12
+ TZCNTQ R13, R13
#else
- BSFQ R12, R12
+ BSFQ R13, R13
#endif
- SARQ $0x03, R12
- LEAL 8(R11)(R12*1), R11
+ SARQ $0x03, R13
+ LEAL 8(R12)(R13*1), R12
JMP match_nolit_end_encodeSnappyBetterBlockAsm
matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm:
- CMPL DI, $0x08
+ CMPL R8, $0x08
JB matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm
- MOVQ (R8)(R11*1), R10
- XORQ (R9)(R11*1), R10
+ MOVQ (R9)(R12*1), R11
+ XORQ (R10)(R12*1), R11
JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm
- LEAL -8(DI), DI
- LEAL 8(R11), R11
+ LEAL -8(R8), R8
+ LEAL 8(R12), R12
JMP matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm
matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL (R11)(R10*1), R11
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
JMP match_nolit_end_encodeSnappyBetterBlockAsm
matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm:
- CMPL DI, $0x04
+ CMPL R8, $0x04
JB matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm
- MOVL (R8)(R11*1), R10
- CMPL (R9)(R11*1), R10
+ MOVL (R9)(R12*1), R11
+ CMPL (R10)(R12*1), R11
JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm
- LEAL -4(DI), DI
- LEAL 4(R11), R11
+ LEAL -4(R8), R8
+ LEAL 4(R12), R12
matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm:
- CMPL DI, $0x01
+ CMPL R8, $0x01
JE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm
JB match_nolit_end_encodeSnappyBetterBlockAsm
- MOVW (R8)(R11*1), R10
- CMPW (R9)(R11*1), R10
+ MOVW (R9)(R12*1), R11
+ CMPW (R10)(R12*1), R11
JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm
- LEAL 2(R11), R11
- SUBL $0x02, DI
+ LEAL 2(R12), R12
+ SUBL $0x02, R8
JZ match_nolit_end_encodeSnappyBetterBlockAsm
matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm:
- MOVB (R8)(R11*1), R10
- CMPB (R9)(R11*1), R10
+ MOVB (R9)(R12*1), R11
+ CMPB (R10)(R12*1), R11
JNE match_nolit_end_encodeSnappyBetterBlockAsm
- LEAL 1(R11), R11
+ LEAL 1(R12), R12
match_nolit_end_encodeSnappyBetterBlockAsm:
- MOVL CX, DI
- SUBL BX, DI
+ MOVL DX, R8
+ SUBL SI, R8
// Check if repeat
- CMPL R11, $0x01
+ CMPL R12, $0x01
JA match_length_ok_encodeSnappyBetterBlockAsm
- CMPL DI, $0x0000ffff
+ CMPL R8, $0x0000ffff
JBE match_length_ok_encodeSnappyBetterBlockAsm
- MOVL 20(SP), CX
- INCL CX
+ MOVL 20(SP), DX
+ INCL DX
JMP search_loop_encodeSnappyBetterBlockAsm
match_length_ok_encodeSnappyBetterBlockAsm:
- MOVL DI, 16(SP)
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL R8, 16(SP)
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R9
- SUBL BX, R8
- LEAL -1(R8), BX
- CMPL BX, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R10
+ SUBL SI, R9
+ LEAL -1(R9), SI
+ CMPL SI, $0x3c
JB one_byte_match_emit_encodeSnappyBetterBlockAsm
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_match_emit_encodeSnappyBetterBlockAsm
- CMPL BX, $0x00010000
+ CMPL SI, $0x00010000
JB three_bytes_match_emit_encodeSnappyBetterBlockAsm
- CMPL BX, $0x01000000
+ CMPL SI, $0x01000000
JB four_bytes_match_emit_encodeSnappyBetterBlockAsm
- MOVB $0xfc, (AX)
- MOVL BX, 1(AX)
- ADDQ $0x05, AX
+ MOVB $0xfc, (CX)
+ MOVL SI, 1(CX)
+ ADDQ $0x05, CX
JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm
four_bytes_match_emit_encodeSnappyBetterBlockAsm:
- MOVL BX, R10
- SHRL $0x10, R10
- MOVB $0xf8, (AX)
- MOVW BX, 1(AX)
- MOVB R10, 3(AX)
- ADDQ $0x04, AX
+ MOVL SI, R11
+ SHRL $0x10, R11
+ MOVB $0xf8, (CX)
+ MOVW SI, 1(CX)
+ MOVB R11, 3(CX)
+ ADDQ $0x04, CX
JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm
three_bytes_match_emit_encodeSnappyBetterBlockAsm:
- MOVB $0xf4, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm
two_bytes_match_emit_encodeSnappyBetterBlockAsm:
- MOVB $0xf0, (AX)
- MOVB BL, 1(AX)
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_match_emit_encodeSnappyBetterBlockAsm
JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm
one_byte_match_emit_encodeSnappyBetterBlockAsm:
- SHLB $0x02, BL
- MOVB BL, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
memmove_match_emit_encodeSnappyBetterBlockAsm:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveShort
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_8
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_33through64
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_8:
- MOVQ (R9), R10
- MOVQ R10, (AX)
+ MOVQ (R10), R11
+ MOVQ R11, (CX)
JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_8through16:
- MOVQ (R9), R10
- MOVQ -8(R9)(R8*1), R9
- MOVQ R10, (AX)
- MOVQ R9, -8(AX)(R8*1)
+ MOVQ (R10), R11
+ MOVQ -8(R10)(R9*1), R10
+ MOVQ R11, (CX)
+ MOVQ R10, -8(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_17through32:
- MOVOU (R9), X0
- MOVOU -16(R9)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU -16(R10)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_33through64:
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm:
- MOVQ BX, AX
+ MOVQ SI, CX
JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm
memmove_long_match_emit_encodeSnappyBetterBlockAsm:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveLong
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVQ R8, R12
- SHRQ $0x05, R12
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R13
- SUBQ R10, R13
- DECQ R12
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVQ R9, R13
+ SHRQ $0x05, R13
+ MOVQ CX, R11
+ ANDL $0x0000001f, R11
+ MOVQ $0x00000040, R14
+ SUBQ R11, R14
+ DECQ R13
JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32
- LEAQ -32(R9)(R13*1), R10
- LEAQ -32(AX)(R13*1), R14
+ LEAQ -32(R10)(R14*1), R11
+ LEAQ -32(CX)(R14*1), R15
emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R14)
- MOVOA X5, 16(R14)
+ MOVOU (R11), X4
+ MOVOU 16(R11), X5
+ MOVOA X4, (R15)
+ MOVOA X5, 16(R15)
+ ADDQ $0x20, R15
+ ADDQ $0x20, R11
ADDQ $0x20, R14
- ADDQ $0x20, R10
- ADDQ $0x20, R13
- DECQ R12
+ DECQ R13
JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32:
- MOVOU -32(R9)(R13*1), X4
- MOVOU -16(R9)(R13*1), X5
- MOVOA X4, -32(AX)(R13*1)
- MOVOA X5, -16(AX)(R13*1)
- ADDQ $0x20, R13
- CMPQ R8, R13
+ MOVOU -32(R10)(R14*1), X4
+ MOVOU -16(R10)(R14*1), X5
+ MOVOA X4, -32(CX)(R14*1)
+ MOVOA X5, -16(CX)(R14*1)
+ ADDQ $0x20, R14
+ CMPQ R9, R14
JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ BX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ SI, CX
emit_literal_done_match_emit_encodeSnappyBetterBlockAsm:
- ADDL R11, CX
- ADDL $0x04, R11
- MOVL CX, 12(SP)
+ ADDL R12, DX
+ ADDL $0x04, R12
+ MOVL DX, 12(SP)
// emitCopy
- CMPL DI, $0x00010000
+ CMPL R8, $0x00010000
JB two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm
four_bytes_loop_back_match_nolit_encodeSnappyBetterBlockAsm:
- CMPL R11, $0x40
+ CMPL R12, $0x40
JBE four_bytes_remain_match_nolit_encodeSnappyBetterBlockAsm
- MOVB $0xff, (AX)
- MOVL DI, 1(AX)
- LEAL -64(R11), R11
- ADDQ $0x05, AX
- CMPL R11, $0x04
+ MOVB $0xff, (CX)
+ MOVL R8, 1(CX)
+ LEAL -64(R12), R12
+ ADDQ $0x05, CX
+ CMPL R12, $0x04
JB four_bytes_remain_match_nolit_encodeSnappyBetterBlockAsm
JMP four_bytes_loop_back_match_nolit_encodeSnappyBetterBlockAsm
four_bytes_remain_match_nolit_encodeSnappyBetterBlockAsm:
- TESTL R11, R11
+ TESTL R12, R12
JZ match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm
- XORL BX, BX
- LEAL -1(BX)(R11*4), R11
- MOVB R11, (AX)
- MOVL DI, 1(AX)
- ADDQ $0x05, AX
+ XORL SI, SI
+ LEAL -1(SI)(R12*4), R12
+ MOVB R12, (CX)
+ MOVL R8, 1(CX)
+ ADDQ $0x05, CX
JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm
two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm:
- CMPL R11, $0x40
+ CMPL R12, $0x40
JBE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm
- MOVB $0xee, (AX)
- MOVW DI, 1(AX)
- LEAL -60(R11), R11
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW R8, 1(CX)
+ LEAL -60(R12), R12
+ ADDQ $0x03, CX
JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm
two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm:
- MOVL R11, BX
- SHLL $0x02, BX
- CMPL R11, $0x0c
+ MOVL R12, SI
+ SHLL $0x02, SI
+ CMPL R12, $0x0c
JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm
- LEAL -15(BX), BX
- MOVB DI, 1(AX)
- SHRL $0x08, DI
- SHLL $0x05, DI
- ORL DI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ LEAL -15(SI), SI
+ MOVB R8, 1(CX)
+ SHRL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm
emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm:
- LEAL -2(BX), BX
- MOVB BL, (AX)
- MOVW DI, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(SI), SI
+ MOVB SI, (CX)
+ MOVW R8, 1(CX)
+ ADDQ $0x03, CX
match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm:
- CMPL CX, 8(SP)
+ CMPL DX, 8(SP)
JAE emit_remainder_encodeSnappyBetterBlockAsm
- CMPQ AX, (SP)
+ CMPQ CX, (SP)
JB match_nolit_dst_ok_encodeSnappyBetterBlockAsm
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_nolit_dst_ok_encodeSnappyBetterBlockAsm:
- MOVQ $0x00cf1bbcdcbfa563, BX
- MOVQ $0x9e3779b1, DI
- LEAQ 1(SI), SI
- LEAQ -2(CX), R8
- MOVQ (DX)(SI*1), R9
- MOVQ 1(DX)(SI*1), R10
- MOVQ (DX)(R8*1), R11
- MOVQ 1(DX)(R8*1), R12
- SHLQ $0x08, R9
- IMULQ BX, R9
- SHRQ $0x2f, R9
- SHLQ $0x20, R10
- IMULQ DI, R10
- SHRQ $0x32, R10
- SHLQ $0x08, R11
- IMULQ BX, R11
- SHRQ $0x2f, R11
- SHLQ $0x20, R12
- IMULQ DI, R12
- SHRQ $0x32, R12
- LEAQ 1(SI), DI
- LEAQ 1(R8), R13
- MOVL SI, 24(SP)(R9*4)
- MOVL R8, 24(SP)(R11*4)
- MOVL DI, 524312(SP)(R10*4)
- MOVL R13, 524312(SP)(R12*4)
- LEAQ 1(R8)(SI*1), DI
- SHRQ $0x01, DI
- ADDQ $0x01, SI
- SUBQ $0x01, R8
+ MOVQ $0x00cf1bbcdcbfa563, SI
+ MOVQ $0x9e3779b1, R8
+ LEAQ 1(DI), DI
+ LEAQ -2(DX), R9
+ MOVQ (BX)(DI*1), R10
+ MOVQ 1(BX)(DI*1), R11
+ MOVQ (BX)(R9*1), R12
+ MOVQ 1(BX)(R9*1), R13
+ SHLQ $0x08, R10
+ IMULQ SI, R10
+ SHRQ $0x2f, R10
+ SHLQ $0x20, R11
+ IMULQ R8, R11
+ SHRQ $0x32, R11
+ SHLQ $0x08, R12
+ IMULQ SI, R12
+ SHRQ $0x2f, R12
+ SHLQ $0x20, R13
+ IMULQ R8, R13
+ SHRQ $0x32, R13
+ LEAQ 1(DI), R8
+ LEAQ 1(R9), R14
+ MOVL DI, (AX)(R10*4)
+ MOVL R9, (AX)(R12*4)
+ MOVL R8, 524288(AX)(R11*4)
+ MOVL R14, 524288(AX)(R13*4)
+ LEAQ 1(R9)(DI*1), R8
+ SHRQ $0x01, R8
+ ADDQ $0x01, DI
+ SUBQ $0x01, R9
index_loop_encodeSnappyBetterBlockAsm:
- CMPQ DI, R8
+ CMPQ R8, R9
JAE search_loop_encodeSnappyBetterBlockAsm
- MOVQ (DX)(SI*1), R9
- MOVQ (DX)(DI*1), R10
- SHLQ $0x08, R9
- IMULQ BX, R9
- SHRQ $0x2f, R9
+ MOVQ (BX)(DI*1), R10
+ MOVQ (BX)(R8*1), R11
SHLQ $0x08, R10
- IMULQ BX, R10
+ IMULQ SI, R10
SHRQ $0x2f, R10
- MOVL SI, 24(SP)(R9*4)
- MOVL DI, 24(SP)(R10*4)
- ADDQ $0x02, SI
+ SHLQ $0x08, R11
+ IMULQ SI, R11
+ SHRQ $0x2f, R11
+ MOVL DI, (AX)(R10*4)
+ MOVL R8, (AX)(R11*4)
ADDQ $0x02, DI
+ ADDQ $0x02, R8
JMP index_loop_encodeSnappyBetterBlockAsm
emit_remainder_encodeSnappyBetterBlockAsm:
- MOVQ src_len+32(FP), CX
- SUBL 12(SP), CX
- LEAQ 5(AX)(CX*1), CX
- CMPQ CX, (SP)
+ MOVQ src_len+32(FP), AX
+ SUBL 12(SP), AX
+ LEAQ 5(CX)(AX*1), AX
+ CMPQ AX, (SP)
JB emit_remainder_ok_encodeSnappyBetterBlockAsm
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
emit_remainder_ok_encodeSnappyBetterBlockAsm:
- MOVQ src_len+32(FP), CX
- MOVL 12(SP), BX
- CMPL BX, CX
+ MOVQ src_len+32(FP), AX
+ MOVL 12(SP), DX
+ CMPL DX, AX
JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm
- MOVL CX, SI
- MOVL CX, 12(SP)
- LEAQ (DX)(BX*1), CX
- SUBL BX, SI
+ MOVL AX, SI
+ MOVL AX, 12(SP)
+ LEAQ (BX)(DX*1), AX
+ SUBL DX, SI
LEAL -1(SI), DX
CMPL DX, $0x3c
JB one_byte_emit_remainder_encodeSnappyBetterBlockAsm
@@ -15485,41 +15501,41 @@ emit_remainder_ok_encodeSnappyBetterBlockAsm:
JB three_bytes_emit_remainder_encodeSnappyBetterBlockAsm
CMPL DX, $0x01000000
JB four_bytes_emit_remainder_encodeSnappyBetterBlockAsm
- MOVB $0xfc, (AX)
- MOVL DX, 1(AX)
- ADDQ $0x05, AX
+ MOVB $0xfc, (CX)
+ MOVL DX, 1(CX)
+ ADDQ $0x05, CX
JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm
four_bytes_emit_remainder_encodeSnappyBetterBlockAsm:
MOVL DX, BX
SHRL $0x10, BX
- MOVB $0xf8, (AX)
- MOVW DX, 1(AX)
- MOVB BL, 3(AX)
- ADDQ $0x04, AX
+ MOVB $0xf8, (CX)
+ MOVW DX, 1(CX)
+ MOVB BL, 3(CX)
+ ADDQ $0x04, CX
JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm
three_bytes_emit_remainder_encodeSnappyBetterBlockAsm:
- MOVB $0xf4, (AX)
- MOVW DX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW DX, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm
two_bytes_emit_remainder_encodeSnappyBetterBlockAsm:
- MOVB $0xf0, (AX)
- MOVB DL, 1(AX)
- ADDQ $0x02, AX
+ MOVB $0xf0, (CX)
+ MOVB DL, 1(CX)
+ ADDQ $0x02, CX
CMPL DX, $0x40
JB memmove_emit_remainder_encodeSnappyBetterBlockAsm
JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm
one_byte_emit_remainder_encodeSnappyBetterBlockAsm:
SHLB $0x02, DL
- MOVB DL, (AX)
- ADDQ $0x01, AX
+ MOVB DL, (CX)
+ ADDQ $0x01, CX
memmove_emit_remainder_encodeSnappyBetterBlockAsm:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveShort
@@ -15535,73 +15551,73 @@ memmove_emit_remainder_encodeSnappyBetterBlockAsm:
JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_33through64
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_1or2:
- MOVB (CX), SI
- MOVB -1(CX)(BX*1), CL
- MOVB SI, (AX)
- MOVB CL, -1(AX)(BX*1)
+ MOVB (AX), SI
+ MOVB -1(AX)(BX*1), AL
+ MOVB SI, (CX)
+ MOVB AL, -1(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_3:
- MOVW (CX), SI
- MOVB 2(CX), CL
- MOVW SI, (AX)
- MOVB CL, 2(AX)
+ MOVW (AX), SI
+ MOVB 2(AX), AL
+ MOVW SI, (CX)
+ MOVB AL, 2(CX)
JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_4through7:
- MOVL (CX), SI
- MOVL -4(CX)(BX*1), CX
- MOVL SI, (AX)
- MOVL CX, -4(AX)(BX*1)
+ MOVL (AX), SI
+ MOVL -4(AX)(BX*1), AX
+ MOVL SI, (CX)
+ MOVL AX, -4(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_8through16:
- MOVQ (CX), SI
- MOVQ -8(CX)(BX*1), CX
- MOVQ SI, (AX)
- MOVQ CX, -8(AX)(BX*1)
+ MOVQ (AX), SI
+ MOVQ -8(AX)(BX*1), AX
+ MOVQ SI, (CX)
+ MOVQ AX, -8(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_17through32:
- MOVOU (CX), X0
- MOVOU -16(CX)(BX*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU -16(AX)(BX*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_33through64:
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm:
- MOVQ DX, AX
+ MOVQ DX, CX
JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm
memmove_long_emit_remainder_encodeSnappyBetterBlockAsm:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveLong
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
MOVQ BX, DI
SHRQ $0x05, DI
- MOVQ AX, SI
+ MOVQ CX, SI
ANDL $0x0000001f, SI
MOVQ $0x00000040, R8
SUBQ SI, R8
DECQ DI
JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32
- LEAQ -32(CX)(R8*1), SI
- LEAQ -32(AX)(R8*1), R9
+ LEAQ -32(AX)(R8*1), SI
+ LEAQ -32(CX)(R8*1), R9
emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_big_loop_back:
MOVOU (SI), X4
@@ -15615,463 +15631,464 @@ emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_big_loop_ba
JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_big_loop_back
emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32:
- MOVOU -32(CX)(R8*1), X4
- MOVOU -16(CX)(R8*1), X5
- MOVOA X4, -32(AX)(R8*1)
- MOVOA X5, -16(AX)(R8*1)
+ MOVOU -32(AX)(R8*1), X4
+ MOVOU -16(AX)(R8*1), X5
+ MOVOA X4, -32(CX)(R8*1)
+ MOVOA X5, -16(CX)(R8*1)
ADDQ $0x20, R8
CMPQ BX, R8
JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
- MOVQ DX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
+ MOVQ DX, CX
emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm:
- MOVQ dst_base+0(FP), CX
- SUBQ CX, AX
- MOVQ AX, ret+48(FP)
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, CX
+ MOVQ CX, ret+56(FP)
RET
-// func encodeSnappyBetterBlockAsm64K(dst []byte, src []byte) int
+// func encodeSnappyBetterBlockAsm64K(dst []byte, src []byte, tmp *[294912]byte) int
// Requires: BMI, SSE2
-TEXT ·encodeSnappyBetterBlockAsm64K(SB), $327704-56
- MOVQ dst_base+0(FP), AX
- MOVQ $0x00000a00, CX
- LEAQ 24(SP), DX
+TEXT ·encodeSnappyBetterBlockAsm64K(SB), $24-64
+ MOVQ tmp+48(FP), AX
+ MOVQ dst_base+0(FP), CX
+ MOVQ $0x00000900, DX
+ MOVQ AX, BX
PXOR X0, X0
zero_loop_encodeSnappyBetterBlockAsm64K:
- MOVOU X0, (DX)
- MOVOU X0, 16(DX)
- MOVOU X0, 32(DX)
- MOVOU X0, 48(DX)
- MOVOU X0, 64(DX)
- MOVOU X0, 80(DX)
- MOVOU X0, 96(DX)
- MOVOU X0, 112(DX)
- ADDQ $0x80, DX
- DECQ CX
+ MOVOU X0, (BX)
+ MOVOU X0, 16(BX)
+ MOVOU X0, 32(BX)
+ MOVOU X0, 48(BX)
+ MOVOU X0, 64(BX)
+ MOVOU X0, 80(BX)
+ MOVOU X0, 96(BX)
+ MOVOU X0, 112(BX)
+ ADDQ $0x80, BX
+ DECQ DX
JNZ zero_loop_encodeSnappyBetterBlockAsm64K
MOVL $0x00000000, 12(SP)
- MOVQ src_len+32(FP), CX
- LEAQ -9(CX), DX
- LEAQ -8(CX), BX
- MOVL BX, 8(SP)
- SHRQ $0x05, CX
- SUBL CX, DX
- LEAQ (AX)(DX*1), DX
- MOVQ DX, (SP)
- MOVL $0x00000001, CX
+ MOVQ src_len+32(FP), DX
+ LEAQ -9(DX), BX
+ LEAQ -8(DX), SI
+ MOVL SI, 8(SP)
+ SHRQ $0x05, DX
+ SUBL DX, BX
+ LEAQ (CX)(BX*1), BX
+ MOVQ BX, (SP)
+ MOVL $0x00000001, DX
MOVL $0x00000000, 16(SP)
- MOVQ src_base+24(FP), DX
+ MOVQ src_base+24(FP), BX
search_loop_encodeSnappyBetterBlockAsm64K:
- MOVL CX, BX
- SUBL 12(SP), BX
- SHRL $0x07, BX
- LEAL 1(CX)(BX*1), BX
- CMPL BX, 8(SP)
+ MOVL DX, SI
+ SUBL 12(SP), SI
+ SHRL $0x07, SI
+ LEAL 1(DX)(SI*1), SI
+ CMPL SI, 8(SP)
JAE emit_remainder_encodeSnappyBetterBlockAsm64K
- MOVQ (DX)(CX*1), SI
- MOVL BX, 20(SP)
- MOVQ $0x00cf1bbcdcbfa563, R8
- MOVQ $0x9e3779b1, BX
- MOVQ SI, R9
- MOVQ SI, R10
- SHLQ $0x08, R9
- IMULQ R8, R9
- SHRQ $0x30, R9
- SHLQ $0x20, R10
- IMULQ BX, R10
- SHRQ $0x32, R10
- MOVL 24(SP)(R9*4), BX
- MOVL 262168(SP)(R10*4), DI
- MOVL CX, 24(SP)(R9*4)
- MOVL CX, 262168(SP)(R10*4)
- MOVQ (DX)(BX*1), R9
- MOVQ (DX)(DI*1), R10
- CMPQ R9, SI
+ MOVQ (BX)(DX*1), DI
+ MOVL SI, 20(SP)
+ MOVQ $0x00cf1bbcdcbfa563, R9
+ MOVQ $0x9e3779b1, SI
+ MOVQ DI, R10
+ MOVQ DI, R11
+ SHLQ $0x08, R10
+ IMULQ R9, R10
+ SHRQ $0x30, R10
+ SHLQ $0x20, R11
+ IMULQ SI, R11
+ SHRQ $0x33, R11
+ MOVL (AX)(R10*4), SI
+ MOVL 262144(AX)(R11*4), R8
+ MOVL DX, (AX)(R10*4)
+ MOVL DX, 262144(AX)(R11*4)
+ MOVQ (BX)(SI*1), R10
+ MOVQ (BX)(R8*1), R11
+ CMPQ R10, DI
JEQ candidate_match_encodeSnappyBetterBlockAsm64K
- CMPQ R10, SI
+ CMPQ R11, DI
JNE no_short_found_encodeSnappyBetterBlockAsm64K
- MOVL DI, BX
+ MOVL R8, SI
JMP candidate_match_encodeSnappyBetterBlockAsm64K
no_short_found_encodeSnappyBetterBlockAsm64K:
- CMPL R9, SI
+ CMPL R10, DI
JEQ candidate_match_encodeSnappyBetterBlockAsm64K
- CMPL R10, SI
+ CMPL R11, DI
JEQ candidateS_match_encodeSnappyBetterBlockAsm64K
- MOVL 20(SP), CX
+ MOVL 20(SP), DX
JMP search_loop_encodeSnappyBetterBlockAsm64K
candidateS_match_encodeSnappyBetterBlockAsm64K:
- SHRQ $0x08, SI
- MOVQ SI, R9
- SHLQ $0x08, R9
- IMULQ R8, R9
- SHRQ $0x30, R9
- MOVL 24(SP)(R9*4), BX
- INCL CX
- MOVL CX, 24(SP)(R9*4)
- CMPL (DX)(BX*1), SI
+ SHRQ $0x08, DI
+ MOVQ DI, R10
+ SHLQ $0x08, R10
+ IMULQ R9, R10
+ SHRQ $0x30, R10
+ MOVL (AX)(R10*4), SI
+ INCL DX
+ MOVL DX, (AX)(R10*4)
+ CMPL (BX)(SI*1), DI
JEQ candidate_match_encodeSnappyBetterBlockAsm64K
- DECL CX
- MOVL DI, BX
+ DECL DX
+ MOVL R8, SI
candidate_match_encodeSnappyBetterBlockAsm64K:
- MOVL 12(SP), SI
- TESTL BX, BX
+ MOVL 12(SP), DI
+ TESTL SI, SI
JZ match_extend_back_end_encodeSnappyBetterBlockAsm64K
match_extend_back_loop_encodeSnappyBetterBlockAsm64K:
- CMPL CX, SI
+ CMPL DX, DI
JBE match_extend_back_end_encodeSnappyBetterBlockAsm64K
- MOVB -1(DX)(BX*1), DI
- MOVB -1(DX)(CX*1), R8
- CMPB DI, R8
+ MOVB -1(BX)(SI*1), R8
+ MOVB -1(BX)(DX*1), R9
+ CMPB R8, R9
JNE match_extend_back_end_encodeSnappyBetterBlockAsm64K
- LEAL -1(CX), CX
- DECL BX
+ LEAL -1(DX), DX
+ DECL SI
JZ match_extend_back_end_encodeSnappyBetterBlockAsm64K
JMP match_extend_back_loop_encodeSnappyBetterBlockAsm64K
match_extend_back_end_encodeSnappyBetterBlockAsm64K:
- MOVL CX, SI
- SUBL 12(SP), SI
- LEAQ 3(AX)(SI*1), SI
- CMPQ SI, (SP)
+ MOVL DX, DI
+ SUBL 12(SP), DI
+ LEAQ 3(CX)(DI*1), DI
+ CMPQ DI, (SP)
JB match_dst_size_check_encodeSnappyBetterBlockAsm64K
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_dst_size_check_encodeSnappyBetterBlockAsm64K:
- MOVL CX, SI
- ADDL $0x04, CX
- ADDL $0x04, BX
- MOVQ src_len+32(FP), DI
- SUBL CX, DI
- LEAQ (DX)(CX*1), R8
- LEAQ (DX)(BX*1), R9
+ MOVL DX, DI
+ ADDL $0x04, DX
+ ADDL $0x04, SI
+ MOVQ src_len+32(FP), R8
+ SUBL DX, R8
+ LEAQ (BX)(DX*1), R9
+ LEAQ (BX)(SI*1), R10
// matchLen
- XORL R11, R11
+ XORL R12, R12
matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm64K:
- CMPL DI, $0x10
+ CMPL R8, $0x10
JB matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm64K
- MOVQ (R8)(R11*1), R10
- MOVQ 8(R8)(R11*1), R12
- XORQ (R9)(R11*1), R10
+ MOVQ (R9)(R12*1), R11
+ MOVQ 8(R9)(R12*1), R13
+ XORQ (R10)(R12*1), R11
JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm64K
- XORQ 8(R9)(R11*1), R12
+ XORQ 8(R10)(R12*1), R13
JNZ matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm64K
- LEAL -16(DI), DI
- LEAL 16(R11), R11
+ LEAL -16(R8), R8
+ LEAL 16(R12), R12
JMP matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm64K
matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm64K:
#ifdef GOAMD64_v3
- TZCNTQ R12, R12
+ TZCNTQ R13, R13
#else
- BSFQ R12, R12
+ BSFQ R13, R13
#endif
- SARQ $0x03, R12
- LEAL 8(R11)(R12*1), R11
+ SARQ $0x03, R13
+ LEAL 8(R12)(R13*1), R12
JMP match_nolit_end_encodeSnappyBetterBlockAsm64K
matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm64K:
- CMPL DI, $0x08
+ CMPL R8, $0x08
JB matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm64K
- MOVQ (R8)(R11*1), R10
- XORQ (R9)(R11*1), R10
+ MOVQ (R9)(R12*1), R11
+ XORQ (R10)(R12*1), R11
JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm64K
- LEAL -8(DI), DI
- LEAL 8(R11), R11
+ LEAL -8(R8), R8
+ LEAL 8(R12), R12
JMP matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm64K
matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm64K:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL (R11)(R10*1), R11
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
JMP match_nolit_end_encodeSnappyBetterBlockAsm64K
matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm64K:
- CMPL DI, $0x04
+ CMPL R8, $0x04
JB matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm64K
- MOVL (R8)(R11*1), R10
- CMPL (R9)(R11*1), R10
+ MOVL (R9)(R12*1), R11
+ CMPL (R10)(R12*1), R11
JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm64K
- LEAL -4(DI), DI
- LEAL 4(R11), R11
+ LEAL -4(R8), R8
+ LEAL 4(R12), R12
matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm64K:
- CMPL DI, $0x01
+ CMPL R8, $0x01
JE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm64K
JB match_nolit_end_encodeSnappyBetterBlockAsm64K
- MOVW (R8)(R11*1), R10
- CMPW (R9)(R11*1), R10
+ MOVW (R9)(R12*1), R11
+ CMPW (R10)(R12*1), R11
JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm64K
- LEAL 2(R11), R11
- SUBL $0x02, DI
+ LEAL 2(R12), R12
+ SUBL $0x02, R8
JZ match_nolit_end_encodeSnappyBetterBlockAsm64K
matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm64K:
- MOVB (R8)(R11*1), R10
- CMPB (R9)(R11*1), R10
+ MOVB (R9)(R12*1), R11
+ CMPB (R10)(R12*1), R11
JNE match_nolit_end_encodeSnappyBetterBlockAsm64K
- LEAL 1(R11), R11
+ LEAL 1(R12), R12
match_nolit_end_encodeSnappyBetterBlockAsm64K:
- MOVL CX, DI
- SUBL BX, DI
+ MOVL DX, R8
+ SUBL SI, R8
// Check if repeat
- MOVL DI, 16(SP)
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL R8, 16(SP)
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm64K
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R9
- SUBL BX, R8
- LEAL -1(R8), BX
- CMPL BX, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R10
+ SUBL SI, R9
+ LEAL -1(R9), SI
+ CMPL SI, $0x3c
JB one_byte_match_emit_encodeSnappyBetterBlockAsm64K
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_match_emit_encodeSnappyBetterBlockAsm64K
JB three_bytes_match_emit_encodeSnappyBetterBlockAsm64K
three_bytes_match_emit_encodeSnappyBetterBlockAsm64K:
- MOVB $0xf4, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm64K
two_bytes_match_emit_encodeSnappyBetterBlockAsm64K:
- MOVB $0xf0, (AX)
- MOVB BL, 1(AX)
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_match_emit_encodeSnappyBetterBlockAsm64K
JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm64K
one_byte_match_emit_encodeSnappyBetterBlockAsm64K:
- SHLB $0x02, BL
- MOVB BL, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
memmove_match_emit_encodeSnappyBetterBlockAsm64K:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveShort
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_8
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_33through64
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_8:
- MOVQ (R9), R10
- MOVQ R10, (AX)
+ MOVQ (R10), R11
+ MOVQ R11, (CX)
JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm64K
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_8through16:
- MOVQ (R9), R10
- MOVQ -8(R9)(R8*1), R9
- MOVQ R10, (AX)
- MOVQ R9, -8(AX)(R8*1)
+ MOVQ (R10), R11
+ MOVQ -8(R10)(R9*1), R10
+ MOVQ R11, (CX)
+ MOVQ R10, -8(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm64K
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_17through32:
- MOVOU (R9), X0
- MOVOU -16(R9)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU -16(R10)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm64K
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_33through64:
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm64K:
- MOVQ BX, AX
+ MOVQ SI, CX
JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm64K
memmove_long_match_emit_encodeSnappyBetterBlockAsm64K:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveLong
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVQ R8, R12
- SHRQ $0x05, R12
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R13
- SUBQ R10, R13
- DECQ R12
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVQ R9, R13
+ SHRQ $0x05, R13
+ MOVQ CX, R11
+ ANDL $0x0000001f, R11
+ MOVQ $0x00000040, R14
+ SUBQ R11, R14
+ DECQ R13
JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32
- LEAQ -32(R9)(R13*1), R10
- LEAQ -32(AX)(R13*1), R14
+ LEAQ -32(R10)(R14*1), R11
+ LEAQ -32(CX)(R14*1), R15
emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R14)
- MOVOA X5, 16(R14)
+ MOVOU (R11), X4
+ MOVOU 16(R11), X5
+ MOVOA X4, (R15)
+ MOVOA X5, 16(R15)
+ ADDQ $0x20, R15
+ ADDQ $0x20, R11
ADDQ $0x20, R14
- ADDQ $0x20, R10
- ADDQ $0x20, R13
- DECQ R12
+ DECQ R13
JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_big_loop_back
-
-emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32:
- MOVOU -32(R9)(R13*1), X4
- MOVOU -16(R9)(R13*1), X5
- MOVOA X4, -32(AX)(R13*1)
- MOVOA X5, -16(AX)(R13*1)
- ADDQ $0x20, R13
- CMPQ R8, R13
+
+emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32:
+ MOVOU -32(R10)(R14*1), X4
+ MOVOU -16(R10)(R14*1), X5
+ MOVOA X4, -32(CX)(R14*1)
+ MOVOA X5, -16(CX)(R14*1)
+ ADDQ $0x20, R14
+ CMPQ R9, R14
JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ BX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ SI, CX
emit_literal_done_match_emit_encodeSnappyBetterBlockAsm64K:
- ADDL R11, CX
- ADDL $0x04, R11
- MOVL CX, 12(SP)
+ ADDL R12, DX
+ ADDL $0x04, R12
+ MOVL DX, 12(SP)
// emitCopy
two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm64K:
- CMPL R11, $0x40
+ CMPL R12, $0x40
JBE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm64K
- MOVB $0xee, (AX)
- MOVW DI, 1(AX)
- LEAL -60(R11), R11
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW R8, 1(CX)
+ LEAL -60(R12), R12
+ ADDQ $0x03, CX
JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm64K
two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm64K:
- MOVL R11, BX
- SHLL $0x02, BX
- CMPL R11, $0x0c
+ MOVL R12, SI
+ SHLL $0x02, SI
+ CMPL R12, $0x0c
JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm64K
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm64K
- LEAL -15(BX), BX
- MOVB DI, 1(AX)
- SHRL $0x08, DI
- SHLL $0x05, DI
- ORL DI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ LEAL -15(SI), SI
+ MOVB R8, 1(CX)
+ SHRL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm64K
emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm64K:
- LEAL -2(BX), BX
- MOVB BL, (AX)
- MOVW DI, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(SI), SI
+ MOVB SI, (CX)
+ MOVW R8, 1(CX)
+ ADDQ $0x03, CX
match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm64K:
- CMPL CX, 8(SP)
+ CMPL DX, 8(SP)
JAE emit_remainder_encodeSnappyBetterBlockAsm64K
- CMPQ AX, (SP)
+ CMPQ CX, (SP)
JB match_nolit_dst_ok_encodeSnappyBetterBlockAsm64K
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_nolit_dst_ok_encodeSnappyBetterBlockAsm64K:
- MOVQ $0x00cf1bbcdcbfa563, BX
- MOVQ $0x9e3779b1, DI
- LEAQ 1(SI), SI
- LEAQ -2(CX), R8
- MOVQ (DX)(SI*1), R9
- MOVQ 1(DX)(SI*1), R10
- MOVQ (DX)(R8*1), R11
- MOVQ 1(DX)(R8*1), R12
- SHLQ $0x08, R9
- IMULQ BX, R9
- SHRQ $0x30, R9
- SHLQ $0x20, R10
- IMULQ DI, R10
- SHRQ $0x32, R10
- SHLQ $0x08, R11
- IMULQ BX, R11
- SHRQ $0x30, R11
- SHLQ $0x20, R12
- IMULQ DI, R12
- SHRQ $0x32, R12
- LEAQ 1(SI), DI
- LEAQ 1(R8), R13
- MOVL SI, 24(SP)(R9*4)
- MOVL R8, 24(SP)(R11*4)
- MOVL DI, 262168(SP)(R10*4)
- MOVL R13, 262168(SP)(R12*4)
- LEAQ 1(R8)(SI*1), DI
- SHRQ $0x01, DI
- ADDQ $0x01, SI
- SUBQ $0x01, R8
+ MOVQ $0x00cf1bbcdcbfa563, SI
+ MOVQ $0x9e3779b1, R8
+ LEAQ 1(DI), DI
+ LEAQ -2(DX), R9
+ MOVQ (BX)(DI*1), R10
+ MOVQ 1(BX)(DI*1), R11
+ MOVQ (BX)(R9*1), R12
+ MOVQ 1(BX)(R9*1), R13
+ SHLQ $0x08, R10
+ IMULQ SI, R10
+ SHRQ $0x30, R10
+ SHLQ $0x20, R11
+ IMULQ R8, R11
+ SHRQ $0x33, R11
+ SHLQ $0x08, R12
+ IMULQ SI, R12
+ SHRQ $0x30, R12
+ SHLQ $0x20, R13
+ IMULQ R8, R13
+ SHRQ $0x33, R13
+ LEAQ 1(DI), R8
+ LEAQ 1(R9), R14
+ MOVL DI, (AX)(R10*4)
+ MOVL R9, (AX)(R12*4)
+ MOVL R8, 262144(AX)(R11*4)
+ MOVL R14, 262144(AX)(R13*4)
+ LEAQ 1(R9)(DI*1), R8
+ SHRQ $0x01, R8
+ ADDQ $0x01, DI
+ SUBQ $0x01, R9
index_loop_encodeSnappyBetterBlockAsm64K:
- CMPQ DI, R8
+ CMPQ R8, R9
JAE search_loop_encodeSnappyBetterBlockAsm64K
- MOVQ (DX)(SI*1), R9
- MOVQ (DX)(DI*1), R10
- SHLQ $0x08, R9
- IMULQ BX, R9
- SHRQ $0x30, R9
+ MOVQ (BX)(DI*1), R10
+ MOVQ (BX)(R8*1), R11
SHLQ $0x08, R10
- IMULQ BX, R10
+ IMULQ SI, R10
SHRQ $0x30, R10
- MOVL SI, 24(SP)(R9*4)
- MOVL DI, 24(SP)(R10*4)
- ADDQ $0x02, SI
+ SHLQ $0x08, R11
+ IMULQ SI, R11
+ SHRQ $0x30, R11
+ MOVL DI, (AX)(R10*4)
+ MOVL R8, (AX)(R11*4)
ADDQ $0x02, DI
+ ADDQ $0x02, R8
JMP index_loop_encodeSnappyBetterBlockAsm64K
emit_remainder_encodeSnappyBetterBlockAsm64K:
- MOVQ src_len+32(FP), CX
- SUBL 12(SP), CX
- LEAQ 3(AX)(CX*1), CX
- CMPQ CX, (SP)
+ MOVQ src_len+32(FP), AX
+ SUBL 12(SP), AX
+ LEAQ 3(CX)(AX*1), AX
+ CMPQ AX, (SP)
JB emit_remainder_ok_encodeSnappyBetterBlockAsm64K
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
emit_remainder_ok_encodeSnappyBetterBlockAsm64K:
- MOVQ src_len+32(FP), CX
- MOVL 12(SP), BX
- CMPL BX, CX
+ MOVQ src_len+32(FP), AX
+ MOVL 12(SP), DX
+ CMPL DX, AX
JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm64K
- MOVL CX, SI
- MOVL CX, 12(SP)
- LEAQ (DX)(BX*1), CX
- SUBL BX, SI
+ MOVL AX, SI
+ MOVL AX, 12(SP)
+ LEAQ (BX)(DX*1), AX
+ SUBL DX, SI
LEAL -1(SI), DX
CMPL DX, $0x3c
JB one_byte_emit_remainder_encodeSnappyBetterBlockAsm64K
@@ -16080,26 +16097,26 @@ emit_remainder_ok_encodeSnappyBetterBlockAsm64K:
JB three_bytes_emit_remainder_encodeSnappyBetterBlockAsm64K
three_bytes_emit_remainder_encodeSnappyBetterBlockAsm64K:
- MOVB $0xf4, (AX)
- MOVW DX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW DX, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64K
two_bytes_emit_remainder_encodeSnappyBetterBlockAsm64K:
- MOVB $0xf0, (AX)
- MOVB DL, 1(AX)
- ADDQ $0x02, AX
+ MOVB $0xf0, (CX)
+ MOVB DL, 1(CX)
+ ADDQ $0x02, CX
CMPL DX, $0x40
JB memmove_emit_remainder_encodeSnappyBetterBlockAsm64K
JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64K
one_byte_emit_remainder_encodeSnappyBetterBlockAsm64K:
SHLB $0x02, DL
- MOVB DL, (AX)
- ADDQ $0x01, AX
+ MOVB DL, (CX)
+ ADDQ $0x01, CX
memmove_emit_remainder_encodeSnappyBetterBlockAsm64K:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveShort
@@ -16115,73 +16132,73 @@ memmove_emit_remainder_encodeSnappyBetterBlockAsm64K:
JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_33through64
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_1or2:
- MOVB (CX), SI
- MOVB -1(CX)(BX*1), CL
- MOVB SI, (AX)
- MOVB CL, -1(AX)(BX*1)
+ MOVB (AX), SI
+ MOVB -1(AX)(BX*1), AL
+ MOVB SI, (CX)
+ MOVB AL, -1(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_3:
- MOVW (CX), SI
- MOVB 2(CX), CL
- MOVW SI, (AX)
- MOVB CL, 2(AX)
+ MOVW (AX), SI
+ MOVB 2(AX), AL
+ MOVW SI, (CX)
+ MOVB AL, 2(CX)
JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_4through7:
- MOVL (CX), SI
- MOVL -4(CX)(BX*1), CX
- MOVL SI, (AX)
- MOVL CX, -4(AX)(BX*1)
+ MOVL (AX), SI
+ MOVL -4(AX)(BX*1), AX
+ MOVL SI, (CX)
+ MOVL AX, -4(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_8through16:
- MOVQ (CX), SI
- MOVQ -8(CX)(BX*1), CX
- MOVQ SI, (AX)
- MOVQ CX, -8(AX)(BX*1)
+ MOVQ (AX), SI
+ MOVQ -8(AX)(BX*1), AX
+ MOVQ SI, (CX)
+ MOVQ AX, -8(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_17through32:
- MOVOU (CX), X0
- MOVOU -16(CX)(BX*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU -16(AX)(BX*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_33through64:
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K:
- MOVQ DX, AX
+ MOVQ DX, CX
JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm64K
memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64K:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveLong
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
MOVQ BX, DI
SHRQ $0x05, DI
- MOVQ AX, SI
+ MOVQ CX, SI
ANDL $0x0000001f, SI
MOVQ $0x00000040, R8
SUBQ SI, R8
DECQ DI
JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32
- LEAQ -32(CX)(R8*1), SI
- LEAQ -32(AX)(R8*1), R9
+ LEAQ -32(AX)(R8*1), SI
+ LEAQ -32(CX)(R8*1), R9
emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_big_loop_back:
MOVOU (SI), X4
@@ -16195,463 +16212,464 @@ emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_big_loop
JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_big_loop_back
emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32:
- MOVOU -32(CX)(R8*1), X4
- MOVOU -16(CX)(R8*1), X5
- MOVOA X4, -32(AX)(R8*1)
- MOVOA X5, -16(AX)(R8*1)
+ MOVOU -32(AX)(R8*1), X4
+ MOVOU -16(AX)(R8*1), X5
+ MOVOA X4, -32(CX)(R8*1)
+ MOVOA X5, -16(CX)(R8*1)
ADDQ $0x20, R8
CMPQ BX, R8
JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
- MOVQ DX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
+ MOVQ DX, CX
emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm64K:
- MOVQ dst_base+0(FP), CX
- SUBQ CX, AX
- MOVQ AX, ret+48(FP)
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, CX
+ MOVQ CX, ret+56(FP)
RET
-// func encodeSnappyBetterBlockAsm12B(dst []byte, src []byte) int
+// func encodeSnappyBetterBlockAsm12B(dst []byte, src []byte, tmp *[81920]byte) int
// Requires: BMI, SSE2
-TEXT ·encodeSnappyBetterBlockAsm12B(SB), $81944-56
- MOVQ dst_base+0(FP), AX
- MOVQ $0x00000280, CX
- LEAQ 24(SP), DX
+TEXT ·encodeSnappyBetterBlockAsm12B(SB), $24-64
+ MOVQ tmp+48(FP), AX
+ MOVQ dst_base+0(FP), CX
+ MOVQ $0x00000280, DX
+ MOVQ AX, BX
PXOR X0, X0
zero_loop_encodeSnappyBetterBlockAsm12B:
- MOVOU X0, (DX)
- MOVOU X0, 16(DX)
- MOVOU X0, 32(DX)
- MOVOU X0, 48(DX)
- MOVOU X0, 64(DX)
- MOVOU X0, 80(DX)
- MOVOU X0, 96(DX)
- MOVOU X0, 112(DX)
- ADDQ $0x80, DX
- DECQ CX
+ MOVOU X0, (BX)
+ MOVOU X0, 16(BX)
+ MOVOU X0, 32(BX)
+ MOVOU X0, 48(BX)
+ MOVOU X0, 64(BX)
+ MOVOU X0, 80(BX)
+ MOVOU X0, 96(BX)
+ MOVOU X0, 112(BX)
+ ADDQ $0x80, BX
+ DECQ DX
JNZ zero_loop_encodeSnappyBetterBlockAsm12B
MOVL $0x00000000, 12(SP)
- MOVQ src_len+32(FP), CX
- LEAQ -9(CX), DX
- LEAQ -8(CX), BX
- MOVL BX, 8(SP)
- SHRQ $0x05, CX
- SUBL CX, DX
- LEAQ (AX)(DX*1), DX
- MOVQ DX, (SP)
- MOVL $0x00000001, CX
+ MOVQ src_len+32(FP), DX
+ LEAQ -9(DX), BX
+ LEAQ -8(DX), SI
+ MOVL SI, 8(SP)
+ SHRQ $0x05, DX
+ SUBL DX, BX
+ LEAQ (CX)(BX*1), BX
+ MOVQ BX, (SP)
+ MOVL $0x00000001, DX
MOVL $0x00000000, 16(SP)
- MOVQ src_base+24(FP), DX
+ MOVQ src_base+24(FP), BX
search_loop_encodeSnappyBetterBlockAsm12B:
- MOVL CX, BX
- SUBL 12(SP), BX
- SHRL $0x06, BX
- LEAL 1(CX)(BX*1), BX
- CMPL BX, 8(SP)
+ MOVL DX, SI
+ SUBL 12(SP), SI
+ SHRL $0x06, SI
+ LEAL 1(DX)(SI*1), SI
+ CMPL SI, 8(SP)
JAE emit_remainder_encodeSnappyBetterBlockAsm12B
- MOVQ (DX)(CX*1), SI
- MOVL BX, 20(SP)
- MOVQ $0x0000cf1bbcdcbf9b, R8
- MOVQ $0x9e3779b1, BX
- MOVQ SI, R9
- MOVQ SI, R10
- SHLQ $0x10, R9
- IMULQ R8, R9
- SHRQ $0x32, R9
- SHLQ $0x20, R10
- IMULQ BX, R10
- SHRQ $0x34, R10
- MOVL 24(SP)(R9*4), BX
- MOVL 65560(SP)(R10*4), DI
- MOVL CX, 24(SP)(R9*4)
- MOVL CX, 65560(SP)(R10*4)
- MOVQ (DX)(BX*1), R9
- MOVQ (DX)(DI*1), R10
- CMPQ R9, SI
+ MOVQ (BX)(DX*1), DI
+ MOVL SI, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R9
+ MOVQ $0x9e3779b1, SI
+ MOVQ DI, R10
+ MOVQ DI, R11
+ SHLQ $0x10, R10
+ IMULQ R9, R10
+ SHRQ $0x32, R10
+ SHLQ $0x20, R11
+ IMULQ SI, R11
+ SHRQ $0x34, R11
+ MOVL (AX)(R10*4), SI
+ MOVL 65536(AX)(R11*4), R8
+ MOVL DX, (AX)(R10*4)
+ MOVL DX, 65536(AX)(R11*4)
+ MOVQ (BX)(SI*1), R10
+ MOVQ (BX)(R8*1), R11
+ CMPQ R10, DI
JEQ candidate_match_encodeSnappyBetterBlockAsm12B
- CMPQ R10, SI
+ CMPQ R11, DI
JNE no_short_found_encodeSnappyBetterBlockAsm12B
- MOVL DI, BX
+ MOVL R8, SI
JMP candidate_match_encodeSnappyBetterBlockAsm12B
no_short_found_encodeSnappyBetterBlockAsm12B:
- CMPL R9, SI
+ CMPL R10, DI
JEQ candidate_match_encodeSnappyBetterBlockAsm12B
- CMPL R10, SI
+ CMPL R11, DI
JEQ candidateS_match_encodeSnappyBetterBlockAsm12B
- MOVL 20(SP), CX
+ MOVL 20(SP), DX
JMP search_loop_encodeSnappyBetterBlockAsm12B
candidateS_match_encodeSnappyBetterBlockAsm12B:
- SHRQ $0x08, SI
- MOVQ SI, R9
- SHLQ $0x10, R9
- IMULQ R8, R9
- SHRQ $0x32, R9
- MOVL 24(SP)(R9*4), BX
- INCL CX
- MOVL CX, 24(SP)(R9*4)
- CMPL (DX)(BX*1), SI
+ SHRQ $0x08, DI
+ MOVQ DI, R10
+ SHLQ $0x10, R10
+ IMULQ R9, R10
+ SHRQ $0x32, R10
+ MOVL (AX)(R10*4), SI
+ INCL DX
+ MOVL DX, (AX)(R10*4)
+ CMPL (BX)(SI*1), DI
JEQ candidate_match_encodeSnappyBetterBlockAsm12B
- DECL CX
- MOVL DI, BX
+ DECL DX
+ MOVL R8, SI
candidate_match_encodeSnappyBetterBlockAsm12B:
- MOVL 12(SP), SI
- TESTL BX, BX
+ MOVL 12(SP), DI
+ TESTL SI, SI
JZ match_extend_back_end_encodeSnappyBetterBlockAsm12B
match_extend_back_loop_encodeSnappyBetterBlockAsm12B:
- CMPL CX, SI
+ CMPL DX, DI
JBE match_extend_back_end_encodeSnappyBetterBlockAsm12B
- MOVB -1(DX)(BX*1), DI
- MOVB -1(DX)(CX*1), R8
- CMPB DI, R8
+ MOVB -1(BX)(SI*1), R8
+ MOVB -1(BX)(DX*1), R9
+ CMPB R8, R9
JNE match_extend_back_end_encodeSnappyBetterBlockAsm12B
- LEAL -1(CX), CX
- DECL BX
+ LEAL -1(DX), DX
+ DECL SI
JZ match_extend_back_end_encodeSnappyBetterBlockAsm12B
JMP match_extend_back_loop_encodeSnappyBetterBlockAsm12B
match_extend_back_end_encodeSnappyBetterBlockAsm12B:
- MOVL CX, SI
- SUBL 12(SP), SI
- LEAQ 3(AX)(SI*1), SI
- CMPQ SI, (SP)
+ MOVL DX, DI
+ SUBL 12(SP), DI
+ LEAQ 3(CX)(DI*1), DI
+ CMPQ DI, (SP)
JB match_dst_size_check_encodeSnappyBetterBlockAsm12B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_dst_size_check_encodeSnappyBetterBlockAsm12B:
- MOVL CX, SI
- ADDL $0x04, CX
- ADDL $0x04, BX
- MOVQ src_len+32(FP), DI
- SUBL CX, DI
- LEAQ (DX)(CX*1), R8
- LEAQ (DX)(BX*1), R9
+ MOVL DX, DI
+ ADDL $0x04, DX
+ ADDL $0x04, SI
+ MOVQ src_len+32(FP), R8
+ SUBL DX, R8
+ LEAQ (BX)(DX*1), R9
+ LEAQ (BX)(SI*1), R10
// matchLen
- XORL R11, R11
+ XORL R12, R12
matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm12B:
- CMPL DI, $0x10
+ CMPL R8, $0x10
JB matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm12B
- MOVQ (R8)(R11*1), R10
- MOVQ 8(R8)(R11*1), R12
- XORQ (R9)(R11*1), R10
+ MOVQ (R9)(R12*1), R11
+ MOVQ 8(R9)(R12*1), R13
+ XORQ (R10)(R12*1), R11
JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm12B
- XORQ 8(R9)(R11*1), R12
+ XORQ 8(R10)(R12*1), R13
JNZ matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm12B
- LEAL -16(DI), DI
- LEAL 16(R11), R11
+ LEAL -16(R8), R8
+ LEAL 16(R12), R12
JMP matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm12B
matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm12B:
#ifdef GOAMD64_v3
- TZCNTQ R12, R12
+ TZCNTQ R13, R13
#else
- BSFQ R12, R12
+ BSFQ R13, R13
#endif
- SARQ $0x03, R12
- LEAL 8(R11)(R12*1), R11
+ SARQ $0x03, R13
+ LEAL 8(R12)(R13*1), R12
JMP match_nolit_end_encodeSnappyBetterBlockAsm12B
matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm12B:
- CMPL DI, $0x08
+ CMPL R8, $0x08
JB matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm12B
- MOVQ (R8)(R11*1), R10
- XORQ (R9)(R11*1), R10
+ MOVQ (R9)(R12*1), R11
+ XORQ (R10)(R12*1), R11
JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm12B
- LEAL -8(DI), DI
- LEAL 8(R11), R11
+ LEAL -8(R8), R8
+ LEAL 8(R12), R12
JMP matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm12B
matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm12B:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL (R11)(R10*1), R11
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
JMP match_nolit_end_encodeSnappyBetterBlockAsm12B
matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm12B:
- CMPL DI, $0x04
+ CMPL R8, $0x04
JB matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm12B
- MOVL (R8)(R11*1), R10
- CMPL (R9)(R11*1), R10
+ MOVL (R9)(R12*1), R11
+ CMPL (R10)(R12*1), R11
JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm12B
- LEAL -4(DI), DI
- LEAL 4(R11), R11
+ LEAL -4(R8), R8
+ LEAL 4(R12), R12
matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm12B:
- CMPL DI, $0x01
+ CMPL R8, $0x01
JE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm12B
JB match_nolit_end_encodeSnappyBetterBlockAsm12B
- MOVW (R8)(R11*1), R10
- CMPW (R9)(R11*1), R10
+ MOVW (R9)(R12*1), R11
+ CMPW (R10)(R12*1), R11
JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm12B
- LEAL 2(R11), R11
- SUBL $0x02, DI
+ LEAL 2(R12), R12
+ SUBL $0x02, R8
JZ match_nolit_end_encodeSnappyBetterBlockAsm12B
matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm12B:
- MOVB (R8)(R11*1), R10
- CMPB (R9)(R11*1), R10
+ MOVB (R9)(R12*1), R11
+ CMPB (R10)(R12*1), R11
JNE match_nolit_end_encodeSnappyBetterBlockAsm12B
- LEAL 1(R11), R11
+ LEAL 1(R12), R12
match_nolit_end_encodeSnappyBetterBlockAsm12B:
- MOVL CX, DI
- SUBL BX, DI
+ MOVL DX, R8
+ SUBL SI, R8
// Check if repeat
- MOVL DI, 16(SP)
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL R8, 16(SP)
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm12B
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R9
- SUBL BX, R8
- LEAL -1(R8), BX
- CMPL BX, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R10
+ SUBL SI, R9
+ LEAL -1(R9), SI
+ CMPL SI, $0x3c
JB one_byte_match_emit_encodeSnappyBetterBlockAsm12B
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_match_emit_encodeSnappyBetterBlockAsm12B
JB three_bytes_match_emit_encodeSnappyBetterBlockAsm12B
three_bytes_match_emit_encodeSnappyBetterBlockAsm12B:
- MOVB $0xf4, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm12B
two_bytes_match_emit_encodeSnappyBetterBlockAsm12B:
- MOVB $0xf0, (AX)
- MOVB BL, 1(AX)
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_match_emit_encodeSnappyBetterBlockAsm12B
JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm12B
one_byte_match_emit_encodeSnappyBetterBlockAsm12B:
- SHLB $0x02, BL
- MOVB BL, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
memmove_match_emit_encodeSnappyBetterBlockAsm12B:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveShort
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_8
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_33through64
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_8:
- MOVQ (R9), R10
- MOVQ R10, (AX)
+ MOVQ (R10), R11
+ MOVQ R11, (CX)
JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm12B
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_8through16:
- MOVQ (R9), R10
- MOVQ -8(R9)(R8*1), R9
- MOVQ R10, (AX)
- MOVQ R9, -8(AX)(R8*1)
+ MOVQ (R10), R11
+ MOVQ -8(R10)(R9*1), R10
+ MOVQ R11, (CX)
+ MOVQ R10, -8(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm12B
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_17through32:
- MOVOU (R9), X0
- MOVOU -16(R9)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU -16(R10)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm12B
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_33through64:
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm12B:
- MOVQ BX, AX
+ MOVQ SI, CX
JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm12B
memmove_long_match_emit_encodeSnappyBetterBlockAsm12B:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveLong
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVQ R8, R12
- SHRQ $0x05, R12
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R13
- SUBQ R10, R13
- DECQ R12
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVQ R9, R13
+ SHRQ $0x05, R13
+ MOVQ CX, R11
+ ANDL $0x0000001f, R11
+ MOVQ $0x00000040, R14
+ SUBQ R11, R14
+ DECQ R13
JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32
- LEAQ -32(R9)(R13*1), R10
- LEAQ -32(AX)(R13*1), R14
+ LEAQ -32(R10)(R14*1), R11
+ LEAQ -32(CX)(R14*1), R15
emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R14)
- MOVOA X5, 16(R14)
+ MOVOU (R11), X4
+ MOVOU 16(R11), X5
+ MOVOA X4, (R15)
+ MOVOA X5, 16(R15)
+ ADDQ $0x20, R15
+ ADDQ $0x20, R11
ADDQ $0x20, R14
- ADDQ $0x20, R10
- ADDQ $0x20, R13
- DECQ R12
+ DECQ R13
JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32:
- MOVOU -32(R9)(R13*1), X4
- MOVOU -16(R9)(R13*1), X5
- MOVOA X4, -32(AX)(R13*1)
- MOVOA X5, -16(AX)(R13*1)
- ADDQ $0x20, R13
- CMPQ R8, R13
+ MOVOU -32(R10)(R14*1), X4
+ MOVOU -16(R10)(R14*1), X5
+ MOVOA X4, -32(CX)(R14*1)
+ MOVOA X5, -16(CX)(R14*1)
+ ADDQ $0x20, R14
+ CMPQ R9, R14
JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ BX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ SI, CX
emit_literal_done_match_emit_encodeSnappyBetterBlockAsm12B:
- ADDL R11, CX
- ADDL $0x04, R11
- MOVL CX, 12(SP)
+ ADDL R12, DX
+ ADDL $0x04, R12
+ MOVL DX, 12(SP)
// emitCopy
two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm12B:
- CMPL R11, $0x40
+ CMPL R12, $0x40
JBE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm12B
- MOVB $0xee, (AX)
- MOVW DI, 1(AX)
- LEAL -60(R11), R11
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW R8, 1(CX)
+ LEAL -60(R12), R12
+ ADDQ $0x03, CX
JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm12B
two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm12B:
- MOVL R11, BX
- SHLL $0x02, BX
- CMPL R11, $0x0c
+ MOVL R12, SI
+ SHLL $0x02, SI
+ CMPL R12, $0x0c
JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm12B
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm12B
- LEAL -15(BX), BX
- MOVB DI, 1(AX)
- SHRL $0x08, DI
- SHLL $0x05, DI
- ORL DI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ LEAL -15(SI), SI
+ MOVB R8, 1(CX)
+ SHRL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm12B
emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm12B:
- LEAL -2(BX), BX
- MOVB BL, (AX)
- MOVW DI, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(SI), SI
+ MOVB SI, (CX)
+ MOVW R8, 1(CX)
+ ADDQ $0x03, CX
match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm12B:
- CMPL CX, 8(SP)
+ CMPL DX, 8(SP)
JAE emit_remainder_encodeSnappyBetterBlockAsm12B
- CMPQ AX, (SP)
+ CMPQ CX, (SP)
JB match_nolit_dst_ok_encodeSnappyBetterBlockAsm12B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_nolit_dst_ok_encodeSnappyBetterBlockAsm12B:
- MOVQ $0x0000cf1bbcdcbf9b, BX
- MOVQ $0x9e3779b1, DI
- LEAQ 1(SI), SI
- LEAQ -2(CX), R8
- MOVQ (DX)(SI*1), R9
- MOVQ 1(DX)(SI*1), R10
- MOVQ (DX)(R8*1), R11
- MOVQ 1(DX)(R8*1), R12
- SHLQ $0x10, R9
- IMULQ BX, R9
- SHRQ $0x32, R9
- SHLQ $0x20, R10
- IMULQ DI, R10
- SHRQ $0x34, R10
- SHLQ $0x10, R11
- IMULQ BX, R11
- SHRQ $0x32, R11
- SHLQ $0x20, R12
- IMULQ DI, R12
- SHRQ $0x34, R12
- LEAQ 1(SI), DI
- LEAQ 1(R8), R13
- MOVL SI, 24(SP)(R9*4)
- MOVL R8, 24(SP)(R11*4)
- MOVL DI, 65560(SP)(R10*4)
- MOVL R13, 65560(SP)(R12*4)
- LEAQ 1(R8)(SI*1), DI
- SHRQ $0x01, DI
- ADDQ $0x01, SI
- SUBQ $0x01, R8
+ MOVQ $0x0000cf1bbcdcbf9b, SI
+ MOVQ $0x9e3779b1, R8
+ LEAQ 1(DI), DI
+ LEAQ -2(DX), R9
+ MOVQ (BX)(DI*1), R10
+ MOVQ 1(BX)(DI*1), R11
+ MOVQ (BX)(R9*1), R12
+ MOVQ 1(BX)(R9*1), R13
+ SHLQ $0x10, R10
+ IMULQ SI, R10
+ SHRQ $0x32, R10
+ SHLQ $0x20, R11
+ IMULQ R8, R11
+ SHRQ $0x34, R11
+ SHLQ $0x10, R12
+ IMULQ SI, R12
+ SHRQ $0x32, R12
+ SHLQ $0x20, R13
+ IMULQ R8, R13
+ SHRQ $0x34, R13
+ LEAQ 1(DI), R8
+ LEAQ 1(R9), R14
+ MOVL DI, (AX)(R10*4)
+ MOVL R9, (AX)(R12*4)
+ MOVL R8, 65536(AX)(R11*4)
+ MOVL R14, 65536(AX)(R13*4)
+ LEAQ 1(R9)(DI*1), R8
+ SHRQ $0x01, R8
+ ADDQ $0x01, DI
+ SUBQ $0x01, R9
index_loop_encodeSnappyBetterBlockAsm12B:
- CMPQ DI, R8
+ CMPQ R8, R9
JAE search_loop_encodeSnappyBetterBlockAsm12B
- MOVQ (DX)(SI*1), R9
- MOVQ (DX)(DI*1), R10
- SHLQ $0x10, R9
- IMULQ BX, R9
- SHRQ $0x32, R9
+ MOVQ (BX)(DI*1), R10
+ MOVQ (BX)(R8*1), R11
SHLQ $0x10, R10
- IMULQ BX, R10
+ IMULQ SI, R10
SHRQ $0x32, R10
- MOVL SI, 24(SP)(R9*4)
- MOVL DI, 24(SP)(R10*4)
- ADDQ $0x02, SI
+ SHLQ $0x10, R11
+ IMULQ SI, R11
+ SHRQ $0x32, R11
+ MOVL DI, (AX)(R10*4)
+ MOVL R8, (AX)(R11*4)
ADDQ $0x02, DI
+ ADDQ $0x02, R8
JMP index_loop_encodeSnappyBetterBlockAsm12B
emit_remainder_encodeSnappyBetterBlockAsm12B:
- MOVQ src_len+32(FP), CX
- SUBL 12(SP), CX
- LEAQ 3(AX)(CX*1), CX
- CMPQ CX, (SP)
+ MOVQ src_len+32(FP), AX
+ SUBL 12(SP), AX
+ LEAQ 3(CX)(AX*1), AX
+ CMPQ AX, (SP)
JB emit_remainder_ok_encodeSnappyBetterBlockAsm12B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
emit_remainder_ok_encodeSnappyBetterBlockAsm12B:
- MOVQ src_len+32(FP), CX
- MOVL 12(SP), BX
- CMPL BX, CX
+ MOVQ src_len+32(FP), AX
+ MOVL 12(SP), DX
+ CMPL DX, AX
JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm12B
- MOVL CX, SI
- MOVL CX, 12(SP)
- LEAQ (DX)(BX*1), CX
- SUBL BX, SI
+ MOVL AX, SI
+ MOVL AX, 12(SP)
+ LEAQ (BX)(DX*1), AX
+ SUBL DX, SI
LEAL -1(SI), DX
CMPL DX, $0x3c
JB one_byte_emit_remainder_encodeSnappyBetterBlockAsm12B
@@ -16660,26 +16678,26 @@ emit_remainder_ok_encodeSnappyBetterBlockAsm12B:
JB three_bytes_emit_remainder_encodeSnappyBetterBlockAsm12B
three_bytes_emit_remainder_encodeSnappyBetterBlockAsm12B:
- MOVB $0xf4, (AX)
- MOVW DX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW DX, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12B
two_bytes_emit_remainder_encodeSnappyBetterBlockAsm12B:
- MOVB $0xf0, (AX)
- MOVB DL, 1(AX)
- ADDQ $0x02, AX
+ MOVB $0xf0, (CX)
+ MOVB DL, 1(CX)
+ ADDQ $0x02, CX
CMPL DX, $0x40
JB memmove_emit_remainder_encodeSnappyBetterBlockAsm12B
JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12B
one_byte_emit_remainder_encodeSnappyBetterBlockAsm12B:
SHLB $0x02, DL
- MOVB DL, (AX)
- ADDQ $0x01, AX
+ MOVB DL, (CX)
+ ADDQ $0x01, CX
memmove_emit_remainder_encodeSnappyBetterBlockAsm12B:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveShort
@@ -16695,73 +16713,73 @@ memmove_emit_remainder_encodeSnappyBetterBlockAsm12B:
JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_33through64
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_1or2:
- MOVB (CX), SI
- MOVB -1(CX)(BX*1), CL
- MOVB SI, (AX)
- MOVB CL, -1(AX)(BX*1)
+ MOVB (AX), SI
+ MOVB -1(AX)(BX*1), AL
+ MOVB SI, (CX)
+ MOVB AL, -1(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_3:
- MOVW (CX), SI
- MOVB 2(CX), CL
- MOVW SI, (AX)
- MOVB CL, 2(AX)
+ MOVW (AX), SI
+ MOVB 2(AX), AL
+ MOVW SI, (CX)
+ MOVB AL, 2(CX)
JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_4through7:
- MOVL (CX), SI
- MOVL -4(CX)(BX*1), CX
- MOVL SI, (AX)
- MOVL CX, -4(AX)(BX*1)
+ MOVL (AX), SI
+ MOVL -4(AX)(BX*1), AX
+ MOVL SI, (CX)
+ MOVL AX, -4(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_8through16:
- MOVQ (CX), SI
- MOVQ -8(CX)(BX*1), CX
- MOVQ SI, (AX)
- MOVQ CX, -8(AX)(BX*1)
+ MOVQ (AX), SI
+ MOVQ -8(AX)(BX*1), AX
+ MOVQ SI, (CX)
+ MOVQ AX, -8(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_17through32:
- MOVOU (CX), X0
- MOVOU -16(CX)(BX*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(BX*1)
- JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B
-
-emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_33through64:
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU -16(AX)(BX*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_33through64:
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B:
- MOVQ DX, AX
+ MOVQ DX, CX
JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm12B
memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12B:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveLong
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
MOVQ BX, DI
SHRQ $0x05, DI
- MOVQ AX, SI
+ MOVQ CX, SI
ANDL $0x0000001f, SI
MOVQ $0x00000040, R8
SUBQ SI, R8
DECQ DI
JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32
- LEAQ -32(CX)(R8*1), SI
- LEAQ -32(AX)(R8*1), R9
+ LEAQ -32(AX)(R8*1), SI
+ LEAQ -32(CX)(R8*1), R9
emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_big_loop_back:
MOVOU (SI), X4
@@ -16775,463 +16793,464 @@ emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_big_loop
JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_big_loop_back
emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32:
- MOVOU -32(CX)(R8*1), X4
- MOVOU -16(CX)(R8*1), X5
- MOVOA X4, -32(AX)(R8*1)
- MOVOA X5, -16(AX)(R8*1)
+ MOVOU -32(AX)(R8*1), X4
+ MOVOU -16(AX)(R8*1), X5
+ MOVOA X4, -32(CX)(R8*1)
+ MOVOA X5, -16(CX)(R8*1)
ADDQ $0x20, R8
CMPQ BX, R8
JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
- MOVQ DX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
+ MOVQ DX, CX
emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm12B:
- MOVQ dst_base+0(FP), CX
- SUBQ CX, AX
- MOVQ AX, ret+48(FP)
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, CX
+ MOVQ CX, ret+56(FP)
RET
-// func encodeSnappyBetterBlockAsm10B(dst []byte, src []byte) int
+// func encodeSnappyBetterBlockAsm10B(dst []byte, src []byte, tmp *[20480]byte) int
// Requires: BMI, SSE2
-TEXT ·encodeSnappyBetterBlockAsm10B(SB), $20504-56
- MOVQ dst_base+0(FP), AX
- MOVQ $0x000000a0, CX
- LEAQ 24(SP), DX
+TEXT ·encodeSnappyBetterBlockAsm10B(SB), $24-64
+ MOVQ tmp+48(FP), AX
+ MOVQ dst_base+0(FP), CX
+ MOVQ $0x000000a0, DX
+ MOVQ AX, BX
PXOR X0, X0
zero_loop_encodeSnappyBetterBlockAsm10B:
- MOVOU X0, (DX)
- MOVOU X0, 16(DX)
- MOVOU X0, 32(DX)
- MOVOU X0, 48(DX)
- MOVOU X0, 64(DX)
- MOVOU X0, 80(DX)
- MOVOU X0, 96(DX)
- MOVOU X0, 112(DX)
- ADDQ $0x80, DX
- DECQ CX
+ MOVOU X0, (BX)
+ MOVOU X0, 16(BX)
+ MOVOU X0, 32(BX)
+ MOVOU X0, 48(BX)
+ MOVOU X0, 64(BX)
+ MOVOU X0, 80(BX)
+ MOVOU X0, 96(BX)
+ MOVOU X0, 112(BX)
+ ADDQ $0x80, BX
+ DECQ DX
JNZ zero_loop_encodeSnappyBetterBlockAsm10B
MOVL $0x00000000, 12(SP)
- MOVQ src_len+32(FP), CX
- LEAQ -9(CX), DX
- LEAQ -8(CX), BX
- MOVL BX, 8(SP)
- SHRQ $0x05, CX
- SUBL CX, DX
- LEAQ (AX)(DX*1), DX
- MOVQ DX, (SP)
- MOVL $0x00000001, CX
+ MOVQ src_len+32(FP), DX
+ LEAQ -9(DX), BX
+ LEAQ -8(DX), SI
+ MOVL SI, 8(SP)
+ SHRQ $0x05, DX
+ SUBL DX, BX
+ LEAQ (CX)(BX*1), BX
+ MOVQ BX, (SP)
+ MOVL $0x00000001, DX
MOVL $0x00000000, 16(SP)
- MOVQ src_base+24(FP), DX
+ MOVQ src_base+24(FP), BX
search_loop_encodeSnappyBetterBlockAsm10B:
- MOVL CX, BX
- SUBL 12(SP), BX
- SHRL $0x05, BX
- LEAL 1(CX)(BX*1), BX
- CMPL BX, 8(SP)
+ MOVL DX, SI
+ SUBL 12(SP), SI
+ SHRL $0x05, SI
+ LEAL 1(DX)(SI*1), SI
+ CMPL SI, 8(SP)
JAE emit_remainder_encodeSnappyBetterBlockAsm10B
- MOVQ (DX)(CX*1), SI
- MOVL BX, 20(SP)
- MOVQ $0x0000cf1bbcdcbf9b, R8
- MOVQ $0x9e3779b1, BX
- MOVQ SI, R9
- MOVQ SI, R10
- SHLQ $0x10, R9
- IMULQ R8, R9
- SHRQ $0x34, R9
- SHLQ $0x20, R10
- IMULQ BX, R10
- SHRQ $0x36, R10
- MOVL 24(SP)(R9*4), BX
- MOVL 16408(SP)(R10*4), DI
- MOVL CX, 24(SP)(R9*4)
- MOVL CX, 16408(SP)(R10*4)
- MOVQ (DX)(BX*1), R9
- MOVQ (DX)(DI*1), R10
- CMPQ R9, SI
+ MOVQ (BX)(DX*1), DI
+ MOVL SI, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R9
+ MOVQ $0x9e3779b1, SI
+ MOVQ DI, R10
+ MOVQ DI, R11
+ SHLQ $0x10, R10
+ IMULQ R9, R10
+ SHRQ $0x34, R10
+ SHLQ $0x20, R11
+ IMULQ SI, R11
+ SHRQ $0x36, R11
+ MOVL (AX)(R10*4), SI
+ MOVL 16384(AX)(R11*4), R8
+ MOVL DX, (AX)(R10*4)
+ MOVL DX, 16384(AX)(R11*4)
+ MOVQ (BX)(SI*1), R10
+ MOVQ (BX)(R8*1), R11
+ CMPQ R10, DI
JEQ candidate_match_encodeSnappyBetterBlockAsm10B
- CMPQ R10, SI
+ CMPQ R11, DI
JNE no_short_found_encodeSnappyBetterBlockAsm10B
- MOVL DI, BX
+ MOVL R8, SI
JMP candidate_match_encodeSnappyBetterBlockAsm10B
no_short_found_encodeSnappyBetterBlockAsm10B:
- CMPL R9, SI
+ CMPL R10, DI
JEQ candidate_match_encodeSnappyBetterBlockAsm10B
- CMPL R10, SI
+ CMPL R11, DI
JEQ candidateS_match_encodeSnappyBetterBlockAsm10B
- MOVL 20(SP), CX
+ MOVL 20(SP), DX
JMP search_loop_encodeSnappyBetterBlockAsm10B
candidateS_match_encodeSnappyBetterBlockAsm10B:
- SHRQ $0x08, SI
- MOVQ SI, R9
- SHLQ $0x10, R9
- IMULQ R8, R9
- SHRQ $0x34, R9
- MOVL 24(SP)(R9*4), BX
- INCL CX
- MOVL CX, 24(SP)(R9*4)
- CMPL (DX)(BX*1), SI
+ SHRQ $0x08, DI
+ MOVQ DI, R10
+ SHLQ $0x10, R10
+ IMULQ R9, R10
+ SHRQ $0x34, R10
+ MOVL (AX)(R10*4), SI
+ INCL DX
+ MOVL DX, (AX)(R10*4)
+ CMPL (BX)(SI*1), DI
JEQ candidate_match_encodeSnappyBetterBlockAsm10B
- DECL CX
- MOVL DI, BX
+ DECL DX
+ MOVL R8, SI
candidate_match_encodeSnappyBetterBlockAsm10B:
- MOVL 12(SP), SI
- TESTL BX, BX
+ MOVL 12(SP), DI
+ TESTL SI, SI
JZ match_extend_back_end_encodeSnappyBetterBlockAsm10B
match_extend_back_loop_encodeSnappyBetterBlockAsm10B:
- CMPL CX, SI
+ CMPL DX, DI
JBE match_extend_back_end_encodeSnappyBetterBlockAsm10B
- MOVB -1(DX)(BX*1), DI
- MOVB -1(DX)(CX*1), R8
- CMPB DI, R8
+ MOVB -1(BX)(SI*1), R8
+ MOVB -1(BX)(DX*1), R9
+ CMPB R8, R9
JNE match_extend_back_end_encodeSnappyBetterBlockAsm10B
- LEAL -1(CX), CX
- DECL BX
+ LEAL -1(DX), DX
+ DECL SI
JZ match_extend_back_end_encodeSnappyBetterBlockAsm10B
JMP match_extend_back_loop_encodeSnappyBetterBlockAsm10B
match_extend_back_end_encodeSnappyBetterBlockAsm10B:
- MOVL CX, SI
- SUBL 12(SP), SI
- LEAQ 3(AX)(SI*1), SI
- CMPQ SI, (SP)
+ MOVL DX, DI
+ SUBL 12(SP), DI
+ LEAQ 3(CX)(DI*1), DI
+ CMPQ DI, (SP)
JB match_dst_size_check_encodeSnappyBetterBlockAsm10B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_dst_size_check_encodeSnappyBetterBlockAsm10B:
- MOVL CX, SI
- ADDL $0x04, CX
- ADDL $0x04, BX
- MOVQ src_len+32(FP), DI
- SUBL CX, DI
- LEAQ (DX)(CX*1), R8
- LEAQ (DX)(BX*1), R9
+ MOVL DX, DI
+ ADDL $0x04, DX
+ ADDL $0x04, SI
+ MOVQ src_len+32(FP), R8
+ SUBL DX, R8
+ LEAQ (BX)(DX*1), R9
+ LEAQ (BX)(SI*1), R10
// matchLen
- XORL R11, R11
+ XORL R12, R12
matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm10B:
- CMPL DI, $0x10
+ CMPL R8, $0x10
JB matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm10B
- MOVQ (R8)(R11*1), R10
- MOVQ 8(R8)(R11*1), R12
- XORQ (R9)(R11*1), R10
+ MOVQ (R9)(R12*1), R11
+ MOVQ 8(R9)(R12*1), R13
+ XORQ (R10)(R12*1), R11
JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm10B
- XORQ 8(R9)(R11*1), R12
+ XORQ 8(R10)(R12*1), R13
JNZ matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm10B
- LEAL -16(DI), DI
- LEAL 16(R11), R11
+ LEAL -16(R8), R8
+ LEAL 16(R12), R12
JMP matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm10B
matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm10B:
#ifdef GOAMD64_v3
- TZCNTQ R12, R12
+ TZCNTQ R13, R13
#else
- BSFQ R12, R12
+ BSFQ R13, R13
#endif
- SARQ $0x03, R12
- LEAL 8(R11)(R12*1), R11
+ SARQ $0x03, R13
+ LEAL 8(R12)(R13*1), R12
JMP match_nolit_end_encodeSnappyBetterBlockAsm10B
matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm10B:
- CMPL DI, $0x08
+ CMPL R8, $0x08
JB matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm10B
- MOVQ (R8)(R11*1), R10
- XORQ (R9)(R11*1), R10
+ MOVQ (R9)(R12*1), R11
+ XORQ (R10)(R12*1), R11
JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm10B
- LEAL -8(DI), DI
- LEAL 8(R11), R11
+ LEAL -8(R8), R8
+ LEAL 8(R12), R12
JMP matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm10B
matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm10B:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL (R11)(R10*1), R11
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
JMP match_nolit_end_encodeSnappyBetterBlockAsm10B
matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm10B:
- CMPL DI, $0x04
+ CMPL R8, $0x04
JB matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm10B
- MOVL (R8)(R11*1), R10
- CMPL (R9)(R11*1), R10
+ MOVL (R9)(R12*1), R11
+ CMPL (R10)(R12*1), R11
JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm10B
- LEAL -4(DI), DI
- LEAL 4(R11), R11
+ LEAL -4(R8), R8
+ LEAL 4(R12), R12
matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm10B:
- CMPL DI, $0x01
+ CMPL R8, $0x01
JE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm10B
JB match_nolit_end_encodeSnappyBetterBlockAsm10B
- MOVW (R8)(R11*1), R10
- CMPW (R9)(R11*1), R10
+ MOVW (R9)(R12*1), R11
+ CMPW (R10)(R12*1), R11
JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm10B
- LEAL 2(R11), R11
- SUBL $0x02, DI
+ LEAL 2(R12), R12
+ SUBL $0x02, R8
JZ match_nolit_end_encodeSnappyBetterBlockAsm10B
matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm10B:
- MOVB (R8)(R11*1), R10
- CMPB (R9)(R11*1), R10
+ MOVB (R9)(R12*1), R11
+ CMPB (R10)(R12*1), R11
JNE match_nolit_end_encodeSnappyBetterBlockAsm10B
- LEAL 1(R11), R11
+ LEAL 1(R12), R12
match_nolit_end_encodeSnappyBetterBlockAsm10B:
- MOVL CX, DI
- SUBL BX, DI
+ MOVL DX, R8
+ SUBL SI, R8
// Check if repeat
- MOVL DI, 16(SP)
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL R8, 16(SP)
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm10B
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R9
- SUBL BX, R8
- LEAL -1(R8), BX
- CMPL BX, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R10
+ SUBL SI, R9
+ LEAL -1(R9), SI
+ CMPL SI, $0x3c
JB one_byte_match_emit_encodeSnappyBetterBlockAsm10B
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_match_emit_encodeSnappyBetterBlockAsm10B
JB three_bytes_match_emit_encodeSnappyBetterBlockAsm10B
three_bytes_match_emit_encodeSnappyBetterBlockAsm10B:
- MOVB $0xf4, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm10B
two_bytes_match_emit_encodeSnappyBetterBlockAsm10B:
- MOVB $0xf0, (AX)
- MOVB BL, 1(AX)
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_match_emit_encodeSnappyBetterBlockAsm10B
JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm10B
one_byte_match_emit_encodeSnappyBetterBlockAsm10B:
- SHLB $0x02, BL
- MOVB BL, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
memmove_match_emit_encodeSnappyBetterBlockAsm10B:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveShort
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_33through64
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8:
- MOVQ (R9), R10
- MOVQ R10, (AX)
+ MOVQ (R10), R11
+ MOVQ R11, (CX)
JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8through16:
- MOVQ (R9), R10
- MOVQ -8(R9)(R8*1), R9
- MOVQ R10, (AX)
- MOVQ R9, -8(AX)(R8*1)
+ MOVQ (R10), R11
+ MOVQ -8(R10)(R9*1), R10
+ MOVQ R11, (CX)
+ MOVQ R10, -8(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_17through32:
- MOVOU (R9), X0
- MOVOU -16(R9)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU -16(R10)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_33through64:
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B:
- MOVQ BX, AX
+ MOVQ SI, CX
JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm10B
memmove_long_match_emit_encodeSnappyBetterBlockAsm10B:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveLong
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVQ R8, R12
- SHRQ $0x05, R12
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R13
- SUBQ R10, R13
- DECQ R12
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVQ R9, R13
+ SHRQ $0x05, R13
+ MOVQ CX, R11
+ ANDL $0x0000001f, R11
+ MOVQ $0x00000040, R14
+ SUBQ R11, R14
+ DECQ R13
JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32
- LEAQ -32(R9)(R13*1), R10
- LEAQ -32(AX)(R13*1), R14
+ LEAQ -32(R10)(R14*1), R11
+ LEAQ -32(CX)(R14*1), R15
emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R14)
- MOVOA X5, 16(R14)
+ MOVOU (R11), X4
+ MOVOU 16(R11), X5
+ MOVOA X4, (R15)
+ MOVOA X5, 16(R15)
+ ADDQ $0x20, R15
+ ADDQ $0x20, R11
ADDQ $0x20, R14
- ADDQ $0x20, R10
- ADDQ $0x20, R13
- DECQ R12
+ DECQ R13
JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32:
- MOVOU -32(R9)(R13*1), X4
- MOVOU -16(R9)(R13*1), X5
- MOVOA X4, -32(AX)(R13*1)
- MOVOA X5, -16(AX)(R13*1)
- ADDQ $0x20, R13
- CMPQ R8, R13
+ MOVOU -32(R10)(R14*1), X4
+ MOVOU -16(R10)(R14*1), X5
+ MOVOA X4, -32(CX)(R14*1)
+ MOVOA X5, -16(CX)(R14*1)
+ ADDQ $0x20, R14
+ CMPQ R9, R14
JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ BX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ SI, CX
emit_literal_done_match_emit_encodeSnappyBetterBlockAsm10B:
- ADDL R11, CX
- ADDL $0x04, R11
- MOVL CX, 12(SP)
+ ADDL R12, DX
+ ADDL $0x04, R12
+ MOVL DX, 12(SP)
// emitCopy
two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm10B:
- CMPL R11, $0x40
+ CMPL R12, $0x40
JBE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm10B
- MOVB $0xee, (AX)
- MOVW DI, 1(AX)
- LEAL -60(R11), R11
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW R8, 1(CX)
+ LEAL -60(R12), R12
+ ADDQ $0x03, CX
JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm10B
two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm10B:
- MOVL R11, BX
- SHLL $0x02, BX
- CMPL R11, $0x0c
+ MOVL R12, SI
+ SHLL $0x02, SI
+ CMPL R12, $0x0c
JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm10B
- CMPL DI, $0x00000800
+ CMPL R8, $0x00000800
JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm10B
- LEAL -15(BX), BX
- MOVB DI, 1(AX)
- SHRL $0x08, DI
- SHLL $0x05, DI
- ORL DI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ LEAL -15(SI), SI
+ MOVB R8, 1(CX)
+ SHRL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm10B
emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm10B:
- LEAL -2(BX), BX
- MOVB BL, (AX)
- MOVW DI, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(SI), SI
+ MOVB SI, (CX)
+ MOVW R8, 1(CX)
+ ADDQ $0x03, CX
match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm10B:
- CMPL CX, 8(SP)
+ CMPL DX, 8(SP)
JAE emit_remainder_encodeSnappyBetterBlockAsm10B
- CMPQ AX, (SP)
+ CMPQ CX, (SP)
JB match_nolit_dst_ok_encodeSnappyBetterBlockAsm10B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_nolit_dst_ok_encodeSnappyBetterBlockAsm10B:
- MOVQ $0x0000cf1bbcdcbf9b, BX
- MOVQ $0x9e3779b1, DI
- LEAQ 1(SI), SI
- LEAQ -2(CX), R8
- MOVQ (DX)(SI*1), R9
- MOVQ 1(DX)(SI*1), R10
- MOVQ (DX)(R8*1), R11
- MOVQ 1(DX)(R8*1), R12
- SHLQ $0x10, R9
- IMULQ BX, R9
- SHRQ $0x34, R9
- SHLQ $0x20, R10
- IMULQ DI, R10
- SHRQ $0x36, R10
- SHLQ $0x10, R11
- IMULQ BX, R11
- SHRQ $0x34, R11
- SHLQ $0x20, R12
- IMULQ DI, R12
- SHRQ $0x36, R12
- LEAQ 1(SI), DI
- LEAQ 1(R8), R13
- MOVL SI, 24(SP)(R9*4)
- MOVL R8, 24(SP)(R11*4)
- MOVL DI, 16408(SP)(R10*4)
- MOVL R13, 16408(SP)(R12*4)
- LEAQ 1(R8)(SI*1), DI
- SHRQ $0x01, DI
- ADDQ $0x01, SI
- SUBQ $0x01, R8
+ MOVQ $0x0000cf1bbcdcbf9b, SI
+ MOVQ $0x9e3779b1, R8
+ LEAQ 1(DI), DI
+ LEAQ -2(DX), R9
+ MOVQ (BX)(DI*1), R10
+ MOVQ 1(BX)(DI*1), R11
+ MOVQ (BX)(R9*1), R12
+ MOVQ 1(BX)(R9*1), R13
+ SHLQ $0x10, R10
+ IMULQ SI, R10
+ SHRQ $0x34, R10
+ SHLQ $0x20, R11
+ IMULQ R8, R11
+ SHRQ $0x36, R11
+ SHLQ $0x10, R12
+ IMULQ SI, R12
+ SHRQ $0x34, R12
+ SHLQ $0x20, R13
+ IMULQ R8, R13
+ SHRQ $0x36, R13
+ LEAQ 1(DI), R8
+ LEAQ 1(R9), R14
+ MOVL DI, (AX)(R10*4)
+ MOVL R9, (AX)(R12*4)
+ MOVL R8, 16384(AX)(R11*4)
+ MOVL R14, 16384(AX)(R13*4)
+ LEAQ 1(R9)(DI*1), R8
+ SHRQ $0x01, R8
+ ADDQ $0x01, DI
+ SUBQ $0x01, R9
index_loop_encodeSnappyBetterBlockAsm10B:
- CMPQ DI, R8
+ CMPQ R8, R9
JAE search_loop_encodeSnappyBetterBlockAsm10B
- MOVQ (DX)(SI*1), R9
- MOVQ (DX)(DI*1), R10
- SHLQ $0x10, R9
- IMULQ BX, R9
- SHRQ $0x34, R9
+ MOVQ (BX)(DI*1), R10
+ MOVQ (BX)(R8*1), R11
SHLQ $0x10, R10
- IMULQ BX, R10
+ IMULQ SI, R10
SHRQ $0x34, R10
- MOVL SI, 24(SP)(R9*4)
- MOVL DI, 24(SP)(R10*4)
- ADDQ $0x02, SI
+ SHLQ $0x10, R11
+ IMULQ SI, R11
+ SHRQ $0x34, R11
+ MOVL DI, (AX)(R10*4)
+ MOVL R8, (AX)(R11*4)
ADDQ $0x02, DI
+ ADDQ $0x02, R8
JMP index_loop_encodeSnappyBetterBlockAsm10B
emit_remainder_encodeSnappyBetterBlockAsm10B:
- MOVQ src_len+32(FP), CX
- SUBL 12(SP), CX
- LEAQ 3(AX)(CX*1), CX
- CMPQ CX, (SP)
+ MOVQ src_len+32(FP), AX
+ SUBL 12(SP), AX
+ LEAQ 3(CX)(AX*1), AX
+ CMPQ AX, (SP)
JB emit_remainder_ok_encodeSnappyBetterBlockAsm10B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
emit_remainder_ok_encodeSnappyBetterBlockAsm10B:
- MOVQ src_len+32(FP), CX
- MOVL 12(SP), BX
- CMPL BX, CX
+ MOVQ src_len+32(FP), AX
+ MOVL 12(SP), DX
+ CMPL DX, AX
JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm10B
- MOVL CX, SI
- MOVL CX, 12(SP)
- LEAQ (DX)(BX*1), CX
- SUBL BX, SI
+ MOVL AX, SI
+ MOVL AX, 12(SP)
+ LEAQ (BX)(DX*1), AX
+ SUBL DX, SI
LEAL -1(SI), DX
CMPL DX, $0x3c
JB one_byte_emit_remainder_encodeSnappyBetterBlockAsm10B
@@ -17240,26 +17259,26 @@ emit_remainder_ok_encodeSnappyBetterBlockAsm10B:
JB three_bytes_emit_remainder_encodeSnappyBetterBlockAsm10B
three_bytes_emit_remainder_encodeSnappyBetterBlockAsm10B:
- MOVB $0xf4, (AX)
- MOVW DX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW DX, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10B
two_bytes_emit_remainder_encodeSnappyBetterBlockAsm10B:
- MOVB $0xf0, (AX)
- MOVB DL, 1(AX)
- ADDQ $0x02, AX
+ MOVB $0xf0, (CX)
+ MOVB DL, 1(CX)
+ ADDQ $0x02, CX
CMPL DX, $0x40
JB memmove_emit_remainder_encodeSnappyBetterBlockAsm10B
JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10B
one_byte_emit_remainder_encodeSnappyBetterBlockAsm10B:
SHLB $0x02, DL
- MOVB DL, (AX)
- ADDQ $0x01, AX
+ MOVB DL, (CX)
+ ADDQ $0x01, CX
memmove_emit_remainder_encodeSnappyBetterBlockAsm10B:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveShort
@@ -17275,73 +17294,73 @@ memmove_emit_remainder_encodeSnappyBetterBlockAsm10B:
JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_33through64
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_1or2:
- MOVB (CX), SI
- MOVB -1(CX)(BX*1), CL
- MOVB SI, (AX)
- MOVB CL, -1(AX)(BX*1)
+ MOVB (AX), SI
+ MOVB -1(AX)(BX*1), AL
+ MOVB SI, (CX)
+ MOVB AL, -1(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_3:
- MOVW (CX), SI
- MOVB 2(CX), CL
- MOVW SI, (AX)
- MOVB CL, 2(AX)
+ MOVW (AX), SI
+ MOVB 2(AX), AL
+ MOVW SI, (CX)
+ MOVB AL, 2(CX)
JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_4through7:
- MOVL (CX), SI
- MOVL -4(CX)(BX*1), CX
- MOVL SI, (AX)
- MOVL CX, -4(AX)(BX*1)
+ MOVL (AX), SI
+ MOVL -4(AX)(BX*1), AX
+ MOVL SI, (CX)
+ MOVL AX, -4(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_8through16:
- MOVQ (CX), SI
- MOVQ -8(CX)(BX*1), CX
- MOVQ SI, (AX)
- MOVQ CX, -8(AX)(BX*1)
+ MOVQ (AX), SI
+ MOVQ -8(AX)(BX*1), AX
+ MOVQ SI, (CX)
+ MOVQ AX, -8(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_17through32:
- MOVOU (CX), X0
- MOVOU -16(CX)(BX*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU -16(AX)(BX*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_33through64:
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B:
- MOVQ DX, AX
+ MOVQ DX, CX
JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm10B
memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10B:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveLong
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
MOVQ BX, DI
SHRQ $0x05, DI
- MOVQ AX, SI
+ MOVQ CX, SI
ANDL $0x0000001f, SI
MOVQ $0x00000040, R8
SUBQ SI, R8
DECQ DI
JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32
- LEAQ -32(CX)(R8*1), SI
- LEAQ -32(AX)(R8*1), R9
+ LEAQ -32(AX)(R8*1), SI
+ LEAQ -32(CX)(R8*1), R9
emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_big_loop_back:
MOVOU (SI), X4
@@ -17355,461 +17374,462 @@ emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_big_loop
JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_big_loop_back
emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32:
- MOVOU -32(CX)(R8*1), X4
- MOVOU -16(CX)(R8*1), X5
- MOVOA X4, -32(AX)(R8*1)
- MOVOA X5, -16(AX)(R8*1)
+ MOVOU -32(AX)(R8*1), X4
+ MOVOU -16(AX)(R8*1), X5
+ MOVOA X4, -32(CX)(R8*1)
+ MOVOA X5, -16(CX)(R8*1)
ADDQ $0x20, R8
CMPQ BX, R8
JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
- MOVQ DX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
+ MOVQ DX, CX
emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm10B:
- MOVQ dst_base+0(FP), CX
- SUBQ CX, AX
- MOVQ AX, ret+48(FP)
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, CX
+ MOVQ CX, ret+56(FP)
RET
-// func encodeSnappyBetterBlockAsm8B(dst []byte, src []byte) int
+// func encodeSnappyBetterBlockAsm8B(dst []byte, src []byte, tmp *[5120]byte) int
// Requires: BMI, SSE2
-TEXT ·encodeSnappyBetterBlockAsm8B(SB), $5144-56
- MOVQ dst_base+0(FP), AX
- MOVQ $0x00000028, CX
- LEAQ 24(SP), DX
+TEXT ·encodeSnappyBetterBlockAsm8B(SB), $24-64
+ MOVQ tmp+48(FP), AX
+ MOVQ dst_base+0(FP), CX
+ MOVQ $0x00000028, DX
+ MOVQ AX, BX
PXOR X0, X0
zero_loop_encodeSnappyBetterBlockAsm8B:
- MOVOU X0, (DX)
- MOVOU X0, 16(DX)
- MOVOU X0, 32(DX)
- MOVOU X0, 48(DX)
- MOVOU X0, 64(DX)
- MOVOU X0, 80(DX)
- MOVOU X0, 96(DX)
- MOVOU X0, 112(DX)
- ADDQ $0x80, DX
- DECQ CX
+ MOVOU X0, (BX)
+ MOVOU X0, 16(BX)
+ MOVOU X0, 32(BX)
+ MOVOU X0, 48(BX)
+ MOVOU X0, 64(BX)
+ MOVOU X0, 80(BX)
+ MOVOU X0, 96(BX)
+ MOVOU X0, 112(BX)
+ ADDQ $0x80, BX
+ DECQ DX
JNZ zero_loop_encodeSnappyBetterBlockAsm8B
MOVL $0x00000000, 12(SP)
- MOVQ src_len+32(FP), CX
- LEAQ -9(CX), DX
- LEAQ -8(CX), BX
- MOVL BX, 8(SP)
- SHRQ $0x05, CX
- SUBL CX, DX
- LEAQ (AX)(DX*1), DX
- MOVQ DX, (SP)
- MOVL $0x00000001, CX
+ MOVQ src_len+32(FP), DX
+ LEAQ -9(DX), BX
+ LEAQ -8(DX), SI
+ MOVL SI, 8(SP)
+ SHRQ $0x05, DX
+ SUBL DX, BX
+ LEAQ (CX)(BX*1), BX
+ MOVQ BX, (SP)
+ MOVL $0x00000001, DX
MOVL $0x00000000, 16(SP)
- MOVQ src_base+24(FP), DX
+ MOVQ src_base+24(FP), BX
search_loop_encodeSnappyBetterBlockAsm8B:
- MOVL CX, BX
- SUBL 12(SP), BX
- SHRL $0x04, BX
- LEAL 1(CX)(BX*1), BX
- CMPL BX, 8(SP)
+ MOVL DX, SI
+ SUBL 12(SP), SI
+ SHRL $0x04, SI
+ LEAL 1(DX)(SI*1), SI
+ CMPL SI, 8(SP)
JAE emit_remainder_encodeSnappyBetterBlockAsm8B
- MOVQ (DX)(CX*1), SI
- MOVL BX, 20(SP)
- MOVQ $0x0000cf1bbcdcbf9b, R8
- MOVQ $0x9e3779b1, BX
- MOVQ SI, R9
- MOVQ SI, R10
- SHLQ $0x10, R9
- IMULQ R8, R9
- SHRQ $0x36, R9
- SHLQ $0x20, R10
- IMULQ BX, R10
- SHRQ $0x38, R10
- MOVL 24(SP)(R9*4), BX
- MOVL 4120(SP)(R10*4), DI
- MOVL CX, 24(SP)(R9*4)
- MOVL CX, 4120(SP)(R10*4)
- MOVQ (DX)(BX*1), R9
- MOVQ (DX)(DI*1), R10
- CMPQ R9, SI
+ MOVQ (BX)(DX*1), DI
+ MOVL SI, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R9
+ MOVQ $0x9e3779b1, SI
+ MOVQ DI, R10
+ MOVQ DI, R11
+ SHLQ $0x10, R10
+ IMULQ R9, R10
+ SHRQ $0x36, R10
+ SHLQ $0x20, R11
+ IMULQ SI, R11
+ SHRQ $0x38, R11
+ MOVL (AX)(R10*4), SI
+ MOVL 4096(AX)(R11*4), R8
+ MOVL DX, (AX)(R10*4)
+ MOVL DX, 4096(AX)(R11*4)
+ MOVQ (BX)(SI*1), R10
+ MOVQ (BX)(R8*1), R11
+ CMPQ R10, DI
JEQ candidate_match_encodeSnappyBetterBlockAsm8B
- CMPQ R10, SI
+ CMPQ R11, DI
JNE no_short_found_encodeSnappyBetterBlockAsm8B
- MOVL DI, BX
+ MOVL R8, SI
JMP candidate_match_encodeSnappyBetterBlockAsm8B
no_short_found_encodeSnappyBetterBlockAsm8B:
- CMPL R9, SI
+ CMPL R10, DI
JEQ candidate_match_encodeSnappyBetterBlockAsm8B
- CMPL R10, SI
+ CMPL R11, DI
JEQ candidateS_match_encodeSnappyBetterBlockAsm8B
- MOVL 20(SP), CX
+ MOVL 20(SP), DX
JMP search_loop_encodeSnappyBetterBlockAsm8B
candidateS_match_encodeSnappyBetterBlockAsm8B:
- SHRQ $0x08, SI
- MOVQ SI, R9
- SHLQ $0x10, R9
- IMULQ R8, R9
- SHRQ $0x36, R9
- MOVL 24(SP)(R9*4), BX
- INCL CX
- MOVL CX, 24(SP)(R9*4)
- CMPL (DX)(BX*1), SI
+ SHRQ $0x08, DI
+ MOVQ DI, R10
+ SHLQ $0x10, R10
+ IMULQ R9, R10
+ SHRQ $0x36, R10
+ MOVL (AX)(R10*4), SI
+ INCL DX
+ MOVL DX, (AX)(R10*4)
+ CMPL (BX)(SI*1), DI
JEQ candidate_match_encodeSnappyBetterBlockAsm8B
- DECL CX
- MOVL DI, BX
+ DECL DX
+ MOVL R8, SI
candidate_match_encodeSnappyBetterBlockAsm8B:
- MOVL 12(SP), SI
- TESTL BX, BX
+ MOVL 12(SP), DI
+ TESTL SI, SI
JZ match_extend_back_end_encodeSnappyBetterBlockAsm8B
match_extend_back_loop_encodeSnappyBetterBlockAsm8B:
- CMPL CX, SI
+ CMPL DX, DI
JBE match_extend_back_end_encodeSnappyBetterBlockAsm8B
- MOVB -1(DX)(BX*1), DI
- MOVB -1(DX)(CX*1), R8
- CMPB DI, R8
+ MOVB -1(BX)(SI*1), R8
+ MOVB -1(BX)(DX*1), R9
+ CMPB R8, R9
JNE match_extend_back_end_encodeSnappyBetterBlockAsm8B
- LEAL -1(CX), CX
- DECL BX
+ LEAL -1(DX), DX
+ DECL SI
JZ match_extend_back_end_encodeSnappyBetterBlockAsm8B
JMP match_extend_back_loop_encodeSnappyBetterBlockAsm8B
match_extend_back_end_encodeSnappyBetterBlockAsm8B:
- MOVL CX, SI
- SUBL 12(SP), SI
- LEAQ 3(AX)(SI*1), SI
- CMPQ SI, (SP)
+ MOVL DX, DI
+ SUBL 12(SP), DI
+ LEAQ 3(CX)(DI*1), DI
+ CMPQ DI, (SP)
JB match_dst_size_check_encodeSnappyBetterBlockAsm8B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_dst_size_check_encodeSnappyBetterBlockAsm8B:
- MOVL CX, SI
- ADDL $0x04, CX
- ADDL $0x04, BX
- MOVQ src_len+32(FP), DI
- SUBL CX, DI
- LEAQ (DX)(CX*1), R8
- LEAQ (DX)(BX*1), R9
+ MOVL DX, DI
+ ADDL $0x04, DX
+ ADDL $0x04, SI
+ MOVQ src_len+32(FP), R8
+ SUBL DX, R8
+ LEAQ (BX)(DX*1), R9
+ LEAQ (BX)(SI*1), R10
// matchLen
- XORL R11, R11
+ XORL R12, R12
matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm8B:
- CMPL DI, $0x10
+ CMPL R8, $0x10
JB matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm8B
- MOVQ (R8)(R11*1), R10
- MOVQ 8(R8)(R11*1), R12
- XORQ (R9)(R11*1), R10
+ MOVQ (R9)(R12*1), R11
+ MOVQ 8(R9)(R12*1), R13
+ XORQ (R10)(R12*1), R11
JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm8B
- XORQ 8(R9)(R11*1), R12
+ XORQ 8(R10)(R12*1), R13
JNZ matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm8B
- LEAL -16(DI), DI
- LEAL 16(R11), R11
+ LEAL -16(R8), R8
+ LEAL 16(R12), R12
JMP matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm8B
matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm8B:
#ifdef GOAMD64_v3
- TZCNTQ R12, R12
+ TZCNTQ R13, R13
#else
- BSFQ R12, R12
+ BSFQ R13, R13
#endif
- SARQ $0x03, R12
- LEAL 8(R11)(R12*1), R11
+ SARQ $0x03, R13
+ LEAL 8(R12)(R13*1), R12
JMP match_nolit_end_encodeSnappyBetterBlockAsm8B
matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm8B:
- CMPL DI, $0x08
+ CMPL R8, $0x08
JB matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm8B
- MOVQ (R8)(R11*1), R10
- XORQ (R9)(R11*1), R10
+ MOVQ (R9)(R12*1), R11
+ XORQ (R10)(R12*1), R11
JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm8B
- LEAL -8(DI), DI
- LEAL 8(R11), R11
+ LEAL -8(R8), R8
+ LEAL 8(R12), R12
JMP matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm8B
matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm8B:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL (R11)(R10*1), R11
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
JMP match_nolit_end_encodeSnappyBetterBlockAsm8B
matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm8B:
- CMPL DI, $0x04
+ CMPL R8, $0x04
JB matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm8B
- MOVL (R8)(R11*1), R10
- CMPL (R9)(R11*1), R10
- JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm8B
- LEAL -4(DI), DI
- LEAL 4(R11), R11
+ MOVL (R9)(R12*1), R11
+ CMPL (R10)(R12*1), R11
+ JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm8B
+ LEAL -4(R8), R8
+ LEAL 4(R12), R12
matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm8B:
- CMPL DI, $0x01
+ CMPL R8, $0x01
JE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm8B
JB match_nolit_end_encodeSnappyBetterBlockAsm8B
- MOVW (R8)(R11*1), R10
- CMPW (R9)(R11*1), R10
+ MOVW (R9)(R12*1), R11
+ CMPW (R10)(R12*1), R11
JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm8B
- LEAL 2(R11), R11
- SUBL $0x02, DI
+ LEAL 2(R12), R12
+ SUBL $0x02, R8
JZ match_nolit_end_encodeSnappyBetterBlockAsm8B
matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm8B:
- MOVB (R8)(R11*1), R10
- CMPB (R9)(R11*1), R10
+ MOVB (R9)(R12*1), R11
+ CMPB (R10)(R12*1), R11
JNE match_nolit_end_encodeSnappyBetterBlockAsm8B
- LEAL 1(R11), R11
+ LEAL 1(R12), R12
match_nolit_end_encodeSnappyBetterBlockAsm8B:
- MOVL CX, DI
- SUBL BX, DI
+ MOVL DX, R8
+ SUBL SI, R8
// Check if repeat
- MOVL DI, 16(SP)
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL R8, 16(SP)
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm8B
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R9
- SUBL BX, R8
- LEAL -1(R8), BX
- CMPL BX, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R10
+ SUBL SI, R9
+ LEAL -1(R9), SI
+ CMPL SI, $0x3c
JB one_byte_match_emit_encodeSnappyBetterBlockAsm8B
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_match_emit_encodeSnappyBetterBlockAsm8B
JB three_bytes_match_emit_encodeSnappyBetterBlockAsm8B
three_bytes_match_emit_encodeSnappyBetterBlockAsm8B:
- MOVB $0xf4, (AX)
- MOVW BX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm8B
two_bytes_match_emit_encodeSnappyBetterBlockAsm8B:
- MOVB $0xf0, (AX)
- MOVB BL, 1(AX)
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_match_emit_encodeSnappyBetterBlockAsm8B
JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm8B
one_byte_match_emit_encodeSnappyBetterBlockAsm8B:
- SHLB $0x02, BL
- MOVB BL, (AX)
- ADDQ $0x01, AX
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
memmove_match_emit_encodeSnappyBetterBlockAsm8B:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveShort
- CMPQ R8, $0x08
+ CMPQ R9, $0x08
JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8
- CMPQ R8, $0x10
+ CMPQ R9, $0x10
JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8through16
- CMPQ R8, $0x20
+ CMPQ R9, $0x20
JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_17through32
JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_33through64
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8:
- MOVQ (R9), R10
- MOVQ R10, (AX)
+ MOVQ (R10), R11
+ MOVQ R11, (CX)
JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8through16:
- MOVQ (R9), R10
- MOVQ -8(R9)(R8*1), R9
- MOVQ R10, (AX)
- MOVQ R9, -8(AX)(R8*1)
+ MOVQ (R10), R11
+ MOVQ -8(R10)(R9*1), R10
+ MOVQ R11, (CX)
+ MOVQ R10, -8(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_17through32:
- MOVOU (R9), X0
- MOVOU -16(R9)(R8*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU -16(R10)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_33through64:
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B:
- MOVQ BX, AX
+ MOVQ SI, CX
JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm8B
memmove_long_match_emit_encodeSnappyBetterBlockAsm8B:
- LEAQ (AX)(R8*1), BX
+ LEAQ (CX)(R9*1), SI
// genMemMoveLong
- MOVOU (R9), X0
- MOVOU 16(R9), X1
- MOVOU -32(R9)(R8*1), X2
- MOVOU -16(R9)(R8*1), X3
- MOVQ R8, R12
- SHRQ $0x05, R12
- MOVQ AX, R10
- ANDL $0x0000001f, R10
- MOVQ $0x00000040, R13
- SUBQ R10, R13
- DECQ R12
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVQ R9, R13
+ SHRQ $0x05, R13
+ MOVQ CX, R11
+ ANDL $0x0000001f, R11
+ MOVQ $0x00000040, R14
+ SUBQ R11, R14
+ DECQ R13
JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32
- LEAQ -32(R9)(R13*1), R10
- LEAQ -32(AX)(R13*1), R14
+ LEAQ -32(R10)(R14*1), R11
+ LEAQ -32(CX)(R14*1), R15
emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_big_loop_back:
- MOVOU (R10), X4
- MOVOU 16(R10), X5
- MOVOA X4, (R14)
- MOVOA X5, 16(R14)
+ MOVOU (R11), X4
+ MOVOU 16(R11), X5
+ MOVOA X4, (R15)
+ MOVOA X5, 16(R15)
+ ADDQ $0x20, R15
+ ADDQ $0x20, R11
ADDQ $0x20, R14
- ADDQ $0x20, R10
- ADDQ $0x20, R13
- DECQ R12
+ DECQ R13
JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_big_loop_back
emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32:
- MOVOU -32(R9)(R13*1), X4
- MOVOU -16(R9)(R13*1), X5
- MOVOA X4, -32(AX)(R13*1)
- MOVOA X5, -16(AX)(R13*1)
- ADDQ $0x20, R13
- CMPQ R8, R13
+ MOVOU -32(R10)(R14*1), X4
+ MOVOU -16(R10)(R14*1), X5
+ MOVOA X4, -32(CX)(R14*1)
+ MOVOA X5, -16(CX)(R14*1)
+ ADDQ $0x20, R14
+ CMPQ R9, R14
JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(R8*1)
- MOVOU X3, -16(AX)(R8*1)
- MOVQ BX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ SI, CX
emit_literal_done_match_emit_encodeSnappyBetterBlockAsm8B:
- ADDL R11, CX
- ADDL $0x04, R11
- MOVL CX, 12(SP)
+ ADDL R12, DX
+ ADDL $0x04, R12
+ MOVL DX, 12(SP)
// emitCopy
two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm8B:
- CMPL R11, $0x40
+ CMPL R12, $0x40
JBE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm8B
- MOVB $0xee, (AX)
- MOVW DI, 1(AX)
- LEAL -60(R11), R11
- ADDQ $0x03, AX
+ MOVB $0xee, (CX)
+ MOVW R8, 1(CX)
+ LEAL -60(R12), R12
+ ADDQ $0x03, CX
JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm8B
two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm8B:
- MOVL R11, BX
- SHLL $0x02, BX
- CMPL R11, $0x0c
+ MOVL R12, SI
+ SHLL $0x02, SI
+ CMPL R12, $0x0c
JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm8B
- LEAL -15(BX), BX
- MOVB DI, 1(AX)
- SHRL $0x08, DI
- SHLL $0x05, DI
- ORL DI, BX
- MOVB BL, (AX)
- ADDQ $0x02, AX
+ LEAL -15(SI), SI
+ MOVB R8, 1(CX)
+ SHRL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm8B
emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm8B:
- LEAL -2(BX), BX
- MOVB BL, (AX)
- MOVW DI, 1(AX)
- ADDQ $0x03, AX
+ LEAL -2(SI), SI
+ MOVB SI, (CX)
+ MOVW R8, 1(CX)
+ ADDQ $0x03, CX
match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm8B:
- CMPL CX, 8(SP)
+ CMPL DX, 8(SP)
JAE emit_remainder_encodeSnappyBetterBlockAsm8B
- CMPQ AX, (SP)
+ CMPQ CX, (SP)
JB match_nolit_dst_ok_encodeSnappyBetterBlockAsm8B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
match_nolit_dst_ok_encodeSnappyBetterBlockAsm8B:
- MOVQ $0x0000cf1bbcdcbf9b, BX
- MOVQ $0x9e3779b1, DI
- LEAQ 1(SI), SI
- LEAQ -2(CX), R8
- MOVQ (DX)(SI*1), R9
- MOVQ 1(DX)(SI*1), R10
- MOVQ (DX)(R8*1), R11
- MOVQ 1(DX)(R8*1), R12
- SHLQ $0x10, R9
- IMULQ BX, R9
- SHRQ $0x36, R9
- SHLQ $0x20, R10
- IMULQ DI, R10
- SHRQ $0x38, R10
- SHLQ $0x10, R11
- IMULQ BX, R11
- SHRQ $0x36, R11
- SHLQ $0x20, R12
- IMULQ DI, R12
- SHRQ $0x38, R12
- LEAQ 1(SI), DI
- LEAQ 1(R8), R13
- MOVL SI, 24(SP)(R9*4)
- MOVL R8, 24(SP)(R11*4)
- MOVL DI, 4120(SP)(R10*4)
- MOVL R13, 4120(SP)(R12*4)
- LEAQ 1(R8)(SI*1), DI
- SHRQ $0x01, DI
- ADDQ $0x01, SI
- SUBQ $0x01, R8
+ MOVQ $0x0000cf1bbcdcbf9b, SI
+ MOVQ $0x9e3779b1, R8
+ LEAQ 1(DI), DI
+ LEAQ -2(DX), R9
+ MOVQ (BX)(DI*1), R10
+ MOVQ 1(BX)(DI*1), R11
+ MOVQ (BX)(R9*1), R12
+ MOVQ 1(BX)(R9*1), R13
+ SHLQ $0x10, R10
+ IMULQ SI, R10
+ SHRQ $0x36, R10
+ SHLQ $0x20, R11
+ IMULQ R8, R11
+ SHRQ $0x38, R11
+ SHLQ $0x10, R12
+ IMULQ SI, R12
+ SHRQ $0x36, R12
+ SHLQ $0x20, R13
+ IMULQ R8, R13
+ SHRQ $0x38, R13
+ LEAQ 1(DI), R8
+ LEAQ 1(R9), R14
+ MOVL DI, (AX)(R10*4)
+ MOVL R9, (AX)(R12*4)
+ MOVL R8, 4096(AX)(R11*4)
+ MOVL R14, 4096(AX)(R13*4)
+ LEAQ 1(R9)(DI*1), R8
+ SHRQ $0x01, R8
+ ADDQ $0x01, DI
+ SUBQ $0x01, R9
index_loop_encodeSnappyBetterBlockAsm8B:
- CMPQ DI, R8
+ CMPQ R8, R9
JAE search_loop_encodeSnappyBetterBlockAsm8B
- MOVQ (DX)(SI*1), R9
- MOVQ (DX)(DI*1), R10
- SHLQ $0x10, R9
- IMULQ BX, R9
- SHRQ $0x36, R9
+ MOVQ (BX)(DI*1), R10
+ MOVQ (BX)(R8*1), R11
SHLQ $0x10, R10
- IMULQ BX, R10
+ IMULQ SI, R10
SHRQ $0x36, R10
- MOVL SI, 24(SP)(R9*4)
- MOVL DI, 24(SP)(R10*4)
- ADDQ $0x02, SI
+ SHLQ $0x10, R11
+ IMULQ SI, R11
+ SHRQ $0x36, R11
+ MOVL DI, (AX)(R10*4)
+ MOVL R8, (AX)(R11*4)
ADDQ $0x02, DI
+ ADDQ $0x02, R8
JMP index_loop_encodeSnappyBetterBlockAsm8B
emit_remainder_encodeSnappyBetterBlockAsm8B:
- MOVQ src_len+32(FP), CX
- SUBL 12(SP), CX
- LEAQ 3(AX)(CX*1), CX
- CMPQ CX, (SP)
+ MOVQ src_len+32(FP), AX
+ SUBL 12(SP), AX
+ LEAQ 3(CX)(AX*1), AX
+ CMPQ AX, (SP)
JB emit_remainder_ok_encodeSnappyBetterBlockAsm8B
- MOVQ $0x00000000, ret+48(FP)
+ MOVQ $0x00000000, ret+56(FP)
RET
emit_remainder_ok_encodeSnappyBetterBlockAsm8B:
- MOVQ src_len+32(FP), CX
- MOVL 12(SP), BX
- CMPL BX, CX
+ MOVQ src_len+32(FP), AX
+ MOVL 12(SP), DX
+ CMPL DX, AX
JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm8B
- MOVL CX, SI
- MOVL CX, 12(SP)
- LEAQ (DX)(BX*1), CX
- SUBL BX, SI
+ MOVL AX, SI
+ MOVL AX, 12(SP)
+ LEAQ (BX)(DX*1), AX
+ SUBL DX, SI
LEAL -1(SI), DX
CMPL DX, $0x3c
JB one_byte_emit_remainder_encodeSnappyBetterBlockAsm8B
@@ -17818,26 +17838,26 @@ emit_remainder_ok_encodeSnappyBetterBlockAsm8B:
JB three_bytes_emit_remainder_encodeSnappyBetterBlockAsm8B
three_bytes_emit_remainder_encodeSnappyBetterBlockAsm8B:
- MOVB $0xf4, (AX)
- MOVW DX, 1(AX)
- ADDQ $0x03, AX
+ MOVB $0xf4, (CX)
+ MOVW DX, 1(CX)
+ ADDQ $0x03, CX
JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8B
two_bytes_emit_remainder_encodeSnappyBetterBlockAsm8B:
- MOVB $0xf0, (AX)
- MOVB DL, 1(AX)
- ADDQ $0x02, AX
+ MOVB $0xf0, (CX)
+ MOVB DL, 1(CX)
+ ADDQ $0x02, CX
CMPL DX, $0x40
JB memmove_emit_remainder_encodeSnappyBetterBlockAsm8B
JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8B
one_byte_emit_remainder_encodeSnappyBetterBlockAsm8B:
SHLB $0x02, DL
- MOVB DL, (AX)
- ADDQ $0x01, AX
+ MOVB DL, (CX)
+ ADDQ $0x01, CX
memmove_emit_remainder_encodeSnappyBetterBlockAsm8B:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveShort
@@ -17853,73 +17873,73 @@ memmove_emit_remainder_encodeSnappyBetterBlockAsm8B:
JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_33through64
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_1or2:
- MOVB (CX), SI
- MOVB -1(CX)(BX*1), CL
- MOVB SI, (AX)
- MOVB CL, -1(AX)(BX*1)
+ MOVB (AX), SI
+ MOVB -1(AX)(BX*1), AL
+ MOVB SI, (CX)
+ MOVB AL, -1(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_3:
- MOVW (CX), SI
- MOVB 2(CX), CL
- MOVW SI, (AX)
- MOVB CL, 2(AX)
+ MOVW (AX), SI
+ MOVB 2(AX), AL
+ MOVW SI, (CX)
+ MOVB AL, 2(CX)
JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_4through7:
- MOVL (CX), SI
- MOVL -4(CX)(BX*1), CX
- MOVL SI, (AX)
- MOVL CX, -4(AX)(BX*1)
+ MOVL (AX), SI
+ MOVL -4(AX)(BX*1), AX
+ MOVL SI, (CX)
+ MOVL AX, -4(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_8through16:
- MOVQ (CX), SI
- MOVQ -8(CX)(BX*1), CX
- MOVQ SI, (AX)
- MOVQ CX, -8(AX)(BX*1)
+ MOVQ (AX), SI
+ MOVQ -8(AX)(BX*1), AX
+ MOVQ SI, (CX)
+ MOVQ AX, -8(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_17through32:
- MOVOU (CX), X0
- MOVOU -16(CX)(BX*1), X1
- MOVOU X0, (AX)
- MOVOU X1, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU -16(AX)(BX*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_33through64:
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B:
- MOVQ DX, AX
+ MOVQ DX, CX
JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm8B
memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8B:
- LEAQ (AX)(SI*1), DX
+ LEAQ (CX)(SI*1), DX
MOVL SI, BX
// genMemMoveLong
- MOVOU (CX), X0
- MOVOU 16(CX), X1
- MOVOU -32(CX)(BX*1), X2
- MOVOU -16(CX)(BX*1), X3
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
MOVQ BX, DI
SHRQ $0x05, DI
- MOVQ AX, SI
+ MOVQ CX, SI
ANDL $0x0000001f, SI
MOVQ $0x00000040, R8
SUBQ SI, R8
DECQ DI
JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32
- LEAQ -32(CX)(R8*1), SI
- LEAQ -32(AX)(R8*1), R9
+ LEAQ -32(AX)(R8*1), SI
+ LEAQ -32(CX)(R8*1), R9
emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_big_loop_back:
MOVOU (SI), X4
@@ -17933,1136 +17953,1142 @@ emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_big_loop_
JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_big_loop_back
emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32:
- MOVOU -32(CX)(R8*1), X4
- MOVOU -16(CX)(R8*1), X5
- MOVOA X4, -32(AX)(R8*1)
- MOVOA X5, -16(AX)(R8*1)
+ MOVOU -32(AX)(R8*1), X4
+ MOVOU -16(AX)(R8*1), X5
+ MOVOA X4, -32(CX)(R8*1)
+ MOVOA X5, -16(CX)(R8*1)
ADDQ $0x20, R8
CMPQ BX, R8
JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32
- MOVOU X0, (AX)
- MOVOU X1, 16(AX)
- MOVOU X2, -32(AX)(BX*1)
- MOVOU X3, -16(AX)(BX*1)
- MOVQ DX, AX
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
+ MOVQ DX, CX
emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm8B:
- MOVQ dst_base+0(FP), CX
- SUBQ CX, AX
- MOVQ AX, ret+48(FP)
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, CX
+ MOVQ CX, ret+56(FP)
RET
-// func calcBlockSize(src []byte) int
+// func calcBlockSize(src []byte, tmp *[32768]byte) int
// Requires: BMI, SSE2
-TEXT ·calcBlockSize(SB), $32792-32
- XORQ AX, AX
- MOVQ $0x00000100, CX
- LEAQ 24(SP), DX
+TEXT ·calcBlockSize(SB), $24-40
+ MOVQ tmp+24(FP), AX
+ XORQ CX, CX
+ MOVQ $0x00000100, DX
+ MOVQ AX, BX
PXOR X0, X0
zero_loop_calcBlockSize:
- MOVOU X0, (DX)
- MOVOU X0, 16(DX)
- MOVOU X0, 32(DX)
- MOVOU X0, 48(DX)
- MOVOU X0, 64(DX)
- MOVOU X0, 80(DX)
- MOVOU X0, 96(DX)
- MOVOU X0, 112(DX)
- ADDQ $0x80, DX
- DECQ CX
+ MOVOU X0, (BX)
+ MOVOU X0, 16(BX)
+ MOVOU X0, 32(BX)
+ MOVOU X0, 48(BX)
+ MOVOU X0, 64(BX)
+ MOVOU X0, 80(BX)
+ MOVOU X0, 96(BX)
+ MOVOU X0, 112(BX)
+ ADDQ $0x80, BX
+ DECQ DX
JNZ zero_loop_calcBlockSize
MOVL $0x00000000, 12(SP)
- MOVQ src_len+8(FP), CX
- LEAQ -9(CX), DX
- LEAQ -8(CX), BX
- MOVL BX, 8(SP)
- SHRQ $0x05, CX
- SUBL CX, DX
- LEAQ (AX)(DX*1), DX
- MOVQ DX, (SP)
- MOVL $0x00000001, CX
- MOVL CX, 16(SP)
- MOVQ src_base+0(FP), DX
+ MOVQ src_len+8(FP), DX
+ LEAQ -9(DX), BX
+ LEAQ -8(DX), SI
+ MOVL SI, 8(SP)
+ SHRQ $0x05, DX
+ SUBL DX, BX
+ LEAQ (CX)(BX*1), BX
+ MOVQ BX, (SP)
+ MOVL $0x00000001, DX
+ MOVL DX, 16(SP)
+ MOVQ src_base+0(FP), BX
search_loop_calcBlockSize:
- MOVL CX, BX
- SUBL 12(SP), BX
- SHRL $0x05, BX
- LEAL 4(CX)(BX*1), BX
- CMPL BX, 8(SP)
+ MOVL DX, SI
+ SUBL 12(SP), SI
+ SHRL $0x05, SI
+ LEAL 4(DX)(SI*1), SI
+ CMPL SI, 8(SP)
JAE emit_remainder_calcBlockSize
- MOVQ (DX)(CX*1), SI
- MOVL BX, 20(SP)
- MOVQ $0x0000cf1bbcdcbf9b, R8
- MOVQ SI, R9
- MOVQ SI, R10
- SHRQ $0x08, R10
- SHLQ $0x10, R9
- IMULQ R8, R9
- SHRQ $0x33, R9
+ MOVQ (BX)(DX*1), DI
+ MOVL SI, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R9
+ MOVQ DI, R10
+ MOVQ DI, R11
+ SHRQ $0x08, R11
SHLQ $0x10, R10
- IMULQ R8, R10
+ IMULQ R9, R10
SHRQ $0x33, R10
- MOVL 24(SP)(R9*4), BX
- MOVL 24(SP)(R10*4), DI
- MOVL CX, 24(SP)(R9*4)
- LEAL 1(CX), R9
- MOVL R9, 24(SP)(R10*4)
- MOVQ SI, R9
- SHRQ $0x10, R9
- SHLQ $0x10, R9
- IMULQ R8, R9
- SHRQ $0x33, R9
- MOVL CX, R8
- SUBL 16(SP), R8
- MOVL 1(DX)(R8*1), R10
- MOVQ SI, R8
- SHRQ $0x08, R8
- CMPL R8, R10
+ SHLQ $0x10, R11
+ IMULQ R9, R11
+ SHRQ $0x33, R11
+ MOVL (AX)(R10*4), SI
+ MOVL (AX)(R11*4), R8
+ MOVL DX, (AX)(R10*4)
+ LEAL 1(DX), R10
+ MOVL R10, (AX)(R11*4)
+ MOVQ DI, R10
+ SHRQ $0x10, R10
+ SHLQ $0x10, R10
+ IMULQ R9, R10
+ SHRQ $0x33, R10
+ MOVL DX, R9
+ SUBL 16(SP), R9
+ MOVL 1(BX)(R9*1), R11
+ MOVQ DI, R9
+ SHRQ $0x08, R9
+ CMPL R9, R11
JNE no_repeat_found_calcBlockSize
- LEAL 1(CX), SI
- MOVL 12(SP), BX
- MOVL SI, DI
- SUBL 16(SP), DI
+ LEAL 1(DX), DI
+ MOVL 12(SP), SI
+ MOVL DI, R8
+ SUBL 16(SP), R8
JZ repeat_extend_back_end_calcBlockSize
repeat_extend_back_loop_calcBlockSize:
- CMPL SI, BX
+ CMPL DI, SI
JBE repeat_extend_back_end_calcBlockSize
- MOVB -1(DX)(DI*1), R8
- MOVB -1(DX)(SI*1), R9
- CMPB R8, R9
+ MOVB -1(BX)(R8*1), R9
+ MOVB -1(BX)(DI*1), R10
+ CMPB R9, R10
JNE repeat_extend_back_end_calcBlockSize
- LEAL -1(SI), SI
- DECL DI
+ LEAL -1(DI), DI
+ DECL R8
JNZ repeat_extend_back_loop_calcBlockSize
repeat_extend_back_end_calcBlockSize:
- MOVL SI, BX
- SUBL 12(SP), BX
- LEAQ 5(AX)(BX*1), BX
- CMPQ BX, (SP)
+ MOVL DI, SI
+ SUBL 12(SP), SI
+ LEAQ 5(CX)(SI*1), SI
+ CMPQ SI, (SP)
JB repeat_dst_size_check_calcBlockSize
- MOVQ $0x00000000, ret+24(FP)
+ MOVQ $0x00000000, ret+32(FP)
RET
repeat_dst_size_check_calcBlockSize:
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_repeat_emit_calcBlockSize
- MOVL SI, DI
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R8
- SUBL BX, DI
- LEAL -1(DI), BX
- CMPL BX, $0x3c
+ MOVL DI, R8
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R9
+ SUBL SI, R8
+ LEAL -1(R8), SI
+ CMPL SI, $0x3c
JB one_byte_repeat_emit_calcBlockSize
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_repeat_emit_calcBlockSize
- CMPL BX, $0x00010000
+ CMPL SI, $0x00010000
JB three_bytes_repeat_emit_calcBlockSize
- CMPL BX, $0x01000000
+ CMPL SI, $0x01000000
JB four_bytes_repeat_emit_calcBlockSize
- ADDQ $0x05, AX
+ ADDQ $0x05, CX
JMP memmove_long_repeat_emit_calcBlockSize
four_bytes_repeat_emit_calcBlockSize:
- ADDQ $0x04, AX
+ ADDQ $0x04, CX
JMP memmove_long_repeat_emit_calcBlockSize
three_bytes_repeat_emit_calcBlockSize:
- ADDQ $0x03, AX
+ ADDQ $0x03, CX
JMP memmove_long_repeat_emit_calcBlockSize
two_bytes_repeat_emit_calcBlockSize:
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_repeat_emit_calcBlockSize
JMP memmove_long_repeat_emit_calcBlockSize
one_byte_repeat_emit_calcBlockSize:
- ADDQ $0x01, AX
+ ADDQ $0x01, CX
memmove_repeat_emit_calcBlockSize:
- LEAQ (AX)(DI*1), AX
+ LEAQ (CX)(R8*1), CX
JMP emit_literal_done_repeat_emit_calcBlockSize
memmove_long_repeat_emit_calcBlockSize:
- LEAQ (AX)(DI*1), AX
+ LEAQ (CX)(R8*1), CX
emit_literal_done_repeat_emit_calcBlockSize:
- ADDL $0x05, CX
- MOVL CX, BX
- SUBL 16(SP), BX
- MOVQ src_len+8(FP), DI
- SUBL CX, DI
- LEAQ (DX)(CX*1), R8
- LEAQ (DX)(BX*1), BX
+ ADDL $0x05, DX
+ MOVL DX, SI
+ SUBL 16(SP), SI
+ MOVQ src_len+8(FP), R8
+ SUBL DX, R8
+ LEAQ (BX)(DX*1), R9
+ LEAQ (BX)(SI*1), SI
// matchLen
- XORL R10, R10
+ XORL R11, R11
matchlen_loopback_16_repeat_extend_calcBlockSize:
- CMPL DI, $0x10
+ CMPL R8, $0x10
JB matchlen_match8_repeat_extend_calcBlockSize
- MOVQ (R8)(R10*1), R9
- MOVQ 8(R8)(R10*1), R11
- XORQ (BX)(R10*1), R9
+ MOVQ (R9)(R11*1), R10
+ MOVQ 8(R9)(R11*1), R12
+ XORQ (SI)(R11*1), R10
JNZ matchlen_bsf_8_repeat_extend_calcBlockSize
- XORQ 8(BX)(R10*1), R11
+ XORQ 8(SI)(R11*1), R12
JNZ matchlen_bsf_16repeat_extend_calcBlockSize
- LEAL -16(DI), DI
- LEAL 16(R10), R10
+ LEAL -16(R8), R8
+ LEAL 16(R11), R11
JMP matchlen_loopback_16_repeat_extend_calcBlockSize
matchlen_bsf_16repeat_extend_calcBlockSize:
#ifdef GOAMD64_v3
- TZCNTQ R11, R11
+ TZCNTQ R12, R12
#else
- BSFQ R11, R11
+ BSFQ R12, R12
#endif
- SARQ $0x03, R11
- LEAL 8(R10)(R11*1), R10
+ SARQ $0x03, R12
+ LEAL 8(R11)(R12*1), R11
JMP repeat_extend_forward_end_calcBlockSize
matchlen_match8_repeat_extend_calcBlockSize:
- CMPL DI, $0x08
+ CMPL R8, $0x08
JB matchlen_match4_repeat_extend_calcBlockSize
- MOVQ (R8)(R10*1), R9
- XORQ (BX)(R10*1), R9
+ MOVQ (R9)(R11*1), R10
+ XORQ (SI)(R11*1), R10
JNZ matchlen_bsf_8_repeat_extend_calcBlockSize
- LEAL -8(DI), DI
- LEAL 8(R10), R10
+ LEAL -8(R8), R8
+ LEAL 8(R11), R11
JMP matchlen_match4_repeat_extend_calcBlockSize
matchlen_bsf_8_repeat_extend_calcBlockSize:
#ifdef GOAMD64_v3
- TZCNTQ R9, R9
+ TZCNTQ R10, R10
#else
- BSFQ R9, R9
+ BSFQ R10, R10
#endif
- SARQ $0x03, R9
- LEAL (R10)(R9*1), R10
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
JMP repeat_extend_forward_end_calcBlockSize
matchlen_match4_repeat_extend_calcBlockSize:
- CMPL DI, $0x04
+ CMPL R8, $0x04
JB matchlen_match2_repeat_extend_calcBlockSize
- MOVL (R8)(R10*1), R9
- CMPL (BX)(R10*1), R9
+ MOVL (R9)(R11*1), R10
+ CMPL (SI)(R11*1), R10
JNE matchlen_match2_repeat_extend_calcBlockSize
- LEAL -4(DI), DI
- LEAL 4(R10), R10
+ LEAL -4(R8), R8
+ LEAL 4(R11), R11
matchlen_match2_repeat_extend_calcBlockSize:
- CMPL DI, $0x01
+ CMPL R8, $0x01
JE matchlen_match1_repeat_extend_calcBlockSize
JB repeat_extend_forward_end_calcBlockSize
- MOVW (R8)(R10*1), R9
- CMPW (BX)(R10*1), R9
+ MOVW (R9)(R11*1), R10
+ CMPW (SI)(R11*1), R10
JNE matchlen_match1_repeat_extend_calcBlockSize
- LEAL 2(R10), R10
- SUBL $0x02, DI
+ LEAL 2(R11), R11
+ SUBL $0x02, R8
JZ repeat_extend_forward_end_calcBlockSize
matchlen_match1_repeat_extend_calcBlockSize:
- MOVB (R8)(R10*1), R9
- CMPB (BX)(R10*1), R9
+ MOVB (R9)(R11*1), R10
+ CMPB (SI)(R11*1), R10
JNE repeat_extend_forward_end_calcBlockSize
- LEAL 1(R10), R10
+ LEAL 1(R11), R11
repeat_extend_forward_end_calcBlockSize:
- ADDL R10, CX
- MOVL CX, BX
- SUBL SI, BX
- MOVL 16(SP), SI
+ ADDL R11, DX
+ MOVL DX, SI
+ SUBL DI, SI
+ MOVL 16(SP), DI
// emitCopy
- CMPL SI, $0x00010000
+ CMPL DI, $0x00010000
JB two_byte_offset_repeat_as_copy_calcBlockSize
four_bytes_loop_back_repeat_as_copy_calcBlockSize:
- CMPL BX, $0x40
+ CMPL SI, $0x40
JBE four_bytes_remain_repeat_as_copy_calcBlockSize
- LEAL -64(BX), BX
- ADDQ $0x05, AX
- CMPL BX, $0x04
+ LEAL -64(SI), SI
+ ADDQ $0x05, CX
+ CMPL SI, $0x04
JB four_bytes_remain_repeat_as_copy_calcBlockSize
JMP four_bytes_loop_back_repeat_as_copy_calcBlockSize
four_bytes_remain_repeat_as_copy_calcBlockSize:
- TESTL BX, BX
+ TESTL SI, SI
JZ repeat_end_emit_calcBlockSize
- XORL BX, BX
- ADDQ $0x05, AX
+ XORL SI, SI
+ ADDQ $0x05, CX
JMP repeat_end_emit_calcBlockSize
two_byte_offset_repeat_as_copy_calcBlockSize:
- CMPL BX, $0x40
+ CMPL SI, $0x40
JBE two_byte_offset_short_repeat_as_copy_calcBlockSize
- LEAL -60(BX), BX
- ADDQ $0x03, AX
+ LEAL -60(SI), SI
+ ADDQ $0x03, CX
JMP two_byte_offset_repeat_as_copy_calcBlockSize
two_byte_offset_short_repeat_as_copy_calcBlockSize:
- MOVL BX, DI
- SHLL $0x02, DI
- CMPL BX, $0x0c
+ MOVL SI, R8
+ SHLL $0x02, R8
+ CMPL SI, $0x0c
JAE emit_copy_three_repeat_as_copy_calcBlockSize
- CMPL SI, $0x00000800
+ CMPL DI, $0x00000800
JAE emit_copy_three_repeat_as_copy_calcBlockSize
- ADDQ $0x02, AX
+ ADDQ $0x02, CX
JMP repeat_end_emit_calcBlockSize
emit_copy_three_repeat_as_copy_calcBlockSize:
- ADDQ $0x03, AX
+ ADDQ $0x03, CX
repeat_end_emit_calcBlockSize:
- MOVL CX, 12(SP)
+ MOVL DX, 12(SP)
JMP search_loop_calcBlockSize
no_repeat_found_calcBlockSize:
- CMPL (DX)(BX*1), SI
+ CMPL (BX)(SI*1), DI
JEQ candidate_match_calcBlockSize
- SHRQ $0x08, SI
- MOVL 24(SP)(R9*4), BX
- LEAL 2(CX), R8
- CMPL (DX)(DI*1), SI
+ SHRQ $0x08, DI
+ MOVL (AX)(R10*4), SI
+ LEAL 2(DX), R9
+ CMPL (BX)(R8*1), DI
JEQ candidate2_match_calcBlockSize
- MOVL R8, 24(SP)(R9*4)
- SHRQ $0x08, SI
- CMPL (DX)(BX*1), SI
+ MOVL R9, (AX)(R10*4)
+ SHRQ $0x08, DI
+ CMPL (BX)(SI*1), DI
JEQ candidate3_match_calcBlockSize
- MOVL 20(SP), CX
+ MOVL 20(SP), DX
JMP search_loop_calcBlockSize
candidate3_match_calcBlockSize:
- ADDL $0x02, CX
+ ADDL $0x02, DX
JMP candidate_match_calcBlockSize
candidate2_match_calcBlockSize:
- MOVL R8, 24(SP)(R9*4)
- INCL CX
- MOVL DI, BX
+ MOVL R9, (AX)(R10*4)
+ INCL DX
+ MOVL R8, SI
candidate_match_calcBlockSize:
- MOVL 12(SP), SI
- TESTL BX, BX
+ MOVL 12(SP), DI
+ TESTL SI, SI
JZ match_extend_back_end_calcBlockSize
match_extend_back_loop_calcBlockSize:
- CMPL CX, SI
+ CMPL DX, DI
JBE match_extend_back_end_calcBlockSize
- MOVB -1(DX)(BX*1), DI
- MOVB -1(DX)(CX*1), R8
- CMPB DI, R8
+ MOVB -1(BX)(SI*1), R8
+ MOVB -1(BX)(DX*1), R9
+ CMPB R8, R9
JNE match_extend_back_end_calcBlockSize
- LEAL -1(CX), CX
- DECL BX
+ LEAL -1(DX), DX
+ DECL SI
JZ match_extend_back_end_calcBlockSize
JMP match_extend_back_loop_calcBlockSize
match_extend_back_end_calcBlockSize:
- MOVL CX, SI
- SUBL 12(SP), SI
- LEAQ 5(AX)(SI*1), SI
- CMPQ SI, (SP)
+ MOVL DX, DI
+ SUBL 12(SP), DI
+ LEAQ 5(CX)(DI*1), DI
+ CMPQ DI, (SP)
JB match_dst_size_check_calcBlockSize
- MOVQ $0x00000000, ret+24(FP)
+ MOVQ $0x00000000, ret+32(FP)
RET
match_dst_size_check_calcBlockSize:
- MOVL CX, SI
- MOVL 12(SP), DI
- CMPL DI, SI
+ MOVL DX, DI
+ MOVL 12(SP), R8
+ CMPL R8, DI
JEQ emit_literal_done_match_emit_calcBlockSize
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(DI*1), SI
- SUBL DI, R8
- LEAL -1(R8), SI
- CMPL SI, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(R8*1), DI
+ SUBL R8, R9
+ LEAL -1(R9), DI
+ CMPL DI, $0x3c
JB one_byte_match_emit_calcBlockSize
- CMPL SI, $0x00000100
+ CMPL DI, $0x00000100
JB two_bytes_match_emit_calcBlockSize
- CMPL SI, $0x00010000
+ CMPL DI, $0x00010000
JB three_bytes_match_emit_calcBlockSize
- CMPL SI, $0x01000000
+ CMPL DI, $0x01000000
JB four_bytes_match_emit_calcBlockSize
- ADDQ $0x05, AX
+ ADDQ $0x05, CX
JMP memmove_long_match_emit_calcBlockSize
four_bytes_match_emit_calcBlockSize:
- ADDQ $0x04, AX
+ ADDQ $0x04, CX
JMP memmove_long_match_emit_calcBlockSize
three_bytes_match_emit_calcBlockSize:
- ADDQ $0x03, AX
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_calcBlockSize
two_bytes_match_emit_calcBlockSize:
- ADDQ $0x02, AX
- CMPL SI, $0x40
+ ADDQ $0x02, CX
+ CMPL DI, $0x40
JB memmove_match_emit_calcBlockSize
JMP memmove_long_match_emit_calcBlockSize
one_byte_match_emit_calcBlockSize:
- ADDQ $0x01, AX
+ ADDQ $0x01, CX
memmove_match_emit_calcBlockSize:
- LEAQ (AX)(R8*1), AX
+ LEAQ (CX)(R9*1), CX
JMP emit_literal_done_match_emit_calcBlockSize
memmove_long_match_emit_calcBlockSize:
- LEAQ (AX)(R8*1), AX
+ LEAQ (CX)(R9*1), CX
emit_literal_done_match_emit_calcBlockSize:
match_nolit_loop_calcBlockSize:
- MOVL CX, SI
- SUBL BX, SI
- MOVL SI, 16(SP)
- ADDL $0x04, CX
- ADDL $0x04, BX
- MOVQ src_len+8(FP), SI
- SUBL CX, SI
- LEAQ (DX)(CX*1), DI
- LEAQ (DX)(BX*1), BX
+ MOVL DX, DI
+ SUBL SI, DI
+ MOVL DI, 16(SP)
+ ADDL $0x04, DX
+ ADDL $0x04, SI
+ MOVQ src_len+8(FP), DI
+ SUBL DX, DI
+ LEAQ (BX)(DX*1), R8
+ LEAQ (BX)(SI*1), SI
// matchLen
- XORL R9, R9
+ XORL R10, R10
matchlen_loopback_16_match_nolit_calcBlockSize:
- CMPL SI, $0x10
+ CMPL DI, $0x10
JB matchlen_match8_match_nolit_calcBlockSize
- MOVQ (DI)(R9*1), R8
- MOVQ 8(DI)(R9*1), R10
- XORQ (BX)(R9*1), R8
+ MOVQ (R8)(R10*1), R9
+ MOVQ 8(R8)(R10*1), R11
+ XORQ (SI)(R10*1), R9
JNZ matchlen_bsf_8_match_nolit_calcBlockSize
- XORQ 8(BX)(R9*1), R10
+ XORQ 8(SI)(R10*1), R11
JNZ matchlen_bsf_16match_nolit_calcBlockSize
- LEAL -16(SI), SI
- LEAL 16(R9), R9
+ LEAL -16(DI), DI
+ LEAL 16(R10), R10
JMP matchlen_loopback_16_match_nolit_calcBlockSize
matchlen_bsf_16match_nolit_calcBlockSize:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL 8(R9)(R10*1), R9
+ SARQ $0x03, R11
+ LEAL 8(R10)(R11*1), R10
JMP match_nolit_end_calcBlockSize
matchlen_match8_match_nolit_calcBlockSize:
- CMPL SI, $0x08
+ CMPL DI, $0x08
JB matchlen_match4_match_nolit_calcBlockSize
- MOVQ (DI)(R9*1), R8
- XORQ (BX)(R9*1), R8
+ MOVQ (R8)(R10*1), R9
+ XORQ (SI)(R10*1), R9
JNZ matchlen_bsf_8_match_nolit_calcBlockSize
- LEAL -8(SI), SI
- LEAL 8(R9), R9
+ LEAL -8(DI), DI
+ LEAL 8(R10), R10
JMP matchlen_match4_match_nolit_calcBlockSize
matchlen_bsf_8_match_nolit_calcBlockSize:
#ifdef GOAMD64_v3
- TZCNTQ R8, R8
+ TZCNTQ R9, R9
#else
- BSFQ R8, R8
+ BSFQ R9, R9
#endif
- SARQ $0x03, R8
- LEAL (R9)(R8*1), R9
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
JMP match_nolit_end_calcBlockSize
matchlen_match4_match_nolit_calcBlockSize:
- CMPL SI, $0x04
+ CMPL DI, $0x04
JB matchlen_match2_match_nolit_calcBlockSize
- MOVL (DI)(R9*1), R8
- CMPL (BX)(R9*1), R8
+ MOVL (R8)(R10*1), R9
+ CMPL (SI)(R10*1), R9
JNE matchlen_match2_match_nolit_calcBlockSize
- LEAL -4(SI), SI
- LEAL 4(R9), R9
+ LEAL -4(DI), DI
+ LEAL 4(R10), R10
matchlen_match2_match_nolit_calcBlockSize:
- CMPL SI, $0x01
+ CMPL DI, $0x01
JE matchlen_match1_match_nolit_calcBlockSize
JB match_nolit_end_calcBlockSize
- MOVW (DI)(R9*1), R8
- CMPW (BX)(R9*1), R8
+ MOVW (R8)(R10*1), R9
+ CMPW (SI)(R10*1), R9
JNE matchlen_match1_match_nolit_calcBlockSize
- LEAL 2(R9), R9
- SUBL $0x02, SI
+ LEAL 2(R10), R10
+ SUBL $0x02, DI
JZ match_nolit_end_calcBlockSize
matchlen_match1_match_nolit_calcBlockSize:
- MOVB (DI)(R9*1), R8
- CMPB (BX)(R9*1), R8
+ MOVB (R8)(R10*1), R9
+ CMPB (SI)(R10*1), R9
JNE match_nolit_end_calcBlockSize
- LEAL 1(R9), R9
+ LEAL 1(R10), R10
match_nolit_end_calcBlockSize:
- ADDL R9, CX
- MOVL 16(SP), BX
- ADDL $0x04, R9
- MOVL CX, 12(SP)
+ ADDL R10, DX
+ MOVL 16(SP), SI
+ ADDL $0x04, R10
+ MOVL DX, 12(SP)
// emitCopy
- CMPL BX, $0x00010000
+ CMPL SI, $0x00010000
JB two_byte_offset_match_nolit_calcBlockSize
four_bytes_loop_back_match_nolit_calcBlockSize:
- CMPL R9, $0x40
+ CMPL R10, $0x40
JBE four_bytes_remain_match_nolit_calcBlockSize
- LEAL -64(R9), R9
- ADDQ $0x05, AX
- CMPL R9, $0x04
+ LEAL -64(R10), R10
+ ADDQ $0x05, CX
+ CMPL R10, $0x04
JB four_bytes_remain_match_nolit_calcBlockSize
JMP four_bytes_loop_back_match_nolit_calcBlockSize
four_bytes_remain_match_nolit_calcBlockSize:
- TESTL R9, R9
+ TESTL R10, R10
JZ match_nolit_emitcopy_end_calcBlockSize
- XORL BX, BX
- ADDQ $0x05, AX
+ XORL SI, SI
+ ADDQ $0x05, CX
JMP match_nolit_emitcopy_end_calcBlockSize
two_byte_offset_match_nolit_calcBlockSize:
- CMPL R9, $0x40
+ CMPL R10, $0x40
JBE two_byte_offset_short_match_nolit_calcBlockSize
- LEAL -60(R9), R9
- ADDQ $0x03, AX
+ LEAL -60(R10), R10
+ ADDQ $0x03, CX
JMP two_byte_offset_match_nolit_calcBlockSize
two_byte_offset_short_match_nolit_calcBlockSize:
- MOVL R9, SI
- SHLL $0x02, SI
- CMPL R9, $0x0c
+ MOVL R10, DI
+ SHLL $0x02, DI
+ CMPL R10, $0x0c
JAE emit_copy_three_match_nolit_calcBlockSize
- CMPL BX, $0x00000800
+ CMPL SI, $0x00000800
JAE emit_copy_three_match_nolit_calcBlockSize
- ADDQ $0x02, AX
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_calcBlockSize
emit_copy_three_match_nolit_calcBlockSize:
- ADDQ $0x03, AX
+ ADDQ $0x03, CX
match_nolit_emitcopy_end_calcBlockSize:
- CMPL CX, 8(SP)
+ CMPL DX, 8(SP)
JAE emit_remainder_calcBlockSize
- MOVQ -2(DX)(CX*1), SI
- CMPQ AX, (SP)
+ MOVQ -2(BX)(DX*1), DI
+ CMPQ CX, (SP)
JB match_nolit_dst_ok_calcBlockSize
- MOVQ $0x00000000, ret+24(FP)
+ MOVQ $0x00000000, ret+32(FP)
RET
match_nolit_dst_ok_calcBlockSize:
- MOVQ $0x0000cf1bbcdcbf9b, R8
- MOVQ SI, DI
- SHRQ $0x10, SI
- MOVQ SI, BX
- SHLQ $0x10, DI
- IMULQ R8, DI
- SHRQ $0x33, DI
- SHLQ $0x10, BX
- IMULQ R8, BX
- SHRQ $0x33, BX
- LEAL -2(CX), R8
- LEAQ 24(SP)(BX*4), R9
- MOVL (R9), BX
- MOVL R8, 24(SP)(DI*4)
- MOVL CX, (R9)
- CMPL (DX)(BX*1), SI
+ MOVQ $0x0000cf1bbcdcbf9b, R9
+ MOVQ DI, R8
+ SHRQ $0x10, DI
+ MOVQ DI, SI
+ SHLQ $0x10, R8
+ IMULQ R9, R8
+ SHRQ $0x33, R8
+ SHLQ $0x10, SI
+ IMULQ R9, SI
+ SHRQ $0x33, SI
+ LEAL -2(DX), R9
+ LEAQ (AX)(SI*4), R10
+ MOVL (R10), SI
+ MOVL R9, (AX)(R8*4)
+ MOVL DX, (R10)
+ CMPL (BX)(SI*1), DI
JEQ match_nolit_loop_calcBlockSize
- INCL CX
+ INCL DX
JMP search_loop_calcBlockSize
emit_remainder_calcBlockSize:
- MOVQ src_len+8(FP), CX
- SUBL 12(SP), CX
- LEAQ 5(AX)(CX*1), CX
- CMPQ CX, (SP)
+ MOVQ src_len+8(FP), AX
+ SUBL 12(SP), AX
+ LEAQ 5(CX)(AX*1), AX
+ CMPQ AX, (SP)
JB emit_remainder_ok_calcBlockSize
- MOVQ $0x00000000, ret+24(FP)
+ MOVQ $0x00000000, ret+32(FP)
RET
emit_remainder_ok_calcBlockSize:
- MOVQ src_len+8(FP), CX
- MOVL 12(SP), BX
- CMPL BX, CX
+ MOVQ src_len+8(FP), AX
+ MOVL 12(SP), DX
+ CMPL DX, AX
JEQ emit_literal_done_emit_remainder_calcBlockSize
- MOVL CX, SI
- MOVL CX, 12(SP)
- LEAQ (DX)(BX*1), CX
- SUBL BX, SI
- LEAL -1(SI), CX
- CMPL CX, $0x3c
+ MOVL AX, SI
+ MOVL AX, 12(SP)
+ LEAQ (BX)(DX*1), AX
+ SUBL DX, SI
+ LEAL -1(SI), AX
+ CMPL AX, $0x3c
JB one_byte_emit_remainder_calcBlockSize
- CMPL CX, $0x00000100
+ CMPL AX, $0x00000100
JB two_bytes_emit_remainder_calcBlockSize
- CMPL CX, $0x00010000
+ CMPL AX, $0x00010000
JB three_bytes_emit_remainder_calcBlockSize
- CMPL CX, $0x01000000
+ CMPL AX, $0x01000000
JB four_bytes_emit_remainder_calcBlockSize
- ADDQ $0x05, AX
+ ADDQ $0x05, CX
JMP memmove_long_emit_remainder_calcBlockSize
four_bytes_emit_remainder_calcBlockSize:
- ADDQ $0x04, AX
+ ADDQ $0x04, CX
JMP memmove_long_emit_remainder_calcBlockSize
three_bytes_emit_remainder_calcBlockSize:
- ADDQ $0x03, AX
+ ADDQ $0x03, CX
JMP memmove_long_emit_remainder_calcBlockSize
two_bytes_emit_remainder_calcBlockSize:
- ADDQ $0x02, AX
- CMPL CX, $0x40
+ ADDQ $0x02, CX
+ CMPL AX, $0x40
JB memmove_emit_remainder_calcBlockSize
JMP memmove_long_emit_remainder_calcBlockSize
one_byte_emit_remainder_calcBlockSize:
- ADDQ $0x01, AX
+ ADDQ $0x01, CX
memmove_emit_remainder_calcBlockSize:
- LEAQ (AX)(SI*1), AX
+ LEAQ (CX)(SI*1), AX
+ MOVQ AX, CX
JMP emit_literal_done_emit_remainder_calcBlockSize
memmove_long_emit_remainder_calcBlockSize:
- LEAQ (AX)(SI*1), AX
+ LEAQ (CX)(SI*1), AX
+ MOVQ AX, CX
emit_literal_done_emit_remainder_calcBlockSize:
- MOVQ AX, ret+24(FP)
+ MOVQ CX, ret+32(FP)
RET
-// func calcBlockSizeSmall(src []byte) int
+// func calcBlockSizeSmall(src []byte, tmp *[2048]byte) int
// Requires: BMI, SSE2
-TEXT ·calcBlockSizeSmall(SB), $2072-32
- XORQ AX, AX
- MOVQ $0x00000010, CX
- LEAQ 24(SP), DX
+TEXT ·calcBlockSizeSmall(SB), $24-40
+ MOVQ tmp+24(FP), AX
+ XORQ CX, CX
+ MOVQ $0x00000010, DX
+ MOVQ AX, BX
PXOR X0, X0
zero_loop_calcBlockSizeSmall:
- MOVOU X0, (DX)
- MOVOU X0, 16(DX)
- MOVOU X0, 32(DX)
- MOVOU X0, 48(DX)
- MOVOU X0, 64(DX)
- MOVOU X0, 80(DX)
- MOVOU X0, 96(DX)
- MOVOU X0, 112(DX)
- ADDQ $0x80, DX
- DECQ CX
+ MOVOU X0, (BX)
+ MOVOU X0, 16(BX)
+ MOVOU X0, 32(BX)
+ MOVOU X0, 48(BX)
+ MOVOU X0, 64(BX)
+ MOVOU X0, 80(BX)
+ MOVOU X0, 96(BX)
+ MOVOU X0, 112(BX)
+ ADDQ $0x80, BX
+ DECQ DX
JNZ zero_loop_calcBlockSizeSmall
MOVL $0x00000000, 12(SP)
- MOVQ src_len+8(FP), CX
- LEAQ -9(CX), DX
- LEAQ -8(CX), BX
- MOVL BX, 8(SP)
- SHRQ $0x05, CX
- SUBL CX, DX
- LEAQ (AX)(DX*1), DX
- MOVQ DX, (SP)
- MOVL $0x00000001, CX
- MOVL CX, 16(SP)
- MOVQ src_base+0(FP), DX
+ MOVQ src_len+8(FP), DX
+ LEAQ -9(DX), BX
+ LEAQ -8(DX), SI
+ MOVL SI, 8(SP)
+ SHRQ $0x05, DX
+ SUBL DX, BX
+ LEAQ (CX)(BX*1), BX
+ MOVQ BX, (SP)
+ MOVL $0x00000001, DX
+ MOVL DX, 16(SP)
+ MOVQ src_base+0(FP), BX
search_loop_calcBlockSizeSmall:
- MOVL CX, BX
- SUBL 12(SP), BX
- SHRL $0x04, BX
- LEAL 4(CX)(BX*1), BX
- CMPL BX, 8(SP)
+ MOVL DX, SI
+ SUBL 12(SP), SI
+ SHRL $0x04, SI
+ LEAL 4(DX)(SI*1), SI
+ CMPL SI, 8(SP)
JAE emit_remainder_calcBlockSizeSmall
- MOVQ (DX)(CX*1), SI
- MOVL BX, 20(SP)
- MOVQ $0x9e3779b1, R8
- MOVQ SI, R9
- MOVQ SI, R10
- SHRQ $0x08, R10
- SHLQ $0x20, R9
- IMULQ R8, R9
- SHRQ $0x37, R9
+ MOVQ (BX)(DX*1), DI
+ MOVL SI, 20(SP)
+ MOVQ $0x9e3779b1, R9
+ MOVQ DI, R10
+ MOVQ DI, R11
+ SHRQ $0x08, R11
SHLQ $0x20, R10
- IMULQ R8, R10
+ IMULQ R9, R10
SHRQ $0x37, R10
- MOVL 24(SP)(R9*4), BX
- MOVL 24(SP)(R10*4), DI
- MOVL CX, 24(SP)(R9*4)
- LEAL 1(CX), R9
- MOVL R9, 24(SP)(R10*4)
- MOVQ SI, R9
- SHRQ $0x10, R9
- SHLQ $0x20, R9
- IMULQ R8, R9
- SHRQ $0x37, R9
- MOVL CX, R8
- SUBL 16(SP), R8
- MOVL 1(DX)(R8*1), R10
- MOVQ SI, R8
- SHRQ $0x08, R8
- CMPL R8, R10
+ SHLQ $0x20, R11
+ IMULQ R9, R11
+ SHRQ $0x37, R11
+ MOVL (AX)(R10*4), SI
+ MOVL (AX)(R11*4), R8
+ MOVL DX, (AX)(R10*4)
+ LEAL 1(DX), R10
+ MOVL R10, (AX)(R11*4)
+ MOVQ DI, R10
+ SHRQ $0x10, R10
+ SHLQ $0x20, R10
+ IMULQ R9, R10
+ SHRQ $0x37, R10
+ MOVL DX, R9
+ SUBL 16(SP), R9
+ MOVL 1(BX)(R9*1), R11
+ MOVQ DI, R9
+ SHRQ $0x08, R9
+ CMPL R9, R11
JNE no_repeat_found_calcBlockSizeSmall
- LEAL 1(CX), SI
- MOVL 12(SP), BX
- MOVL SI, DI
- SUBL 16(SP), DI
+ LEAL 1(DX), DI
+ MOVL 12(SP), SI
+ MOVL DI, R8
+ SUBL 16(SP), R8
JZ repeat_extend_back_end_calcBlockSizeSmall
repeat_extend_back_loop_calcBlockSizeSmall:
- CMPL SI, BX
+ CMPL DI, SI
JBE repeat_extend_back_end_calcBlockSizeSmall
- MOVB -1(DX)(DI*1), R8
- MOVB -1(DX)(SI*1), R9
- CMPB R8, R9
+ MOVB -1(BX)(R8*1), R9
+ MOVB -1(BX)(DI*1), R10
+ CMPB R9, R10
JNE repeat_extend_back_end_calcBlockSizeSmall
- LEAL -1(SI), SI
- DECL DI
+ LEAL -1(DI), DI
+ DECL R8
JNZ repeat_extend_back_loop_calcBlockSizeSmall
repeat_extend_back_end_calcBlockSizeSmall:
- MOVL SI, BX
- SUBL 12(SP), BX
- LEAQ 3(AX)(BX*1), BX
- CMPQ BX, (SP)
+ MOVL DI, SI
+ SUBL 12(SP), SI
+ LEAQ 3(CX)(SI*1), SI
+ CMPQ SI, (SP)
JB repeat_dst_size_check_calcBlockSizeSmall
- MOVQ $0x00000000, ret+24(FP)
+ MOVQ $0x00000000, ret+32(FP)
RET
repeat_dst_size_check_calcBlockSizeSmall:
- MOVL 12(SP), BX
- CMPL BX, SI
+ MOVL 12(SP), SI
+ CMPL SI, DI
JEQ emit_literal_done_repeat_emit_calcBlockSizeSmall
- MOVL SI, DI
- MOVL SI, 12(SP)
- LEAQ (DX)(BX*1), R8
- SUBL BX, DI
- LEAL -1(DI), BX
- CMPL BX, $0x3c
+ MOVL DI, R8
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R9
+ SUBL SI, R8
+ LEAL -1(R8), SI
+ CMPL SI, $0x3c
JB one_byte_repeat_emit_calcBlockSizeSmall
- CMPL BX, $0x00000100
+ CMPL SI, $0x00000100
JB two_bytes_repeat_emit_calcBlockSizeSmall
JB three_bytes_repeat_emit_calcBlockSizeSmall
three_bytes_repeat_emit_calcBlockSizeSmall:
- ADDQ $0x03, AX
+ ADDQ $0x03, CX
JMP memmove_long_repeat_emit_calcBlockSizeSmall
two_bytes_repeat_emit_calcBlockSizeSmall:
- ADDQ $0x02, AX
- CMPL BX, $0x40
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
JB memmove_repeat_emit_calcBlockSizeSmall
JMP memmove_long_repeat_emit_calcBlockSizeSmall
one_byte_repeat_emit_calcBlockSizeSmall:
- ADDQ $0x01, AX
+ ADDQ $0x01, CX
memmove_repeat_emit_calcBlockSizeSmall:
- LEAQ (AX)(DI*1), AX
+ LEAQ (CX)(R8*1), CX
JMP emit_literal_done_repeat_emit_calcBlockSizeSmall
memmove_long_repeat_emit_calcBlockSizeSmall:
- LEAQ (AX)(DI*1), AX
+ LEAQ (CX)(R8*1), CX
emit_literal_done_repeat_emit_calcBlockSizeSmall:
- ADDL $0x05, CX
- MOVL CX, BX
- SUBL 16(SP), BX
- MOVQ src_len+8(FP), DI
- SUBL CX, DI
- LEAQ (DX)(CX*1), R8
- LEAQ (DX)(BX*1), BX
+ ADDL $0x05, DX
+ MOVL DX, SI
+ SUBL 16(SP), SI
+ MOVQ src_len+8(FP), R8
+ SUBL DX, R8
+ LEAQ (BX)(DX*1), R9
+ LEAQ (BX)(SI*1), SI
// matchLen
- XORL R10, R10
+ XORL R11, R11
matchlen_loopback_16_repeat_extend_calcBlockSizeSmall:
- CMPL DI, $0x10
+ CMPL R8, $0x10
JB matchlen_match8_repeat_extend_calcBlockSizeSmall
- MOVQ (R8)(R10*1), R9
- MOVQ 8(R8)(R10*1), R11
- XORQ (BX)(R10*1), R9
+ MOVQ (R9)(R11*1), R10
+ MOVQ 8(R9)(R11*1), R12
+ XORQ (SI)(R11*1), R10
JNZ matchlen_bsf_8_repeat_extend_calcBlockSizeSmall
- XORQ 8(BX)(R10*1), R11
+ XORQ 8(SI)(R11*1), R12
JNZ matchlen_bsf_16repeat_extend_calcBlockSizeSmall
- LEAL -16(DI), DI
- LEAL 16(R10), R10
+ LEAL -16(R8), R8
+ LEAL 16(R11), R11
JMP matchlen_loopback_16_repeat_extend_calcBlockSizeSmall
matchlen_bsf_16repeat_extend_calcBlockSizeSmall:
#ifdef GOAMD64_v3
- TZCNTQ R11, R11
+ TZCNTQ R12, R12
#else
- BSFQ R11, R11
+ BSFQ R12, R12
#endif
- SARQ $0x03, R11
- LEAL 8(R10)(R11*1), R10
+ SARQ $0x03, R12
+ LEAL 8(R11)(R12*1), R11
JMP repeat_extend_forward_end_calcBlockSizeSmall
matchlen_match8_repeat_extend_calcBlockSizeSmall:
- CMPL DI, $0x08
+ CMPL R8, $0x08
JB matchlen_match4_repeat_extend_calcBlockSizeSmall
- MOVQ (R8)(R10*1), R9
- XORQ (BX)(R10*1), R9
+ MOVQ (R9)(R11*1), R10
+ XORQ (SI)(R11*1), R10
JNZ matchlen_bsf_8_repeat_extend_calcBlockSizeSmall
- LEAL -8(DI), DI
- LEAL 8(R10), R10
+ LEAL -8(R8), R8
+ LEAL 8(R11), R11
JMP matchlen_match4_repeat_extend_calcBlockSizeSmall
matchlen_bsf_8_repeat_extend_calcBlockSizeSmall:
#ifdef GOAMD64_v3
- TZCNTQ R9, R9
+ TZCNTQ R10, R10
#else
- BSFQ R9, R9
+ BSFQ R10, R10
#endif
- SARQ $0x03, R9
- LEAL (R10)(R9*1), R10
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
JMP repeat_extend_forward_end_calcBlockSizeSmall
matchlen_match4_repeat_extend_calcBlockSizeSmall:
- CMPL DI, $0x04
+ CMPL R8, $0x04
JB matchlen_match2_repeat_extend_calcBlockSizeSmall
- MOVL (R8)(R10*1), R9
- CMPL (BX)(R10*1), R9
+ MOVL (R9)(R11*1), R10
+ CMPL (SI)(R11*1), R10
JNE matchlen_match2_repeat_extend_calcBlockSizeSmall
- LEAL -4(DI), DI
- LEAL 4(R10), R10
+ LEAL -4(R8), R8
+ LEAL 4(R11), R11
matchlen_match2_repeat_extend_calcBlockSizeSmall:
- CMPL DI, $0x01
+ CMPL R8, $0x01
JE matchlen_match1_repeat_extend_calcBlockSizeSmall
JB repeat_extend_forward_end_calcBlockSizeSmall
- MOVW (R8)(R10*1), R9
- CMPW (BX)(R10*1), R9
+ MOVW (R9)(R11*1), R10
+ CMPW (SI)(R11*1), R10
JNE matchlen_match1_repeat_extend_calcBlockSizeSmall
- LEAL 2(R10), R10
- SUBL $0x02, DI
+ LEAL 2(R11), R11
+ SUBL $0x02, R8
JZ repeat_extend_forward_end_calcBlockSizeSmall
matchlen_match1_repeat_extend_calcBlockSizeSmall:
- MOVB (R8)(R10*1), R9
- CMPB (BX)(R10*1), R9
+ MOVB (R9)(R11*1), R10
+ CMPB (SI)(R11*1), R10
JNE repeat_extend_forward_end_calcBlockSizeSmall
- LEAL 1(R10), R10
+ LEAL 1(R11), R11
repeat_extend_forward_end_calcBlockSizeSmall:
- ADDL R10, CX
- MOVL CX, BX
- SUBL SI, BX
- MOVL 16(SP), SI
+ ADDL R11, DX
+ MOVL DX, SI
+ SUBL DI, SI
+ MOVL 16(SP), DI
// emitCopy
two_byte_offset_repeat_as_copy_calcBlockSizeSmall:
- CMPL BX, $0x40
+ CMPL SI, $0x40
JBE two_byte_offset_short_repeat_as_copy_calcBlockSizeSmall
- LEAL -60(BX), BX
- ADDQ $0x03, AX
+ LEAL -60(SI), SI
+ ADDQ $0x03, CX
JMP two_byte_offset_repeat_as_copy_calcBlockSizeSmall
two_byte_offset_short_repeat_as_copy_calcBlockSizeSmall:
- MOVL BX, SI
- SHLL $0x02, SI
- CMPL BX, $0x0c
+ MOVL SI, DI
+ SHLL $0x02, DI
+ CMPL SI, $0x0c
JAE emit_copy_three_repeat_as_copy_calcBlockSizeSmall
- ADDQ $0x02, AX
+ ADDQ $0x02, CX
JMP repeat_end_emit_calcBlockSizeSmall
emit_copy_three_repeat_as_copy_calcBlockSizeSmall:
- ADDQ $0x03, AX
+ ADDQ $0x03, CX
repeat_end_emit_calcBlockSizeSmall:
- MOVL CX, 12(SP)
+ MOVL DX, 12(SP)
JMP search_loop_calcBlockSizeSmall
no_repeat_found_calcBlockSizeSmall:
- CMPL (DX)(BX*1), SI
+ CMPL (BX)(SI*1), DI
JEQ candidate_match_calcBlockSizeSmall
- SHRQ $0x08, SI
- MOVL 24(SP)(R9*4), BX
- LEAL 2(CX), R8
- CMPL (DX)(DI*1), SI
+ SHRQ $0x08, DI
+ MOVL (AX)(R10*4), SI
+ LEAL 2(DX), R9
+ CMPL (BX)(R8*1), DI
JEQ candidate2_match_calcBlockSizeSmall
- MOVL R8, 24(SP)(R9*4)
- SHRQ $0x08, SI
- CMPL (DX)(BX*1), SI
+ MOVL R9, (AX)(R10*4)
+ SHRQ $0x08, DI
+ CMPL (BX)(SI*1), DI
JEQ candidate3_match_calcBlockSizeSmall
- MOVL 20(SP), CX
+ MOVL 20(SP), DX
JMP search_loop_calcBlockSizeSmall
candidate3_match_calcBlockSizeSmall:
- ADDL $0x02, CX
+ ADDL $0x02, DX
JMP candidate_match_calcBlockSizeSmall
candidate2_match_calcBlockSizeSmall:
- MOVL R8, 24(SP)(R9*4)
- INCL CX
- MOVL DI, BX
+ MOVL R9, (AX)(R10*4)
+ INCL DX
+ MOVL R8, SI
candidate_match_calcBlockSizeSmall:
- MOVL 12(SP), SI
- TESTL BX, BX
+ MOVL 12(SP), DI
+ TESTL SI, SI
JZ match_extend_back_end_calcBlockSizeSmall
match_extend_back_loop_calcBlockSizeSmall:
- CMPL CX, SI
+ CMPL DX, DI
JBE match_extend_back_end_calcBlockSizeSmall
- MOVB -1(DX)(BX*1), DI
- MOVB -1(DX)(CX*1), R8
- CMPB DI, R8
+ MOVB -1(BX)(SI*1), R8
+ MOVB -1(BX)(DX*1), R9
+ CMPB R8, R9
JNE match_extend_back_end_calcBlockSizeSmall
- LEAL -1(CX), CX
- DECL BX
+ LEAL -1(DX), DX
+ DECL SI
JZ match_extend_back_end_calcBlockSizeSmall
JMP match_extend_back_loop_calcBlockSizeSmall
match_extend_back_end_calcBlockSizeSmall:
- MOVL CX, SI
- SUBL 12(SP), SI
- LEAQ 3(AX)(SI*1), SI
- CMPQ SI, (SP)
+ MOVL DX, DI
+ SUBL 12(SP), DI
+ LEAQ 3(CX)(DI*1), DI
+ CMPQ DI, (SP)
JB match_dst_size_check_calcBlockSizeSmall
- MOVQ $0x00000000, ret+24(FP)
+ MOVQ $0x00000000, ret+32(FP)
RET
match_dst_size_check_calcBlockSizeSmall:
- MOVL CX, SI
- MOVL 12(SP), DI
- CMPL DI, SI
+ MOVL DX, DI
+ MOVL 12(SP), R8
+ CMPL R8, DI
JEQ emit_literal_done_match_emit_calcBlockSizeSmall
- MOVL SI, R8
- MOVL SI, 12(SP)
- LEAQ (DX)(DI*1), SI
- SUBL DI, R8
- LEAL -1(R8), SI
- CMPL SI, $0x3c
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(R8*1), DI
+ SUBL R8, R9
+ LEAL -1(R9), DI
+ CMPL DI, $0x3c
JB one_byte_match_emit_calcBlockSizeSmall
- CMPL SI, $0x00000100
+ CMPL DI, $0x00000100
JB two_bytes_match_emit_calcBlockSizeSmall
JB three_bytes_match_emit_calcBlockSizeSmall
three_bytes_match_emit_calcBlockSizeSmall:
- ADDQ $0x03, AX
+ ADDQ $0x03, CX
JMP memmove_long_match_emit_calcBlockSizeSmall
two_bytes_match_emit_calcBlockSizeSmall:
- ADDQ $0x02, AX
- CMPL SI, $0x40
+ ADDQ $0x02, CX
+ CMPL DI, $0x40
JB memmove_match_emit_calcBlockSizeSmall
JMP memmove_long_match_emit_calcBlockSizeSmall
one_byte_match_emit_calcBlockSizeSmall:
- ADDQ $0x01, AX
+ ADDQ $0x01, CX
memmove_match_emit_calcBlockSizeSmall:
- LEAQ (AX)(R8*1), AX
+ LEAQ (CX)(R9*1), CX
JMP emit_literal_done_match_emit_calcBlockSizeSmall
memmove_long_match_emit_calcBlockSizeSmall:
- LEAQ (AX)(R8*1), AX
+ LEAQ (CX)(R9*1), CX
emit_literal_done_match_emit_calcBlockSizeSmall:
match_nolit_loop_calcBlockSizeSmall:
- MOVL CX, SI
- SUBL BX, SI
- MOVL SI, 16(SP)
- ADDL $0x04, CX
- ADDL $0x04, BX
- MOVQ src_len+8(FP), SI
- SUBL CX, SI
- LEAQ (DX)(CX*1), DI
- LEAQ (DX)(BX*1), BX
+ MOVL DX, DI
+ SUBL SI, DI
+ MOVL DI, 16(SP)
+ ADDL $0x04, DX
+ ADDL $0x04, SI
+ MOVQ src_len+8(FP), DI
+ SUBL DX, DI
+ LEAQ (BX)(DX*1), R8
+ LEAQ (BX)(SI*1), SI
// matchLen
- XORL R9, R9
+ XORL R10, R10
matchlen_loopback_16_match_nolit_calcBlockSizeSmall:
- CMPL SI, $0x10
+ CMPL DI, $0x10
JB matchlen_match8_match_nolit_calcBlockSizeSmall
- MOVQ (DI)(R9*1), R8
- MOVQ 8(DI)(R9*1), R10
- XORQ (BX)(R9*1), R8
+ MOVQ (R8)(R10*1), R9
+ MOVQ 8(R8)(R10*1), R11
+ XORQ (SI)(R10*1), R9
JNZ matchlen_bsf_8_match_nolit_calcBlockSizeSmall
- XORQ 8(BX)(R9*1), R10
+ XORQ 8(SI)(R10*1), R11
JNZ matchlen_bsf_16match_nolit_calcBlockSizeSmall
- LEAL -16(SI), SI
- LEAL 16(R9), R9
+ LEAL -16(DI), DI
+ LEAL 16(R10), R10
JMP matchlen_loopback_16_match_nolit_calcBlockSizeSmall
matchlen_bsf_16match_nolit_calcBlockSizeSmall:
#ifdef GOAMD64_v3
- TZCNTQ R10, R10
+ TZCNTQ R11, R11
#else
- BSFQ R10, R10
+ BSFQ R11, R11
#endif
- SARQ $0x03, R10
- LEAL 8(R9)(R10*1), R9
+ SARQ $0x03, R11
+ LEAL 8(R10)(R11*1), R10
JMP match_nolit_end_calcBlockSizeSmall
matchlen_match8_match_nolit_calcBlockSizeSmall:
- CMPL SI, $0x08
+ CMPL DI, $0x08
JB matchlen_match4_match_nolit_calcBlockSizeSmall
- MOVQ (DI)(R9*1), R8
- XORQ (BX)(R9*1), R8
+ MOVQ (R8)(R10*1), R9
+ XORQ (SI)(R10*1), R9
JNZ matchlen_bsf_8_match_nolit_calcBlockSizeSmall
- LEAL -8(SI), SI
- LEAL 8(R9), R9
+ LEAL -8(DI), DI
+ LEAL 8(R10), R10
JMP matchlen_match4_match_nolit_calcBlockSizeSmall
matchlen_bsf_8_match_nolit_calcBlockSizeSmall:
#ifdef GOAMD64_v3
- TZCNTQ R8, R8
+ TZCNTQ R9, R9
#else
- BSFQ R8, R8
+ BSFQ R9, R9
#endif
- SARQ $0x03, R8
- LEAL (R9)(R8*1), R9
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
JMP match_nolit_end_calcBlockSizeSmall
matchlen_match4_match_nolit_calcBlockSizeSmall:
- CMPL SI, $0x04
+ CMPL DI, $0x04
JB matchlen_match2_match_nolit_calcBlockSizeSmall
- MOVL (DI)(R9*1), R8
- CMPL (BX)(R9*1), R8
+ MOVL (R8)(R10*1), R9
+ CMPL (SI)(R10*1), R9
JNE matchlen_match2_match_nolit_calcBlockSizeSmall
- LEAL -4(SI), SI
- LEAL 4(R9), R9
+ LEAL -4(DI), DI
+ LEAL 4(R10), R10
matchlen_match2_match_nolit_calcBlockSizeSmall:
- CMPL SI, $0x01
+ CMPL DI, $0x01
JE matchlen_match1_match_nolit_calcBlockSizeSmall
JB match_nolit_end_calcBlockSizeSmall
- MOVW (DI)(R9*1), R8
- CMPW (BX)(R9*1), R8
+ MOVW (R8)(R10*1), R9
+ CMPW (SI)(R10*1), R9
JNE matchlen_match1_match_nolit_calcBlockSizeSmall
- LEAL 2(R9), R9
- SUBL $0x02, SI
+ LEAL 2(R10), R10
+ SUBL $0x02, DI
JZ match_nolit_end_calcBlockSizeSmall
matchlen_match1_match_nolit_calcBlockSizeSmall:
- MOVB (DI)(R9*1), R8
- CMPB (BX)(R9*1), R8
+ MOVB (R8)(R10*1), R9
+ CMPB (SI)(R10*1), R9
JNE match_nolit_end_calcBlockSizeSmall
- LEAL 1(R9), R9
+ LEAL 1(R10), R10
match_nolit_end_calcBlockSizeSmall:
- ADDL R9, CX
- MOVL 16(SP), BX
- ADDL $0x04, R9
- MOVL CX, 12(SP)
+ ADDL R10, DX
+ MOVL 16(SP), SI
+ ADDL $0x04, R10
+ MOVL DX, 12(SP)
// emitCopy
two_byte_offset_match_nolit_calcBlockSizeSmall:
- CMPL R9, $0x40
+ CMPL R10, $0x40
JBE two_byte_offset_short_match_nolit_calcBlockSizeSmall
- LEAL -60(R9), R9
- ADDQ $0x03, AX
+ LEAL -60(R10), R10
+ ADDQ $0x03, CX
JMP two_byte_offset_match_nolit_calcBlockSizeSmall
two_byte_offset_short_match_nolit_calcBlockSizeSmall:
- MOVL R9, BX
- SHLL $0x02, BX
- CMPL R9, $0x0c
+ MOVL R10, SI
+ SHLL $0x02, SI
+ CMPL R10, $0x0c
JAE emit_copy_three_match_nolit_calcBlockSizeSmall
- ADDQ $0x02, AX
+ ADDQ $0x02, CX
JMP match_nolit_emitcopy_end_calcBlockSizeSmall
emit_copy_three_match_nolit_calcBlockSizeSmall:
- ADDQ $0x03, AX
+ ADDQ $0x03, CX
match_nolit_emitcopy_end_calcBlockSizeSmall:
- CMPL CX, 8(SP)
+ CMPL DX, 8(SP)
JAE emit_remainder_calcBlockSizeSmall
- MOVQ -2(DX)(CX*1), SI
- CMPQ AX, (SP)
+ MOVQ -2(BX)(DX*1), DI
+ CMPQ CX, (SP)
JB match_nolit_dst_ok_calcBlockSizeSmall
- MOVQ $0x00000000, ret+24(FP)
+ MOVQ $0x00000000, ret+32(FP)
RET
match_nolit_dst_ok_calcBlockSizeSmall:
- MOVQ $0x9e3779b1, R8
- MOVQ SI, DI
- SHRQ $0x10, SI
- MOVQ SI, BX
- SHLQ $0x20, DI
- IMULQ R8, DI
- SHRQ $0x37, DI
- SHLQ $0x20, BX
- IMULQ R8, BX
- SHRQ $0x37, BX
- LEAL -2(CX), R8
- LEAQ 24(SP)(BX*4), R9
- MOVL (R9), BX
- MOVL R8, 24(SP)(DI*4)
- MOVL CX, (R9)
- CMPL (DX)(BX*1), SI
+ MOVQ $0x9e3779b1, R9
+ MOVQ DI, R8
+ SHRQ $0x10, DI
+ MOVQ DI, SI
+ SHLQ $0x20, R8
+ IMULQ R9, R8
+ SHRQ $0x37, R8
+ SHLQ $0x20, SI
+ IMULQ R9, SI
+ SHRQ $0x37, SI
+ LEAL -2(DX), R9
+ LEAQ (AX)(SI*4), R10
+ MOVL (R10), SI
+ MOVL R9, (AX)(R8*4)
+ MOVL DX, (R10)
+ CMPL (BX)(SI*1), DI
JEQ match_nolit_loop_calcBlockSizeSmall
- INCL CX
+ INCL DX
JMP search_loop_calcBlockSizeSmall
emit_remainder_calcBlockSizeSmall:
- MOVQ src_len+8(FP), CX
- SUBL 12(SP), CX
- LEAQ 3(AX)(CX*1), CX
- CMPQ CX, (SP)
+ MOVQ src_len+8(FP), AX
+ SUBL 12(SP), AX
+ LEAQ 3(CX)(AX*1), AX
+ CMPQ AX, (SP)
JB emit_remainder_ok_calcBlockSizeSmall
- MOVQ $0x00000000, ret+24(FP)
+ MOVQ $0x00000000, ret+32(FP)
RET
emit_remainder_ok_calcBlockSizeSmall:
- MOVQ src_len+8(FP), CX
- MOVL 12(SP), BX
- CMPL BX, CX
+ MOVQ src_len+8(FP), AX
+ MOVL 12(SP), DX
+ CMPL DX, AX
JEQ emit_literal_done_emit_remainder_calcBlockSizeSmall
- MOVL CX, SI
- MOVL CX, 12(SP)
- LEAQ (DX)(BX*1), CX
- SUBL BX, SI
- LEAL -1(SI), CX
- CMPL CX, $0x3c
+ MOVL AX, SI
+ MOVL AX, 12(SP)
+ LEAQ (BX)(DX*1), AX
+ SUBL DX, SI
+ LEAL -1(SI), AX
+ CMPL AX, $0x3c
JB one_byte_emit_remainder_calcBlockSizeSmall
- CMPL CX, $0x00000100
+ CMPL AX, $0x00000100
JB two_bytes_emit_remainder_calcBlockSizeSmall
JB three_bytes_emit_remainder_calcBlockSizeSmall
three_bytes_emit_remainder_calcBlockSizeSmall:
- ADDQ $0x03, AX
+ ADDQ $0x03, CX
JMP memmove_long_emit_remainder_calcBlockSizeSmall
two_bytes_emit_remainder_calcBlockSizeSmall:
- ADDQ $0x02, AX
- CMPL CX, $0x40
+ ADDQ $0x02, CX
+ CMPL AX, $0x40
JB memmove_emit_remainder_calcBlockSizeSmall
JMP memmove_long_emit_remainder_calcBlockSizeSmall
one_byte_emit_remainder_calcBlockSizeSmall:
- ADDQ $0x01, AX
+ ADDQ $0x01, CX
memmove_emit_remainder_calcBlockSizeSmall:
- LEAQ (AX)(SI*1), AX
+ LEAQ (CX)(SI*1), AX
+ MOVQ AX, CX
JMP emit_literal_done_emit_remainder_calcBlockSizeSmall
memmove_long_emit_remainder_calcBlockSizeSmall:
- LEAQ (AX)(SI*1), AX
+ LEAQ (CX)(SI*1), AX
+ MOVQ AX, CX
emit_literal_done_emit_remainder_calcBlockSizeSmall:
- MOVQ AX, ret+24(FP)
+ MOVQ CX, ret+32(FP)
RET
// func emitLiteral(dst []byte, lit []byte) int
@@ -19783,7 +19809,7 @@ TEXT ·cvtLZ4BlockAsm(SB), NOSPLIT, $0-64
MOVQ src_base+24(FP), DX
MOVQ src_len+32(FP), BX
LEAQ (DX)(BX*1), BX
- LEAQ -10(AX)(CX*1), CX
+ LEAQ -8(AX)(CX*1), CX
XORQ DI, DI
lz4_s2_loop:
@@ -20266,7 +20292,7 @@ TEXT ·cvtLZ4sBlockAsm(SB), NOSPLIT, $0-64
MOVQ src_base+24(FP), DX
MOVQ src_len+32(FP), BX
LEAQ (DX)(BX*1), BX
- LEAQ -10(AX)(CX*1), CX
+ LEAQ -8(AX)(CX*1), CX
XORQ DI, DI
lz4s_s2_loop:
@@ -20751,7 +20777,7 @@ TEXT ·cvtLZ4BlockSnappyAsm(SB), NOSPLIT, $0-64
MOVQ src_base+24(FP), DX
MOVQ src_len+32(FP), BX
LEAQ (DX)(BX*1), BX
- LEAQ -10(AX)(CX*1), CX
+ LEAQ -8(AX)(CX*1), CX
lz4_snappy_loop:
CMPQ DX, BX
@@ -21017,7 +21043,7 @@ TEXT ·cvtLZ4sBlockSnappyAsm(SB), NOSPLIT, $0-64
MOVQ src_base+24(FP), DX
MOVQ src_len+32(FP), BX
LEAQ (DX)(BX*1), BX
- LEAQ -10(AX)(CX*1), CX
+ LEAQ -8(AX)(CX*1), CX
lz4s_snappy_loop:
CMPQ DX, BX
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go
index a79c4a527c62c..8f8223cd3a678 100644
--- a/vendor/github.com/klauspost/compress/zstd/encoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/encoder.go
@@ -6,6 +6,7 @@ package zstd
import (
"crypto/rand"
+ "errors"
"fmt"
"io"
"math"
@@ -149,6 +150,9 @@ func (e *Encoder) ResetContentSize(w io.Writer, size int64) {
// and write CRC if requested.
func (e *Encoder) Write(p []byte) (n int, err error) {
s := &e.state
+ if s.eofWritten {
+ return 0, ErrEncoderClosed
+ }
for len(p) > 0 {
if len(p)+len(s.filling) < e.o.blockSize {
if e.o.crc {
@@ -288,6 +292,9 @@ func (e *Encoder) nextBlock(final bool) error {
s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current
s.nInput += int64(len(s.current))
s.wg.Add(1)
+ if final {
+ s.eofWritten = true
+ }
go func(src []byte) {
if debugEncoder {
println("Adding block,", len(src), "bytes, final:", final)
@@ -303,9 +310,6 @@ func (e *Encoder) nextBlock(final bool) error {
blk := enc.Block()
enc.Encode(blk, src)
blk.last = final
- if final {
- s.eofWritten = true
- }
// Wait for pending writes.
s.wWg.Wait()
if s.writeErr != nil {
@@ -401,12 +405,20 @@ func (e *Encoder) Flush() error {
if len(s.filling) > 0 {
err := e.nextBlock(false)
if err != nil {
+ // Ignore Flush after Close.
+ if errors.Is(s.err, ErrEncoderClosed) {
+ return nil
+ }
return err
}
}
s.wg.Wait()
s.wWg.Wait()
if s.err != nil {
+ // Ignore Flush after Close.
+ if errors.Is(s.err, ErrEncoderClosed) {
+ return nil
+ }
return s.err
}
return s.writeErr
@@ -422,6 +434,9 @@ func (e *Encoder) Close() error {
}
err := e.nextBlock(true)
if err != nil {
+ if errors.Is(s.err, ErrEncoderClosed) {
+ return nil
+ }
return err
}
if s.frameContentSize > 0 {
@@ -459,6 +474,11 @@ func (e *Encoder) Close() error {
}
_, s.err = s.w.Write(frame)
}
+ if s.err == nil {
+ s.err = ErrEncoderClosed
+ return nil
+ }
+
return s.err
}
diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go
index 4be7cc73671b3..066bef2a4f0e9 100644
--- a/vendor/github.com/klauspost/compress/zstd/zstd.go
+++ b/vendor/github.com/klauspost/compress/zstd/zstd.go
@@ -88,6 +88,10 @@ var (
// Close has been called.
ErrDecoderClosed = errors.New("decoder used after Close")
+ // ErrEncoderClosed will be returned if the Encoder was used after
+ // Close has been called.
+ ErrEncoderClosed = errors.New("encoder used after Close")
+
// ErrDecoderNilInput is returned when a nil Reader was provided
// and an operation other than Reset/DecodeAll/Close was attempted.
ErrDecoderNilInput = errors.New("nil input provided as reader")
diff --git a/vendor/github.com/minio/minio-go/v7/Makefile b/vendor/github.com/minio/minio-go/v7/Makefile
index 68444aa681d42..9e4ddc4c88ad4 100644
--- a/vendor/github.com/minio/minio-go/v7/Makefile
+++ b/vendor/github.com/minio/minio-go/v7/Makefile
@@ -32,6 +32,10 @@ functional-test:
@GO111MODULE=on go build -race functional_tests.go
@SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minioadmin SECRET_KEY=minioadmin ENABLE_HTTPS=1 MINT_MODE=full ./functional_tests
+functional-test-notls:
+ @GO111MODULE=on go build -race functional_tests.go
+ @SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minioadmin SECRET_KEY=minioadmin ENABLE_HTTPS=0 MINT_MODE=full ./functional_tests
+
clean:
@echo "Cleaning up all the generated files"
@find . -name '*.test' | xargs rm -fv
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object.go b/vendor/github.com/minio/minio-go/v7/api-put-object.go
index d769648a7ef34..10131a5be630d 100644
--- a/vendor/github.com/minio/minio-go/v7/api-put-object.go
+++ b/vendor/github.com/minio/minio-go/v7/api-put-object.go
@@ -45,6 +45,8 @@ const (
ReplicationStatusFailed ReplicationStatus = "FAILED"
// ReplicationStatusReplica indicates object is a replica of a source
ReplicationStatusReplica ReplicationStatus = "REPLICA"
+ // ReplicationStatusReplicaEdge indicates object is a replica of an edge source
+ ReplicationStatusReplicaEdge ReplicationStatus = "REPLICA-EDGE"
)
// Empty returns true if no replication status set.
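
The new `REPLICA-EDGE` status joins the existing constants, so callers that branch on whether an object is a replica should accept both replica variants. A hedged sketch using only the constants visible above:

```go
package main

import (
	"fmt"

	minio "github.com/minio/minio-go/v7"
)

// isReplica reports whether a replication status marks the object as a
// replica, including the new edge-replica variant.
func isReplica(s minio.ReplicationStatus) bool {
	return s == minio.ReplicationStatusReplica || s == minio.ReplicationStatusReplicaEdge
}

func main() {
	fmt.Println(isReplica(minio.ReplicationStatusReplicaEdge)) // true
	fmt.Println(isReplica(minio.ReplicationStatusFailed))      // false
}
```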
diff --git a/vendor/github.com/minio/minio-go/v7/api.go b/vendor/github.com/minio/minio-go/v7/api.go
index 1d6b6650250a3..90e9b63f5b14a 100644
--- a/vendor/github.com/minio/minio-go/v7/api.go
+++ b/vendor/github.com/minio/minio-go/v7/api.go
@@ -128,7 +128,7 @@ type Options struct {
// Global constants.
const (
libraryName = "minio-go"
- libraryVersion = "v7.0.77"
+ libraryVersion = "v7.0.78"
)
// User Agent should always following the below style.
diff --git a/vendor/github.com/minio/minio-go/v7/functional_tests.go b/vendor/github.com/minio/minio-go/v7/functional_tests.go
index 780dc899795c8..c0180b36b7015 100644
--- a/vendor/github.com/minio/minio-go/v7/functional_tests.go
+++ b/vendor/github.com/minio/minio-go/v7/functional_tests.go
@@ -3565,16 +3565,10 @@ func validateObjectAttributeRequest(OA *minio.ObjectAttributes, opts *minio.Obje
}
}
- hasFullObjectChecksum := true
- if OA.Checksum.ChecksumCRC32 == "" {
- if OA.Checksum.ChecksumCRC32C == "" {
- if OA.Checksum.ChecksumSHA1 == "" {
- if OA.Checksum.ChecksumSHA256 == "" {
- hasFullObjectChecksum = false
- }
- }
- }
- }
+ hasFullObjectChecksum := (OA.Checksum.ChecksumCRC32 != "" ||
+ OA.Checksum.ChecksumCRC32C != "" ||
+ OA.Checksum.ChecksumSHA1 != "" ||
+ OA.Checksum.ChecksumSHA256 != "")
if test.HasFullChecksum {
if !hasFullObjectChecksum {
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go
index 596d951525dd2..f1c76c78ea0a3 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go
@@ -25,6 +25,7 @@ import (
"io"
"net/http"
"net/url"
+ "os"
"strconv"
"strings"
"time"
@@ -85,29 +86,59 @@ type STSWebIdentity struct {
// assuming.
RoleARN string
+ // Policy is the policy to which the credentials should be limited.
+ Policy string
+
// roleSessionName is the identifier for the assumed role session.
roleSessionName string
}
// NewSTSWebIdentity returns a pointer to a new
// Credentials object wrapping the STSWebIdentity.
-func NewSTSWebIdentity(stsEndpoint string, getWebIDTokenExpiry func() (*WebIdentityToken, error)) (*Credentials, error) {
+func NewSTSWebIdentity(stsEndpoint string, getWebIDTokenExpiry func() (*WebIdentityToken, error), opts ...func(*STSWebIdentity)) (*Credentials, error) {
if stsEndpoint == "" {
return nil, errors.New("STS endpoint cannot be empty")
}
if getWebIDTokenExpiry == nil {
return nil, errors.New("Web ID token and expiry retrieval function should be defined")
}
- return New(&STSWebIdentity{
+ i := &STSWebIdentity{
Client: &http.Client{
Transport: http.DefaultTransport,
},
STSEndpoint: stsEndpoint,
GetWebIDTokenExpiry: getWebIDTokenExpiry,
- }), nil
+ }
+ for _, o := range opts {
+ o(i)
+ }
+ return New(i), nil
+}
+
+// NewKubernetesIdentity returns a pointer to a new
+// Credentials object using the Kubernetes service account token.
+func NewKubernetesIdentity(stsEndpoint string, opts ...func(*STSWebIdentity)) (*Credentials, error) {
+ return NewSTSWebIdentity(stsEndpoint, func() (*WebIdentityToken, error) {
+ token, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/token")
+ if err != nil {
+ return nil, err
+ }
+
+ return &WebIdentityToken{
+ Token: string(token),
+ }, nil
+ }, opts...)
+}
+
+// WithPolicy returns an option enforcing that the returned credentials
+// will be scoped down to the specified policy.
+func WithPolicy(policy string) func(*STSWebIdentity) {
+ return func(i *STSWebIdentity) {
+ i.Policy = policy
+ }
}
-func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSessionName string,
+func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSessionName string, policy string,
getWebIDTokenExpiry func() (*WebIdentityToken, error),
) (AssumeRoleWithWebIdentityResponse, error) {
idToken, err := getWebIDTokenExpiry()
@@ -133,6 +164,9 @@ func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSession
if idToken.Expiry > 0 {
v.Set("DurationSeconds", fmt.Sprintf("%d", idToken.Expiry))
}
+ if policy != "" {
+ v.Set("Policy", policy)
+ }
v.Set("Version", STSVersion)
u, err := url.Parse(endpoint)
@@ -183,7 +217,7 @@ func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSession
// Retrieve retrieves credentials from the MinIO service.
// Error will be returned if the request fails.
func (m *STSWebIdentity) Retrieve() (Value, error) {
- a, err := getWebIdentityCredentials(m.Client, m.STSEndpoint, m.RoleARN, m.roleSessionName, m.GetWebIDTokenExpiry)
+ a, err := getWebIdentityCredentials(m.Client, m.STSEndpoint, m.RoleARN, m.roleSessionName, m.Policy, m.GetWebIDTokenExpiry)
if err != nil {
return Value{}, err
}
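
With the new functional options, a MinIO client can authenticate via the pod's Kubernetes service-account token and scope the resulting credentials down with a session policy. A hedged usage sketch; the endpoint, bucket ARN, and policy JSON are placeholders:

```go
package main

import (
	"log"

	minio "github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Placeholder session policy restricting the credentials to reads.
	policy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":["s3:GetObject"],"Resource":["arn:aws:s3:::mybucket/*"]}]}`

	// Reads /var/run/secrets/kubernetes.io/serviceaccount/token internally.
	creds, err := credentials.NewKubernetesIdentity("https://sts.example.com", credentials.WithPolicy(policy))
	if err != nil {
		log.Fatal(err)
	}

	client, err := minio.New("minio.example.com", &minio.Options{Creds: creds, Secure: true})
	if err != nil {
		log.Fatal(err)
	}
	_ = client // use the client as usual
}
```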
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go b/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go
index 7a84a6f349edb..33465c6326de7 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go
@@ -69,7 +69,7 @@ const (
// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-restrictions
// borrowed from this article and also testing various ASCII characters following regex
// is supported by AWS S3 for both tags and values.
-var validTagKeyValue = regexp.MustCompile(`^[a-zA-Z0-9-+\-._:/@ ]+$`)
+var validTagKeyValue = regexp.MustCompile(`^[a-zA-Z0-9-+\-._:/@ =]+$`)
func checkKey(key string) error {
if len(key) == 0 {
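
The widened character class means `=` is now accepted in tag keys and values. A quick stdlib check contrasting the old and new patterns:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	oldPattern := regexp.MustCompile(`^[a-zA-Z0-9-+\-._:/@ ]+$`)
	newPattern := regexp.MustCompile(`^[a-zA-Z0-9-+\-._:/@ =]+$`)

	v := "stage=prod"
	fmt.Println(oldPattern.MatchString(v)) // false: '=' was rejected
	fmt.Println(newPattern.MatchString(v)) // true: '=' is now allowed
}
```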
diff --git a/vendor/github.com/pkg/xattr/.gitignore b/vendor/github.com/pkg/xattr/.gitignore
new file mode 100644
index 0000000000000..d8b32652e5a92
--- /dev/null
+++ b/vendor/github.com/pkg/xattr/.gitignore
@@ -0,0 +1,26 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+.DS_Store
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+
+*.swp
diff --git a/vendor/github.com/pkg/xattr/LICENSE b/vendor/github.com/pkg/xattr/LICENSE
new file mode 100644
index 0000000000000..99d2e9dc8ff27
--- /dev/null
+++ b/vendor/github.com/pkg/xattr/LICENSE
@@ -0,0 +1,25 @@
+Copyright (c) 2012 Dave Cheney. All rights reserved.
+Copyright (c) 2014 Kuba Podgórski. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/pkg/xattr/README.md b/vendor/github.com/pkg/xattr/README.md
new file mode 100644
index 0000000000000..0662c0208c572
--- /dev/null
+++ b/vendor/github.com/pkg/xattr/README.md
@@ -0,0 +1,45 @@
+[](http://godoc.org/github.com/pkg/xattr)
+[](https://goreportcard.com/report/github.com/pkg/xattr)
+[](https://github.com/pkg/xattr/actions?query=workflow%3Abuild)
+[](https://codecov.io/gh/pkg/xattr)
+
+xattr
+=====
+Extended attribute support for Go (linux + darwin + freebsd + netbsd + solaris).
+
+"Extended attributes are name:value pairs associated permanently with files and directories, similar to the environment strings associated with a process. An attribute may be defined or undefined. If it is defined, its value may be empty or non-empty." [See more...](https://en.wikipedia.org/wiki/Extended_file_attributes)
+
+`SetWithFlags` additionally allows passing system flags that are forwarded to the underlying calls. FreeBSD and NetBSD do not support this and the parameter will be ignored.
+
+The `L` variants of all functions (`LGet/LSet/...`) are identical to `Get/Set/...` except that they
+do not reference a symlink that appears at the end of a path. See
+[GoDoc](http://godoc.org/github.com/pkg/xattr) for details.
+
+### Example
+```go
+ const path = "/tmp/myfile"
+ const prefix = "user."
+
+ if err := xattr.Set(path, prefix+"test", []byte("test-attr-value")); err != nil {
+ log.Fatal(err)
+ }
+
+ var list []string
+ if list, err = xattr.List(path); err != nil {
+ log.Fatal(err)
+ }
+
+ var data []byte
+ if data, err = xattr.Get(path, prefix+"test"); err != nil {
+ log.Fatal(err)
+ }
+
+ if err = xattr.Remove(path, prefix+"test"); err != nil {
+ log.Fatal(err)
+ }
+
+ // One can also specify the flags parameter to be passed to the OS.
+ if err := xattr.SetWithFlags(path, prefix+"test", []byte("test-attr-value"), xattr.XATTR_CREATE); err != nil {
+ log.Fatal(err)
+ }
+```
diff --git a/vendor/github.com/pkg/xattr/xattr.go b/vendor/github.com/pkg/xattr/xattr.go
new file mode 100644
index 0000000000000..e34e274d51373
--- /dev/null
+++ b/vendor/github.com/pkg/xattr/xattr.go
@@ -0,0 +1,258 @@
+/*
+Package xattr provides support for extended attributes on linux, darwin, freebsd, netbsd and solaris.
+Extended attributes are name:value pairs associated permanently with files and directories,
+similar to the environment strings associated with a process.
+An attribute may be defined or undefined. If it is defined, its value may be empty or non-empty.
+You can find more details here: https://en.wikipedia.org/wiki/Extended_file_attributes .
+
+All functions are provided in triples: Get/LGet/FGet, Set/LSet/FSet etc. The "L"
+variant will not follow a symlink at the end of the path, and "F" variant accepts
+a file descriptor instead of a path.
+
+Example for "L" variant, assuming path is "/symlink1/symlink2", where both components are
+symlinks:
+Get will follow "symlink1" and "symlink2" and operate on the target of
+"symlink2". LGet will follow "symlink1" but operate directly on "symlink2".
+*/
+package xattr
+
+import (
+ "os"
+ "syscall"
+)
+
+// Error records an error and the operation, file path and attribute that caused it.
+type Error struct {
+ Op string
+ Path string
+ Name string
+ Err error
+}
+
+func (e *Error) Unwrap() error { return e.Err }
+
+func (e *Error) Error() (errstr string) {
+ if e.Op != "" {
+ errstr += e.Op
+ }
+ if e.Path != "" {
+ if errstr != "" {
+ errstr += " "
+ }
+ errstr += e.Path
+ }
+ if e.Name != "" {
+ if errstr != "" {
+ errstr += " "
+ }
+ errstr += e.Name
+ }
+ if e.Err != nil {
+ if errstr != "" {
+ errstr += ": "
+ }
+ errstr += e.Err.Error()
+ }
+ return
+}
+
+// Get retrieves extended attribute data associated with path. It will follow
+// all symlinks along the path.
+func Get(path, name string) ([]byte, error) {
+ return get(path, name, func(name string, data []byte) (int, error) {
+ return getxattr(path, name, data)
+ })
+}
+
+// LGet is like Get but does not follow a symlink at the end of the path.
+func LGet(path, name string) ([]byte, error) {
+ return get(path, name, func(name string, data []byte) (int, error) {
+ return lgetxattr(path, name, data)
+ })
+}
+
+// FGet is like Get but accepts an os.File instead of a file path.
+func FGet(f *os.File, name string) ([]byte, error) {
+ return get(f.Name(), name, func(name string, data []byte) (int, error) {
+ return fgetxattr(f, name, data)
+ })
+}
+
+type getxattrFunc func(name string, data []byte) (int, error)
+
+// get contains the buffer allocation logic used by both Get and LGet.
+func get(path string, name string, getxattrFunc getxattrFunc) ([]byte, error) {
+ const (
+ // Start with a 1 KB buffer for the xattr value
+ initialBufSize = 1024
+
+ // The theoretical maximum xattr value size on MacOS is 64 MB. On Linux it's
+ // much smaller: documented at 64 KB. However, at least on TrueNAS SCALE, a
+ // Debian-based Linux distro, it can be larger.
+ maxBufSize = 64 * 1024 * 1024
+
+ // Function name as reported in error messages
+ myname = "xattr.get"
+ )
+
+ size := initialBufSize
+ for {
+ data := make([]byte, size)
+ read, err := getxattrFunc(name, data)
+
+ // If the buffer was too small to fit the value, Linux and MacOS react
+ // differently:
+ // Linux: returns an ERANGE error and "-1" bytes. However, the TrueNAS
+ // SCALE distro sometimes returns E2BIG.
+ // MacOS: truncates the value and returns "size" bytes. If the value
+ // happens to be exactly as big as the buffer, we cannot know if it was
+ // truncated, and we retry with a bigger buffer. Contrary to documentation,
+ // MacOS never seems to return ERANGE!
+ // To keep the code simple, we always check both conditions, and sometimes
+ // double the buffer size without it being strictly necessary.
+ if err == syscall.ERANGE || err == syscall.E2BIG || read == size {
+ // The buffer was too small. Try again.
+ size <<= 1
+ if size >= maxBufSize {
+ return nil, &Error{myname, path, name, syscall.EOVERFLOW}
+ }
+ continue
+ }
+ if err != nil {
+ return nil, &Error{myname, path, name, err}
+ }
+ return data[:read], nil
+ }
+}
+
+// Set associates name and data together as an attribute of path.
+func Set(path, name string, data []byte) error {
+ if err := setxattr(path, name, data, 0); err != nil {
+ return &Error{"xattr.Set", path, name, err}
+ }
+ return nil
+}
+
+// LSet is like Set but does not follow a symlink at
+// the end of the path.
+func LSet(path, name string, data []byte) error {
+ if err := lsetxattr(path, name, data, 0); err != nil {
+ return &Error{"xattr.LSet", path, name, err}
+ }
+ return nil
+}
+
+// FSet is like Set but accepts an os.File instead of a file path.
+func FSet(f *os.File, name string, data []byte) error {
+ if err := fsetxattr(f, name, data, 0); err != nil {
+ return &Error{"xattr.FSet", f.Name(), name, err}
+ }
+ return nil
+}
+
+// SetWithFlags associates name and data together as an attribute of path.
+// Forwards the flags parameter to the syscall layer.
+func SetWithFlags(path, name string, data []byte, flags int) error {
+ if err := setxattr(path, name, data, flags); err != nil {
+ return &Error{"xattr.SetWithFlags", path, name, err}
+ }
+ return nil
+}
+
+// LSetWithFlags is like SetWithFlags but does not follow a symlink at
+// the end of the path.
+func LSetWithFlags(path, name string, data []byte, flags int) error {
+ if err := lsetxattr(path, name, data, flags); err != nil {
+ return &Error{"xattr.LSetWithFlags", path, name, err}
+ }
+ return nil
+}
+
+// FSetWithFlags is like SetWithFlags but accepts an os.File instead of a file path.
+func FSetWithFlags(f *os.File, name string, data []byte, flags int) error {
+ if err := fsetxattr(f, name, data, flags); err != nil {
+ return &Error{"xattr.FSetWithFlags", f.Name(), name, err}
+ }
+ return nil
+}
+
+// Remove removes the attribute associated with the given path.
+func Remove(path, name string) error {
+ if err := removexattr(path, name); err != nil {
+ return &Error{"xattr.Remove", path, name, err}
+ }
+ return nil
+}
+
+// LRemove is like Remove but does not follow a symlink at the end of the
+// path.
+func LRemove(path, name string) error {
+ if err := lremovexattr(path, name); err != nil {
+ return &Error{"xattr.LRemove", path, name, err}
+ }
+ return nil
+}
+
+// FRemove is like Remove but accepts an os.File instead of a file path.
+func FRemove(f *os.File, name string) error {
+ if err := fremovexattr(f, name); err != nil {
+ return &Error{"xattr.FRemove", f.Name(), name, err}
+ }
+ return nil
+}
+
+// List retrieves a list of names of extended attributes associated
+// with the given path in the file system.
+func List(path string) ([]string, error) {
+ return list(path, func(data []byte) (int, error) {
+ return listxattr(path, data)
+ })
+}
+
+// LList is like List but does not follow a symlink at the end of the
+// path.
+func LList(path string) ([]string, error) {
+ return list(path, func(data []byte) (int, error) {
+ return llistxattr(path, data)
+ })
+}
+
+// FList is like List but accepts an os.File instead of a file path.
+func FList(f *os.File) ([]string, error) {
+ return list(f.Name(), func(data []byte) (int, error) {
+ return flistxattr(f, data)
+ })
+}
+
+type listxattrFunc func(data []byte) (int, error)
+
+// list contains the buffer allocation logic used by both List and LList.
+func list(path string, listxattrFunc listxattrFunc) ([]string, error) {
+ myname := "xattr.list"
+ // find size.
+ size, err := listxattrFunc(nil)
+ if err != nil {
+ return nil, &Error{myname, path, "", err}
+ }
+ if size > 0 {
+ // `size + 1` because of an ERANGE error when reading
+ // from an SMB1 mount point (https://github.com/pkg/xattr/issues/16).
+ buf := make([]byte, size+1)
+ // Read into buffer of that size.
+ read, err := listxattrFunc(buf)
+ if err != nil {
+ return nil, &Error{myname, path, "", err}
+ }
+ return stringsFromByteSlice(buf[:read]), nil
+ }
+ return []string{}, nil
+}
+
+// bytePtrFromSlice returns a pointer to array of bytes and a size.
+func bytePtrFromSlice(data []byte) (ptr *byte, size int) {
+ size = len(data)
+ if size > 0 {
+ ptr = &data[0]
+ }
+ return
+}
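
Because `*Error` implements `Unwrap`, callers can match the exported `ENOATTR` sentinel with `errors.Is` on every supported platform (on Linux it aliases `ENODATA`). A minimal sketch; the path and attribute name are placeholders:

```go
package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/pkg/xattr"
)

func main() {
	data, err := xattr.Get("/tmp/myfile", "user.missing")
	if errors.Is(err, xattr.ENOATTR) {
		// *Error unwraps to the underlying errno, so errors.Is works here.
		fmt.Println("attribute not set")
		return
	}
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("value: %q\n", data)
}
```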
diff --git a/vendor/github.com/pkg/xattr/xattr_bsd.go b/vendor/github.com/pkg/xattr/xattr_bsd.go
new file mode 100644
index 0000000000000..f4a3f95390490
--- /dev/null
+++ b/vendor/github.com/pkg/xattr/xattr_bsd.go
@@ -0,0 +1,201 @@
+//go:build freebsd || netbsd
+// +build freebsd netbsd
+
+package xattr
+
+import (
+ "os"
+ "syscall"
+ "unsafe"
+)
+
+const (
+ // XATTR_SUPPORTED will be true if the current platform is supported
+ XATTR_SUPPORTED = true
+
+ EXTATTR_NAMESPACE_USER = 1
+
+ // ENOATTR is not exported by the syscall package on Linux, because it is
+ // an alias for ENODATA. We export it here so it is available on all
+ // our supported platforms.
+ ENOATTR = syscall.ENOATTR
+)
+
+func getxattr(path string, name string, data []byte) (int, error) {
+ return sysGet(syscall.SYS_EXTATTR_GET_FILE, path, name, data)
+}
+
+func lgetxattr(path string, name string, data []byte) (int, error) {
+ return sysGet(syscall.SYS_EXTATTR_GET_LINK, path, name, data)
+}
+
+func fgetxattr(f *os.File, name string, data []byte) (int, error) {
+ return getxattr(f.Name(), name, data)
+}
+
+// sysGet is called by getxattr and lgetxattr with the appropriate syscall
+// number. This works because syscalls have the same signature and return
+// values.
+func sysGet(syscallNum uintptr, path string, name string, data []byte) (int, error) {
+ ptr, nbytes := bytePtrFromSlice(data)
+ /*
+ ssize_t extattr_get_file(
+ const char *path,
+ int attrnamespace,
+ const char *attrname,
+ void *data,
+ size_t nbytes);
+
+ ssize_t extattr_get_link(
+ const char *path,
+ int attrnamespace,
+ const char *attrname,
+ void *data,
+ size_t nbytes);
+ */
+ r0, _, err := syscall.Syscall6(syscallNum, uintptr(unsafe.Pointer(syscall.StringBytePtr(path))),
+ EXTATTR_NAMESPACE_USER, uintptr(unsafe.Pointer(syscall.StringBytePtr(name))),
+ uintptr(unsafe.Pointer(ptr)), uintptr(nbytes), 0)
+ if err != syscall.Errno(0) {
+ return int(r0), err
+ }
+ return int(r0), nil
+}
+
+func setxattr(path string, name string, data []byte, flags int) error {
+ return sysSet(syscall.SYS_EXTATTR_SET_FILE, path, name, data)
+}
+
+func lsetxattr(path string, name string, data []byte, flags int) error {
+ return sysSet(syscall.SYS_EXTATTR_SET_LINK, path, name, data)
+}
+
+func fsetxattr(f *os.File, name string, data []byte, flags int) error {
+ return setxattr(f.Name(), name, data, flags)
+}
+
+// sysSet is called by setxattr and lsetxattr with the appropriate syscall
+// number. This works because syscalls have the same signature and return
+// values.
+func sysSet(syscallNum uintptr, path string, name string, data []byte) error {
+ ptr, nbytes := bytePtrFromSlice(data)
+ /*
+ ssize_t extattr_set_file(
+ const char *path,
+ int attrnamespace,
+ const char *attrname,
+ const void *data,
+ size_t nbytes
+ );
+
+ ssize_t extattr_set_link(
+ const char *path,
+ int attrnamespace,
+ const char *attrname,
+ const void *data,
+ size_t nbytes
+ );
+ */
+ r0, _, err := syscall.Syscall6(syscallNum, uintptr(unsafe.Pointer(syscall.StringBytePtr(path))),
+ EXTATTR_NAMESPACE_USER, uintptr(unsafe.Pointer(syscall.StringBytePtr(name))),
+ uintptr(unsafe.Pointer(ptr)), uintptr(nbytes), 0)
+ if err != syscall.Errno(0) {
+ return err
+ }
+ if int(r0) != nbytes {
+ return syscall.E2BIG
+ }
+ return nil
+}
+
+func removexattr(path string, name string) error {
+ return sysRemove(syscall.SYS_EXTATTR_DELETE_FILE, path, name)
+}
+
+func lremovexattr(path string, name string) error {
+ return sysRemove(syscall.SYS_EXTATTR_DELETE_LINK, path, name)
+}
+
+func fremovexattr(f *os.File, name string) error {
+ return removexattr(f.Name(), name)
+}
+
+// sysRemove is called by removexattr and lremovexattr with the appropriate syscall
+// number. This works because syscalls have the same signature and return
+// values.
+func sysRemove(syscallNum uintptr, path string, name string) error {
+ /*
+ int extattr_delete_file(
+ const char *path,
+ int attrnamespace,
+ const char *attrname
+ );
+
+ int extattr_delete_link(
+ const char *path,
+ int attrnamespace,
+ const char *attrname
+ );
+ */
+ _, _, err := syscall.Syscall(syscallNum, uintptr(unsafe.Pointer(syscall.StringBytePtr(path))),
+ EXTATTR_NAMESPACE_USER, uintptr(unsafe.Pointer(syscall.StringBytePtr(name))),
+ )
+ if err != syscall.Errno(0) {
+ return err
+ }
+ return nil
+}
+
+func listxattr(path string, data []byte) (int, error) {
+ return sysList(syscall.SYS_EXTATTR_LIST_FILE, path, data)
+}
+
+func llistxattr(path string, data []byte) (int, error) {
+ return sysList(syscall.SYS_EXTATTR_LIST_LINK, path, data)
+}
+
+func flistxattr(f *os.File, data []byte) (int, error) {
+ return listxattr(f.Name(), data)
+}
+
+// sysList is called by listxattr and llistxattr with the appropriate syscall
+// number. This works because syscalls have the same signature and return
+// values.
+func sysList(syscallNum uintptr, path string, data []byte) (int, error) {
+ ptr, nbytes := bytePtrFromSlice(data)
+ /*
+ ssize_t extattr_list_file(
+ const char *path,
+ int attrnamespace,
+ void *data,
+ size_t nbytes
+ );
+
+ ssize_t extattr_list_link(
+ const char *path,
+ int attrnamespace,
+ void *data,
+ size_t nbytes
+ );
+ */
+ r0, _, err := syscall.Syscall6(syscallNum, uintptr(unsafe.Pointer(syscall.StringBytePtr(path))),
+ EXTATTR_NAMESPACE_USER, uintptr(unsafe.Pointer(ptr)), uintptr(nbytes), 0, 0)
+ if err != syscall.Errno(0) {
+ return int(r0), err
+ }
+ return int(r0), nil
+}
+
+// stringsFromByteSlice converts a sequence of attributes to a []string.
+// On FreeBSD, each entry consists of a single byte containing the length
+// of the attribute name, followed by the attribute name.
+// The name is _not_ terminated by NULL.
+func stringsFromByteSlice(buf []byte) (result []string) {
+ index := 0
+ for index < len(buf) {
+ next := index + 1 + int(buf[index])
+ result = append(result, string(buf[index+1:next]))
+ index = next
+ }
+ return
+}
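
The FreeBSD/NetBSD list format is length-prefixed rather than NULL-terminated, which is what `stringsFromByteSlice` decodes above. A self-contained worked example of that wire format:

```go
package main

import "fmt"

// decodeBSDList mirrors the decoder above: each entry is one length byte
// followed by that many name bytes, with no NULL terminator.
func decodeBSDList(buf []byte) (result []string) {
	index := 0
	for index < len(buf) {
		next := index + 1 + int(buf[index])
		result = append(result, string(buf[index+1:next]))
		index = next
	}
	return
}

func main() {
	// Two attributes: "foo" (length 3) and "name" (length 4).
	buf := []byte{3, 'f', 'o', 'o', 4, 'n', 'a', 'm', 'e'}
	fmt.Println(decodeBSDList(buf)) // [foo name]
}
```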
diff --git a/vendor/github.com/pkg/xattr/xattr_darwin.go b/vendor/github.com/pkg/xattr/xattr_darwin.go
new file mode 100644
index 0000000000000..ee7a501dae5cb
--- /dev/null
+++ b/vendor/github.com/pkg/xattr/xattr_darwin.go
@@ -0,0 +1,90 @@
+//go:build darwin
+// +build darwin
+
+package xattr
+
+import (
+ "os"
+ "syscall"
+
+ "golang.org/x/sys/unix"
+)
+
+// See https://opensource.apple.com/source/xnu/xnu-1504.15.3/bsd/sys/xattr.h.auto.html
+const (
+ // XATTR_SUPPORTED will be true if the current platform is supported
+ XATTR_SUPPORTED = true
+
+ XATTR_NOFOLLOW = 0x0001
+ XATTR_CREATE = 0x0002
+ XATTR_REPLACE = 0x0004
+ XATTR_NOSECURITY = 0x0008
+ XATTR_NODEFAULT = 0x0010
+ XATTR_SHOWCOMPRESSION = 0x0020
+
+ // ENOATTR is not exported by the syscall package on Linux, because it is
+ // an alias for ENODATA. We export it here so it is available on all
+ // our supported platforms.
+ ENOATTR = syscall.ENOATTR
+)
+
+func getxattr(path string, name string, data []byte) (int, error) {
+ return unix.Getxattr(path, name, data)
+}
+
+func lgetxattr(path string, name string, data []byte) (int, error) {
+ return unix.Lgetxattr(path, name, data)
+}
+
+func fgetxattr(f *os.File, name string, data []byte) (int, error) {
+ return getxattr(f.Name(), name, data)
+}
+
+func setxattr(path string, name string, data []byte, flags int) error {
+ return unix.Setxattr(path, name, data, flags)
+}
+
+func lsetxattr(path string, name string, data []byte, flags int) error {
+ return unix.Lsetxattr(path, name, data, flags)
+}
+
+func fsetxattr(f *os.File, name string, data []byte, flags int) error {
+ return setxattr(f.Name(), name, data, flags)
+}
+
+func removexattr(path string, name string) error {
+ return unix.Removexattr(path, name)
+}
+
+func lremovexattr(path string, name string) error {
+ return unix.Lremovexattr(path, name)
+}
+
+func fremovexattr(f *os.File, name string) error {
+ return removexattr(f.Name(), name)
+}
+
+func listxattr(path string, data []byte) (int, error) {
+ return unix.Listxattr(path, data)
+}
+
+func llistxattr(path string, data []byte) (int, error) {
+ return unix.Llistxattr(path, data)
+}
+
+func flistxattr(f *os.File, data []byte) (int, error) {
+ return listxattr(f.Name(), data)
+}
+
+// stringsFromByteSlice converts a sequence of attributes to a []string.
+// On Darwin and Linux, each entry is a NULL-terminated string.
+func stringsFromByteSlice(buf []byte) (result []string) {
+ offset := 0
+ for index, b := range buf {
+ if b == 0 {
+ result = append(result, string(buf[offset:index]))
+ offset = index + 1
+ }
+ }
+ return
+}
diff --git a/vendor/github.com/pkg/xattr/xattr_linux.go b/vendor/github.com/pkg/xattr/xattr_linux.go
new file mode 100644
index 0000000000000..879085ee5d453
--- /dev/null
+++ b/vendor/github.com/pkg/xattr/xattr_linux.go
@@ -0,0 +1,142 @@
+//go:build linux
+// +build linux
+
+package xattr
+
+import (
+ "os"
+ "syscall"
+
+ "golang.org/x/sys/unix"
+)
+
+const (
+ // XATTR_SUPPORTED will be true if the current platform is supported
+ XATTR_SUPPORTED = true
+
+ XATTR_CREATE = unix.XATTR_CREATE
+ XATTR_REPLACE = unix.XATTR_REPLACE
+
+ // ENOATTR is not exported by the syscall package on Linux, because it is
+ // an alias for ENODATA. We export it here so it is available on all
+ // our supported platforms.
+ ENOATTR = syscall.ENODATA
+)
+
+// On Linux, FUSE and CIFS filesystems can return EINTR for interrupted system
+// calls. This function works around this by retrying system calls until they
+// stop returning EINTR.
+//
+// See https://github.com/golang/go/commit/6b420169d798c7ebe733487b56ea5c3fa4aab5ce.
+func ignoringEINTR(fn func() error) (err error) {
+ for {
+ err = fn()
+ if err != unix.EINTR {
+ break
+ }
+ }
+ return err
+}
+
+func getxattr(path string, name string, data []byte) (int, error) {
+ var r int
+ err := ignoringEINTR(func() (err error) {
+ r, err = unix.Getxattr(path, name, data)
+ return err
+ })
+ return r, err
+}
+
+func lgetxattr(path string, name string, data []byte) (int, error) {
+ var r int
+ err := ignoringEINTR(func() (err error) {
+ r, err = unix.Lgetxattr(path, name, data)
+ return err
+ })
+ return r, err
+}
+
+func fgetxattr(f *os.File, name string, data []byte) (int, error) {
+ var r int
+ err := ignoringEINTR(func() (err error) {
+ r, err = unix.Fgetxattr(int(f.Fd()), name, data)
+ return err
+ })
+ return r, err
+}
+
+func setxattr(path string, name string, data []byte, flags int) error {
+ return ignoringEINTR(func() (err error) {
+ return unix.Setxattr(path, name, data, flags)
+ })
+}
+
+func lsetxattr(path string, name string, data []byte, flags int) error {
+ return ignoringEINTR(func() (err error) {
+ return unix.Lsetxattr(path, name, data, flags)
+ })
+}
+
+func fsetxattr(f *os.File, name string, data []byte, flags int) error {
+ return ignoringEINTR(func() (err error) {
+ return unix.Fsetxattr(int(f.Fd()), name, data, flags)
+ })
+}
+
+func removexattr(path string, name string) error {
+ return ignoringEINTR(func() (err error) {
+ return unix.Removexattr(path, name)
+ })
+}
+
+func lremovexattr(path string, name string) error {
+ return ignoringEINTR(func() (err error) {
+ return unix.Lremovexattr(path, name)
+ })
+}
+
+func fremovexattr(f *os.File, name string) error {
+ return ignoringEINTR(func() (err error) {
+ return unix.Fremovexattr(int(f.Fd()), name)
+ })
+}
+
+func listxattr(path string, data []byte) (int, error) {
+ var r int
+ err := ignoringEINTR(func() (err error) {
+ r, err = unix.Listxattr(path, data)
+ return err
+ })
+ return r, err
+}
+
+func llistxattr(path string, data []byte) (int, error) {
+ var r int
+ err := ignoringEINTR(func() (err error) {
+ r, err = unix.Llistxattr(path, data)
+ return err
+ })
+ return r, err
+}
+
+func flistxattr(f *os.File, data []byte) (int, error) {
+ var r int
+ err := ignoringEINTR(func() (err error) {
+ r, err = unix.Flistxattr(int(f.Fd()), data)
+ return err
+ })
+ return r, err
+}
+
+// stringsFromByteSlice converts a sequence of attributes to a []string.
+// On Darwin and Linux, each entry is a NULL-terminated string.
+func stringsFromByteSlice(buf []byte) (result []string) {
+ offset := 0
+ for index, b := range buf {
+ if b == 0 {
+ result = append(result, string(buf[offset:index]))
+ offset = index + 1
+ }
+ }
+ return
+}
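
On Linux the `XATTR_CREATE` and `XATTR_REPLACE` flags are passed straight through to setxattr(2), so create-only and update-only writes can be made explicit. A hedged sketch; the path, attribute name, and values are placeholders:

```go
package main

import (
	"log"

	"github.com/pkg/xattr"
)

func main() {
	const path = "/tmp/myfile" // placeholder path

	// XATTR_CREATE fails with EEXIST if the attribute already exists.
	if err := xattr.SetWithFlags(path, "user.owner", []byte("alice"), xattr.XATTR_CREATE); err != nil {
		log.Fatal(err)
	}
	// XATTR_REPLACE fails with ENODATA if the attribute does not exist yet.
	if err := xattr.SetWithFlags(path, "user.owner", []byte("bob"), xattr.XATTR_REPLACE); err != nil {
		log.Fatal(err)
	}
}
```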
diff --git a/vendor/github.com/pkg/xattr/xattr_solaris.go b/vendor/github.com/pkg/xattr/xattr_solaris.go
new file mode 100644
index 0000000000000..7c98b4afbac25
--- /dev/null
+++ b/vendor/github.com/pkg/xattr/xattr_solaris.go
@@ -0,0 +1,175 @@
+//go:build solaris
+// +build solaris
+
+package xattr
+
+import (
+ "os"
+ "syscall"
+
+ "golang.org/x/sys/unix"
+)
+
+const (
+ // XATTR_SUPPORTED will be true if the current platform is supported
+ XATTR_SUPPORTED = true
+
+ XATTR_CREATE = 0x1
+ XATTR_REPLACE = 0x2
+
+ // ENOATTR is not exported by the syscall package on Linux, because it is
+ // an alias for ENODATA. We export it here so it is available on all
+ // our supported platforms.
+ ENOATTR = syscall.ENODATA
+)
+
+func getxattr(path string, name string, data []byte) (int, error) {
+ f, err := openNonblock(path)
+ if err != nil {
+ return 0, err
+ }
+ defer func() {
+ _ = f.Close()
+ }()
+ return fgetxattr(f, name, data)
+}
+
+func lgetxattr(path string, name string, data []byte) (int, error) {
+ return 0, unix.ENOTSUP
+}
+
+func fgetxattr(f *os.File, name string, data []byte) (int, error) {
+ fd, err := unix.Openat(int(f.Fd()), name, unix.O_RDONLY|unix.O_XATTR, 0)
+ if err != nil {
+ return 0, err
+ }
+ defer func() {
+ _ = unix.Close(fd)
+ }()
+ return unix.Read(fd, data)
+}
+
+func setxattr(path string, name string, data []byte, flags int) error {
+ f, err := openNonblock(path)
+ if err != nil {
+ return err
+ }
+ err = fsetxattr(f, name, data, flags)
+ if err != nil {
+ _ = f.Close()
+ return err
+ }
+ return f.Close()
+}
+
+func lsetxattr(path string, name string, data []byte, flags int) error {
+ return unix.ENOTSUP
+}
+
+func fsetxattr(f *os.File, name string, data []byte, flags int) error {
+ mode := unix.O_WRONLY | unix.O_XATTR
+ if flags&XATTR_REPLACE != 0 {
+ mode |= unix.O_TRUNC
+ } else if flags&XATTR_CREATE != 0 {
+ mode |= unix.O_CREAT | unix.O_EXCL
+ } else {
+ mode |= unix.O_CREAT | unix.O_TRUNC
+ }
+ fd, err := unix.Openat(int(f.Fd()), name, mode, 0666)
+ if err != nil {
+ return err
+ }
+ if _, err = unix.Write(fd, data); err != nil {
+ _ = unix.Close(fd)
+ return err
+ }
+ return unix.Close(fd)
+}
+
+func removexattr(path string, name string) error {
+ mode := unix.O_RDONLY | unix.O_XATTR | unix.O_NONBLOCK | unix.O_CLOEXEC
+ fd, err := unix.Open(path, mode, 0)
+ if err != nil {
+ return err
+ }
+ f := os.NewFile(uintptr(fd), path)
+ defer func() {
+ _ = f.Close()
+ }()
+ return fremovexattr(f, name)
+}
+
+func lremovexattr(path string, name string) error {
+ return unix.ENOTSUP
+}
+
+func fremovexattr(f *os.File, name string) error {
+ fd, err := unix.Openat(int(f.Fd()), ".", unix.O_XATTR, 0)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ _ = unix.Close(fd)
+ }()
+ return unix.Unlinkat(fd, name, 0)
+}
+
+func listxattr(path string, data []byte) (int, error) {
+ f, err := openNonblock(path)
+ if err != nil {
+ return 0, err
+ }
+ defer func() {
+ _ = f.Close()
+ }()
+ return flistxattr(f, data)
+}
+
+func llistxattr(path string, data []byte) (int, error) {
+ return 0, unix.ENOTSUP
+}
+
+func flistxattr(f *os.File, data []byte) (int, error) {
+ fd, err := unix.Openat(int(f.Fd()), ".", unix.O_RDONLY|unix.O_XATTR, 0)
+ if err != nil {
+ return 0, unix.ENOTSUP
+ }
+ xf := os.NewFile(uintptr(fd), f.Name())
+ defer func() {
+ _ = xf.Close()
+ }()
+ names, err := xf.Readdirnames(-1)
+ if err != nil {
+ return 0, err
+ }
+ var buf []byte
+ for _, name := range names {
+ buf = append(buf, append([]byte(name), '\000')...)
+ }
+ if data == nil {
+ return len(buf), nil
+ }
+ return copy(data, buf), nil
+}
+
+// Like os.Open, but passes O_NONBLOCK to the open(2) syscall.
+func openNonblock(path string) (*os.File, error) {
+ fd, err := unix.Open(path, unix.O_RDONLY|unix.O_CLOEXEC|unix.O_NONBLOCK, 0)
+ if err != nil {
+ return nil, err
+ }
+ return os.NewFile(uintptr(fd), path), err
+}
+
+// stringsFromByteSlice converts a sequence of attributes to a []string.
+// We simulate Linux/Darwin, where each entry is a NULL-terminated string.
+func stringsFromByteSlice(buf []byte) (result []string) {
+ offset := 0
+ for index, b := range buf {
+ if b == 0 {
+ result = append(result, string(buf[offset:index]))
+ offset = index + 1
+ }
+ }
+ return
+}
diff --git a/vendor/github.com/pkg/xattr/xattr_unsupported.go b/vendor/github.com/pkg/xattr/xattr_unsupported.go
new file mode 100644
index 0000000000000..8886fbdc4216e
--- /dev/null
+++ b/vendor/github.com/pkg/xattr/xattr_unsupported.go
@@ -0,0 +1,70 @@
+//go:build !linux && !freebsd && !netbsd && !darwin && !solaris
+// +build !linux,!freebsd,!netbsd,!darwin,!solaris
+
+package xattr
+
+import (
+ "os"
+ "syscall"
+)
+
+const (
+ // We need to use the default for unsupported operating systems
+ ENOATTR = syscall.Errno(0x59)
+)
+
+// XATTR_SUPPORTED will be true if the current platform is supported
+const XATTR_SUPPORTED = false
+
+func getxattr(path string, name string, data []byte) (int, error) {
+ return 0, nil
+}
+
+func lgetxattr(path string, name string, data []byte) (int, error) {
+ return 0, nil
+}
+
+func fgetxattr(f *os.File, name string, data []byte) (int, error) {
+ return 0, nil
+}
+
+func setxattr(path string, name string, data []byte, flags int) error {
+ return nil
+}
+
+func lsetxattr(path string, name string, data []byte, flags int) error {
+ return nil
+}
+
+func fsetxattr(f *os.File, name string, data []byte, flags int) error {
+ return nil
+}
+
+func removexattr(path string, name string) error {
+ return nil
+}
+
+func lremovexattr(path string, name string) error {
+ return nil
+}
+
+func fremovexattr(f *os.File, name string) error {
+ return nil
+}
+
+func listxattr(path string, data []byte) (int, error) {
+ return 0, nil
+}
+
+func llistxattr(path string, data []byte) (int, error) {
+ return 0, nil
+}
+
+func flistxattr(f *os.File, data []byte) (int, error) {
+ return 0, nil
+}
+
+// stringsFromByteSlice is a no-op stub on unsupported platforms.
+func stringsFromByteSlice(buf []byte) (result []string) {
+ return []string{}
+}
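
On unsupported platforms every operation is a silent no-op, so code that depends on attributes actually persisting should gate on the `XATTR_SUPPORTED` constant. A minimal sketch:

```go
package main

import (
	"fmt"

	"github.com/pkg/xattr"
)

func main() {
	if !xattr.XATTR_SUPPORTED {
		// On this platform Set/Get/List compile but silently do nothing.
		fmt.Println("extended attributes are not supported here")
		return
	}
	fmt.Println("extended attributes are available")
}
```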
diff --git a/vendor/go.opentelemetry.io/contrib/detectors/gcp/LICENSE b/vendor/go.opentelemetry.io/contrib/detectors/gcp/LICENSE
new file mode 100644
index 0000000000000..261eeb9e9f8b2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/detectors/gcp/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/go.opentelemetry.io/contrib/detectors/gcp/README.md b/vendor/go.opentelemetry.io/contrib/detectors/gcp/README.md
new file mode 100644
index 0000000000000..ec35080b4ec3a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/detectors/gcp/README.md
@@ -0,0 +1,76 @@
+# GCP Resource detector
+
+The GCP resource detector supports detecting resources on:
+
+ * Google Compute Engine (GCE)
+ * Google Kubernetes Engine (GKE)
+ * Google App Engine (GAE)
+ * Cloud Run
+ * Cloud Run jobs
+ * Cloud Functions
+
+## Usage
+
+```golang
+ctx := context.Background()
+// Detect your resources
+res, err := resource.New(ctx,
+ // Use the GCP resource detector!
+ resource.WithDetectors(gcp.NewDetector()),
+ // Keep the default detectors
+ resource.WithTelemetrySDK(),
+ // Add your own custom attributes to identify your application
+ resource.WithAttributes(
+ semconv.ServiceNameKey.String("my-application"),
+ semconv.ServiceNamespaceKey.String("my-company-frontend-team"),
+ ),
+)
+if err != nil {
+ // Handle err
+}
+// Use the resource in your tracerprovider (or meterprovider)
+tp := trace.NewTracerProvider(
+ // ... other options
+ trace.WithResource(res),
+)
+```
+
+## Setting Kubernetes attributes
+
+Previous iterations of GCP resource detection attempted to detect
+`container.name`, `k8s.pod.name` and `k8s.namespace.name`. When using this
+detector, set those attributes yourself in your Pod spec via
+[`OTEL_RESOURCE_ATTRIBUTES`](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/resource/sdk.md#specifying-resource-information-via-an-environment-variable):
+
+```yaml
+env:
+- name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+- name: NAMESPACE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+- name: CONTAINER_NAME
+ value: my-container-name
+- name: OTEL_RESOURCE_ATTRIBUTES
+ value: k8s.pod.name=$(POD_NAME),k8s.namespace.name=$(NAMESPACE_NAME),k8s.container.name=$(CONTAINER_NAME)
+```
+To have a detector unpack the `OTEL_RESOURCE_ATTRIBUTES` envvar, use the `WithFromEnv` option:
+
+```golang
+...
+// Detect your resources
+res, err := resource.New(ctx,
+ resource.WithDetectors(gcp.NewDetector()),
+ resource.WithTelemetrySDK(),
+ resource.WithFromEnv(), // unpacks OTEL_RESOURCE_ATTRIBUTES
+ // Add your own custom attributes to identify your application
+ resource.WithAttributes(
+ semconv.ServiceNameKey.String("my-application"),
+ semconv.ServiceNamespaceKey.String("my-company-frontend-team"),
+ ),
+)
+...
+```
diff --git a/vendor/go.opentelemetry.io/contrib/detectors/gcp/cloud-function.go b/vendor/go.opentelemetry.io/contrib/detectors/gcp/cloud-function.go
new file mode 100644
index 0000000000000..1c1490b02c573
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/detectors/gcp/cloud-function.go
@@ -0,0 +1,61 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package gcp // import "go.opentelemetry.io/contrib/detectors/gcp"
+
+import (
+ "context"
+ "os"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/resource"
+ semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+)
+
+const (
+ gcpFunctionNameKey = "K_SERVICE"
+)
+
+// NewCloudFunction will return a GCP Cloud Function resource detector.
+//
+// Deprecated: Use gcp.NewDetector() instead, which sets the same resource attributes.
+func NewCloudFunction() resource.Detector {
+ return &cloudFunction{
+ cloudRun: NewCloudRun(),
+ }
+}
+
+// cloudFunction collects resource information of GCP Cloud Function.
+type cloudFunction struct {
+ cloudRun *CloudRun
+}
+
+// Detect detects associated resources when running in GCP Cloud Function.
+func (f *cloudFunction) Detect(ctx context.Context) (*resource.Resource, error) {
+ functionName, ok := f.googleCloudFunctionName()
+ if !ok {
+ return nil, nil
+ }
+
+ projectID, err := f.cloudRun.mc.ProjectID()
+ if err != nil {
+ return nil, err
+ }
+ region, err := f.cloudRun.cloudRegion()
+ if err != nil {
+ return nil, err
+ }
+
+ attributes := []attribute.KeyValue{
+ semconv.CloudProviderGCP,
+ semconv.CloudPlatformGCPCloudFunctions,
+ semconv.FaaSName(functionName),
+ semconv.CloudAccountID(projectID),
+ semconv.CloudRegion(region),
+ }
+ return resource.NewWithAttributes(semconv.SchemaURL, attributes...), nil
+}
+
+func (f *cloudFunction) googleCloudFunctionName() (string, bool) {
+ return os.LookupEnv(gcpFunctionNameKey)
+}
diff --git a/vendor/go.opentelemetry.io/contrib/detectors/gcp/cloud-run.go b/vendor/go.opentelemetry.io/contrib/detectors/gcp/cloud-run.go
new file mode 100644
index 0000000000000..7754b466838b8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/detectors/gcp/cloud-run.go
@@ -0,0 +1,114 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package gcp // import "go.opentelemetry.io/contrib/detectors/gcp"
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "strings"
+
+ "cloud.google.com/go/compute/metadata"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/resource"
+ semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+)
+
+const serviceNamespace = "cloud-run-managed"
+
+// The minimal list of metadata.Client methods we use. Use an interface so we
+// can replace it with a fake implementation in the unit test.
+type metadataClient interface {
+ ProjectID() (string, error)
+ Get(string) (string, error)
+ InstanceID() (string, error)
+}
+
+// CloudRun collects resource information of Cloud Run instance.
+//
+// Deprecated: Use gcp.NewDetector() instead. Note that it sets faas.* resource attributes instead of service.* attributes.
+type CloudRun struct {
+ mc metadataClient
+ onGCE func() bool
+ getenv func(string) string
+}
+
+// compile time assertion that CloudRun implements the resource.Detector
+// interface.
+var _ resource.Detector = (*CloudRun)(nil)
+
+// NewCloudRun creates a CloudRun detector.
+//
+// Deprecated: Use gcp.NewDetector() instead. Note that it sets faas.* resource attributes instead of service.* attributes.
+func NewCloudRun() *CloudRun {
+ return &CloudRun{
+ mc: metadata.NewClient(nil),
+ onGCE: metadata.OnGCE,
+ getenv: os.Getenv,
+ }
+}
+
+func (c *CloudRun) cloudRegion() (string, error) {
+ region, err := c.mc.Get("instance/region")
+ if err != nil {
+ return "", err
+ }
+ // Region from the metadata server is in the format /projects/123/regions/r.
+ // https://cloud.google.com/run/docs/reference/container-contract#metadata-server
+ return region[strings.LastIndex(region, "/")+1:], nil
+}
+
+// Detect detects associated resources when running on Cloud Run hosts.
+// NOTE: the service.namespace attribute is currently hardcoded to be
+// "cloud-run-managed". This may change in the future, please do not rely on
+// this behavior yet.
+func (c *CloudRun) Detect(ctx context.Context) (*resource.Resource, error) {
+ // .OnGCE is actually testing whether the metadata server is available.
+ // Metadata server is supported on Cloud Run.
+ if !c.onGCE() {
+ return nil, nil
+ }
+
+ attributes := []attribute.KeyValue{
+ semconv.CloudProviderGCP,
+ semconv.ServiceNamespace(serviceNamespace),
+ }
+
+ var errInfo []string
+
+ if projectID, err := c.mc.ProjectID(); hasProblem(err) {
+ errInfo = append(errInfo, err.Error())
+ } else if projectID != "" {
+ attributes = append(attributes, semconv.CloudAccountID(projectID))
+ }
+
+ if region, err := c.cloudRegion(); hasProblem(err) {
+ errInfo = append(errInfo, err.Error())
+ } else if region != "" {
+ attributes = append(attributes, semconv.CloudRegion(region))
+ }
+
+ if instanceID, err := c.mc.InstanceID(); hasProblem(err) {
+ errInfo = append(errInfo, err.Error())
+ } else if instanceID != "" {
+ attributes = append(attributes, semconv.ServiceInstanceID(instanceID))
+ }
+
+ // Part of Cloud Run container runtime contract.
+ // See https://cloud.google.com/run/docs/reference/container-contract
+ if service := c.getenv("K_SERVICE"); service == "" {
+ errInfo = append(errInfo, "envvar K_SERVICE contains empty string.")
+ } else {
+ attributes = append(attributes, semconv.ServiceName(service))
+ }
+ res := resource.NewWithAttributes(semconv.SchemaURL, attributes...)
+
+ var aggregatedErr error
+ if len(errInfo) > 0 {
+ aggregatedErr = fmt.Errorf("detecting Cloud Run resources: %s", errInfo)
+ }
+
+ return res, aggregatedErr
+}
diff --git a/vendor/go.opentelemetry.io/contrib/detectors/gcp/detector.go b/vendor/go.opentelemetry.io/contrib/detectors/gcp/detector.go
new file mode 100644
index 0000000000000..b9eb1e1e14958
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/detectors/gcp/detector.go
@@ -0,0 +1,153 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package gcp // import "go.opentelemetry.io/contrib/detectors/gcp"
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+
+ "cloud.google.com/go/compute/metadata"
+ "github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/resource"
+ semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+)
+
+// NewDetector returns a resource detector which detects resource attributes on:
+// * Google Compute Engine (GCE).
+// * Google Kubernetes Engine (GKE).
+// * Google App Engine (GAE).
+// * Cloud Run.
+// * Cloud Functions.
+func NewDetector() resource.Detector {
+ return &detector{detector: gcp.NewDetector()}
+}
+
+type detector struct {
+ detector gcpDetector
+}
+
+// Detect detects associated resources when running on GCE, GKE, GAE,
+// Cloud Run, and Cloud Functions.
+func (d *detector) Detect(ctx context.Context) (*resource.Resource, error) {
+ if !metadata.OnGCE() {
+ return nil, nil
+ }
+ b := &resourceBuilder{}
+ b.attrs = append(b.attrs, semconv.CloudProviderGCP)
+ b.add(semconv.CloudAccountIDKey, d.detector.ProjectID)
+
+ switch d.detector.CloudPlatform() {
+ case gcp.GKE:
+ b.attrs = append(b.attrs, semconv.CloudPlatformGCPKubernetesEngine)
+ b.addZoneOrRegion(d.detector.GKEAvailabilityZoneOrRegion)
+ b.add(semconv.K8SClusterNameKey, d.detector.GKEClusterName)
+ b.add(semconv.HostIDKey, d.detector.GKEHostID)
+ case gcp.CloudRun:
+ b.attrs = append(b.attrs, semconv.CloudPlatformGCPCloudRun)
+ b.add(semconv.FaaSNameKey, d.detector.FaaSName)
+ b.add(semconv.FaaSVersionKey, d.detector.FaaSVersion)
+ b.add(semconv.FaaSInstanceKey, d.detector.FaaSID)
+ b.add(semconv.CloudRegionKey, d.detector.FaaSCloudRegion)
+ case gcp.CloudRunJob:
+ b.attrs = append(b.attrs, semconv.CloudPlatformGCPCloudRun)
+ b.add(semconv.FaaSNameKey, d.detector.FaaSName)
+ b.add(semconv.FaaSInstanceKey, d.detector.FaaSID)
+ b.add(semconv.GCPCloudRunJobExecutionKey, d.detector.CloudRunJobExecution)
+ b.addInt(semconv.GCPCloudRunJobTaskIndexKey, d.detector.CloudRunJobTaskIndex)
+ b.add(semconv.CloudRegionKey, d.detector.FaaSCloudRegion)
+ case gcp.CloudFunctions:
+ b.attrs = append(b.attrs, semconv.CloudPlatformGCPCloudFunctions)
+ b.add(semconv.FaaSNameKey, d.detector.FaaSName)
+ b.add(semconv.FaaSVersionKey, d.detector.FaaSVersion)
+ b.add(semconv.FaaSInstanceKey, d.detector.FaaSID)
+ b.add(semconv.CloudRegionKey, d.detector.FaaSCloudRegion)
+ case gcp.AppEngineFlex:
+ b.attrs = append(b.attrs, semconv.CloudPlatformGCPAppEngine)
+ b.addZoneAndRegion(d.detector.AppEngineFlexAvailabilityZoneAndRegion)
+ b.add(semconv.FaaSNameKey, d.detector.AppEngineServiceName)
+ b.add(semconv.FaaSVersionKey, d.detector.AppEngineServiceVersion)
+ b.add(semconv.FaaSInstanceKey, d.detector.AppEngineServiceInstance)
+ case gcp.AppEngineStandard:
+ b.attrs = append(b.attrs, semconv.CloudPlatformGCPAppEngine)
+ b.add(semconv.CloudAvailabilityZoneKey, d.detector.AppEngineStandardAvailabilityZone)
+ b.add(semconv.CloudRegionKey, d.detector.AppEngineStandardCloudRegion)
+ b.add(semconv.FaaSNameKey, d.detector.AppEngineServiceName)
+ b.add(semconv.FaaSVersionKey, d.detector.AppEngineServiceVersion)
+ b.add(semconv.FaaSInstanceKey, d.detector.AppEngineServiceInstance)
+ case gcp.GCE:
+ b.attrs = append(b.attrs, semconv.CloudPlatformGCPComputeEngine)
+ b.addZoneAndRegion(d.detector.GCEAvailabilityZoneAndRegion)
+ b.add(semconv.HostTypeKey, d.detector.GCEHostType)
+ b.add(semconv.HostIDKey, d.detector.GCEHostID)
+ b.add(semconv.HostNameKey, d.detector.GCEHostName)
+ b.add(semconv.GCPGceInstanceNameKey, d.detector.GCEInstanceName)
+ b.add(semconv.GCPGceInstanceHostnameKey, d.detector.GCEInstanceHostname)
+ default:
+ // We don't support this platform yet, so just return with what we have
+ }
+ return b.build()
+}
+
+// resourceBuilder simplifies constructing resources using GCP detection
+// library functions.
+type resourceBuilder struct {
+ errs []error
+ attrs []attribute.KeyValue
+}
+
+func (r *resourceBuilder) add(key attribute.Key, detect func() (string, error)) {
+ if v, err := detect(); err == nil {
+ r.attrs = append(r.attrs, key.String(v))
+ } else {
+ r.errs = append(r.errs, err)
+ }
+}
+
+func (r *resourceBuilder) addInt(key attribute.Key, detect func() (string, error)) {
+ if v, err := detect(); err == nil {
+ if vi, err := strconv.Atoi(v); err == nil {
+ r.attrs = append(r.attrs, key.Int(vi))
+ } else {
+ r.errs = append(r.errs, err)
+ }
+ } else {
+ r.errs = append(r.errs, err)
+ }
+}
+
+// zoneAndRegion functions are expected to return zone, region, err.
+func (r *resourceBuilder) addZoneAndRegion(detect func() (string, string, error)) {
+ if zone, region, err := detect(); err == nil {
+ r.attrs = append(r.attrs, semconv.CloudAvailabilityZone(zone))
+ r.attrs = append(r.attrs, semconv.CloudRegion(region))
+ } else {
+ r.errs = append(r.errs, err)
+ }
+}
+
+func (r *resourceBuilder) addZoneOrRegion(detect func() (string, gcp.LocationType, error)) {
+ if v, locType, err := detect(); err == nil {
+ switch locType {
+ case gcp.Zone:
+ r.attrs = append(r.attrs, semconv.CloudAvailabilityZone(v))
+ case gcp.Region:
+ r.attrs = append(r.attrs, semconv.CloudRegion(v))
+ default:
+ r.errs = append(r.errs, fmt.Errorf("location must be zone or region. Got %v", locType))
+ }
+ } else {
+ r.errs = append(r.errs, err)
+ }
+}
+
+func (r *resourceBuilder) build() (*resource.Resource, error) {
+ var err error
+ if len(r.errs) > 0 {
+ err = fmt.Errorf("%w: %s", resource.ErrPartialResource, r.errs)
+ }
+ return resource.NewWithAttributes(semconv.SchemaURL, r.attrs...), err
+}
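
Since build wraps any detection failures in resource.ErrPartialResource, callers can keep a partially populated resource instead of discarding it. A hedged sketch of that calling pattern, assuming the stock SDK resource API:

    package main

    import (
    	"context"
    	"errors"
    	"log"

    	"go.opentelemetry.io/contrib/detectors/gcp"
    	"go.opentelemetry.io/otel/sdk/resource"
    )

    func main() {
    	res, err := resource.New(context.Background(),
    		resource.WithDetectors(gcp.NewDetector()),
    		resource.WithTelemetrySDK(),
    	)
    	switch {
    	case errors.Is(err, resource.ErrPartialResource):
    		// Some attributes were detected; keep the resource, log the rest.
    		log.Printf("partial GCP resource, continuing: %v", err)
    	case err != nil:
    		log.Fatalf("resource detection failed: %v", err)
    	}
    	_ = res
    }
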
diff --git a/vendor/go.opentelemetry.io/contrib/detectors/gcp/gce.go b/vendor/go.opentelemetry.io/contrib/detectors/gcp/gce.go
new file mode 100644
index 0000000000000..2a29c420b498e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/detectors/gcp/gce.go
@@ -0,0 +1,100 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package gcp // import "go.opentelemetry.io/contrib/detectors/gcp"
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "strings"
+
+ "cloud.google.com/go/compute/metadata"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/resource"
+ semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+)
+
+// GCE collects resource information of GCE computing instances.
+//
+// Deprecated: Use gcp.NewDetector() instead, which sets the same resource attributes on GCE.
+type GCE struct{}
+
+// compile time assertion that GCE implements the resource.Detector interface.
+var _ resource.Detector = (*GCE)(nil)
+
+// Detect detects associated resources when running on GCE hosts.
+func (gce *GCE) Detect(ctx context.Context) (*resource.Resource, error) {
+ if !metadata.OnGCE() {
+ return nil, nil
+ }
+
+ attributes := []attribute.KeyValue{
+ semconv.CloudProviderGCP,
+ }
+
+ var errInfo []string
+
+ if projectID, err := metadata.ProjectIDWithContext(ctx); hasProblem(err) {
+ errInfo = append(errInfo, err.Error())
+ } else if projectID != "" {
+ attributes = append(attributes, semconv.CloudAccountID(projectID))
+ }
+
+ if zone, err := metadata.ZoneWithContext(ctx); hasProblem(err) {
+ errInfo = append(errInfo, err.Error())
+ } else if zone != "" {
+ attributes = append(attributes, semconv.CloudAvailabilityZone(zone))
+
+ splitArr := strings.SplitN(zone, "-", 3)
+ if len(splitArr) == 3 {
+ attributes = append(attributes, semconv.CloudRegion(strings.Join(splitArr[0:2], "-")))
+ }
+ }
+
+ if instanceID, err := metadata.InstanceIDWithContext(ctx); hasProblem(err) {
+ errInfo = append(errInfo, err.Error())
+ } else if instanceID != "" {
+ attributes = append(attributes, semconv.HostID(instanceID))
+ }
+
+ if name, err := metadata.InstanceNameWithContext(ctx); hasProblem(err) {
+ errInfo = append(errInfo, err.Error())
+ } else if name != "" {
+ attributes = append(attributes, semconv.HostName(name))
+ }
+
+ if hostname, err := os.Hostname(); hasProblem(err) {
+ errInfo = append(errInfo, err.Error())
+ } else if hostname != "" {
+ attributes = append(attributes, semconv.HostName(hostname))
+ }
+
+ if hostType, err := metadata.GetWithContext(ctx, "instance/machine-type"); hasProblem(err) {
+ errInfo = append(errInfo, err.Error())
+ } else if hostType != "" {
+ attributes = append(attributes, semconv.HostType(hostType))
+ }
+
+ var aggregatedErr error
+ if len(errInfo) > 0 {
+ aggregatedErr = fmt.Errorf("detecting GCE resources: %s", errInfo)
+ }
+
+ return resource.NewWithAttributes(semconv.SchemaURL, attributes...), aggregatedErr
+}
+
+// hasProblem reports whether err is a real failure; nil errors and metadata
+// "not defined" errors (missing resources) are not problems.
+func hasProblem(err error) bool {
+ if err == nil {
+ return false
+ }
+
+ var nde metadata.NotDefinedError
+ if undefined := errors.As(err, &nde); undefined {
+ return false
+ }
+ return true
+}
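
hasProblem is why a missing metadata entry does not abort detection: metadata.NotDefinedError just means the attribute is absent on this platform. A sketch of the same pattern using a hypothetical helper (machineTypeOrEmpty is not part of this package; it assumes the imports gce.go already has):

    // machineTypeOrEmpty treats a "not defined" metadata entry as an empty
    // value rather than a failure, mirroring what hasProblem encodes above.
    func machineTypeOrEmpty(ctx context.Context) (string, error) {
    	v, err := metadata.GetWithContext(ctx, "instance/machine-type")
    	var nde metadata.NotDefinedError
    	if errors.As(err, &nde) {
    		return "", nil // entry absent on this platform: benign
    	}
    	return v, err
    }
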
diff --git a/vendor/go.opentelemetry.io/contrib/detectors/gcp/gke.go b/vendor/go.opentelemetry.io/contrib/detectors/gcp/gke.go
new file mode 100644
index 0000000000000..0588ad6a48526
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/detectors/gcp/gke.go
@@ -0,0 +1,69 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package gcp // import "go.opentelemetry.io/contrib/detectors/gcp"
+
+import (
+ "context"
+ "fmt"
+ "os"
+
+ "cloud.google.com/go/compute/metadata"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/resource"
+ semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+)
+
+// GKE collects resource information of GKE computing instances.
+//
+// Deprecated: Use gcp.NewDetector() instead, which does NOT detect container, pod, and namespace attributes.
+// Set those using the OTEL_RESOURCE_ATTRIBUTES env var instead.
+type GKE struct{}
+
+// compile time assertion that GKE implements the resource.Detector interface.
+var _ resource.Detector = (*GKE)(nil)
+
+// Detect detects associated resources when running in a GKE environment.
+func (gke *GKE) Detect(ctx context.Context) (*resource.Resource, error) {
+ gcpDetector := GCE{}
+ gceLabelRes, err := gcpDetector.Detect(ctx)
+
+ if os.Getenv("KUBERNETES_SERVICE_HOST") == "" {
+ return gceLabelRes, err
+ }
+
+ var errInfo []string
+ if err != nil {
+ errInfo = append(errInfo, err.Error())
+ }
+
+ attributes := []attribute.KeyValue{
+ semconv.K8SNamespaceName(os.Getenv("NAMESPACE")),
+ semconv.K8SPodName(os.Getenv("HOSTNAME")),
+ }
+
+ if containerName := os.Getenv("CONTAINER_NAME"); containerName != "" {
+ attributes = append(attributes, semconv.ContainerName(containerName))
+ }
+
+ if clusterName, err := metadata.InstanceAttributeValueWithContext(ctx, "cluster-name"); hasProblem(err) {
+ errInfo = append(errInfo, err.Error())
+ } else if clusterName != "" {
+ attributes = append(attributes, semconv.K8SClusterName(clusterName))
+ }
+
+ k8sattributeRes := resource.NewWithAttributes(semconv.SchemaURL, attributes...)
+
+ res, err := resource.Merge(gceLabelRes, k8sattributeRes)
+ if err != nil {
+ errInfo = append(errInfo, err.Error())
+ }
+
+ var aggregatedErr error
+ if len(errInfo) > 0 {
+ aggregatedErr = fmt.Errorf("detecting GKE resources: %s", errInfo)
+ }
+
+ return res, aggregatedErr
+}
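
Detect branches on KUBERNETES_SERVICE_HOST and reads NAMESPACE, HOSTNAME, and CONTAINER_NAME, which real deployments usually inject via the Kubernetes downward API. A hedged test-style sketch with illustrative values (off GCE the metadata lookups fail, so expect an aggregated error alongside the env-derived attributes):

    package gcp_test

    import (
    	"context"
    	"testing"

    	"go.opentelemetry.io/contrib/detectors/gcp"
    )

    func TestGKEDetectEnv(t *testing.T) {
    	t.Setenv("KUBERNETES_SERVICE_HOST", "10.0.0.1") // take the Kubernetes branch
    	t.Setenv("NAMESPACE", "prod")
    	t.Setenv("CONTAINER_NAME", "app")

    	res, err := (&gcp.GKE{}).Detect(context.Background())
    	// Env-derived attributes are still present on res even when err != nil.
    	t.Logf("res=%v err=%v", res, err)
    }
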
diff --git a/vendor/go.opentelemetry.io/contrib/detectors/gcp/types.go b/vendor/go.opentelemetry.io/contrib/detectors/gcp/types.go
new file mode 100644
index 0000000000000..666d82e616c38
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/detectors/gcp/types.go
@@ -0,0 +1,33 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package gcp // import "go.opentelemetry.io/contrib/detectors/gcp"
+
+import "github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp"
+
+// gcpDetector can detect attributes of GCP environments.
+type gcpDetector interface {
+ ProjectID() (string, error)
+ CloudPlatform() gcp.Platform
+ GKEAvailabilityZoneOrRegion() (string, gcp.LocationType, error)
+ GKEClusterName() (string, error)
+ GKEHostID() (string, error)
+ FaaSName() (string, error)
+ FaaSVersion() (string, error)
+ FaaSID() (string, error)
+ FaaSCloudRegion() (string, error)
+ AppEngineFlexAvailabilityZoneAndRegion() (string, string, error)
+ AppEngineStandardAvailabilityZone() (string, error)
+ AppEngineStandardCloudRegion() (string, error)
+ AppEngineServiceName() (string, error)
+ AppEngineServiceVersion() (string, error)
+ AppEngineServiceInstance() (string, error)
+ GCEAvailabilityZoneAndRegion() (string, string, error)
+ GCEHostType() (string, error)
+ GCEHostID() (string, error)
+ GCEHostName() (string, error)
+ GCEInstanceHostname() (string, error)
+ GCEInstanceName() (string, error)
+ CloudRunJobExecution() (string, error)
+ CloudRunJobTaskIndex() (string, error)
+}
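
This unexported interface mirrors the method set of the upstream GoogleCloudPlatform detector, which is what lets tests swap in a fake. Inside this package (and only here, since the type is unexported), the coupling can be pinned with a compile-time assertion, a sketch:

    // Fails the build if the upstream detector ever drops a method we rely on.
    var _ gcpDetector = gcp.NewDetector()
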
diff --git a/vendor/go.opentelemetry.io/contrib/detectors/gcp/version.go b/vendor/go.opentelemetry.io/contrib/detectors/gcp/version.go
new file mode 100644
index 0000000000000..1acc898319839
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/detectors/gcp/version.go
@@ -0,0 +1,17 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package gcp // import "go.opentelemetry.io/contrib/detectors/gcp"
+
+// Version is the current release version of the GCP resource detector.
+func Version() string {
+ return "1.29.0"
+ // This string is updated by the pre_release.sh script during release
+}
+
+// SemVersion is the semantic version to be supplied to tracer/meter creation.
+//
+// Deprecated: Use [Version] instead.
+func SemVersion() string {
+ return Version()
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md
new file mode 100644
index 0000000000000..0b6cbe960cbab
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md
@@ -0,0 +1,3 @@
+# Semconv v1.24.0
+
+[](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.24.0)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go
new file mode 100644
index 0000000000000..6e688345cbb98
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go
@@ -0,0 +1,4387 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// Describes FaaS attributes.
+const (
+ // FaaSInvokedNameKey is the attribute Key conforming to the
+ // "faas.invoked_name" semantic conventions. It represents the name of the
+ // invoked function.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Examples: 'my-function'
+ // Note: SHOULD be equal to the `faas.name` resource attribute of the
+ // invoked function.
+ FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
+
+ // FaaSInvokedProviderKey is the attribute Key conforming to the
+ // "faas.invoked_provider" semantic conventions. It represents the cloud
+ // provider of the invoked function.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Note: SHOULD be equal to the `cloud.provider` resource attribute of the
+ // invoked function.
+ FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
+
+ // FaaSInvokedRegionKey is the attribute Key conforming to the
+ // "faas.invoked_region" semantic conventions. It represents the cloud
+ // region of the invoked function.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (For some cloud providers, like
+ // AWS or GCP, the region in which a function is hosted is essential to
+ // uniquely identify the function and also part of its endpoint. Since it's
+ // part of the endpoint being called, the region is always known to
+ // clients. In these cases, `faas.invoked_region` MUST be set accordingly.
+ // If the region is unknown to the client or not required for identifying
+ // the invoked function, setting `faas.invoked_region` is optional.)
+ // Stability: experimental
+ // Examples: 'eu-central-1'
+ // Note: SHOULD be equal to the `cloud.region` resource attribute of the
+ // invoked function.
+ FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
+
+ // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger"
+ // semantic conventions. It represents the type of the trigger which caused
+ // this function invocation.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ FaaSTriggerKey = attribute.Key("faas.trigger")
+)
+
+var (
+ // Alibaba Cloud
+ FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
+ // Microsoft Azure
+ FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
+ // Google Cloud Platform
+ FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
+ // Tencent Cloud
+ FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
+)
+
+var (
+ // A response to some data source operation such as a database or filesystem read/write
+ FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
+ // To provide an answer to an inbound HTTP request
+ FaaSTriggerHTTP = FaaSTriggerKey.String("http")
+ // A function is set to be executed when messages are sent to a messaging system
+ FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
+ // A function is scheduled to be executed regularly
+ FaaSTriggerTimer = FaaSTriggerKey.String("timer")
+ // If none of the others apply
+ FaaSTriggerOther = FaaSTriggerKey.String("other")
+)
+
+// FaaSInvokedName returns an attribute KeyValue conforming to the
+// "faas.invoked_name" semantic conventions. It represents the name of the
+// invoked function.
+func FaaSInvokedName(val string) attribute.KeyValue {
+ return FaaSInvokedNameKey.String(val)
+}
+
+// FaaSInvokedRegion returns an attribute KeyValue conforming to the
+// "faas.invoked_region" semantic conventions. It represents the cloud region
+// of the invoked function.
+func FaaSInvokedRegion(val string) attribute.KeyValue {
+ return FaaSInvokedRegionKey.String(val)
+}
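
Taken together, a hedged sketch of stamping these invocation attributes onto an outgoing-call span (span stands for any trace.Span already in scope; the values are illustrative):

    span.SetAttributes(
    	semconv.FaaSInvokedName("my-function"),
    	semconv.FaaSInvokedProviderGCP,
    	semconv.FaaSInvokedRegion("eu-central-1"),
    )
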
+
+// Attributes for Events represented using Log Records.
+const (
+ // EventNameKey is the attribute Key conforming to the "event.name"
+ // semantic conventions. It identifies the class / type of
+ // event.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Examples: 'browser.mouse.click', 'device.app.lifecycle'
+ // Note: Event names are subject to the same rules as [attribute
+ // names](https://github.com/open-telemetry/opentelemetry-specification/tree/v1.26.0/specification/common/attribute-naming.md).
+ // Notably, event names are namespaced to avoid collisions and provide a
+ // clean separation of semantics for events in separate domains like
+ // browser, mobile, and kubernetes.
+ EventNameKey = attribute.Key("event.name")
+)
+
+// EventName returns an attribute KeyValue conforming to the "event.name"
+// semantic conventions. It identifies the class / type of
+// event.
+func EventName(val string) attribute.KeyValue {
+ return EventNameKey.String(val)
+}
+
+// The attributes described in this section are rather generic. They may be
+// used in any Log Record they apply to.
+const (
+ // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid"
+ // semantic conventions. It represents a unique identifier for the Log
+ // Record.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV'
+ // Note: If an id is provided, other log records with the same id will be
+ // considered duplicates and can be removed safely. This means, that two
+ // distinguishable log records MUST have different values.
+ // The id MAY be a [Universally Unique Lexicographically Sortable
+ // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers
+ // (e.g. UUID) may be used as needed.
+ LogRecordUIDKey = attribute.Key("log.record.uid")
+)
+
+// LogRecordUID returns an attribute KeyValue conforming to the
+// "log.record.uid" semantic conventions. It represents a unique identifier for
+// the Log Record.
+func LogRecordUID(val string) attribute.KeyValue {
+ return LogRecordUIDKey.String(val)
+}
+
+// Describes Log attributes
+const (
+ // LogIostreamKey is the attribute Key conforming to the "log.iostream"
+ // semantic conventions. It represents the stream associated with the log.
+ // See below for a list of well-known values.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ LogIostreamKey = attribute.Key("log.iostream")
+)
+
+var (
+ // Logs from stdout stream
+ LogIostreamStdout = LogIostreamKey.String("stdout")
+ // Events from stderr stream
+ LogIostreamStderr = LogIostreamKey.String("stderr")
+)
+
+// A file to which log was emitted.
+const (
+ // LogFileNameKey is the attribute Key conforming to the "log.file.name"
+ // semantic conventions. It represents the basename of the file.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: experimental
+ // Examples: 'audit.log'
+ LogFileNameKey = attribute.Key("log.file.name")
+
+ // LogFileNameResolvedKey is the attribute Key conforming to the
+ // "log.file.name_resolved" semantic conventions. It represents the
+ // basename of the file, with symlinks resolved.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'uuid.log'
+ LogFileNameResolvedKey = attribute.Key("log.file.name_resolved")
+
+ // LogFilePathKey is the attribute Key conforming to the "log.file.path"
+ // semantic conventions. It represents the full path to the file.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/var/log/mysql/audit.log'
+ LogFilePathKey = attribute.Key("log.file.path")
+
+ // LogFilePathResolvedKey is the attribute Key conforming to the
+ // "log.file.path_resolved" semantic conventions. It represents the full
+ // path to the file, with symlinks resolved.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/var/lib/docker/uuid.log'
+ LogFilePathResolvedKey = attribute.Key("log.file.path_resolved")
+)
+
+// LogFileName returns an attribute KeyValue conforming to the
+// "log.file.name" semantic conventions. It represents the basename of the
+// file.
+func LogFileName(val string) attribute.KeyValue {
+ return LogFileNameKey.String(val)
+}
+
+// LogFileNameResolved returns an attribute KeyValue conforming to the
+// "log.file.name_resolved" semantic conventions. It represents the basename of
+// the file, with symlinks resolved.
+func LogFileNameResolved(val string) attribute.KeyValue {
+ return LogFileNameResolvedKey.String(val)
+}
+
+// LogFilePath returns an attribute KeyValue conforming to the
+// "log.file.path" semantic conventions. It represents the full path to the
+// file.
+func LogFilePath(val string) attribute.KeyValue {
+ return LogFilePathKey.String(val)
+}
+
+// LogFilePathResolved returns an attribute KeyValue conforming to the
+// "log.file.path_resolved" semantic conventions. It represents the full path
+// to the file, with symlinks resolved.
+func LogFilePathResolved(val string) attribute.KeyValue {
+ return LogFilePathResolvedKey.String(val)
+}
+
+// Describes Database attributes
+const (
+ // PoolNameKey is the attribute Key conforming to the "pool.name" semantic
+ // conventions. It represents the name of the connection pool; unique
+ // within the instrumented application. In case the connection pool
+ // implementation doesn't provide a name, then the
+ // [db.connection_string](/docs/database/database-spans.md#connection-level-attributes)
+ // should be used
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Examples: 'myDataSource'
+ PoolNameKey = attribute.Key("pool.name")
+
+ // StateKey is the attribute Key conforming to the "state" semantic
+ // conventions. It represents the state of a connection in the pool
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Examples: 'idle'
+ StateKey = attribute.Key("state")
+)
+
+var (
+ // idle
+ StateIdle = StateKey.String("idle")
+ // used
+ StateUsed = StateKey.String("used")
+)
+
+// PoolName returns an attribute KeyValue conforming to the "pool.name"
+// semantic conventions. It represents the name of the connection pool; unique
+// within the instrumented application. In case the connection pool
+// implementation doesn't provide a name, then the
+// [db.connection_string](/docs/database/database-spans.md#connection-level-attributes)
+// should be used
+func PoolName(val string) attribute.KeyValue {
+ return PoolNameKey.String(val)
+}
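
A hedged sketch of attaching these pool attributes to a connection-count measurement (counter stands for any metric.Int64UpDownCounter and ctx for a context already in scope; metric is go.opentelemetry.io/otel/metric):

    counter.Add(ctx, 1, metric.WithAttributes(
    	semconv.PoolName("myDataSource"),
    	semconv.StateIdle,
    ))
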
+
+// ASP.NET Core attributes
+const (
+ // AspnetcoreDiagnosticsHandlerTypeKey is the attribute Key conforming to
+ // the "aspnetcore.diagnostics.handler.type" semantic conventions. It
+ // represents the full type name of the
+ // [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler)
+ // implementation that handled the exception.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (if and only if the exception
+ // was handled by this handler.)
+ // Stability: experimental
+ // Examples: 'Contoso.MyHandler'
+ AspnetcoreDiagnosticsHandlerTypeKey = attribute.Key("aspnetcore.diagnostics.handler.type")
+
+ // AspnetcoreRateLimitingPolicyKey is the attribute Key conforming to the
+ // "aspnetcore.rate_limiting.policy" semantic conventions. It represents
+ // the rate limiting policy name.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (if the matched endpoint for the
+ // request had a rate-limiting policy.)
+ // Stability: experimental
+ // Examples: 'fixed', 'sliding', 'token'
+ AspnetcoreRateLimitingPolicyKey = attribute.Key("aspnetcore.rate_limiting.policy")
+
+ // AspnetcoreRateLimitingResultKey is the attribute Key conforming to the
+ // "aspnetcore.rate_limiting.result" semantic conventions. It represents
+ // the rate-limiting result, shows whether the lease was acquired or
+ // contains a rejection reason
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Examples: 'acquired', 'request_canceled'
+ AspnetcoreRateLimitingResultKey = attribute.Key("aspnetcore.rate_limiting.result")
+
+ // AspnetcoreRequestIsUnhandledKey is the attribute Key conforming to the
+ // "aspnetcore.request.is_unhandled" semantic conventions. It represents
+ // the flag indicating if request was handled by the application pipeline.
+ //
+ // Type: boolean
+ // RequirementLevel: ConditionallyRequired (if and only if the request was
+ // not handled.)
+ // Stability: experimental
+ // Examples: True
+ AspnetcoreRequestIsUnhandledKey = attribute.Key("aspnetcore.request.is_unhandled")
+
+ // AspnetcoreRoutingIsFallbackKey is the attribute Key conforming to the
+ // "aspnetcore.routing.is_fallback" semantic conventions. It represents a
+ // value that indicates whether the matched route is a fallback route.
+ //
+ // Type: boolean
+ // RequirementLevel: ConditionallyRequired (If and only if a route was
+ // successfully matched.)
+ // Stability: experimental
+ // Examples: True
+ AspnetcoreRoutingIsFallbackKey = attribute.Key("aspnetcore.routing.is_fallback")
+)
+
+var (
+ // Lease was acquired
+ AspnetcoreRateLimitingResultAcquired = AspnetcoreRateLimitingResultKey.String("acquired")
+ // Lease request was rejected by the endpoint limiter
+ AspnetcoreRateLimitingResultEndpointLimiter = AspnetcoreRateLimitingResultKey.String("endpoint_limiter")
+ // Lease request was rejected by the global limiter
+ AspnetcoreRateLimitingResultGlobalLimiter = AspnetcoreRateLimitingResultKey.String("global_limiter")
+ // Lease request was canceled
+ AspnetcoreRateLimitingResultRequestCanceled = AspnetcoreRateLimitingResultKey.String("request_canceled")
+)
+
+// AspnetcoreDiagnosticsHandlerType returns an attribute KeyValue conforming
+// to the "aspnetcore.diagnostics.handler.type" semantic conventions. It
+// represents the full type name of the
+// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler)
+// implementation that handled the exception.
+func AspnetcoreDiagnosticsHandlerType(val string) attribute.KeyValue {
+ return AspnetcoreDiagnosticsHandlerTypeKey.String(val)
+}
+
+// AspnetcoreRateLimitingPolicy returns an attribute KeyValue conforming to
+// the "aspnetcore.rate_limiting.policy" semantic conventions. It represents
+// the rate limiting policy name.
+func AspnetcoreRateLimitingPolicy(val string) attribute.KeyValue {
+ return AspnetcoreRateLimitingPolicyKey.String(val)
+}
+
+// AspnetcoreRequestIsUnhandled returns an attribute KeyValue conforming to
+// the "aspnetcore.request.is_unhandled" semantic conventions. It represents
+// the flag indicating if request was handled by the application pipeline.
+func AspnetcoreRequestIsUnhandled(val bool) attribute.KeyValue {
+ return AspnetcoreRequestIsUnhandledKey.Bool(val)
+}
+
+// AspnetcoreRoutingIsFallback returns an attribute KeyValue conforming to
+// the "aspnetcore.routing.is_fallback" semantic conventions. It represents a
+// value that indicates whether the matched route is a fallback route.
+func AspnetcoreRoutingIsFallback(val bool) attribute.KeyValue {
+ return AspnetcoreRoutingIsFallbackKey.Bool(val)
+}
+
+// SignalR attributes
+const (
+ // SignalrConnectionStatusKey is the attribute Key conforming to the
+ // "signalr.connection.status" semantic conventions. It represents the
+ // SignalR HTTP connection closure status.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'app_shutdown', 'timeout'
+ SignalrConnectionStatusKey = attribute.Key("signalr.connection.status")
+
+ // SignalrTransportKey is the attribute Key conforming to the
+ // "signalr.transport" semantic conventions. It represents the [SignalR
+ // transport
+ // type](https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md)
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'web_sockets', 'long_polling'
+ SignalrTransportKey = attribute.Key("signalr.transport")
+)
+
+var (
+ // The connection was closed normally
+ SignalrConnectionStatusNormalClosure = SignalrConnectionStatusKey.String("normal_closure")
+ // The connection was closed due to a timeout
+ SignalrConnectionStatusTimeout = SignalrConnectionStatusKey.String("timeout")
+ // The connection was closed because the app is shutting down
+ SignalrConnectionStatusAppShutdown = SignalrConnectionStatusKey.String("app_shutdown")
+)
+
+var (
+ // ServerSentEvents protocol
+ SignalrTransportServerSentEvents = SignalrTransportKey.String("server_sent_events")
+ // LongPolling protocol
+ SignalrTransportLongPolling = SignalrTransportKey.String("long_polling")
+ // WebSockets protocol
+ SignalrTransportWebSockets = SignalrTransportKey.String("web_sockets")
+)
+
+// Describes JVM buffer metric attributes.
+const (
+ // JvmBufferPoolNameKey is the attribute Key conforming to the
+ // "jvm.buffer.pool.name" semantic conventions. It represents the name of
+ // the buffer pool.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: experimental
+ // Examples: 'mapped', 'direct'
+ // Note: Pool names are generally obtained via
+ // [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()).
+ JvmBufferPoolNameKey = attribute.Key("jvm.buffer.pool.name")
+)
+
+// JvmBufferPoolName returns an attribute KeyValue conforming to the
+// "jvm.buffer.pool.name" semantic conventions. It represents the name of the
+// buffer pool.
+func JvmBufferPoolName(val string) attribute.KeyValue {
+ return JvmBufferPoolNameKey.String(val)
+}
+
+// Describes JVM memory metric attributes.
+const (
+ // JvmMemoryPoolNameKey is the attribute Key conforming to the
+ // "jvm.memory.pool.name" semantic conventions. It represents the name of
+ // the memory pool.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space'
+ // Note: Pool names are generally obtained via
+ // [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()).
+ JvmMemoryPoolNameKey = attribute.Key("jvm.memory.pool.name")
+
+ // JvmMemoryTypeKey is the attribute Key conforming to the
+ // "jvm.memory.type" semantic conventions. It represents the type of
+ // memory.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'heap', 'non_heap'
+ JvmMemoryTypeKey = attribute.Key("jvm.memory.type")
+)
+
+var (
+ // Heap memory
+ JvmMemoryTypeHeap = JvmMemoryTypeKey.String("heap")
+ // Non-heap memory
+ JvmMemoryTypeNonHeap = JvmMemoryTypeKey.String("non_heap")
+)
+
+// JvmMemoryPoolName returns an attribute KeyValue conforming to the
+// "jvm.memory.pool.name" semantic conventions. It represents the name of the
+// memory pool.
+func JvmMemoryPoolName(val string) attribute.KeyValue {
+ return JvmMemoryPoolNameKey.String(val)
+}
+
+// Describes System metric attributes
+const (
+ // SystemDeviceKey is the attribute Key conforming to the "system.device"
+ // semantic conventions. It represents the device identifier
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '(identifier)'
+ SystemDeviceKey = attribute.Key("system.device")
+)
+
+// SystemDevice returns an attribute KeyValue conforming to the
+// "system.device" semantic conventions. It represents the device identifier
+func SystemDevice(val string) attribute.KeyValue {
+ return SystemDeviceKey.String(val)
+}
+
+// Describes System CPU metric attributes
+const (
+ // SystemCPULogicalNumberKey is the attribute Key conforming to the
+ // "system.cpu.logical_number" semantic conventions. It represents the
+ // logical CPU number [0..n-1]
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1
+ SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number")
+
+ // SystemCPUStateKey is the attribute Key conforming to the
+ // "system.cpu.state" semantic conventions. It represents the state of the
+ // CPU
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'idle', 'interrupt'
+ SystemCPUStateKey = attribute.Key("system.cpu.state")
+)
+
+var (
+ // user
+ SystemCPUStateUser = SystemCPUStateKey.String("user")
+ // system
+ SystemCPUStateSystem = SystemCPUStateKey.String("system")
+ // nice
+ SystemCPUStateNice = SystemCPUStateKey.String("nice")
+ // idle
+ SystemCPUStateIdle = SystemCPUStateKey.String("idle")
+ // iowait
+ SystemCPUStateIowait = SystemCPUStateKey.String("iowait")
+ // interrupt
+ SystemCPUStateInterrupt = SystemCPUStateKey.String("interrupt")
+ // steal
+ SystemCPUStateSteal = SystemCPUStateKey.String("steal")
+)
+
+// SystemCPULogicalNumber returns an attribute KeyValue conforming to the
+// "system.cpu.logical_number" semantic conventions. It represents the logical
+// CPU number [0..n-1]
+func SystemCPULogicalNumber(val int) attribute.KeyValue {
+ return SystemCPULogicalNumberKey.Int(val)
+}
+
+// Describes System Memory metric attributes
+const (
+ // SystemMemoryStateKey is the attribute Key conforming to the
+ // "system.memory.state" semantic conventions. It represents the memory
+ // state
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'free', 'cached'
+ SystemMemoryStateKey = attribute.Key("system.memory.state")
+)
+
+var (
+ // used
+ SystemMemoryStateUsed = SystemMemoryStateKey.String("used")
+ // free
+ SystemMemoryStateFree = SystemMemoryStateKey.String("free")
+ // shared
+ SystemMemoryStateShared = SystemMemoryStateKey.String("shared")
+ // buffers
+ SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers")
+ // cached
+ SystemMemoryStateCached = SystemMemoryStateKey.String("cached")
+)
+
+// Describes System Memory Paging metric attributes
+const (
+ // SystemPagingDirectionKey is the attribute Key conforming to the
+ // "system.paging.direction" semantic conventions. It represents the paging
+ // access direction
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'in'
+ SystemPagingDirectionKey = attribute.Key("system.paging.direction")
+
+ // SystemPagingStateKey is the attribute Key conforming to the
+ // "system.paging.state" semantic conventions. It represents the memory
+ // paging state
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'free'
+ SystemPagingStateKey = attribute.Key("system.paging.state")
+
+ // SystemPagingTypeKey is the attribute Key conforming to the
+ // "system.paging.type" semantic conventions. It represents the memory
+ // paging type
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'minor'
+ SystemPagingTypeKey = attribute.Key("system.paging.type")
+)
+
+var (
+ // in
+ SystemPagingDirectionIn = SystemPagingDirectionKey.String("in")
+ // out
+ SystemPagingDirectionOut = SystemPagingDirectionKey.String("out")
+)
+
+var (
+ // used
+ SystemPagingStateUsed = SystemPagingStateKey.String("used")
+ // free
+ SystemPagingStateFree = SystemPagingStateKey.String("free")
+)
+
+var (
+ // major
+ SystemPagingTypeMajor = SystemPagingTypeKey.String("major")
+ // minor
+ SystemPagingTypeMinor = SystemPagingTypeKey.String("minor")
+)
+
+// Describes Filesystem metric attributes
+const (
+ // SystemFilesystemModeKey is the attribute Key conforming to the
+ // "system.filesystem.mode" semantic conventions. It represents the
+ // filesystem mode
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'rw, ro'
+ SystemFilesystemModeKey = attribute.Key("system.filesystem.mode")
+
+ // SystemFilesystemMountpointKey is the attribute Key conforming to the
+ // "system.filesystem.mountpoint" semantic conventions. It represents the
+ // filesystem mount path
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/mnt/data'
+ SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint")
+
+ // SystemFilesystemStateKey is the attribute Key conforming to the
+ // "system.filesystem.state" semantic conventions. It represents the
+ // filesystem state
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'used'
+ SystemFilesystemStateKey = attribute.Key("system.filesystem.state")
+
+ // SystemFilesystemTypeKey is the attribute Key conforming to the
+ // "system.filesystem.type" semantic conventions. It represents the
+ // filesystem type
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'ext4'
+ SystemFilesystemTypeKey = attribute.Key("system.filesystem.type")
+)
+
+var (
+ // used
+ SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used")
+ // free
+ SystemFilesystemStateFree = SystemFilesystemStateKey.String("free")
+ // reserved
+ SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved")
+)
+
+var (
+ // fat32
+ SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32")
+ // exfat
+ SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat")
+ // ntfs
+ SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs")
+ // refs
+ SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs")
+ // hfsplus
+ SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus")
+ // ext4
+ SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4")
+)
+
+// SystemFilesystemMode returns an attribute KeyValue conforming to the
+// "system.filesystem.mode" semantic conventions. It represents the filesystem
+// mode
+func SystemFilesystemMode(val string) attribute.KeyValue {
+ return SystemFilesystemModeKey.String(val)
+}
+
+// SystemFilesystemMountpoint returns an attribute KeyValue conforming to
+// the "system.filesystem.mountpoint" semantic conventions. It represents the
+// filesystem mount path
+func SystemFilesystemMountpoint(val string) attribute.KeyValue {
+ return SystemFilesystemMountpointKey.String(val)
+}
+
+// Describes Network metric attributes
+const (
+ // SystemNetworkStateKey is the attribute Key conforming to the
+ // "system.network.state" semantic conventions. It represents a stateless
+ // protocol MUST NOT set this attribute
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'close_wait'
+ SystemNetworkStateKey = attribute.Key("system.network.state")
+)
+
+var (
+ // close
+ SystemNetworkStateClose = SystemNetworkStateKey.String("close")
+ // close_wait
+ SystemNetworkStateCloseWait = SystemNetworkStateKey.String("close_wait")
+ // closing
+ SystemNetworkStateClosing = SystemNetworkStateKey.String("closing")
+ // delete
+ SystemNetworkStateDelete = SystemNetworkStateKey.String("delete")
+ // established
+ SystemNetworkStateEstablished = SystemNetworkStateKey.String("established")
+ // fin_wait_1
+ SystemNetworkStateFinWait1 = SystemNetworkStateKey.String("fin_wait_1")
+ // fin_wait_2
+ SystemNetworkStateFinWait2 = SystemNetworkStateKey.String("fin_wait_2")
+ // last_ack
+ SystemNetworkStateLastAck = SystemNetworkStateKey.String("last_ack")
+ // listen
+ SystemNetworkStateListen = SystemNetworkStateKey.String("listen")
+ // syn_recv
+ SystemNetworkStateSynRecv = SystemNetworkStateKey.String("syn_recv")
+ // syn_sent
+ SystemNetworkStateSynSent = SystemNetworkStateKey.String("syn_sent")
+ // time_wait
+ SystemNetworkStateTimeWait = SystemNetworkStateKey.String("time_wait")
+)
+
+// Describes System Process metric attributes
+const (
+ // SystemProcessesStatusKey is the attribute Key conforming to the
+ // "system.processes.status" semantic conventions. It represents the
+ // process state, e.g., [Linux Process State
+ // Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES)
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'running'
+ SystemProcessesStatusKey = attribute.Key("system.processes.status")
+)
+
+var (
+ // running
+ SystemProcessesStatusRunning = SystemProcessesStatusKey.String("running")
+ // sleeping
+ SystemProcessesStatusSleeping = SystemProcessesStatusKey.String("sleeping")
+ // stopped
+ SystemProcessesStatusStopped = SystemProcessesStatusKey.String("stopped")
+ // defunct
+ SystemProcessesStatusDefunct = SystemProcessesStatusKey.String("defunct")
+)
+
+// These attributes may be used to describe the client in a connection-based
+// network interaction where there is one side that initiates the connection
+// (the client is the side that initiates the connection). This covers all TCP
+// network interactions since TCP is connection-based and one side initiates
+// the connection (an exception is made for peer-to-peer communication over TCP
+// where the "user-facing" surface of the protocol / API doesn't expose a clear
+// notion of client and server). This also covers UDP network interactions
+// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS.
+const (
+ // ClientAddressKey is the attribute Key conforming to the "client.address"
+ // semantic conventions. It represents the client address - domain name if
+ // available without reverse DNS lookup; otherwise, IP address or Unix
+ // domain socket name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock'
+ // Note: When observed from the server side, and when communicating through
+ // an intermediary, `client.address` SHOULD represent the client address
+ // behind any intermediaries, for example proxies, if it's available.
+ ClientAddressKey = attribute.Key("client.address")
+
+ // ClientPortKey is the attribute Key conforming to the "client.port"
+ // semantic conventions. It represents the client port number.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 65123
+ // Note: When observed from the server side, and when communicating through
+ // an intermediary, `client.port` SHOULD represent the client port behind
+ // any intermediaries, for example proxies, if it's available.
+ ClientPortKey = attribute.Key("client.port")
+)
+
+// ClientAddress returns an attribute KeyValue conforming to the
+// "client.address" semantic conventions. It represents the client address -
+// domain name if available without reverse DNS lookup; otherwise, IP address
+// or Unix domain socket name.
+func ClientAddress(val string) attribute.KeyValue {
+ return ClientAddressKey.String(val)
+}
+
+// ClientPort returns an attribute KeyValue conforming to the "client.port"
+// semantic conventions. It represents the client port number.
+func ClientPort(val int) attribute.KeyValue {
+ return ClientPortKey.Int(val)
+}
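
A hedged sketch of recording the client side of a connection on a server-side span (span stands for any trace.Span in scope; the values are illustrative):

    span.SetAttributes(
    	semconv.ClientAddress("10.1.2.80"),
    	semconv.ClientPort(65123),
    )
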
+
+// The attributes used to describe telemetry in the context of databases.
+const (
+ // DBCassandraConsistencyLevelKey is the attribute Key conforming to the
+ // "db.cassandra.consistency_level" semantic conventions. It represents the
+ // consistency level of the query. Based on consistency values from
+ // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
+
+ // DBCassandraCoordinatorDCKey is the attribute Key conforming to the
+ // "db.cassandra.coordinator.dc" semantic conventions. It represents the
+ // data center of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'us-west-2'
+ DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
+
+ // DBCassandraCoordinatorIDKey is the attribute Key conforming to the
+ // "db.cassandra.coordinator.id" semantic conventions. It represents the ID
+ // of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
+ DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
+
+ // DBCassandraIdempotenceKey is the attribute Key conforming to the
+ // "db.cassandra.idempotence" semantic conventions. It represents the
+ // whether or not the query is idempotent.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
+
+ // DBCassandraPageSizeKey is the attribute Key conforming to the
+ // "db.cassandra.page_size" semantic conventions. It represents the fetch
+ // size used for paging, i.e. how many rows will be returned at once.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 5000
+ DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
+
+ // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming
+ // to the "db.cassandra.speculative_execution_count" semantic conventions.
+ // It represents the number of times a query was speculatively executed.
+ // Not set or `0` if the query was not executed speculatively.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 0, 2
+ DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
+
+ // DBCassandraTableKey is the attribute Key conforming to the
+ // "db.cassandra.table" semantic conventions. It represents the name of the
+ // primary Cassandra table that the operation is acting upon, including the
+ // keyspace name (if applicable).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'mytable'
+ // Note: This mirrors the db.sql.table attribute but references cassandra
+ // rather than sql. It is not recommended to attempt any client-side
+ // parsing of `db.statement` just to get this property, but it should be
+ // set if it is provided by the library being instrumented. If the
+ // operation is acting upon an anonymous table, or more than one table,
+ // this value MUST NOT be set.
+ DBCassandraTableKey = attribute.Key("db.cassandra.table")
+
+ // DBConnectionStringKey is the attribute Key conforming to the
+ // "db.connection_string" semantic conventions. It represents the
+ // connection string used to connect to the database. It is recommended to
+ // remove embedded credentials.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;'
+ DBConnectionStringKey = attribute.Key("db.connection_string")
+
+ // DBCosmosDBClientIDKey is the attribute Key conforming to the
+ // "db.cosmosdb.client_id" semantic conventions. It represents the unique
+ // Cosmos client instance id.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d'
+ DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id")
+
+ // DBCosmosDBConnectionModeKey is the attribute Key conforming to the
+ // "db.cosmosdb.connection_mode" semantic conventions. It represents the
+ // cosmos client connection mode.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode")
+
+ // DBCosmosDBContainerKey is the attribute Key conforming to the
+ // "db.cosmosdb.container" semantic conventions. It represents the cosmos
+ // DB container name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'anystring'
+ DBCosmosDBContainerKey = attribute.Key("db.cosmosdb.container")
+
+ // DBCosmosDBOperationTypeKey is the attribute Key conforming to the
+ // "db.cosmosdb.operation_type" semantic conventions. It represents the
+ // cosmosDB Operation Type.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type")
+
+ // DBCosmosDBRequestChargeKey is the attribute Key conforming to the
+ // "db.cosmosdb.request_charge" semantic conventions. It represents the rU
+ // consumed for that operation
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 46.18, 1.0
+ DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge")
+
+ // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the
+ // "db.cosmosdb.request_content_length" semantic conventions. It represents
+ // the request payload size in bytes
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length")
+
+ // DBCosmosDBStatusCodeKey is the attribute Key conforming to the
+ // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos
+ // DB status code.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 200, 201
+ DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code")
+
+ // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the
+ // "db.cosmosdb.sub_status_code" semantic conventions. It represents the
+ // cosmos DB sub status code.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1000, 1002
+ DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code")
+
+ // DBElasticsearchClusterNameKey is the attribute Key conforming to the
+ // "db.elasticsearch.cluster.name" semantic conventions. It represents the
+ // represents the identifier of an Elasticsearch cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f'
+ DBElasticsearchClusterNameKey = attribute.Key("db.elasticsearch.cluster.name")
+
+ // DBElasticsearchNodeNameKey is the attribute Key conforming to the
+ // "db.elasticsearch.node.name" semantic conventions. It represents the
+ // represents the human-readable identifier of the node/instance to which a
+ // request was routed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'instance-0000000001'
+ DBElasticsearchNodeNameKey = attribute.Key("db.elasticsearch.node.name")
+
+ // DBInstanceIDKey is the attribute Key conforming to the "db.instance.id"
+ // semantic conventions. It represents an identifier (address, unique name,
+ // or any other identifier) of the database instance that is executing
+ // queries or mutations on the current connection. This is useful in cases
+ // where the database is running in a clustered environment and the
+ // instrumentation is able to record the node executing the query. The
+ // client may obtain this value in databases like MySQL using queries like
+ // `select @@hostname`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'mysql-e26b99z.example.com'
+ DBInstanceIDKey = attribute.Key("db.instance.id")
+
+ // DBJDBCDriverClassnameKey is the attribute Key conforming to the
+ // "db.jdbc.driver_classname" semantic conventions. It represents the
+ // fully-qualified class name of the [Java Database Connectivity
+ // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/)
+ // driver used to connect.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'org.postgresql.Driver',
+ // 'com.microsoft.sqlserver.jdbc.SQLServerDriver'
+ DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname")
+
+ // DBMongoDBCollectionKey is the attribute Key conforming to the
+ // "db.mongodb.collection" semantic conventions. It represents the MongoDB
+ // collection being accessed within the database stated in `db.name`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'customers', 'products'
+ DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection")
+
+ // DBMSSQLInstanceNameKey is the attribute Key conforming to the
+ // "db.mssql.instance_name" semantic conventions. It represents the
+ // Microsoft SQL Server [instance
+ // name](https://docs.microsoft.com/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+ // connecting to. This name is used to determine the port of a named
+ // instance.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MSSQLSERVER'
+ // Note: If setting a `db.mssql.instance_name`, `server.port` is no longer
+ // required (but still recommended if non-standard).
+ DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name")
+
+ // DBNameKey is the attribute Key conforming to the "db.name" semantic
+ // conventions. It represents the this attribute is used to report the name
+ // of the database being accessed. For commands that switch the database,
+ // this should be set to the target database (even if the command fails).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'customers', 'main'
+ // Note: In some SQL databases, the database name to be used is called
+ // "schema name". In case there are multiple layers that could be
+ // considered for database name (e.g. Oracle instance name and schema
+ // name), the database name to be used is the more specific layer (e.g.
+ // Oracle schema name).
+ DBNameKey = attribute.Key("db.name")
+
+ // DBOperationKey is the attribute Key conforming to the "db.operation"
+ // semantic conventions. It represents the name of the operation being
+ // executed, e.g. the [MongoDB command
+ // name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+ // such as `findAndModify`, or the SQL keyword.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'findAndModify', 'HMSET', 'SELECT'
+ // Note: When setting this to an SQL keyword, it is not recommended to
+ // attempt any client-side parsing of `db.statement` just to get this
+ // property, but it should be set if the operation name is provided by the
+ // library being instrumented. If the SQL statement has an ambiguous
+ // operation, or performs more than one operation, this value may be
+ // omitted.
+ DBOperationKey = attribute.Key("db.operation")
+
+ // DBRedisDBIndexKey is the attribute Key conforming to the
+ // "db.redis.database_index" semantic conventions. It represents the index
+ // of the database being accessed as used in the [`SELECT`
+ // command](https://redis.io/commands/select), provided as an integer. To
+ // be used instead of the generic `db.name` attribute.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 0, 1, 15
+ DBRedisDBIndexKey = attribute.Key("db.redis.database_index")
+
+ // DBSQLTableKey is the attribute Key conforming to the "db.sql.table"
+ // semantic conventions. It represents the name of the primary table that
+ // the operation is acting upon, including the database name (if
+ // applicable).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'public.users', 'customers'
+ // Note: It is not recommended to attempt any client-side parsing of
+ // `db.statement` just to get this property, but it should be set if it is
+ // provided by the library being instrumented. If the operation is acting
+ // upon an anonymous table, or more than one table, this value MUST NOT be
+ // set.
+ DBSQLTableKey = attribute.Key("db.sql.table")
+
+ // DBStatementKey is the attribute Key conforming to the "db.statement"
+ // semantic conventions. It represents the database statement being
+ // executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"'
+ DBStatementKey = attribute.Key("db.statement")
+
+ // DBSystemKey is the attribute Key conforming to the "db.system" semantic
+ // conventions. It represents an identifier for the database management
+ // system (DBMS) product being used. See below for a list of well-known
+ // identifiers.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ DBSystemKey = attribute.Key("db.system")
+
+ // DBUserKey is the attribute Key conforming to the "db.user" semantic
+ // conventions. It represents the username for accessing the database.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'readonly_user', 'reporting_user'
+ DBUserKey = attribute.Key("db.user")
+)
+
+var (
+ // all
+ DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
+ // each_quorum
+ DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
+ // quorum
+ DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
+ // local_quorum
+ DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
+ // one
+ DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
+ // two
+ DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
+ // three
+ DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
+ // local_one
+ DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
+ // any
+ DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
+ // serial
+ DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
+ // local_serial
+ DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
+)
+
+var (
+ // Gateway (HTTP) connections mode
+ DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway")
+ // Direct connection
+ DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct")
+)
+
+var (
+ // invalid
+ DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid")
+ // create
+ DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create")
+ // patch
+ DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch")
+ // read
+ DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read")
+ // read_feed
+ DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed")
+ // delete
+ DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete")
+ // replace
+ DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace")
+ // execute
+ DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute")
+ // query
+ DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query")
+ // head
+ DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head")
+ // head_feed
+ DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed")
+ // upsert
+ DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert")
+ // batch
+ DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch")
+ // query_plan
+ DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan")
+ // execute_javascript
+ DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript")
+)
+
+var (
+ // Some other SQL database. Fallback only. See notes
+ DBSystemOtherSQL = DBSystemKey.String("other_sql")
+ // Microsoft SQL Server
+ DBSystemMSSQL = DBSystemKey.String("mssql")
+ // Microsoft SQL Server Compact
+ DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact")
+ // MySQL
+ DBSystemMySQL = DBSystemKey.String("mysql")
+ // Oracle Database
+ DBSystemOracle = DBSystemKey.String("oracle")
+ // IBM DB2
+ DBSystemDB2 = DBSystemKey.String("db2")
+ // PostgreSQL
+ DBSystemPostgreSQL = DBSystemKey.String("postgresql")
+ // Amazon Redshift
+ DBSystemRedshift = DBSystemKey.String("redshift")
+ // Apache Hive
+ DBSystemHive = DBSystemKey.String("hive")
+ // Cloudscape
+ DBSystemCloudscape = DBSystemKey.String("cloudscape")
+ // HyperSQL DataBase
+ DBSystemHSQLDB = DBSystemKey.String("hsqldb")
+ // Progress Database
+ DBSystemProgress = DBSystemKey.String("progress")
+ // SAP MaxDB
+ DBSystemMaxDB = DBSystemKey.String("maxdb")
+ // SAP HANA
+ DBSystemHanaDB = DBSystemKey.String("hanadb")
+ // Ingres
+ DBSystemIngres = DBSystemKey.String("ingres")
+ // FirstSQL
+ DBSystemFirstSQL = DBSystemKey.String("firstsql")
+ // EnterpriseDB
+ DBSystemEDB = DBSystemKey.String("edb")
+ // InterSystems Caché
+ DBSystemCache = DBSystemKey.String("cache")
+ // Adabas (Adaptable Database System)
+ DBSystemAdabas = DBSystemKey.String("adabas")
+ // Firebird
+ DBSystemFirebird = DBSystemKey.String("firebird")
+ // Apache Derby
+ DBSystemDerby = DBSystemKey.String("derby")
+ // FileMaker
+ DBSystemFilemaker = DBSystemKey.String("filemaker")
+ // Informix
+ DBSystemInformix = DBSystemKey.String("informix")
+ // InstantDB
+ DBSystemInstantDB = DBSystemKey.String("instantdb")
+ // InterBase
+ DBSystemInterbase = DBSystemKey.String("interbase")
+ // MariaDB
+ DBSystemMariaDB = DBSystemKey.String("mariadb")
+ // Netezza
+ DBSystemNetezza = DBSystemKey.String("netezza")
+ // Pervasive PSQL
+ DBSystemPervasive = DBSystemKey.String("pervasive")
+ // PointBase
+ DBSystemPointbase = DBSystemKey.String("pointbase")
+ // SQLite
+ DBSystemSqlite = DBSystemKey.String("sqlite")
+ // Sybase
+ DBSystemSybase = DBSystemKey.String("sybase")
+ // Teradata
+ DBSystemTeradata = DBSystemKey.String("teradata")
+ // Vertica
+ DBSystemVertica = DBSystemKey.String("vertica")
+ // H2
+ DBSystemH2 = DBSystemKey.String("h2")
+ // ColdFusion IMQ
+ DBSystemColdfusion = DBSystemKey.String("coldfusion")
+ // Apache Cassandra
+ DBSystemCassandra = DBSystemKey.String("cassandra")
+ // Apache HBase
+ DBSystemHBase = DBSystemKey.String("hbase")
+ // MongoDB
+ DBSystemMongoDB = DBSystemKey.String("mongodb")
+ // Redis
+ DBSystemRedis = DBSystemKey.String("redis")
+ // Couchbase
+ DBSystemCouchbase = DBSystemKey.String("couchbase")
+ // CouchDB
+ DBSystemCouchDB = DBSystemKey.String("couchdb")
+ // Microsoft Azure Cosmos DB
+ DBSystemCosmosDB = DBSystemKey.String("cosmosdb")
+ // Amazon DynamoDB
+ DBSystemDynamoDB = DBSystemKey.String("dynamodb")
+ // Neo4j
+ DBSystemNeo4j = DBSystemKey.String("neo4j")
+ // Apache Geode
+ DBSystemGeode = DBSystemKey.String("geode")
+ // Elasticsearch
+ DBSystemElasticsearch = DBSystemKey.String("elasticsearch")
+ // Memcached
+ DBSystemMemcached = DBSystemKey.String("memcached")
+ // CockroachDB
+ DBSystemCockroachdb = DBSystemKey.String("cockroachdb")
+ // OpenSearch
+ DBSystemOpensearch = DBSystemKey.String("opensearch")
+ // ClickHouse
+ DBSystemClickhouse = DBSystemKey.String("clickhouse")
+ // Cloud Spanner
+ DBSystemSpanner = DBSystemKey.String("spanner")
+ // Trino
+ DBSystemTrino = DBSystemKey.String("trino")
+)
+
+// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.dc" semantic conventions. It represents the data
+// center of the coordinating node for a query.
+func DBCassandraCoordinatorDC(val string) attribute.KeyValue {
+ return DBCassandraCoordinatorDCKey.String(val)
+}
+
+// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
+// the coordinating node for a query.
+func DBCassandraCoordinatorID(val string) attribute.KeyValue {
+ return DBCassandraCoordinatorIDKey.String(val)
+}
+
+// DBCassandraIdempotence returns an attribute KeyValue conforming to the
+// "db.cassandra.idempotence" semantic conventions. It represents the whether
+// or not the query is idempotent.
+func DBCassandraIdempotence(val bool) attribute.KeyValue {
+ return DBCassandraIdempotenceKey.Bool(val)
+}
+
+// DBCassandraPageSize returns an attribute KeyValue conforming to the
+// "db.cassandra.page_size" semantic conventions. It represents the fetch size
+// used for paging, i.e. how many rows will be returned at once.
+func DBCassandraPageSize(val int) attribute.KeyValue {
+ return DBCassandraPageSizeKey.Int(val)
+}
+
+// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
+// conforming to the "db.cassandra.speculative_execution_count" semantic
+// conventions. It represents the number of times a query was speculatively
+// executed. Not set or `0` if the query was not executed speculatively.
+func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
+ return DBCassandraSpeculativeExecutionCountKey.Int(val)
+}
+
+// DBCassandraTable returns an attribute KeyValue conforming to the
+// "db.cassandra.table" semantic conventions. It represents the name of the
+// primary Cassandra table that the operation is acting upon, including the
+// keyspace name (if applicable).
+func DBCassandraTable(val string) attribute.KeyValue {
+ return DBCassandraTableKey.String(val)
+}
+
+// DBConnectionString returns an attribute KeyValue conforming to the
+// "db.connection_string" semantic conventions. It represents the connection
+// string used to connect to the database. It is recommended to remove embedded
+// credentials.
+func DBConnectionString(val string) attribute.KeyValue {
+ return DBConnectionStringKey.String(val)
+}
+
+// DBCosmosDBClientID returns an attribute KeyValue conforming to the
+// "db.cosmosdb.client_id" semantic conventions. It represents the unique
+// Cosmos client instance id.
+func DBCosmosDBClientID(val string) attribute.KeyValue {
+ return DBCosmosDBClientIDKey.String(val)
+}
+
+// DBCosmosDBContainer returns an attribute KeyValue conforming to the
+// "db.cosmosdb.container" semantic conventions. It represents the cosmos DB
+// container name.
+func DBCosmosDBContainer(val string) attribute.KeyValue {
+ return DBCosmosDBContainerKey.String(val)
+}
+
+// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the
+// "db.cosmosdb.request_charge" semantic conventions. It represents the rU
+// consumed for that operation
+func DBCosmosDBRequestCharge(val float64) attribute.KeyValue {
+ return DBCosmosDBRequestChargeKey.Float64(val)
+}
+
+// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming
+// to the "db.cosmosdb.request_content_length" semantic conventions. It
+// represents the request payload size in bytes
+func DBCosmosDBRequestContentLength(val int) attribute.KeyValue {
+ return DBCosmosDBRequestContentLengthKey.Int(val)
+}
+
+// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB
+// status code.
+func DBCosmosDBStatusCode(val int) attribute.KeyValue {
+ return DBCosmosDBStatusCodeKey.Int(val)
+}
+
+// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos
+// DB sub status code.
+func DBCosmosDBSubStatusCode(val int) attribute.KeyValue {
+ return DBCosmosDBSubStatusCodeKey.Int(val)
+}
+
+// DBElasticsearchClusterName returns an attribute KeyValue conforming to
+// the "db.elasticsearch.cluster.name" semantic conventions. It represents the
+// represents the identifier of an Elasticsearch cluster.
+func DBElasticsearchClusterName(val string) attribute.KeyValue {
+ return DBElasticsearchClusterNameKey.String(val)
+}
+
+// DBElasticsearchNodeName returns an attribute KeyValue conforming to the
+// "db.elasticsearch.node.name" semantic conventions. It represents the
+// represents the human-readable identifier of the node/instance to which a
+// request was routed.
+func DBElasticsearchNodeName(val string) attribute.KeyValue {
+ return DBElasticsearchNodeNameKey.String(val)
+}
+
+// DBInstanceID returns an attribute KeyValue conforming to the
+// "db.instance.id" semantic conventions. It represents an identifier (address,
+// unique name, or any other identifier) of the database instance that is
+// executing queries or mutations on the current connection. This is useful in
+// cases where the database is running in a clustered environment and the
+// instrumentation is able to record the node executing the query. The client
+// may obtain this value in databases like MySQL using queries like `select
+// @@hostname`.
+func DBInstanceID(val string) attribute.KeyValue {
+ return DBInstanceIDKey.String(val)
+}
+
+// DBJDBCDriverClassname returns an attribute KeyValue conforming to the
+// "db.jdbc.driver_classname" semantic conventions. It represents the
+// fully-qualified class name of the [Java Database Connectivity
+// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver
+// used to connect.
+func DBJDBCDriverClassname(val string) attribute.KeyValue {
+ return DBJDBCDriverClassnameKey.String(val)
+}
+
+// DBMongoDBCollection returns an attribute KeyValue conforming to the
+// "db.mongodb.collection" semantic conventions. It represents the MongoDB
+// collection being accessed within the database stated in `db.name`.
+func DBMongoDBCollection(val string) attribute.KeyValue {
+ return DBMongoDBCollectionKey.String(val)
+}
+
+// DBMSSQLInstanceName returns an attribute KeyValue conforming to the
+// "db.mssql.instance_name" semantic conventions. It represents the Microsoft
+// SQL Server [instance
+// name](https://docs.microsoft.com/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+// being connected to. This name is used to determine the port of a named
+// instance.
+func DBMSSQLInstanceName(val string) attribute.KeyValue {
+ return DBMSSQLInstanceNameKey.String(val)
+}
+
+// DBName returns an attribute KeyValue conforming to the "db.name" semantic
+// conventions. It represents the name of the database being accessed. For
+// commands that switch the database, this
+// should be set to the target database (even if the command fails).
+func DBName(val string) attribute.KeyValue {
+ return DBNameKey.String(val)
+}
+
+// DBOperation returns an attribute KeyValue conforming to the
+// "db.operation" semantic conventions. It represents the name of the operation
+// being executed, e.g. the [MongoDB command
+// name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+// such as `findAndModify`, or the SQL keyword.
+func DBOperation(val string) attribute.KeyValue {
+ return DBOperationKey.String(val)
+}
+
+// DBRedisDBIndex returns an attribute KeyValue conforming to the
+// "db.redis.database_index" semantic conventions. It represents the index of
+// the database being accessed as used in the [`SELECT`
+// command](https://redis.io/commands/select), provided as an integer. To be
+// used instead of the generic `db.name` attribute.
+func DBRedisDBIndex(val int) attribute.KeyValue {
+ return DBRedisDBIndexKey.Int(val)
+}
+
+// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table"
+// semantic conventions. It represents the name of the primary table that the
+// operation is acting upon, including the database name (if applicable).
+func DBSQLTable(val string) attribute.KeyValue {
+ return DBSQLTableKey.String(val)
+}
+
+// DBStatement returns an attribute KeyValue conforming to the
+// "db.statement" semantic conventions. It represents the database statement
+// being executed.
+func DBStatement(val string) attribute.KeyValue {
+ return DBStatementKey.String(val)
+}
+
+// DBUser returns an attribute KeyValue conforming to the "db.user" semantic
+// conventions. It represents the username for accessing the database.
+func DBUser(val string) attribute.KeyValue {
+ return DBUserKey.String(val)
+}
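+
+// Usage sketch (illustrative, not part of the generated conventions): the DB
+// helpers above can be combined to annotate a database client span. The
+// tracer, ctx, and span values below are assumed placeholders; Start,
+// SetAttributes, and End are the standard go.opentelemetry.io/otel/trace
+// Span APIs.
+//
+//	ctx, span := tracer.Start(ctx, "SELECT customers")
+//	defer span.End()
+//	span.SetAttributes(
+//		DBSystemPostgreSQL,
+//		DBName("customers"),
+//		DBOperation("SELECT"),
+//		DBStatement("SELECT * FROM customers WHERE id = $1"),
+//		DBUser("readonly_user"),
+//	)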
+
+// Describes deprecated HTTP attributes.
+const (
+ // HTTPFlavorKey is the attribute Key conforming to the "http.flavor"
+ // semantic conventions.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Deprecated: use `network.protocol.name` instead.
+ HTTPFlavorKey = attribute.Key("http.flavor")
+
+ // HTTPMethodKey is the attribute Key conforming to the "http.method"
+ // semantic conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 'GET', 'POST', 'HEAD'
+ // Deprecated: use `http.request.method` instead.
+ HTTPMethodKey = attribute.Key("http.method")
+
+ // HTTPRequestContentLengthKey is the attribute Key conforming to the
+ // "http.request_content_length" semantic conventions.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 3495
+ // Deprecated: use `http.request.header.content-length` instead.
+ HTTPRequestContentLengthKey = attribute.Key("http.request_content_length")
+
+ // HTTPResponseContentLengthKey is the attribute Key conforming to the
+ // "http.response_content_length" semantic conventions.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 3495
+ // Deprecated: use `http.response.header.content-length` instead.
+ HTTPResponseContentLengthKey = attribute.Key("http.response_content_length")
+
+ // HTTPSchemeKey is the attribute Key conforming to the "http.scheme"
+ // semantic conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 'http', 'https'
+ // Deprecated: use `url.scheme` instead.
+ HTTPSchemeKey = attribute.Key("http.scheme")
+
+ // HTTPStatusCodeKey is the attribute Key conforming to the
+ // "http.status_code" semantic conventions.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 200
+ // Deprecated: use `http.response.status_code` instead.
+ HTTPStatusCodeKey = attribute.Key("http.status_code")
+
+ // HTTPTargetKey is the attribute Key conforming to the "http.target"
+ // semantic conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: '/search?q=OpenTelemetry#SemConv'
+ // Deprecated: use `url.path` and `url.query` instead.
+ HTTPTargetKey = attribute.Key("http.target")
+
+ // HTTPURLKey is the attribute Key conforming to the "http.url" semantic
+ // conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv'
+ // Deprecated: use `url.full` instead.
+ HTTPURLKey = attribute.Key("http.url")
+
+ // HTTPUserAgentKey is the attribute Key conforming to the
+ // "http.user_agent" semantic conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU
+ // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko)
+ // Version/14.1.2 Mobile/15E148 Safari/604.1'
+ // Deprecated: use `user_agent.original` instead.
+ HTTPUserAgentKey = attribute.Key("http.user_agent")
+)
+
+var (
+ // HTTP/1.0
+ //
+ // Deprecated: use `network.protocol.name` instead.
+ HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0")
+ // HTTP/1.1
+ //
+ // Deprecated: use `network.protocol.name` instead.
+ HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1")
+ // HTTP/2
+ //
+ // Deprecated: use `network.protocol.name` instead.
+ HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0")
+ // HTTP/3
+ //
+ // Deprecated: use `network.protocol.name` instead.
+ HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0")
+ // SPDY protocol
+ //
+ // Deprecated: use `network.protocol.name` instead.
+ HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY")
+ // QUIC protocol
+ //
+ // Deprecated: use `network.protocol.name` instead.
+ HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC")
+)
+
+// HTTPMethod returns an attribute KeyValue conforming to the "http.method"
+// semantic conventions.
+//
+// Deprecated: use `http.request.method` instead.
+func HTTPMethod(val string) attribute.KeyValue {
+ return HTTPMethodKey.String(val)
+}
+
+// HTTPRequestContentLength returns an attribute KeyValue conforming to the
+// "http.request_content_length" semantic conventions.
+//
+// Deprecated: use `http.request.header.content-length` instead.
+func HTTPRequestContentLength(val int) attribute.KeyValue {
+ return HTTPRequestContentLengthKey.Int(val)
+}
+
+// HTTPResponseContentLength returns an attribute KeyValue conforming to the
+// "http.response_content_length" semantic conventions.
+//
+// Deprecated: use `http.response.header.content-length` instead.
+func HTTPResponseContentLength(val int) attribute.KeyValue {
+ return HTTPResponseContentLengthKey.Int(val)
+}
+
+// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme"
+// semantic conventions.
+//
+// Deprecated: use `url.scheme` instead.
+func HTTPScheme(val string) attribute.KeyValue {
+ return HTTPSchemeKey.String(val)
+}
+
+// HTTPStatusCode returns an attribute KeyValue conforming to the
+// "http.status_code" semantic conventions.
+//
+// Deprecated: use `http.response.status_code` instead.
+func HTTPStatusCode(val int) attribute.KeyValue {
+ return HTTPStatusCodeKey.Int(val)
+}
+
+// HTTPTarget returns an attribute KeyValue conforming to the "http.target"
+// semantic conventions.
+//
+// Deprecated: use `url.path` and `url.query` instead.
+func HTTPTarget(val string) attribute.KeyValue {
+ return HTTPTargetKey.String(val)
+}
+
+// HTTPURL returns an attribute KeyValue conforming to the "http.url"
+// semantic conventions.
+//
+// Deprecated: use `url.full` instead.
+func HTTPURL(val string) attribute.KeyValue {
+ return HTTPURLKey.String(val)
+}
+
+// HTTPUserAgent returns an attribute KeyValue conforming to the
+// "http.user_agent" semantic conventions.
+//
+// Deprecated: use `user_agent.original` instead.
+func HTTPUserAgent(val string) attribute.KeyValue {
+ return HTTPUserAgentKey.String(val)
+}
+
+// These attributes may be used for any network related operation.
+const (
+ // NetHostNameKey is the attribute Key conforming to the "net.host.name"
+ // semantic conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 'example.com'
+ // Deprecated: use `server.address`.
+ NetHostNameKey = attribute.Key("net.host.name")
+
+ // NetHostPortKey is the attribute Key conforming to the "net.host.port"
+ // semantic conventions.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 8080
+ // Deprecated: use `server.port`.
+ NetHostPortKey = attribute.Key("net.host.port")
+
+ // NetPeerNameKey is the attribute Key conforming to the "net.peer.name"
+ // semantic conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 'example.com'
+ // Deprecated: use `server.address` on client spans and `client.address` on
+ // server spans.
+ NetPeerNameKey = attribute.Key("net.peer.name")
+
+ // NetPeerPortKey is the attribute Key conforming to the "net.peer.port"
+ // semantic conventions.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 8080
+ // Deprecated: use `server.port` on client spans and `client.port` on
+ // server spans.
+ NetPeerPortKey = attribute.Key("net.peer.port")
+
+ // NetProtocolNameKey is the attribute Key conforming to the
+ // "net.protocol.name" semantic conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 'amqp', 'http', 'mqtt'
+ // Deprecated: use `network.protocol.name`.
+ NetProtocolNameKey = attribute.Key("net.protocol.name")
+
+ // NetProtocolVersionKey is the attribute Key conforming to the
+ // "net.protocol.version" semantic conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: '3.1.1'
+ // Deprecated: use `network.protocol.version`.
+ NetProtocolVersionKey = attribute.Key("net.protocol.version")
+
+ // NetSockFamilyKey is the attribute Key conforming to the
+ // "net.sock.family" semantic conventions.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Deprecated: use `network.transport` and `network.type`.
+ NetSockFamilyKey = attribute.Key("net.sock.family")
+
+ // NetSockHostAddrKey is the attribute Key conforming to the
+ // "net.sock.host.addr" semantic conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: '/var/my.sock'
+ // Deprecated: use `network.local.address`.
+ NetSockHostAddrKey = attribute.Key("net.sock.host.addr")
+
+ // NetSockHostPortKey is the attribute Key conforming to the
+ // "net.sock.host.port" semantic conventions.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 8080
+ // Deprecated: use `network.local.port`.
+ NetSockHostPortKey = attribute.Key("net.sock.host.port")
+
+ // NetSockPeerAddrKey is the attribute Key conforming to the
+ // "net.sock.peer.addr" semantic conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: '192.168.0.1'
+ // Deprecated: use `network.peer.address`.
+ NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr")
+
+ // NetSockPeerNameKey is the attribute Key conforming to the
+ // "net.sock.peer.name" semantic conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: '/var/my.sock'
+ // Deprecated: no replacement at this time.
+ NetSockPeerNameKey = attribute.Key("net.sock.peer.name")
+
+ // NetSockPeerPortKey is the attribute Key conforming to the
+ // "net.sock.peer.port" semantic conventions.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 65531
+ // Deprecated: use `network.peer.port`.
+ NetSockPeerPortKey = attribute.Key("net.sock.peer.port")
+
+ // NetTransportKey is the attribute Key conforming to the "net.transport"
+ // semantic conventions.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Deprecated: use `network.transport`.
+ NetTransportKey = attribute.Key("net.transport")
+)
+
+var (
+ // IPv4 address
+ //
+ // Deprecated: use `network.transport` and `network.type`.
+ NetSockFamilyInet = NetSockFamilyKey.String("inet")
+ // IPv6 address
+ //
+ // Deprecated: use `network.transport` and `network.type`.
+ NetSockFamilyInet6 = NetSockFamilyKey.String("inet6")
+ // Unix domain socket path
+ //
+ // Deprecated: use `network.transport` and `network.type`.
+ NetSockFamilyUnix = NetSockFamilyKey.String("unix")
+)
+
+var (
+ // ip_tcp
+ //
+ // Deprecated: use `network.transport`.
+ NetTransportTCP = NetTransportKey.String("ip_tcp")
+ // ip_udp
+ //
+ // Deprecated: use `network.transport`.
+ NetTransportUDP = NetTransportKey.String("ip_udp")
+ // Named or anonymous pipe
+ //
+ // Deprecated: use `network.transport`.
+ NetTransportPipe = NetTransportKey.String("pipe")
+ // In-process communication
+ //
+ // Deprecated: use `network.transport`.
+ NetTransportInProc = NetTransportKey.String("inproc")
+ // Something else (non IP-based)
+ //
+ // Deprecated: use `network.transport`.
+ NetTransportOther = NetTransportKey.String("other")
+)
+
+// NetHostName returns an attribute KeyValue conforming to the
+// "net.host.name" semantic conventions.
+//
+// Deprecated: use `server.address`.
+func NetHostName(val string) attribute.KeyValue {
+ return NetHostNameKey.String(val)
+}
+
+// NetHostPort returns an attribute KeyValue conforming to the
+// "net.host.port" semantic conventions.
+//
+// Deprecated: use `server.port`.
+func NetHostPort(val int) attribute.KeyValue {
+ return NetHostPortKey.Int(val)
+}
+
+// NetPeerName returns an attribute KeyValue conforming to the
+// "net.peer.name" semantic conventions.
+//
+// Deprecated: use `server.address` on client spans and `client.address` on
+// server spans.
+func NetPeerName(val string) attribute.KeyValue {
+ return NetPeerNameKey.String(val)
+}
+
+// NetPeerPort returns an attribute KeyValue conforming to the
+// "net.peer.port" semantic conventions.
+//
+// Deprecated: use `server.port` on client spans and `client.port` on server
+// spans.
+func NetPeerPort(val int) attribute.KeyValue {
+ return NetPeerPortKey.Int(val)
+}
+
+// NetProtocolName returns an attribute KeyValue conforming to the
+// "net.protocol.name" semantic conventions.
+//
+// Deprecated: use `network.protocol.name`.
+func NetProtocolName(val string) attribute.KeyValue {
+ return NetProtocolNameKey.String(val)
+}
+
+// NetProtocolVersion returns an attribute KeyValue conforming to the
+// "net.protocol.version" semantic conventions.
+//
+// Deprecated: use `network.protocol.version`.
+func NetProtocolVersion(val string) attribute.KeyValue {
+ return NetProtocolVersionKey.String(val)
+}
+
+// NetSockHostAddr returns an attribute KeyValue conforming to the
+// "net.sock.host.addr" semantic conventions.
+//
+// Deprecated: use `network.local.address`.
+func NetSockHostAddr(val string) attribute.KeyValue {
+ return NetSockHostAddrKey.String(val)
+}
+
+// NetSockHostPort returns an attribute KeyValue conforming to the
+// "net.sock.host.port" semantic conventions.
+//
+// Deprecated: use `network.local.port`.
+func NetSockHostPort(val int) attribute.KeyValue {
+ return NetSockHostPortKey.Int(val)
+}
+
+// NetSockPeerAddr returns an attribute KeyValue conforming to the
+// "net.sock.peer.addr" semantic conventions.
+//
+// Deprecated: use `network.peer.address`.
+func NetSockPeerAddr(val string) attribute.KeyValue {
+ return NetSockPeerAddrKey.String(val)
+}
+
+// NetSockPeerName returns an attribute KeyValue conforming to the
+// "net.sock.peer.name" semantic conventions.
+//
+// Deprecated: no replacement at this time.
+func NetSockPeerName(val string) attribute.KeyValue {
+ return NetSockPeerNameKey.String(val)
+}
+
+// NetSockPeerPort returns an attribute KeyValue conforming to the
+// "net.sock.peer.port" semantic conventions.
+//
+// Deprecated: use `network.peer.port`.
+func NetSockPeerPort(val int) attribute.KeyValue {
+ return NetSockPeerPortKey.Int(val)
+}
+
+// These attributes may be used to describe the receiver of a network
+// exchange/packet. These should be used when there is no client/server
+// relationship between the two sides, or when that relationship is unknown.
+// This covers low-level network interactions (e.g. packet tracing) where you
+// don't know if there was a connection or which side initiated it. This also
+// covers unidirectional UDP flows and peer-to-peer communication where the
+// "user-facing" surface of the protocol / API doesn't expose a clear notion of
+// client and server.
+const (
+ // DestinationAddressKey is the attribute Key conforming to the
+ // "destination.address" semantic conventions. It represents the
+ // destination address - domain name if available without reverse DNS
+ // lookup; otherwise, IP address or Unix domain socket name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock'
+ // Note: When observed from the source side, and when communicating through
+ // an intermediary, `destination.address` SHOULD represent the destination
+ // address behind any intermediaries, for example proxies, if it's
+ // available.
+ DestinationAddressKey = attribute.Key("destination.address")
+
+ // DestinationPortKey is the attribute Key conforming to the
+ // "destination.port" semantic conventions. It represents the destination
+ // port number
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 3389, 2888
+ DestinationPortKey = attribute.Key("destination.port")
+)
+
+// DestinationAddress returns an attribute KeyValue conforming to the
+// "destination.address" semantic conventions. It represents the destination
+// address - domain name if available without reverse DNS lookup; otherwise, IP
+// address or Unix domain socket name.
+func DestinationAddress(val string) attribute.KeyValue {
+ return DestinationAddressKey.String(val)
+}
+
+// DestinationPort returns an attribute KeyValue conforming to the
+// "destination.port" semantic conventions. It represents the destination port
+// number
+func DestinationPort(val int) attribute.KeyValue {
+ return DestinationPortKey.Int(val)
+}
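+
+// Usage sketch (illustrative): describing the far side of a peer-to-peer or
+// unidirectional exchange where no client/server roles apply. The span value
+// is an assumed placeholder.
+//
+//	span.SetAttributes(
+//		DestinationAddress("10.1.2.80"),
+//		DestinationPort(3389),
+//	)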
+
+// These attributes may be used for any disk related operation.
+const (
+ // DiskIoDirectionKey is the attribute Key conforming to the
+ // "disk.io.direction" semantic conventions. It represents the disk IO
+ // operation direction.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'read'
+ DiskIoDirectionKey = attribute.Key("disk.io.direction")
+)
+
+var (
+ // read
+ DiskIoDirectionRead = DiskIoDirectionKey.String("read")
+ // write
+ DiskIoDirectionWrite = DiskIoDirectionKey.String("write")
+)
+
+// The shared attributes used to report an error.
+const (
+ // ErrorTypeKey is the attribute Key conforming to the "error.type"
+	// semantic conventions. It describes a class of error the
+ // operation ended with.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'timeout', 'java.net.UnknownHostException',
+ // 'server_certificate_invalid', '500'
+ // Note: The `error.type` SHOULD be predictable and SHOULD have low
+ // cardinality.
+ // Instrumentations SHOULD document the list of errors they report.
+ //
+ // The cardinality of `error.type` within one instrumentation library
+ // SHOULD be low.
+ // Telemetry consumers that aggregate data from multiple instrumentation
+ // libraries and applications
+ // should be prepared for `error.type` to have high cardinality at query
+ // time when no
+ // additional filters are applied.
+ //
+ // If the operation has completed successfully, instrumentations SHOULD NOT
+ // set `error.type`.
+ //
+ // If a specific domain defines its own set of error identifiers (such as
+ // HTTP or gRPC status codes),
+ // it's RECOMMENDED to:
+ //
+ // * Use a domain-specific attribute
+ // * Set `error.type` to capture all errors, regardless of whether they are
+ // defined within the domain-specific set or not.
+ ErrorTypeKey = attribute.Key("error.type")
+)
+
+var (
+ // A fallback error value to be used when the instrumentation doesn't define a custom value
+ ErrorTypeOther = ErrorTypeKey.String("_OTHER")
+)
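+
+// Usage sketch (illustrative): applying the guidance from the ErrorTypeKey
+// note: set a low-cardinality error.type value, falling back to _OTHER when
+// the instrumentation has no custom value. The err and span values are
+// assumed placeholders; errors.As and net.DNSError are from the standard
+// library.
+//
+//	if err != nil {
+//		errType := ErrorTypeOther
+//		var dnsErr *net.DNSError
+//		if errors.As(err, &dnsErr) {
+//			errType = ErrorTypeKey.String("net.DNSError")
+//		}
+//		span.SetAttributes(errType)
+//	}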
+
+// The shared attributes used to report a single exception associated with a
+// span or log.
+const (
+ // ExceptionEscapedKey is the attribute Key conforming to the
+	// "exception.escaped" semantic conventions. It SHOULD be set to true
+	// if the exception event is recorded at a point where it is
+ // known that the exception is escaping the scope of the span.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: An exception is considered to have escaped (or left) the scope of
+ // a span,
+ // if that span is ended while the exception is still logically "in
+ // flight".
+ // This may be actually "in flight" in some languages (e.g. if the
+ // exception
+ // is passed to a Context manager's `__exit__` method in Python) but will
+ // usually be caught at the point of recording the exception in most
+ // languages.
+ //
+ // It is usually not possible to determine at the point where an exception
+ // is thrown
+ // whether it will escape the scope of a span.
+ // However, it is trivial to know that an exception
+ // will escape, if one checks for an active exception just before ending
+ // the span,
+ // as done in the [example for recording span
+ // exceptions](#recording-an-exception).
+ //
+ // It follows that an exception may still escape the scope of the span
+ // even if the `exception.escaped` attribute was not set or set to false,
+ // since the event might have been recorded at a time where it was not
+ // clear whether the exception will escape.
+ ExceptionEscapedKey = attribute.Key("exception.escaped")
+
+ // ExceptionMessageKey is the attribute Key conforming to the
+ // "exception.message" semantic conventions. It represents the exception
+ // message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Division by zero', "Can't convert 'int' object to str
+ // implicitly"
+ ExceptionMessageKey = attribute.Key("exception.message")
+
+ // ExceptionStacktraceKey is the attribute Key conforming to the
+ // "exception.stacktrace" semantic conventions. It represents a stacktrace
+ // as a string in the natural representation for the language runtime. The
+ // representation is to be determined and documented by each language SIG.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
+ // exception\\n at '
+ // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
+ // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
+ // 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
+ ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
+
+ // ExceptionTypeKey is the attribute Key conforming to the "exception.type"
+ // semantic conventions. It represents the type of the exception (its
+ // fully-qualified class name, if applicable). The dynamic type of the
+ // exception should be preferred over the static type in languages that
+ // support it.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'java.net.ConnectException', 'OSError'
+ ExceptionTypeKey = attribute.Key("exception.type")
+)
+
+// ExceptionEscaped returns an attribute KeyValue conforming to the
+// "exception.escaped" semantic conventions. It represents the sHOULD be set to
+// true if the exception event is recorded at a point where it is known that
+// the exception is escaping the scope of the span.
+func ExceptionEscaped(val bool) attribute.KeyValue {
+ return ExceptionEscapedKey.Bool(val)
+}
+
+// ExceptionMessage returns an attribute KeyValue conforming to the
+// "exception.message" semantic conventions. It represents the exception
+// message.
+func ExceptionMessage(val string) attribute.KeyValue {
+ return ExceptionMessageKey.String(val)
+}
+
+// ExceptionStacktrace returns an attribute KeyValue conforming to the
+// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func ExceptionStacktrace(val string) attribute.KeyValue {
+ return ExceptionStacktraceKey.String(val)
+}
+
+// ExceptionType returns an attribute KeyValue conforming to the
+// "exception.type" semantic conventions. It represents the type of the
+// exception (its fully-qualified class name, if applicable). The dynamic type
+// of the exception should be preferred over the static type in languages that
+// support it.
+func ExceptionType(val string) attribute.KeyValue {
+ return ExceptionTypeKey.String(val)
+}
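+
+// Usage sketch (illustrative): in the OpenTelemetry Go SDK, span.RecordError
+// records an "exception" event carrying exception.type and exception.message;
+// the escaped flag can be attached per the ExceptionEscapedKey note. The span
+// and err values are assumed placeholders; trace.WithAttributes and
+// codes.Error are standard OpenTelemetry APIs.
+//
+//	span.RecordError(err, trace.WithAttributes(ExceptionEscaped(true)))
+//	span.SetStatus(codes.Error, err.Error())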
+
+// Semantic convention attributes in the HTTP namespace.
+const (
+ // HTTPRequestBodySizeKey is the attribute Key conforming to the
+ // "http.request.body.size" semantic conventions. It represents the size of
+ // the request payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as
+ // the
+ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+ // header. For requests using transport encoding, this should be the
+ // compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 3495
+ HTTPRequestBodySizeKey = attribute.Key("http.request.body.size")
+
+ // HTTPRequestMethodKey is the attribute Key conforming to the
+	// "http.request.method" semantic conventions. It represents the HTTP
+ // request method.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'GET', 'POST', 'HEAD'
+ // Note: HTTP request method value SHOULD be "known" to the
+ // instrumentation.
+ // By default, this convention defines "known" methods as the ones listed
+ // in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods)
+ // and the PATCH method defined in
+ // [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html).
+ //
+ // If the HTTP request method is not known to instrumentation, it MUST set
+ // the `http.request.method` attribute to `_OTHER`.
+ //
+ // If the HTTP instrumentation could end up converting valid HTTP request
+ // methods to `_OTHER`, then it MUST provide a way to override
+ // the list of known HTTP methods. If this override is done via environment
+ // variable, then the environment variable MUST be named
+ // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated
+ // list of case-sensitive known HTTP methods
+	// (this list MUST be a full override of the default known methods; it is
+	// not a list of known methods in addition to the defaults).
+ //
+ // HTTP method names are case-sensitive and `http.request.method` attribute
+ // value MUST match a known HTTP method name exactly.
+ // Instrumentations for specific web frameworks that consider HTTP methods
+	// to be case insensitive SHOULD populate a canonical equivalent.
+	// Tracing instrumentations that do so MUST also set
+ // `http.request.method_original` to the original value.
+ HTTPRequestMethodKey = attribute.Key("http.request.method")
+
+ // HTTPRequestMethodOriginalKey is the attribute Key conforming to the
+ // "http.request.method_original" semantic conventions. It represents the
+ // original HTTP method sent by the client in the request line.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'GeT', 'ACL', 'foo'
+ HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original")
+
+ // HTTPRequestResendCountKey is the attribute Key conforming to the
+ // "http.request.resend_count" semantic conventions. It represents the
+	// ordinal number of the request resending attempt (for any reason,
+	// including redirects).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 3
+ // Note: The resend count SHOULD be updated each time an HTTP request gets
+ // resent by the client, regardless of what was the cause of the resending
+ // (e.g. redirection, authorization failure, 503 Server Unavailable,
+ // network issues, or any other).
+ HTTPRequestResendCountKey = attribute.Key("http.request.resend_count")
+
+ // HTTPResponseBodySizeKey is the attribute Key conforming to the
+ // "http.response.body.size" semantic conventions. It represents the size
+ // of the response payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as
+ // the
+ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+ // header. For requests using transport encoding, this should be the
+ // compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 3495
+ HTTPResponseBodySizeKey = attribute.Key("http.response.body.size")
+
+ // HTTPResponseStatusCodeKey is the attribute Key conforming to the
+ // "http.response.status_code" semantic conventions. It represents the
+ // [HTTP response status
+ // code](https://tools.ietf.org/html/rfc7231#section-6).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 200
+ HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code")
+
+ // HTTPRouteKey is the attribute Key conforming to the "http.route"
+ // semantic conventions. It represents the matched route, that is, the path
+ // template in the format used by the respective server framework.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/users/:userID?', '{controller}/{action}/{id?}'
+ // Note: MUST NOT be populated when this is not supported by the HTTP
+ // server framework as the route attribute should have low-cardinality and
+ // the URI path can NOT substitute it.
+ // SHOULD include the [application
+ // root](/docs/http/http-spans.md#http-server-definitions) if there is one.
+ HTTPRouteKey = attribute.Key("http.route")
+)
+
+var (
+ // CONNECT method
+ HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT")
+ // DELETE method
+ HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE")
+ // GET method
+ HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET")
+ // HEAD method
+ HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD")
+ // OPTIONS method
+ HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS")
+ // PATCH method
+ HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH")
+ // POST method
+ HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST")
+ // PUT method
+ HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT")
+ // TRACE method
+ HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE")
+ // Any HTTP method that the instrumentation has no prior knowledge of
+ HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER")
+)
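+
+// Usage sketch (illustrative): normalizing a request method per the note on
+// HTTPRequestMethodKey. Unknown methods map to _OTHER, and only then is the
+// original value preserved in http.request.method_original. The req value is
+// an assumed *http.Request placeholder; the "known" map is abbreviated.
+//
+//	known := map[string]attribute.KeyValue{
+//		"GET":  HTTPRequestMethodGet,
+//		"POST": HTTPRequestMethodPost,
+//	}
+//	var attrs []attribute.KeyValue
+//	if kv, ok := known[req.Method]; ok {
+//		attrs = append(attrs, kv)
+//	} else {
+//		attrs = append(attrs, HTTPRequestMethodOther,
+//			HTTPRequestMethodOriginal(req.Method))
+//	}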
+
+// HTTPRequestBodySize returns an attribute KeyValue conforming to the
+// "http.request.body.size" semantic conventions. It represents the size of the
+// request payload body in bytes. This is the number of bytes transferred
+// excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For requests using transport encoding, this should be the compressed
+// size.
+func HTTPRequestBodySize(val int) attribute.KeyValue {
+ return HTTPRequestBodySizeKey.Int(val)
+}
+
+// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the
+// "http.request.method_original" semantic conventions. It represents the
+// original HTTP method sent by the client in the request line.
+func HTTPRequestMethodOriginal(val string) attribute.KeyValue {
+ return HTTPRequestMethodOriginalKey.String(val)
+}
+
+// HTTPRequestResendCount returns an attribute KeyValue conforming to the
+// "http.request.resend_count" semantic conventions. It represents the ordinal
+// number of the request resending attempt (for any reason, including
+// redirects).
+func HTTPRequestResendCount(val int) attribute.KeyValue {
+ return HTTPRequestResendCountKey.Int(val)
+}
+
+// HTTPResponseBodySize returns an attribute KeyValue conforming to the
+// "http.response.body.size" semantic conventions. It represents the size of
+// the response payload body in bytes. This is the number of bytes transferred
+// excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For requests using transport encoding, this should be the compressed
+// size.
+func HTTPResponseBodySize(val int) attribute.KeyValue {
+ return HTTPResponseBodySizeKey.Int(val)
+}
+
+// HTTPResponseStatusCode returns an attribute KeyValue conforming to the
+// "http.response.status_code" semantic conventions. It represents the [HTTP
+// response status code](https://tools.ietf.org/html/rfc7231#section-6).
+func HTTPResponseStatusCode(val int) attribute.KeyValue {
+ return HTTPResponseStatusCodeKey.Int(val)
+}
+
+// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
+// semantic conventions. It represents the matched route, that is, the path
+// template in the format used by the respective server framework.
+func HTTPRoute(val string) attribute.KeyValue {
+ return HTTPRouteKey.String(val)
+}
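+
+// Usage sketch (illustrative): annotating an HTTP server span with the stable
+// attributes above. All values are assumed placeholders.
+//
+//	span.SetAttributes(
+//		HTTPRequestMethodGet,
+//		HTTPRoute("/users/:userID?"),
+//		HTTPResponseStatusCode(200),
+//		HTTPResponseBodySize(3495),
+//	)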
+
+// Attributes describing telemetry around messaging systems and messaging
+// activities.
+const (
+ // MessagingBatchMessageCountKey is the attribute Key conforming to the
+ // "messaging.batch.message_count" semantic conventions. It represents the
+ // number of messages sent, received, or processed in the scope of the
+ // batching operation.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 0, 1, 2
+ // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
+ // spans that operate with a single message. When a messaging client
+ // library supports both batch and single-message API for the same
+ // operation, instrumentations SHOULD use `messaging.batch.message_count`
+ // for batching APIs and SHOULD NOT use it for single-message APIs.
+ MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
+
+ // MessagingClientIDKey is the attribute Key conforming to the
+ // "messaging.client_id" semantic conventions. It represents a unique
+ // identifier for the client that consumes or produces a message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'client-5', 'myhost@8742@s8083jm'
+ MessagingClientIDKey = attribute.Key("messaging.client_id")
+
+ // MessagingDestinationAnonymousKey is the attribute Key conforming to the
+ // "messaging.destination.anonymous" semantic conventions. It represents a
+ // boolean that is true if the message destination is anonymous (could be
+ // unnamed or have auto-generated name).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous")
+
+ // MessagingDestinationNameKey is the attribute Key conforming to the
+ // "messaging.destination.name" semantic conventions. It represents the
+ // message destination name
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MyQueue', 'MyTopic'
+ // Note: Destination name SHOULD uniquely identify a specific queue, topic
+ // or other entity within the broker. If
+ // the broker doesn't have such notion, the destination name SHOULD
+ // uniquely identify the broker.
+ MessagingDestinationNameKey = attribute.Key("messaging.destination.name")
+
+ // MessagingDestinationTemplateKey is the attribute Key conforming to the
+ // "messaging.destination.template" semantic conventions. It represents the
+ // low cardinality representation of the messaging destination name
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/customers/{customerID}'
+ // Note: Destination names could be constructed from templates. An example
+ // would be a destination name involving a user name or product id.
+ // Although the destination name in this case is of high cardinality, the
+ // underlying template is of low cardinality and can be effectively used
+ // for grouping and aggregation.
+ MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template")
+
+ // MessagingDestinationTemporaryKey is the attribute Key conforming to the
+ // "messaging.destination.temporary" semantic conventions. It represents a
+ // boolean that is true if the message destination is temporary and might
+ // not exist anymore after messages are processed.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
+
+ // MessagingDestinationPublishAnonymousKey is the attribute Key conforming
+ // to the "messaging.destination_publish.anonymous" semantic conventions.
+ // It represents a boolean that is true if the publish message destination
+ // is anonymous (could be unnamed or have auto-generated name).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingDestinationPublishAnonymousKey = attribute.Key("messaging.destination_publish.anonymous")
+
+ // MessagingDestinationPublishNameKey is the attribute Key conforming to
+ // the "messaging.destination_publish.name" semantic conventions. It
+ // represents the name of the original destination the message was
+ // published to
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MyQueue', 'MyTopic'
+ // Note: The name SHOULD uniquely identify a specific queue, topic, or
+ // other entity within the broker. If
+ // the broker doesn't have such notion, the original destination name
+ // SHOULD uniquely identify the broker.
+ MessagingDestinationPublishNameKey = attribute.Key("messaging.destination_publish.name")
+
+ // MessagingGCPPubsubMessageOrderingKeyKey is the attribute Key conforming
+ // to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions.
+ // It represents the ordering key for a given message. If the attribute is
+ // not present, the message does not have an ordering key.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'ordering_key'
+ MessagingGCPPubsubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key")
+
+ // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
+ // "messaging.kafka.consumer.group" semantic conventions. It represents the
+ // name of the Kafka Consumer Group that is handling the message. Only
+ // applies to consumers, not producers.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'my-group'
+ MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
+
+ // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to
+ // the "messaging.kafka.destination.partition" semantic conventions. It
+ // represents the partition the message is sent to.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 2
+ MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition")
+
+ // MessagingKafkaMessageKeyKey is the attribute Key conforming to the
+	// "messaging.kafka.message.key" semantic conventions. It represents the
+	// message key. Message keys in Kafka are used to group alike messages to
+	// ensure they're processed on the same partition. They differ from
+	// `messaging.message.id` in that they're not unique. If the key is
+	// `null`, the attribute MUST NOT be set.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myKey'
+	// Note: If the key type is not string, its string representation has to
+ // be supplied for the attribute. If the key has no unambiguous, canonical
+ // string form, don't include its value.
+ MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+
+ // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
+ // "messaging.kafka.message.offset" semantic conventions. It represents the
+ // offset of a record in the corresponding Kafka partition.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 42
+ MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
+
+ // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
+ // "messaging.kafka.message.tombstone" semantic conventions. It represents
+ // a boolean that is true if the message is a tombstone.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
+
+ // MessagingMessageBodySizeKey is the attribute Key conforming to the
+ // "messaging.message.body.size" semantic conventions. It represents the
+ // size of the message body in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1439
+ // Note: This can refer to both the compressed or uncompressed body size.
+ // If both sizes are known, the uncompressed
+ // body size should be used.
+ MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size")
+
+ // MessagingMessageConversationIDKey is the attribute Key conforming to the
+ // "messaging.message.conversation_id" semantic conventions. It represents
+ // the conversation ID identifying the conversation to which the message
+ // belongs, represented as a string. Sometimes called "Correlation ID".
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MyConversationID'
+ MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
+
+ // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the
+ // "messaging.message.envelope.size" semantic conventions. It represents
+ // the size of the message body and metadata in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 2738
+ // Note: This can refer to both the compressed or uncompressed size. If
+ // both sizes are known, the uncompressed
+ // size should be used.
+ MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size")
+
+ // MessagingMessageIDKey is the attribute Key conforming to the
+ // "messaging.message.id" semantic conventions. It represents a value used
+ // by the messaging system as an identifier for the message, represented as
+ // a string.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '452a7c7c7c7048c2f887f61572b18fc2'
+ MessagingMessageIDKey = attribute.Key("messaging.message.id")
+
+ // MessagingOperationKey is the attribute Key conforming to the
+ // "messaging.operation" semantic conventions. It represents a string
+ // identifying the kind of messaging operation.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: If a custom value is used, it MUST be of low cardinality.
+ MessagingOperationKey = attribute.Key("messaging.operation")
+
+ // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key
+ // conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+	// conventions. It represents the RabbitMQ message routing key.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myKey'
+ MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
+
+ // MessagingRocketmqClientGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.client_group" semantic conventions. It represents
+ // the name of the RocketMQ producer/consumer group that is handling the
+ // message. The client type is identified by the SpanKind.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myConsumerGroup'
+ MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
+
+ // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to
+ // the "messaging.rocketmq.consumption_model" semantic conventions. It
+ // represents the model of message consumption. This only applies to
+ // consumer spans.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+
+ // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+ // conventions. It represents the delay time level for delay message, which
+ // determines the message delay time.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 3
+ MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
+
+ // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delivery_timestamp"
+ // semantic conventions. It represents the timestamp in milliseconds that
+	// the delay message is expected to be delivered to the consumer.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1665987217045
+ MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+ // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.group" semantic conventions. It represents
+	// the message group, which is essential for FIFO messages. Messages that
+	// belong to the same message group are always processed one by one within
+	// the same consumer group.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myMessageGroup'
+ MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+
+ // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.keys" semantic conventions. It represents
+ // the key(s) of message, another way to mark message besides message id.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'keyA', 'keyB'
+ MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
+
+ // MessagingRocketmqMessageTagKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.tag" semantic conventions. It represents the
+ // secondary classifier of message besides topic.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'tagA'
+ MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
+
+ // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.type" semantic conventions. It represents
+ // the type of message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
+
+ // MessagingRocketmqNamespaceKey is the attribute Key conforming to the
+ // "messaging.rocketmq.namespace" semantic conventions. It represents the
+ // namespace of RocketMQ resources, resources in different namespaces are
+ // individual.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myNamespace'
+ MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+
+ // MessagingSystemKey is the attribute Key conforming to the
+ // "messaging.system" semantic conventions. It represents an identifier for
+ // the messaging system being used. See below for a list of well-known
+ // identifiers.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingSystemKey = attribute.Key("messaging.system")
+)
+
+var (
+ // One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created
+ MessagingOperationPublish = MessagingOperationKey.String("publish")
+ // A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios
+ MessagingOperationCreate = MessagingOperationKey.String("create")
+ // One or more messages are requested by a consumer. This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages
+ MessagingOperationReceive = MessagingOperationKey.String("receive")
+	// One or more messages are passed to a consumer. This operation refers to push-based scenarios, where consumers register callbacks which get called by messaging SDKs
+ MessagingOperationDeliver = MessagingOperationKey.String("deliver")
+)
+
+var (
+ // Clustering consumption model
+ MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
+ // Broadcasting consumption model
+ MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
+)
+
+var (
+ // Normal message
+ MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
+ // FIFO message
+ MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
+ // Delay message
+ MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
+ // Transaction message
+ MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
+)
+
+var (
+ // Apache ActiveMQ
+ MessagingSystemActivemq = MessagingSystemKey.String("activemq")
+ // Amazon Simple Queue Service (SQS)
+ MessagingSystemAWSSqs = MessagingSystemKey.String("aws_sqs")
+ // Azure Event Grid
+ MessagingSystemAzureEventgrid = MessagingSystemKey.String("azure_eventgrid")
+ // Azure Event Hubs
+ MessagingSystemAzureEventhubs = MessagingSystemKey.String("azure_eventhubs")
+ // Azure Service Bus
+ MessagingSystemAzureServicebus = MessagingSystemKey.String("azure_servicebus")
+ // Google Cloud Pub/Sub
+ MessagingSystemGCPPubsub = MessagingSystemKey.String("gcp_pubsub")
+ // Java Message Service
+ MessagingSystemJms = MessagingSystemKey.String("jms")
+ // Apache Kafka
+ MessagingSystemKafka = MessagingSystemKey.String("kafka")
+ // RabbitMQ
+ MessagingSystemRabbitmq = MessagingSystemKey.String("rabbitmq")
+ // Apache RocketMQ
+ MessagingSystemRocketmq = MessagingSystemKey.String("rocketmq")
+)
+
+// MessagingBatchMessageCount returns an attribute KeyValue conforming to
+// the "messaging.batch.message_count" semantic conventions. It represents the
+// number of messages sent, received, or processed in the scope of the batching
+// operation.
+func MessagingBatchMessageCount(val int) attribute.KeyValue {
+ return MessagingBatchMessageCountKey.Int(val)
+}
+
+// MessagingClientID returns an attribute KeyValue conforming to the
+// "messaging.client_id" semantic conventions. It represents a unique
+// identifier for the client that consumes or produces a message.
+func MessagingClientID(val string) attribute.KeyValue {
+ return MessagingClientIDKey.String(val)
+}
+
+// MessagingDestinationAnonymous returns an attribute KeyValue conforming to
+// the "messaging.destination.anonymous" semantic conventions. It represents a
+// boolean that is true if the message destination is anonymous (could be
+// unnamed or have auto-generated name).
+func MessagingDestinationAnonymous(val bool) attribute.KeyValue {
+ return MessagingDestinationAnonymousKey.Bool(val)
+}
+
+// MessagingDestinationName returns an attribute KeyValue conforming to the
+// "messaging.destination.name" semantic conventions. It represents the message
+// destination name
+func MessagingDestinationName(val string) attribute.KeyValue {
+ return MessagingDestinationNameKey.String(val)
+}
+
+// MessagingDestinationTemplate returns an attribute KeyValue conforming to
+// the "messaging.destination.template" semantic conventions. It represents the
+// low cardinality representation of the messaging destination name
+func MessagingDestinationTemplate(val string) attribute.KeyValue {
+ return MessagingDestinationTemplateKey.String(val)
+}
+
+// MessagingDestinationTemporary returns an attribute KeyValue conforming to
+// the "messaging.destination.temporary" semantic conventions. It represents a
+// boolean that is true if the message destination is temporary and might not
+// exist anymore after messages are processed.
+func MessagingDestinationTemporary(val bool) attribute.KeyValue {
+ return MessagingDestinationTemporaryKey.Bool(val)
+}
+
+// MessagingDestinationPublishAnonymous returns an attribute KeyValue
+// conforming to the "messaging.destination_publish.anonymous" semantic
+// conventions. It represents a boolean that is true if the publish message
+// destination is anonymous (could be unnamed or have auto-generated name).
+func MessagingDestinationPublishAnonymous(val bool) attribute.KeyValue {
+ return MessagingDestinationPublishAnonymousKey.Bool(val)
+}
+
+// MessagingDestinationPublishName returns an attribute KeyValue conforming
+// to the "messaging.destination_publish.name" semantic conventions. It
+// represents the name of the original destination the message was published to
+func MessagingDestinationPublishName(val string) attribute.KeyValue {
+ return MessagingDestinationPublishNameKey.String(val)
+}
+
+// MessagingGCPPubsubMessageOrderingKey returns an attribute KeyValue
+// conforming to the "messaging.gcp_pubsub.message.ordering_key" semantic
+// conventions. It represents the ordering key for a given message. If the
+// attribute is not present, the message does not have an ordering key.
+func MessagingGCPPubsubMessageOrderingKey(val string) attribute.KeyValue {
+ return MessagingGCPPubsubMessageOrderingKeyKey.String(val)
+}
+
+// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to
+// the "messaging.kafka.consumer.group" semantic conventions. It represents the
+// name of the Kafka Consumer Group that is handling the message. Only applies
+// to consumers, not producers.
+func MessagingKafkaConsumerGroup(val string) attribute.KeyValue {
+ return MessagingKafkaConsumerGroupKey.String(val)
+}
+
+// MessagingKafkaDestinationPartition returns an attribute KeyValue
+// conforming to the "messaging.kafka.destination.partition" semantic
+// conventions. It represents the partition the message is sent to.
+func MessagingKafkaDestinationPartition(val int) attribute.KeyValue {
+ return MessagingKafkaDestinationPartitionKey.Int(val)
+}
+
+// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.key" semantic conventions. It represents the
+// message keys in Kafka, which are used for grouping alike messages to ensure
+// they're processed on the same partition. They differ from
+// `messaging.message.id` in that they're not unique. If the key is `null`,
+// the attribute MUST NOT be set.
+func MessagingKafkaMessageKey(val string) attribute.KeyValue {
+ return MessagingKafkaMessageKeyKey.String(val)
+}
+
+// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to
+// the "messaging.kafka.message.offset" semantic conventions. It represents the
+// offset of a record in the corresponding Kafka partition.
+func MessagingKafkaMessageOffset(val int) attribute.KeyValue {
+ return MessagingKafkaMessageOffsetKey.Int(val)
+}
+
+// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming
+// to the "messaging.kafka.message.tombstone" semantic conventions. It
+// represents a boolean that is true if the message is a tombstone.
+func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
+ return MessagingKafkaMessageTombstoneKey.Bool(val)
+}
+
+// MessagingMessageBodySize returns an attribute KeyValue conforming to the
+// "messaging.message.body.size" semantic conventions. It represents the size
+// of the message body in bytes.
+func MessagingMessageBodySize(val int) attribute.KeyValue {
+ return MessagingMessageBodySizeKey.Int(val)
+}
+
+// MessagingMessageConversationID returns an attribute KeyValue conforming
+// to the "messaging.message.conversation_id" semantic conventions. It
+// represents the conversation ID identifying the conversation to which the
+// message belongs, represented as a string. Sometimes called "Correlation ID".
+func MessagingMessageConversationID(val string) attribute.KeyValue {
+ return MessagingMessageConversationIDKey.String(val)
+}
+
+// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to
+// the "messaging.message.envelope.size" semantic conventions. It represents
+// the size of the message body and metadata in bytes.
+func MessagingMessageEnvelopeSize(val int) attribute.KeyValue {
+ return MessagingMessageEnvelopeSizeKey.Int(val)
+}
+
+// MessagingMessageID returns an attribute KeyValue conforming to the
+// "messaging.message.id" semantic conventions. It represents a value used by
+// the messaging system as an identifier for the message, represented as a
+// string.
+func MessagingMessageID(val string) attribute.KeyValue {
+ return MessagingMessageIDKey.String(val)
+}
+
+// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue
+// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+// conventions. It represents the RabbitMQ message routing key.
+func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue {
+ return MessagingRabbitmqDestinationRoutingKeyKey.String(val)
+}
+
+// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.client_group" semantic conventions. It represents
+// the name of the RocketMQ producer/consumer group that is handling the
+// message. The client type is identified by the SpanKind.
+func MessagingRocketmqClientGroup(val string) attribute.KeyValue {
+ return MessagingRocketmqClientGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+// conventions. It represents the delay time level for delay message, which
+// determines the message delay time.
+func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue {
+ return MessagingRocketmqMessageDelayTimeLevelKey.Int(val)
+}
+
+// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
+// conventions. It represents the timestamp in milliseconds that the delay
+// message is expected to be delivered to the consumer.
+func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
+ return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.group" semantic conventions. It represents
+// the message group, which is essential for FIFO messages. Messages that
+// belong to the same message group are always processed one by one within the
+// same consumer group.
+func MessagingRocketmqMessageGroup(val string) attribute.KeyValue {
+ return MessagingRocketmqMessageGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.keys" semantic conventions. It represents
+// the key(s) of message, another way to mark message besides message id.
+func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue {
+ return MessagingRocketmqMessageKeysKey.StringSlice(val)
+}
+
+// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.tag" semantic conventions. It represents the
+// secondary classifier of message besides topic.
+func MessagingRocketmqMessageTag(val string) attribute.KeyValue {
+ return MessagingRocketmqMessageTagKey.String(val)
+}
+
+// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.namespace" semantic conventions. It represents the
+// namespace of RocketMQ resources, resources in different namespaces are
+// individual.
+func MessagingRocketmqNamespace(val string) attribute.KeyValue {
+ return MessagingRocketmqNamespaceKey.String(val)
+}
+
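+// Illustrative usage (not part of the generated conventions): a minimal,
+// hypothetical sketch of attaching the messaging attributes above to a span
+// via go.opentelemetry.io/otel/trace. The span variable and the attribute
+// values are assumptions for illustration only.
+//
+//	span.SetAttributes(
+//		MessagingSystemKafka,
+//		MessagingOperationReceive,
+//		MessagingDestinationName("orders"),
+//		MessagingKafkaConsumerGroup("billing"),
+//		MessagingKafkaMessageOffset(42),
+//	)
+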
+// These attributes may be used for any network related operation.
+const (
+ // NetworkCarrierIccKey is the attribute Key conforming to the
+ // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
+ // alpha-2 2-character country code associated with the mobile carrier
+ // network.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'DE'
+ NetworkCarrierIccKey = attribute.Key("network.carrier.icc")
+
+ // NetworkCarrierMccKey is the attribute Key conforming to the
+ // "network.carrier.mcc" semantic conventions. It represents the mobile
+ // carrier country code.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '310'
+ NetworkCarrierMccKey = attribute.Key("network.carrier.mcc")
+
+ // NetworkCarrierMncKey is the attribute Key conforming to the
+ // "network.carrier.mnc" semantic conventions. It represents the mobile
+ // carrier network code.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '001'
+ NetworkCarrierMncKey = attribute.Key("network.carrier.mnc")
+
+ // NetworkCarrierNameKey is the attribute Key conforming to the
+ // "network.carrier.name" semantic conventions. It represents the name of
+ // the mobile carrier.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'sprint'
+ NetworkCarrierNameKey = attribute.Key("network.carrier.name")
+
+ // NetworkConnectionSubtypeKey is the attribute Key conforming to the
+	// "network.connection.subtype" semantic conventions. It represents more
+	// details regarding the connection type (`network.connection.type`). It
+	// may be the type of cell technology connection, but it could be used for
+	// describing details about a wifi connection.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'LTE'
+ NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype")
+
+ // NetworkConnectionTypeKey is the attribute Key conforming to the
+ // "network.connection.type" semantic conventions. It represents the
+ // internet connection type.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'wifi'
+ NetworkConnectionTypeKey = attribute.Key("network.connection.type")
+
+ // NetworkIoDirectionKey is the attribute Key conforming to the
+ // "network.io.direction" semantic conventions. It represents the network
+ // IO operation direction.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'transmit'
+ NetworkIoDirectionKey = attribute.Key("network.io.direction")
+
+ // NetworkLocalAddressKey is the attribute Key conforming to the
+ // "network.local.address" semantic conventions. It represents the local
+ // address of the network connection - IP address or Unix domain socket
+ // name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '10.1.2.80', '/tmp/my.sock'
+ NetworkLocalAddressKey = attribute.Key("network.local.address")
+
+ // NetworkLocalPortKey is the attribute Key conforming to the
+ // "network.local.port" semantic conventions. It represents the local port
+ // number of the network connection.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 65123
+ NetworkLocalPortKey = attribute.Key("network.local.port")
+
+ // NetworkPeerAddressKey is the attribute Key conforming to the
+ // "network.peer.address" semantic conventions. It represents the peer
+ // address of the network connection - IP address or Unix domain socket
+ // name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '10.1.2.80', '/tmp/my.sock'
+ NetworkPeerAddressKey = attribute.Key("network.peer.address")
+
+ // NetworkPeerPortKey is the attribute Key conforming to the
+ // "network.peer.port" semantic conventions. It represents the peer port
+ // number of the network connection.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 65123
+ NetworkPeerPortKey = attribute.Key("network.peer.port")
+
+ // NetworkProtocolNameKey is the attribute Key conforming to the
+ // "network.protocol.name" semantic conventions. It represents the [OSI
+ // application layer](https://osi-model.com/application-layer/) or non-OSI
+ // equivalent.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'amqp', 'http', 'mqtt'
+ // Note: The value SHOULD be normalized to lowercase.
+ NetworkProtocolNameKey = attribute.Key("network.protocol.name")
+
+ // NetworkProtocolVersionKey is the attribute Key conforming to the
+ // "network.protocol.version" semantic conventions. It represents the
+ // version of the protocol specified in `network.protocol.name`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '3.1.1'
+ // Note: `network.protocol.version` refers to the version of the protocol
+ // used and might be different from the protocol client's version. If the
+ // HTTP client has a version of `0.27.2`, but sends HTTP version `1.1`,
+ // this attribute should be set to `1.1`.
+ NetworkProtocolVersionKey = attribute.Key("network.protocol.version")
+
+ // NetworkTransportKey is the attribute Key conforming to the
+ // "network.transport" semantic conventions. It represents the [OSI
+ // transport layer](https://osi-model.com/transport-layer/) or
+ // [inter-process communication
+ // method](https://wikipedia.org/wiki/Inter-process_communication).
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'tcp', 'udp'
+ // Note: The value SHOULD be normalized to lowercase.
+ //
+ // Consider always setting the transport when setting a port number, since
+ // a port number is ambiguous without knowing the transport. For example
+ // different processes could be listening on TCP port 12345 and UDP port
+ // 12345.
+ NetworkTransportKey = attribute.Key("network.transport")
+
+ // NetworkTypeKey is the attribute Key conforming to the "network.type"
+ // semantic conventions. It represents the [OSI network
+ // layer](https://osi-model.com/network-layer/) or non-OSI equivalent.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'ipv4', 'ipv6'
+ // Note: The value SHOULD be normalized to lowercase.
+ NetworkTypeKey = attribute.Key("network.type")
+)
+
+var (
+ // GPRS
+ NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs")
+ // EDGE
+ NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge")
+ // UMTS
+ NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts")
+ // CDMA
+ NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma")
+ // EVDO Rel. 0
+ NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0")
+ // EVDO Rev. A
+ NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a")
+ // CDMA2000 1XRTT
+ NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt")
+ // HSDPA
+ NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa")
+ // HSUPA
+ NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa")
+ // HSPA
+ NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa")
+ // IDEN
+ NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden")
+ // EVDO Rev. B
+ NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b")
+ // LTE
+ NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte")
+ // EHRPD
+ NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd")
+ // HSPAP
+ NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap")
+ // GSM
+ NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm")
+ // TD-SCDMA
+ NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma")
+ // IWLAN
+ NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan")
+ // 5G NR (New Radio)
+ NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr")
+ // 5G NRNSA (New Radio Non-Standalone)
+ NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa")
+ // LTE CA
+ NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca")
+)
+
+var (
+ // wifi
+ NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi")
+ // wired
+ NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired")
+ // cell
+ NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell")
+ // unavailable
+ NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable")
+ // unknown
+ NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown")
+)
+
+var (
+ // transmit
+ NetworkIoDirectionTransmit = NetworkIoDirectionKey.String("transmit")
+ // receive
+ NetworkIoDirectionReceive = NetworkIoDirectionKey.String("receive")
+)
+
+var (
+ // TCP
+ NetworkTransportTCP = NetworkTransportKey.String("tcp")
+ // UDP
+ NetworkTransportUDP = NetworkTransportKey.String("udp")
+ // Named or anonymous pipe
+ NetworkTransportPipe = NetworkTransportKey.String("pipe")
+ // Unix domain socket
+ NetworkTransportUnix = NetworkTransportKey.String("unix")
+)
+
+var (
+ // IPv4
+ NetworkTypeIpv4 = NetworkTypeKey.String("ipv4")
+ // IPv6
+ NetworkTypeIpv6 = NetworkTypeKey.String("ipv6")
+)
+
+// NetworkCarrierIcc returns an attribute KeyValue conforming to the
+// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
+// alpha-2 2-character country code associated with the mobile carrier network.
+func NetworkCarrierIcc(val string) attribute.KeyValue {
+ return NetworkCarrierIccKey.String(val)
+}
+
+// NetworkCarrierMcc returns an attribute KeyValue conforming to the
+// "network.carrier.mcc" semantic conventions. It represents the mobile carrier
+// country code.
+func NetworkCarrierMcc(val string) attribute.KeyValue {
+ return NetworkCarrierMccKey.String(val)
+}
+
+// NetworkCarrierMnc returns an attribute KeyValue conforming to the
+// "network.carrier.mnc" semantic conventions. It represents the mobile carrier
+// network code.
+func NetworkCarrierMnc(val string) attribute.KeyValue {
+ return NetworkCarrierMncKey.String(val)
+}
+
+// NetworkCarrierName returns an attribute KeyValue conforming to the
+// "network.carrier.name" semantic conventions. It represents the name of the
+// mobile carrier.
+func NetworkCarrierName(val string) attribute.KeyValue {
+ return NetworkCarrierNameKey.String(val)
+}
+
+// NetworkLocalAddress returns an attribute KeyValue conforming to the
+// "network.local.address" semantic conventions. It represents the local
+// address of the network connection - IP address or Unix domain socket name.
+func NetworkLocalAddress(val string) attribute.KeyValue {
+ return NetworkLocalAddressKey.String(val)
+}
+
+// NetworkLocalPort returns an attribute KeyValue conforming to the
+// "network.local.port" semantic conventions. It represents the local port
+// number of the network connection.
+func NetworkLocalPort(val int) attribute.KeyValue {
+ return NetworkLocalPortKey.Int(val)
+}
+
+// NetworkPeerAddress returns an attribute KeyValue conforming to the
+// "network.peer.address" semantic conventions. It represents the peer address
+// of the network connection - IP address or Unix domain socket name.
+func NetworkPeerAddress(val string) attribute.KeyValue {
+ return NetworkPeerAddressKey.String(val)
+}
+
+// NetworkPeerPort returns an attribute KeyValue conforming to the
+// "network.peer.port" semantic conventions. It represents the peer port number
+// of the network connection.
+func NetworkPeerPort(val int) attribute.KeyValue {
+ return NetworkPeerPortKey.Int(val)
+}
+
+// NetworkProtocolName returns an attribute KeyValue conforming to the
+// "network.protocol.name" semantic conventions. It represents the [OSI
+// application layer](https://osi-model.com/application-layer/) or non-OSI
+// equivalent.
+func NetworkProtocolName(val string) attribute.KeyValue {
+ return NetworkProtocolNameKey.String(val)
+}
+
+// NetworkProtocolVersion returns an attribute KeyValue conforming to the
+// "network.protocol.version" semantic conventions. It represents the version
+// of the protocol specified in `network.protocol.name`.
+func NetworkProtocolVersion(val string) attribute.KeyValue {
+ return NetworkProtocolVersionKey.String(val)
+}
+
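+// Illustrative usage (not part of the generated conventions): a minimal,
+// hypothetical sketch combining the network attribute helpers and enum
+// values above on a span. The span variable and values are assumptions.
+//
+//	span.SetAttributes(
+//		NetworkTransportTCP,
+//		NetworkTypeIpv4,
+//		NetworkPeerAddress("10.1.2.80"),
+//		NetworkPeerPort(65123),
+//		NetworkProtocolName("http"),
+//		NetworkProtocolVersion("1.1"),
+//	)
+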
+// Attributes for remote procedure calls.
+const (
+ // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the
+ // "rpc.connect_rpc.error_code" semantic conventions. It represents the
+ // [error codes](https://connect.build/docs/protocol/#error-codes) of the
+ // Connect request. Error codes are always string values.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code")
+
+ // RPCGRPCStatusCodeKey is the attribute Key conforming to the
+ // "rpc.grpc.status_code" semantic conventions. It represents the [numeric
+ // status
+ // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of
+ // the gRPC request.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
+
+ // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.error_code" semantic conventions. It represents the
+ // `error.code` property of response if it is an error response.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: -32700, 100
+ RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
+
+ // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.error_message" semantic conventions. It represents the
+ // `error.message` property of response if it is an error response.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Parse error', 'User already exists'
+ RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
+
+ // RPCJsonrpcRequestIDKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+	// property of the request or response. Since the protocol allows the id
+	// to be an int, string, `null`, or missing (for notifications), the value
+	// is expected to be cast to a string for simplicity. Use an empty string
+	// in case of a `null` value.
+ // Omit entirely if this is a notification.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '10', 'request-7', ''
+ RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
+
+ // RPCJsonrpcVersionKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+ // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+ // doesn't specify this, the value can be omitted.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2.0', '1.0'
+ RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version")
+
+ // RPCMethodKey is the attribute Key conforming to the "rpc.method"
+ // semantic conventions. It represents the name of the (logical) method
+	// being called, which must be equal to the $method part in the span name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'exampleMethod'
+ // Note: This is the logical name of the method from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // method/function. The `code.function` attribute may be used to store the
+ // latter (e.g., method actually executing the call on the server side, RPC
+ // client stub method on the client side).
+ RPCMethodKey = attribute.Key("rpc.method")
+
+ // RPCServiceKey is the attribute Key conforming to the "rpc.service"
+ // semantic conventions. It represents the full (logical) name of the
+ // service being called, including its package name, if applicable.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myservice.EchoService'
+ // Note: This is the logical name of the service from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // class. The `code.namespace` attribute may be used to store the latter
+ // (despite the attribute name, it may include a class name; e.g., class
+ // with method actually executing the call on the server side, RPC client
+ // stub class on the client side).
+ RPCServiceKey = attribute.Key("rpc.service")
+
+ // RPCSystemKey is the attribute Key conforming to the "rpc.system"
+ // semantic conventions. It represents a string identifying the remoting
+ // system. See below for a list of well-known identifiers.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ RPCSystemKey = attribute.Key("rpc.system")
+)
+
+var (
+ // cancelled
+ RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled")
+ // unknown
+ RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown")
+ // invalid_argument
+ RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument")
+ // deadline_exceeded
+ RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded")
+ // not_found
+ RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found")
+ // already_exists
+ RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists")
+ // permission_denied
+ RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied")
+ // resource_exhausted
+ RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted")
+ // failed_precondition
+ RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition")
+ // aborted
+ RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted")
+ // out_of_range
+ RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range")
+ // unimplemented
+ RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented")
+ // internal
+ RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal")
+ // unavailable
+ RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable")
+ // data_loss
+ RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss")
+ // unauthenticated
+ RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated")
+)
+
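+// Illustrative usage (not part of the generated conventions): a hypothetical
+// Connect RPC client span that failed with "unavailable" might record:
+//
+//	span.SetAttributes(RPCSystemConnectRPC, RPCConnectRPCErrorCodeUnavailable)
+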
+var (
+ // OK
+ RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
+ // CANCELLED
+ RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
+ // UNKNOWN
+ RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
+ // INVALID_ARGUMENT
+ RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
+ // DEADLINE_EXCEEDED
+ RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
+ // NOT_FOUND
+ RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
+ // ALREADY_EXISTS
+ RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
+ // PERMISSION_DENIED
+ RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
+ // RESOURCE_EXHAUSTED
+ RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
+ // FAILED_PRECONDITION
+ RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
+ // ABORTED
+ RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
+ // OUT_OF_RANGE
+ RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
+ // UNIMPLEMENTED
+ RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
+ // INTERNAL
+ RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
+ // UNAVAILABLE
+ RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
+ // DATA_LOSS
+ RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
+ // UNAUTHENTICATED
+ RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
+)
+
+var (
+ // gRPC
+ RPCSystemGRPC = RPCSystemKey.String("grpc")
+ // Java RMI
+ RPCSystemJavaRmi = RPCSystemKey.String("java_rmi")
+ // .NET WCF
+ RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf")
+ // Apache Dubbo
+ RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
+ // Connect RPC
+ RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc")
+)
+
+// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_code" semantic conventions. It represents the
+// `error.code` property of response if it is an error response.
+func RPCJsonrpcErrorCode(val int) attribute.KeyValue {
+ return RPCJsonrpcErrorCodeKey.Int(val)
+}
+
+// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_message" semantic conventions. It represents the
+// `error.message` property of response if it is an error response.
+func RPCJsonrpcErrorMessage(val string) attribute.KeyValue {
+ return RPCJsonrpcErrorMessageKey.String(val)
+}
+
+// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+// property of the request or response. Since the protocol allows the id to be
+// an int, string, `null`, or missing (for notifications), the value is
+// expected to be cast to a string for simplicity. Use an empty string in case
+// of a `null` value. Omit entirely if this is a notification.
+func RPCJsonrpcRequestID(val string) attribute.KeyValue {
+ return RPCJsonrpcRequestIDKey.String(val)
+}
+
+// RPCJsonrpcVersion returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+// doesn't specify this, the value can be omitted.
+func RPCJsonrpcVersion(val string) attribute.KeyValue {
+ return RPCJsonrpcVersionKey.String(val)
+}
+
+// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
+// semantic conventions. It represents the name of the (logical) method being
+// called, which must be equal to the $method part in the span name.
+func RPCMethod(val string) attribute.KeyValue {
+ return RPCMethodKey.String(val)
+}
+
+// RPCService returns an attribute KeyValue conforming to the "rpc.service"
+// semantic conventions. It represents the full (logical) name of the service
+// being called, including its package name, if applicable.
+func RPCService(val string) attribute.KeyValue {
+ return RPCServiceKey.String(val)
+}
+
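+// Illustrative usage (not part of the generated conventions): a minimal,
+// hypothetical sketch of annotating a gRPC client span with the RPC
+// attributes above. The span variable and values are assumptions.
+//
+//	span.SetAttributes(
+//		RPCSystemGRPC,
+//		RPCService("myservice.EchoService"),
+//		RPCMethod("exampleMethod"),
+//		RPCGRPCStatusCodeOk,
+//	)
+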
+// These attributes may be used to describe the server in a connection-based
+// network interaction where there is one side that initiates the connection
+// (the client is the side that initiates the connection). This covers all TCP
+// network interactions since TCP is connection-based and one side initiates
+// the connection (an exception is made for peer-to-peer communication over TCP
+// where the "user-facing" surface of the protocol / API doesn't expose a clear
+// notion of client and server). This also covers UDP network interactions
+// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS.
+const (
+ // ServerAddressKey is the attribute Key conforming to the "server.address"
+ // semantic conventions. It represents the server domain name if available
+ // without reverse DNS lookup; otherwise, IP address or Unix domain socket
+ // name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'example.com', '10.1.2.80', '/tmp/my.sock'
+ // Note: When observed from the client side, and when communicating through
+ // an intermediary, `server.address` SHOULD represent the server address
+ // behind any intermediaries, for example proxies, if it's available.
+ ServerAddressKey = attribute.Key("server.address")
+
+ // ServerPortKey is the attribute Key conforming to the "server.port"
+ // semantic conventions. It represents the server port number.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 80, 8080, 443
+ // Note: When observed from the client side, and when communicating through
+ // an intermediary, `server.port` SHOULD represent the server port behind
+ // any intermediaries, for example proxies, if it's available.
+ ServerPortKey = attribute.Key("server.port")
+)
+
+// ServerAddress returns an attribute KeyValue conforming to the
+// "server.address" semantic conventions. It represents the server domain name
+// if available without reverse DNS lookup; otherwise, IP address or Unix
+// domain socket name.
+func ServerAddress(val string) attribute.KeyValue {
+ return ServerAddressKey.String(val)
+}
+
+// ServerPort returns an attribute KeyValue conforming to the "server.port"
+// semantic conventions. It represents the server port number.
+func ServerPort(val int) attribute.KeyValue {
+ return ServerPortKey.Int(val)
+}
+
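+// Illustrative usage (not part of the generated conventions): a hypothetical
+// client span describing a connection to example.com:443 might carry:
+//
+//	span.SetAttributes(ServerAddress("example.com"), ServerPort(443))
+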
+// These attributes may be used to describe the sender of a network
+// exchange/packet. These should be used when there is no client/server
+// relationship between the two sides, or when that relationship is unknown.
+// This covers low-level network interactions (e.g. packet tracing) where you
+// don't know if there was a connection or which side initiated it. This also
+// covers unidirectional UDP flows and peer-to-peer communication where the
+// "user-facing" surface of the protocol / API doesn't expose a clear notion of
+// client and server.
+const (
+ // SourceAddressKey is the attribute Key conforming to the "source.address"
+ // semantic conventions. It represents the source address - domain name if
+ // available without reverse DNS lookup; otherwise, IP address or Unix
+ // domain socket name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock'
+ // Note: When observed from the destination side, and when communicating
+ // through an intermediary, `source.address` SHOULD represent the source
+ // address behind any intermediaries, for example proxies, if it's
+ // available.
+ SourceAddressKey = attribute.Key("source.address")
+
+ // SourcePortKey is the attribute Key conforming to the "source.port"
+ // semantic conventions. It represents the source port number
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 3389, 2888
+ SourcePortKey = attribute.Key("source.port")
+)
+
+// SourceAddress returns an attribute KeyValue conforming to the
+// "source.address" semantic conventions. It represents the source address -
+// domain name if available without reverse DNS lookup; otherwise, IP address
+// or Unix domain socket name.
+func SourceAddress(val string) attribute.KeyValue {
+ return SourceAddressKey.String(val)
+}
+
+// SourcePort returns an attribute KeyValue conforming to the "source.port"
+// semantic conventions. It represents the source port number
+func SourcePort(val int) attribute.KeyValue {
+ return SourcePortKey.Int(val)
+}
+
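+// Illustrative usage (not part of the generated conventions): a hypothetical
+// packet-level span or event might record the sender as:
+//
+//	span.SetAttributes(SourceAddress("10.1.2.80"), SourcePort(3389))
+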
+// Semantic convention attributes in the TLS namespace.
+const (
+ // TLSCipherKey is the attribute Key conforming to the "tls.cipher"
+ // semantic conventions. It represents the string indicating the
+ // [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5)
+ // used during the current connection.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA',
+ // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256'
+ // Note: The values allowed for `tls.cipher` MUST be one of the
+ // `Descriptions` of the [registered TLS Cipher
+	// Suites](https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4).
+ TLSCipherKey = attribute.Key("tls.cipher")
+
+ // TLSClientCertificateKey is the attribute Key conforming to the
+ // "tls.client.certificate" semantic conventions. It represents the
+	// PEM-encoded stand-alone certificate offered by the client. This is
+ // usually mutually-exclusive of `client.certificate_chain` since this
+ // value also exists in that list.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MII...'
+ TLSClientCertificateKey = attribute.Key("tls.client.certificate")
+
+ // TLSClientCertificateChainKey is the attribute Key conforming to the
+ // "tls.client.certificate_chain" semantic conventions. It represents the
+ // array of PEM-encoded certificates that make up the certificate chain
+ // offered by the client. This is usually mutually-exclusive of
+ // `client.certificate` since that value should be the first certificate in
+ // the chain.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MII...', 'MI...'
+ TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain")
+
+ // TLSClientHashMd5Key is the attribute Key conforming to the
+ // "tls.client.hash.md5" semantic conventions. It represents the
+	// certificate fingerprint using the MD5 digest of the DER-encoded
+	// version of the certificate offered by the client. For consistency with
+	// other hash
+ // values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC'
+ TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5")
+
+ // TLSClientHashSha1Key is the attribute Key conforming to the
+ // "tls.client.hash.sha1" semantic conventions. It represents the
+	// certificate fingerprint using the SHA1 digest of the DER-encoded
+	// version of the certificate offered by the client. For consistency with
+	// other hash
+ // values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A'
+ TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1")
+
+ // TLSClientHashSha256Key is the attribute Key conforming to the
+ // "tls.client.hash.sha256" semantic conventions. It represents the
+	// certificate fingerprint using the SHA256 digest of the DER-encoded
+	// version of the certificate offered by the client. For consistency with
+	// other hash
+ // values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0'
+ TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256")
+
+ // TLSClientIssuerKey is the attribute Key conforming to the
+ // "tls.client.issuer" semantic conventions. It represents the
+ // distinguished name of
+ // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6)
+ // of the issuer of the x.509 certificate presented by the client.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example,
+ // DC=com'
+ TLSClientIssuerKey = attribute.Key("tls.client.issuer")
+
+ // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3"
+ // semantic conventions. It represents a hash that identifies clients based
+ // on how they perform an SSL/TLS handshake.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'd4e5b18d6b55c71272893221c96ba240'
+ TLSClientJa3Key = attribute.Key("tls.client.ja3")
+
+ // TLSClientNotAfterKey is the attribute Key conforming to the
+	// "tls.client.not_after" semantic conventions. It represents the
+	// date/time indicating when the client certificate is no longer
+	// considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2021-01-01T00:00:00.000Z'
+ TLSClientNotAfterKey = attribute.Key("tls.client.not_after")
+
+ // TLSClientNotBeforeKey is the attribute Key conforming to the
+ // "tls.client.not_before" semantic conventions. It represents the
+	// date/time indicating when the client certificate is first considered
+	// valid.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1970-01-01T00:00:00.000Z'
+ TLSClientNotBeforeKey = attribute.Key("tls.client.not_before")
+
+ // TLSClientServerNameKey is the attribute Key conforming to the
+	// "tls.client.server_name" semantic conventions. It represents the server
+	// name indication (SNI), which tells the server the hostname to which the
+	// client is attempting to connect.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry.io'
+ TLSClientServerNameKey = attribute.Key("tls.client.server_name")
+
+ // TLSClientSubjectKey is the attribute Key conforming to the
+ // "tls.client.subject" semantic conventions. It represents the
+ // distinguished name of subject of the x.509 certificate presented by the
+ // client.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com'
+ TLSClientSubjectKey = attribute.Key("tls.client.subject")
+
+ // TLSClientSupportedCiphersKey is the attribute Key conforming to the
+ // "tls.client.supported_ciphers" semantic conventions. It represents the
+ // array of ciphers offered by the client during the client hello.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
+ // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "..."'
+ TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers")
+
+ // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic
+ // conventions. It represents the string indicating the curve used for the
+ // given cipher, when applicable
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'secp256r1'
+ TLSCurveKey = attribute.Key("tls.curve")
+
+ // TLSEstablishedKey is the attribute Key conforming to the
+ // "tls.established" semantic conventions. It represents the boolean flag
+ // indicating if the TLS negotiation was successful and transitioned to an
+ // encrypted tunnel.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: True
+ TLSEstablishedKey = attribute.Key("tls.established")
+
+ // TLSNextProtocolKey is the attribute Key conforming to the
+ // "tls.next_protocol" semantic conventions. It represents the string
+ // indicating the protocol being tunneled. Per the values in the [IANA
+ // registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
+ // this string should be lower case.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'http/1.1'
+ TLSNextProtocolKey = attribute.Key("tls.next_protocol")
+
+ // TLSProtocolNameKey is the attribute Key conforming to the
+ // "tls.protocol.name" semantic conventions. It represents the normalized
+ // lowercase protocol name parsed from original string of the negotiated
+	// lowercase protocol name parsed from the original string of the negotiated
+ // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ TLSProtocolNameKey = attribute.Key("tls.protocol.name")
+
+ // TLSProtocolVersionKey is the attribute Key conforming to the
+ // "tls.protocol.version" semantic conventions. It represents the numeric
+ // part of the version parsed from the original string of the negotiated
+ // [SSL/TLS protocol
+ // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1.2', '3'
+ TLSProtocolVersionKey = attribute.Key("tls.protocol.version")
+
+ // TLSResumedKey is the attribute Key conforming to the "tls.resumed"
+ // semantic conventions. It represents the boolean flag indicating if this
+ // TLS connection was resumed from an existing TLS negotiation.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: True
+ TLSResumedKey = attribute.Key("tls.resumed")
+
+ // TLSServerCertificateKey is the attribute Key conforming to the
+ // "tls.server.certificate" semantic conventions. It represents the
+	// PEM-encoded stand-alone certificate offered by the server. This is
+ // usually mutually-exclusive of `server.certificate_chain` since this
+ // value also exists in that list.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MII...'
+ TLSServerCertificateKey = attribute.Key("tls.server.certificate")
+
+ // TLSServerCertificateChainKey is the attribute Key conforming to the
+ // "tls.server.certificate_chain" semantic conventions. It represents the
+ // array of PEM-encoded certificates that make up the certificate chain
+ // offered by the server. This is usually mutually-exclusive of
+ // `server.certificate` since that value should be the first certificate in
+ // the chain.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MII...', 'MI...'
+ TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain")
+
+ // TLSServerHashMd5Key is the attribute Key conforming to the
+ // "tls.server.hash.md5" semantic conventions. It represents the
+ // certificate fingerprint using the MD5 digest of DER-encoded version of
+ // certificate offered by the server. For consistency with other hash
+ // values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC'
+ TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5")
+
+ // TLSServerHashSha1Key is the attribute Key conforming to the
+ // "tls.server.hash.sha1" semantic conventions. It represents the
+ // certificate fingerprint using the SHA1 digest of DER-encoded version of
+ // certificate offered by the server. For consistency with other hash
+ // values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A'
+ TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1")
+
+ // TLSServerHashSha256Key is the attribute Key conforming to the
+ // "tls.server.hash.sha256" semantic conventions. It represents the
+ // certificate fingerprint using the SHA256 digest of DER-encoded version
+ // of certificate offered by the server. For consistency with other hash
+ // values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0'
+ TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256")
+
+ // TLSServerIssuerKey is the attribute Key conforming to the
+ // "tls.server.issuer" semantic conventions. It represents the
+ // distinguished name of
+ // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6)
+	// of the issuer of the x.509 certificate presented by the server.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example,
+ // DC=com'
+ TLSServerIssuerKey = attribute.Key("tls.server.issuer")
+
+ // TLSServerJa3sKey is the attribute Key conforming to the
+ // "tls.server.ja3s" semantic conventions. It represents a hash that
+ // identifies servers based on how they perform an SSL/TLS handshake.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'd4e5b18d6b55c71272893221c96ba240'
+ TLSServerJa3sKey = attribute.Key("tls.server.ja3s")
+
+ // TLSServerNotAfterKey is the attribute Key conforming to the
+ // "tls.server.not_after" semantic conventions. It represents the date/Time
+ // indicating when server certificate is no longer considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2021-01-01T00:00:00.000Z'
+ TLSServerNotAfterKey = attribute.Key("tls.server.not_after")
+
+ // TLSServerNotBeforeKey is the attribute Key conforming to the
+ // "tls.server.not_before" semantic conventions. It represents the
+	// date/time indicating when the server certificate is first considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1970-01-01T00:00:00.000Z'
+ TLSServerNotBeforeKey = attribute.Key("tls.server.not_before")
+
+ // TLSServerSubjectKey is the attribute Key conforming to the
+ // "tls.server.subject" semantic conventions. It represents the
+ // distinguished name of subject of the x.509 certificate presented by the
+ // server.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com'
+ TLSServerSubjectKey = attribute.Key("tls.server.subject")
+)
+
+var (
+ // ssl
+ TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl")
+ // tls
+ TLSProtocolNameTLS = TLSProtocolNameKey.String("tls")
+)
+
+// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher"
+// semantic conventions. It represents the string indicating the
+// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) used
+// during the current connection.
+func TLSCipher(val string) attribute.KeyValue {
+ return TLSCipherKey.String(val)
+}
+
+// TLSClientCertificate returns an attribute KeyValue conforming to the
+// "tls.client.certificate" semantic conventions. It represents the pEM-encoded
+// stand-alone certificate offered by the client. This is usually
+// mutually-exclusive of `client.certificate_chain` since this value also
+// exists in that list.
+func TLSClientCertificate(val string) attribute.KeyValue {
+ return TLSClientCertificateKey.String(val)
+}
+
+// TLSClientCertificateChain returns an attribute KeyValue conforming to the
+// "tls.client.certificate_chain" semantic conventions. It represents the array
+// of PEM-encoded certificates that make up the certificate chain offered by
+// the client. This is usually mutually-exclusive of `client.certificate` since
+// that value should be the first certificate in the chain.
+func TLSClientCertificateChain(val ...string) attribute.KeyValue {
+ return TLSClientCertificateChainKey.StringSlice(val)
+}
+
+// TLSClientHashMd5 returns an attribute KeyValue conforming to the
+// "tls.client.hash.md5" semantic conventions. It represents the certificate
+// fingerprint using the MD5 digest of DER-encoded version of certificate
+// offered by the client. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSClientHashMd5(val string) attribute.KeyValue {
+ return TLSClientHashMd5Key.String(val)
+}
+
+// TLSClientHashSha1 returns an attribute KeyValue conforming to the
+// "tls.client.hash.sha1" semantic conventions. It represents the certificate
+// fingerprint using the SHA1 digest of DER-encoded version of certificate
+// offered by the client. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSClientHashSha1(val string) attribute.KeyValue {
+ return TLSClientHashSha1Key.String(val)
+}
+
+// TLSClientHashSha256 returns an attribute KeyValue conforming to the
+// "tls.client.hash.sha256" semantic conventions. It represents the certificate
+// fingerprint using the SHA256 digest of DER-encoded version of certificate
+// offered by the client. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSClientHashSha256(val string) attribute.KeyValue {
+ return TLSClientHashSha256Key.String(val)
+}
+
+// TLSClientIssuer returns an attribute KeyValue conforming to the
+// "tls.client.issuer" semantic conventions. It represents the distinguished
+// name of
+// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of
+// the issuer of the x.509 certificate presented by the client.
+func TLSClientIssuer(val string) attribute.KeyValue {
+ return TLSClientIssuerKey.String(val)
+}
+
+// TLSClientJa3 returns an attribute KeyValue conforming to the
+// "tls.client.ja3" semantic conventions. It represents a hash that identifies
+// clients based on how they perform an SSL/TLS handshake.
+func TLSClientJa3(val string) attribute.KeyValue {
+ return TLSClientJa3Key.String(val)
+}
+
+// TLSClientNotAfter returns an attribute KeyValue conforming to the
+// "tls.client.not_after" semantic conventions. It represents the date/Time
+// indicating when client certificate is no longer considered valid.
+func TLSClientNotAfter(val string) attribute.KeyValue {
+ return TLSClientNotAfterKey.String(val)
+}
+
+// TLSClientNotBefore returns an attribute KeyValue conforming to the
+// "tls.client.not_before" semantic conventions. It represents the date/Time
+// indicating when client certificate is first considered valid.
+func TLSClientNotBefore(val string) attribute.KeyValue {
+ return TLSClientNotBeforeKey.String(val)
+}
+
+// TLSClientServerName returns an attribute KeyValue conforming to the
+// "tls.client.server_name" semantic conventions. It represents the also called
+// an SNI, this tells the server which hostname to which the client is
+// attempting to connect to.
+func TLSClientServerName(val string) attribute.KeyValue {
+ return TLSClientServerNameKey.String(val)
+}
+
+// TLSClientSubject returns an attribute KeyValue conforming to the
+// "tls.client.subject" semantic conventions. It represents the distinguished
+// name of subject of the x.509 certificate presented by the client.
+func TLSClientSubject(val string) attribute.KeyValue {
+ return TLSClientSubjectKey.String(val)
+}
+
+// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the
+// "tls.client.supported_ciphers" semantic conventions. It represents the array
+// of ciphers offered by the client during the client hello.
+func TLSClientSupportedCiphers(val ...string) attribute.KeyValue {
+ return TLSClientSupportedCiphersKey.StringSlice(val)
+}
+
+// TLSCurve returns an attribute KeyValue conforming to the "tls.curve"
+// semantic conventions. It represents the string indicating the curve used for
+// the given cipher, when applicable
+func TLSCurve(val string) attribute.KeyValue {
+ return TLSCurveKey.String(val)
+}
+
+// TLSEstablished returns an attribute KeyValue conforming to the
+// "tls.established" semantic conventions. It represents the boolean flag
+// indicating if the TLS negotiation was successful and transitioned to an
+// encrypted tunnel.
+func TLSEstablished(val bool) attribute.KeyValue {
+ return TLSEstablishedKey.Bool(val)
+}
+
+// TLSNextProtocol returns an attribute KeyValue conforming to the
+// "tls.next_protocol" semantic conventions. It represents the string
+// indicating the protocol being tunneled. Per the values in the [IANA
+// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
+// this string should be lower case.
+func TLSNextProtocol(val string) attribute.KeyValue {
+ return TLSNextProtocolKey.String(val)
+}
+
+// TLSProtocolVersion returns an attribute KeyValue conforming to the
+// "tls.protocol.version" semantic conventions. It represents the numeric part
+// of the version parsed from the original string of the negotiated [SSL/TLS
+// protocol
+// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
+func TLSProtocolVersion(val string) attribute.KeyValue {
+ return TLSProtocolVersionKey.String(val)
+}
+
+// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed"
+// semantic conventions. It represents the boolean flag indicating if this TLS
+// connection was resumed from an existing TLS negotiation.
+func TLSResumed(val bool) attribute.KeyValue {
+ return TLSResumedKey.Bool(val)
+}
+
+// TLSServerCertificate returns an attribute KeyValue conforming to the
+// "tls.server.certificate" semantic conventions. It represents the pEM-encoded
+// stand-alone certificate offered by the server. This is usually
+// mutually-exclusive of `server.certificate_chain` since this value also
+// exists in that list.
+func TLSServerCertificate(val string) attribute.KeyValue {
+ return TLSServerCertificateKey.String(val)
+}
+
+// TLSServerCertificateChain returns an attribute KeyValue conforming to the
+// "tls.server.certificate_chain" semantic conventions. It represents the array
+// of PEM-encoded certificates that make up the certificate chain offered by
+// the server. This is usually mutually-exclusive of `server.certificate` since
+// that value should be the first certificate in the chain.
+func TLSServerCertificateChain(val ...string) attribute.KeyValue {
+ return TLSServerCertificateChainKey.StringSlice(val)
+}
+
+// TLSServerHashMd5 returns an attribute KeyValue conforming to the
+// "tls.server.hash.md5" semantic conventions. It represents the certificate
+// fingerprint using the MD5 digest of DER-encoded version of certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashMd5(val string) attribute.KeyValue {
+ return TLSServerHashMd5Key.String(val)
+}
+
+// TLSServerHashSha1 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha1" semantic conventions. It represents the certificate
+// fingerprint using the SHA1 digest of DER-encoded version of certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashSha1(val string) attribute.KeyValue {
+ return TLSServerHashSha1Key.String(val)
+}
+
+// TLSServerHashSha256 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha256" semantic conventions. It represents the certificate
+// fingerprint using the SHA256 digest of DER-encoded version of certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashSha256(val string) attribute.KeyValue {
+ return TLSServerHashSha256Key.String(val)
+}
+
+// TLSServerIssuer returns an attribute KeyValue conforming to the
+// "tls.server.issuer" semantic conventions. It represents the distinguished
+// name of
+// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of
+// the issuer of the x.509 certificate presented by the server.
+func TLSServerIssuer(val string) attribute.KeyValue {
+ return TLSServerIssuerKey.String(val)
+}
+
+// TLSServerJa3s returns an attribute KeyValue conforming to the
+// "tls.server.ja3s" semantic conventions. It represents a hash that identifies
+// servers based on how they perform an SSL/TLS handshake.
+func TLSServerJa3s(val string) attribute.KeyValue {
+ return TLSServerJa3sKey.String(val)
+}
+
+// TLSServerNotAfter returns an attribute KeyValue conforming to the
+// "tls.server.not_after" semantic conventions. It represents the date/Time
+// indicating when server certificate is no longer considered valid.
+func TLSServerNotAfter(val string) attribute.KeyValue {
+ return TLSServerNotAfterKey.String(val)
+}
+
+// TLSServerNotBefore returns an attribute KeyValue conforming to the
+// "tls.server.not_before" semantic conventions. It represents the date/Time
+// indicating when server certificate is first considered valid.
+func TLSServerNotBefore(val string) attribute.KeyValue {
+ return TLSServerNotBeforeKey.String(val)
+}
+
+// TLSServerSubject returns an attribute KeyValue conforming to the
+// "tls.server.subject" semantic conventions. It represents the distinguished
+// name of subject of the x.509 certificate presented by the server.
+func TLSServerSubject(val string) attribute.KeyValue {
+ return TLSServerSubjectKey.String(val)
+}
+
+// Attributes describing URL.
+const (
+ // URLFragmentKey is the attribute Key conforming to the "url.fragment"
+ // semantic conventions. It represents the [URI
+ // fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'SemConv'
+ URLFragmentKey = attribute.Key("url.fragment")
+
+ // URLFullKey is the attribute Key conforming to the "url.full" semantic
+ // conventions. It represents the absolute URL describing a network
+ // resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986)
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv',
+ // '//localhost'
+ // Note: For network calls, URL usually has
+ // `scheme://host[:port][path][?query][#fragment]` format, where the
+ // fragment is not transmitted over HTTP, but if it is known, it SHOULD be
+ // included nevertheless.
+ // `url.full` MUST NOT contain credentials passed via URL in form of
+ // `https://username:[email protected]/`. In such case username and
+ // password SHOULD be redacted and attribute's value SHOULD be
+ // `https://REDACTED:[email protected]/`.
+ // `url.full` SHOULD capture the absolute URL when it is available (or can
+ // be reconstructed) and SHOULD NOT be validated or modified except for
+ // sanitizing purposes.
+ URLFullKey = attribute.Key("url.full")
+
+ // URLPathKey is the attribute Key conforming to the "url.path" semantic
+ // conventions. It represents the [URI
+ // path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/search'
+ URLPathKey = attribute.Key("url.path")
+
+ // URLQueryKey is the attribute Key conforming to the "url.query" semantic
+ // conventions. It represents the [URI
+ // query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'q=OpenTelemetry'
+ // Note: Sensitive content provided in query string SHOULD be scrubbed when
+ // instrumentations can identify it.
+ URLQueryKey = attribute.Key("url.query")
+
+ // URLSchemeKey is the attribute Key conforming to the "url.scheme"
+ // semantic conventions. It represents the [URI
+ // scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
+ // identifying the used protocol.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'https', 'ftp', 'telnet'
+ URLSchemeKey = attribute.Key("url.scheme")
+)
+
+// URLFragment returns an attribute KeyValue conforming to the
+// "url.fragment" semantic conventions. It represents the [URI
+// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component
+func URLFragment(val string) attribute.KeyValue {
+ return URLFragmentKey.String(val)
+}
+
+// URLFull returns an attribute KeyValue conforming to the "url.full"
+// semantic conventions. It represents the absolute URL describing a network
+// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986)
+func URLFull(val string) attribute.KeyValue {
+ return URLFullKey.String(val)
+}
+
+// URLPath returns an attribute KeyValue conforming to the "url.path"
+// semantic conventions. It represents the [URI
+// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component
+func URLPath(val string) attribute.KeyValue {
+ return URLPathKey.String(val)
+}
+
+// URLQuery returns an attribute KeyValue conforming to the "url.query"
+// semantic conventions. It represents the [URI
+// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component
+func URLQuery(val string) attribute.KeyValue {
+ return URLQueryKey.String(val)
+}
+
+// URLScheme returns an attribute KeyValue conforming to the "url.scheme"
+// semantic conventions. It represents the [URI
+// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
+// identifying the used protocol.
+func URLScheme(val string) attribute.KeyValue {
+ return URLSchemeKey.String(val)
+}
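The `url.full` note above requires embedded credentials to be redacted before the attribute is recorded. A minimal sketch of that rule (the helper name and the standalone `main` package are illustrative, not part of this diff):

```go
package main

import (
	"net/url"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// redactedURLFull strips embedded credentials, per the url.full note,
// before constructing the attribute.
func redactedURLFull(raw string) (attribute.KeyValue, error) {
	u, err := url.Parse(raw)
	if err != nil {
		return attribute.KeyValue{}, err
	}
	if u.User != nil { // url.full MUST NOT carry credentials
		u.User = url.UserPassword("REDACTED", "REDACTED")
	}
	return semconv.URLFull(u.String()), nil
}
```

For `https://user:[email protected]/` this yields `https://REDACTED:[email protected]/`, matching the example in the note.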
+
+// Describes user-agent attributes.
+const (
+ // UserAgentOriginalKey is the attribute Key conforming to the
+ // "user_agent.original" semantic conventions. It represents the value of
+ // the [HTTP
+ // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+ // header sent by the client.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU
+ // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko)
+ // Version/14.1.2 Mobile/15E148 Safari/604.1'
+ UserAgentOriginalKey = attribute.Key("user_agent.original")
+)
+
+// UserAgentOriginal returns an attribute KeyValue conforming to the
+// "user_agent.original" semantic conventions. It represents the value of the
+// [HTTP
+// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+// header sent by the client.
+func UserAgentOriginal(val string) attribute.KeyValue {
+ return UserAgentOriginalKey.String(val)
+}
+
+// Session is defined as the period of time encompassing all activities
+// performed by the application and the actions executed by the end user.
+// Consequently, a Session is represented as a collection of Logs, Events, and
+// Spans emitted by the Client Application throughout the Session's duration.
+// Each Session is assigned a unique identifier, which is included as an
+// attribute in the Logs, Events, and Spans generated during the Session's
+// lifecycle.
+// When a session reaches end of life, typically due to user inactivity or
+// session timeout, a new session identifier will be assigned. The previous
+// session identifier may be provided by the instrumentation so that telemetry
+// backends can link the two sessions.
+const (
+ // SessionIDKey is the attribute Key conforming to the "session.id"
+ // semantic conventions. It represents a unique id to identify a session.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '00112233-4455-6677-8899-aabbccddeeff'
+ SessionIDKey = attribute.Key("session.id")
+
+ // SessionPreviousIDKey is the attribute Key conforming to the
+ // "session.previous_id" semantic conventions. It represents the previous
+ // `session.id` for this user, when known.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '00112233-4455-6677-8899-aabbccddeeff'
+ SessionPreviousIDKey = attribute.Key("session.previous_id")
+)
+
+// SessionID returns an attribute KeyValue conforming to the "session.id"
+// semantic conventions. It represents a unique id to identify a session.
+func SessionID(val string) attribute.KeyValue {
+ return SessionIDKey.String(val)
+}
+
+// SessionPreviousID returns an attribute KeyValue conforming to the
+// "session.previous_id" semantic conventions. It represents the previous
+// `session.id` for this user, when known.
+func SessionPreviousID(val string) attribute.KeyValue {
+ return SessionPreviousIDKey.String(val)
+}
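Taken together, the helpers in this file are plain constructors for `attribute.KeyValue`s. A hedged usage sketch (assumes an SDK tracer provider is configured elsewhere; the tracer name, span name, and values are invented for illustration):

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
	"go.opentelemetry.io/otel/trace"
)

func handleConn(ctx context.Context) {
	_, span := otel.Tracer("example").Start(ctx, "inbound-request",
		trace.WithAttributes(
			semconv.URLScheme("https"),
			semconv.UserAgentOriginal("CERN-LineMode/2.15 libwww/2.17b3"),
			semconv.TLSEstablished(true),
			semconv.TLSProtocolNameTLS, // enum value declared above
			semconv.TLSProtocolVersion("1.3"),
			semconv.SessionID("00112233-4455-6677-8899-aabbccddeeff"),
		))
	defer span.End()
	// ... serve the request ...
}
```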
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go
new file mode 100644
index 0000000000000..d27e8a8f8b3f6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package semconv implements OpenTelemetry semantic conventions.
+//
+// OpenTelemetry semantic conventions are agreed standardized naming
+// patterns for OpenTelemetry things. This package represents the v1.24.0
+// version of the OpenTelemetry semantic conventions.
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go
new file mode 100644
index 0000000000000..6c019aafc3ef4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go
@@ -0,0 +1,200 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// This event represents an occurrence of a lifecycle transition on the iOS
+// platform.
+const (
+ // IosStateKey is the attribute Key conforming to the "ios.state" semantic
+	// conventions. It represents the state the application has transitioned
+	// into at the occurrence of the event.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Note: The iOS lifecycle states are defined in the [UIApplicationDelegate
+ // documentation](https://developer.apple.com/documentation/uikit/uiapplicationdelegate#1656902),
+ // and from which the `OS terminology` column values are derived.
+ IosStateKey = attribute.Key("ios.state")
+)
+
+var (
+ // The app has become `active`. Associated with UIKit notification `applicationDidBecomeActive`
+ IosStateActive = IosStateKey.String("active")
+ // The app is now `inactive`. Associated with UIKit notification `applicationWillResignActive`
+ IosStateInactive = IosStateKey.String("inactive")
+ // The app is now in the background. This value is associated with UIKit notification `applicationDidEnterBackground`
+ IosStateBackground = IosStateKey.String("background")
+ // The app is now in the foreground. This value is associated with UIKit notification `applicationWillEnterForeground`
+ IosStateForeground = IosStateKey.String("foreground")
+ // The app is about to terminate. Associated with UIKit notification `applicationWillTerminate`
+ IosStateTerminate = IosStateKey.String("terminate")
+)
+
+// This event represents an occurrence of a lifecycle transition on the Android
+// platform.
+const (
+ // AndroidStateKey is the attribute Key conforming to the "android.state"
+	// semantic conventions. It represents the state the application has
+	// transitioned into at the occurrence of the event.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Note: The Android lifecycle states are defined in [Activity lifecycle
+ // callbacks](https://developer.android.com/guide/components/activities/activity-lifecycle#lc),
+ // and from which the `OS identifiers` are derived.
+ AndroidStateKey = attribute.Key("android.state")
+)
+
+var (
+ // Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time
+ AndroidStateCreated = AndroidStateKey.String("created")
+ // Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state
+ AndroidStateBackground = AndroidStateKey.String("background")
+ // Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states
+ AndroidStateForeground = AndroidStateKey.String("foreground")
+)
+
+// This semantic convention defines the attributes used to represent a feature
+// flag evaluation as an event.
+const (
+ // FeatureFlagKeyKey is the attribute Key conforming to the
+ // "feature_flag.key" semantic conventions. It represents the unique
+ // identifier of the feature flag.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Examples: 'logo-color'
+ FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+ // FeatureFlagProviderNameKey is the attribute Key conforming to the
+ // "feature_flag.provider_name" semantic conventions. It represents the
+ // name of the service provider that performs the flag evaluation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: experimental
+ // Examples: 'Flag Manager'
+ FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
+
+ // FeatureFlagVariantKey is the attribute Key conforming to the
+ // "feature_flag.variant" semantic conventions. It represents the sHOULD be
+ // a semantic identifier for a value. If one is unavailable, a stringified
+ // version of the value can be used.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: experimental
+ // Examples: 'red', 'true', 'on'
+ // Note: A semantic identifier, commonly referred to as a variant, provides
+ // a means
+ // for referring to a value without including the value itself. This can
+ // provide additional context for understanding the meaning behind a value.
+	// For example, the variant `red` may be used for the value `#c05543`.
+ //
+ // A stringified version of the value can be used in situations where a
+ // semantic identifier is unavailable. String representation of the value
+ // should be determined by the implementer.
+ FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
+)
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the unique identifier
+// of the feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+ return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider_name" semantic conventions. It represents the name of
+// the service provider that performs the flag evaluation.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+ return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagVariant returns an attribute KeyValue conforming to the
+// "feature_flag.variant" semantic conventions. It represents the sHOULD be a
+// semantic identifier for a value. If one is unavailable, a stringified
+// version of the value can be used.
+func FeatureFlagVariant(val string) attribute.KeyValue {
+ return FeatureFlagVariantKey.String(val)
+}
+
+// RPC received/sent message.
+const (
+ // MessageCompressedSizeKey is the attribute Key conforming to the
+ // "message.compressed_size" semantic conventions. It represents the
+ // compressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessageCompressedSizeKey = attribute.Key("message.compressed_size")
+
+ // MessageIDKey is the attribute Key conforming to the "message.id"
+	// semantic conventions. It MUST be calculated as two different counters
+	// starting from `1`: one for sent messages and one for received messages.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: This way we guarantee that the values will be consistent between
+ // different implementations.
+ MessageIDKey = attribute.Key("message.id")
+
+ // MessageTypeKey is the attribute Key conforming to the "message.type"
+	// semantic conventions. It represents whether this is a received or
+ // sent message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessageTypeKey = attribute.Key("message.type")
+
+ // MessageUncompressedSizeKey is the attribute Key conforming to the
+ // "message.uncompressed_size" semantic conventions. It represents the
+ // uncompressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
+)
+
+var (
+ // sent
+ MessageTypeSent = MessageTypeKey.String("SENT")
+ // received
+ MessageTypeReceived = MessageTypeKey.String("RECEIVED")
+)
+
+// MessageCompressedSize returns an attribute KeyValue conforming to the
+// "message.compressed_size" semantic conventions. It represents the compressed
+// size of the message in bytes.
+func MessageCompressedSize(val int) attribute.KeyValue {
+ return MessageCompressedSizeKey.Int(val)
+}
+
+// MessageID returns an attribute KeyValue conforming to the "message.id"
+// semantic conventions. It MUST be calculated as two different counters
+// starting from `1`: one for sent messages and one for received messages.
+func MessageID(val int) attribute.KeyValue {
+ return MessageIDKey.Int(val)
+}
+
+// MessageUncompressedSize returns an attribute KeyValue conforming to the
+// "message.uncompressed_size" semantic conventions. It represents the
+// uncompressed size of the message in bytes.
+func MessageUncompressedSize(val int) attribute.KeyValue {
+ return MessageUncompressedSizeKey.Int(val)
+}
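A sketch of how the `message.*` attributes above might be attached to a span event for an outbound RPC message. The event name "message" and the per-direction counter are drawn from the RPC conventions, not enforced by this file:

```go
package main

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
	"go.opentelemetry.io/otel/trace"
)

// recordSentMessage annotates the span with one outbound message.
// msgID is a per-direction counter that starts at 1.
func recordSentMessage(span trace.Span, msgID, uncompressedBytes int) {
	span.AddEvent("message",
		trace.WithAttributes(
			semconv.MessageTypeSent,
			semconv.MessageID(msgID),
			semconv.MessageUncompressedSize(uncompressedBytes),
		))
}
```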
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go
new file mode 100644
index 0000000000000..7235bb51d9a48
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
+
+const (
+ // ExceptionEventName is the name of the Span event representing an exception.
+ ExceptionEventName = "exception"
+)
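`ExceptionEventName` is the span event name the SDK emits for recorded errors; `span.RecordError` is the usual entry point. Adding the event by hand, assuming the `Exception*` attribute keys defined elsewhere in this package, might look like:

```go
package main

import (
	"fmt"

	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
	"go.opentelemetry.io/otel/trace"
)

// recordException mirrors what span.RecordError produces: an "exception"
// event carrying the error's type and message.
func recordException(span trace.Span, err error) {
	span.AddEvent(semconv.ExceptionEventName,
		trace.WithAttributes(
			semconv.ExceptionTypeKey.String(fmt.Sprintf("%T", err)),
			semconv.ExceptionMessageKey.String(err.Error()),
		))
}
```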
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/metric.go
new file mode 100644
index 0000000000000..a6b953f625e57
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/metric.go
@@ -0,0 +1,1071 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
+
+const (
+
+ // DBClientConnectionsUsage is the metric conforming to the
+ // "db.client.connections.usage" semantic conventions. It represents the number
+ // of connections that are currently in state described by the `state`
+ // attribute.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ DBClientConnectionsUsageName = "db.client.connections.usage"
+ DBClientConnectionsUsageUnit = "{connection}"
+ DBClientConnectionsUsageDescription = "The number of connections that are currently in state described by the `state` attribute"
+
+ // DBClientConnectionsIdleMax is the metric conforming to the
+ // "db.client.connections.idle.max" semantic conventions. It represents the
+ // maximum number of idle open connections allowed.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ DBClientConnectionsIdleMaxName = "db.client.connections.idle.max"
+ DBClientConnectionsIdleMaxUnit = "{connection}"
+ DBClientConnectionsIdleMaxDescription = "The maximum number of idle open connections allowed"
+
+ // DBClientConnectionsIdleMin is the metric conforming to the
+ // "db.client.connections.idle.min" semantic conventions. It represents the
+ // minimum number of idle open connections allowed.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ DBClientConnectionsIdleMinName = "db.client.connections.idle.min"
+ DBClientConnectionsIdleMinUnit = "{connection}"
+ DBClientConnectionsIdleMinDescription = "The minimum number of idle open connections allowed"
+
+ // DBClientConnectionsMax is the metric conforming to the
+ // "db.client.connections.max" semantic conventions. It represents the maximum
+ // number of open connections allowed.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ DBClientConnectionsMaxName = "db.client.connections.max"
+ DBClientConnectionsMaxUnit = "{connection}"
+ DBClientConnectionsMaxDescription = "The maximum number of open connections allowed"
+
+ // DBClientConnectionsPendingRequests is the metric conforming to the
+ // "db.client.connections.pending_requests" semantic conventions. It represents
+ // the number of pending requests for an open connection, cumulative for the
+ // entire pool.
+ // Instrument: updowncounter
+ // Unit: {request}
+ // Stability: Experimental
+ DBClientConnectionsPendingRequestsName = "db.client.connections.pending_requests"
+ DBClientConnectionsPendingRequestsUnit = "{request}"
+ DBClientConnectionsPendingRequestsDescription = "The number of pending requests for an open connection, cumulative for the entire pool"
+
+ // DBClientConnectionsTimeouts is the metric conforming to the
+ // "db.client.connections.timeouts" semantic conventions. It represents the
+ // number of connection timeouts that have occurred trying to obtain a
+ // connection from the pool.
+ // Instrument: counter
+ // Unit: {timeout}
+ // Stability: Experimental
+ DBClientConnectionsTimeoutsName = "db.client.connections.timeouts"
+ DBClientConnectionsTimeoutsUnit = "{timeout}"
+ DBClientConnectionsTimeoutsDescription = "The number of connection timeouts that have occurred trying to obtain a connection from the pool"
+
+ // DBClientConnectionsCreateTime is the metric conforming to the
+ // "db.client.connections.create_time" semantic conventions. It represents the
+ // time it took to create a new connection.
+ // Instrument: histogram
+ // Unit: ms
+ // Stability: Experimental
+ DBClientConnectionsCreateTimeName = "db.client.connections.create_time"
+ DBClientConnectionsCreateTimeUnit = "ms"
+ DBClientConnectionsCreateTimeDescription = "The time it took to create a new connection"
+
+ // DBClientConnectionsWaitTime is the metric conforming to the
+ // "db.client.connections.wait_time" semantic conventions. It represents the
+ // time it took to obtain an open connection from the pool.
+ // Instrument: histogram
+ // Unit: ms
+ // Stability: Experimental
+ DBClientConnectionsWaitTimeName = "db.client.connections.wait_time"
+ DBClientConnectionsWaitTimeUnit = "ms"
+ DBClientConnectionsWaitTimeDescription = "The time it took to obtain an open connection from the pool"
+
+ // DBClientConnectionsUseTime is the metric conforming to the
+ // "db.client.connections.use_time" semantic conventions. It represents the
+ // time between borrowing a connection and returning it to the pool.
+ // Instrument: histogram
+ // Unit: ms
+ // Stability: Experimental
+ DBClientConnectionsUseTimeName = "db.client.connections.use_time"
+ DBClientConnectionsUseTimeUnit = "ms"
+ DBClientConnectionsUseTimeDescription = "The time between borrowing a connection and returning it to the pool"
+
+ // AspnetcoreRoutingMatchAttempts is the metric conforming to the
+ // "aspnetcore.routing.match_attempts" semantic conventions. It represents the
+ // number of requests that were attempted to be matched to an endpoint.
+ // Instrument: counter
+ // Unit: {match_attempt}
+ // Stability: Experimental
+ AspnetcoreRoutingMatchAttemptsName = "aspnetcore.routing.match_attempts"
+ AspnetcoreRoutingMatchAttemptsUnit = "{match_attempt}"
+ AspnetcoreRoutingMatchAttemptsDescription = "Number of requests that were attempted to be matched to an endpoint."
+
+ // AspnetcoreDiagnosticsExceptions is the metric conforming to the
+ // "aspnetcore.diagnostics.exceptions" semantic conventions. It represents the
+ // number of exceptions caught by exception handling middleware.
+ // Instrument: counter
+ // Unit: {exception}
+ // Stability: Experimental
+ AspnetcoreDiagnosticsExceptionsName = "aspnetcore.diagnostics.exceptions"
+ AspnetcoreDiagnosticsExceptionsUnit = "{exception}"
+ AspnetcoreDiagnosticsExceptionsDescription = "Number of exceptions caught by exception handling middleware."
+
+ // AspnetcoreRateLimitingActiveRequestLeases is the metric conforming to the
+ // "aspnetcore.rate_limiting.active_request_leases" semantic conventions. It
+ // represents the number of requests that are currently active on the server
+ // that hold a rate limiting lease.
+ // Instrument: updowncounter
+ // Unit: {request}
+ // Stability: Experimental
+ AspnetcoreRateLimitingActiveRequestLeasesName = "aspnetcore.rate_limiting.active_request_leases"
+ AspnetcoreRateLimitingActiveRequestLeasesUnit = "{request}"
+ AspnetcoreRateLimitingActiveRequestLeasesDescription = "Number of requests that are currently active on the server that hold a rate limiting lease."
+
+ // AspnetcoreRateLimitingRequestLeaseDuration is the metric conforming to the
+ // "aspnetcore.rate_limiting.request_lease.duration" semantic conventions. It
+ // represents the duration of rate limiting lease held by requests on the
+ // server.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ AspnetcoreRateLimitingRequestLeaseDurationName = "aspnetcore.rate_limiting.request_lease.duration"
+ AspnetcoreRateLimitingRequestLeaseDurationUnit = "s"
+ AspnetcoreRateLimitingRequestLeaseDurationDescription = "The duration of rate limiting lease held by requests on the server."
+
+ // AspnetcoreRateLimitingRequestTimeInQueue is the metric conforming to the
+ // "aspnetcore.rate_limiting.request.time_in_queue" semantic conventions. It
+ // represents the time the request spent in a queue waiting to acquire a rate
+ // limiting lease.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ AspnetcoreRateLimitingRequestTimeInQueueName = "aspnetcore.rate_limiting.request.time_in_queue"
+ AspnetcoreRateLimitingRequestTimeInQueueUnit = "s"
+ AspnetcoreRateLimitingRequestTimeInQueueDescription = "The time the request spent in a queue waiting to acquire a rate limiting lease."
+
+ // AspnetcoreRateLimitingQueuedRequests is the metric conforming to the
+ // "aspnetcore.rate_limiting.queued_requests" semantic conventions. It
+ // represents the number of requests that are currently queued, waiting to
+ // acquire a rate limiting lease.
+ // Instrument: updowncounter
+ // Unit: {request}
+ // Stability: Experimental
+ AspnetcoreRateLimitingQueuedRequestsName = "aspnetcore.rate_limiting.queued_requests"
+ AspnetcoreRateLimitingQueuedRequestsUnit = "{request}"
+ AspnetcoreRateLimitingQueuedRequestsDescription = "Number of requests that are currently queued, waiting to acquire a rate limiting lease."
+
+ // AspnetcoreRateLimitingRequests is the metric conforming to the
+ // "aspnetcore.rate_limiting.requests" semantic conventions. It represents the
+ // number of requests that tried to acquire a rate limiting lease.
+ // Instrument: counter
+ // Unit: {request}
+ // Stability: Experimental
+ AspnetcoreRateLimitingRequestsName = "aspnetcore.rate_limiting.requests"
+ AspnetcoreRateLimitingRequestsUnit = "{request}"
+ AspnetcoreRateLimitingRequestsDescription = "Number of requests that tried to acquire a rate limiting lease."
+
+ // DNSLookupDuration is the metric conforming to the "dns.lookup.duration"
+	// semantic conventions. It measures the time taken to perform a
+ // DNS lookup.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ DNSLookupDurationName = "dns.lookup.duration"
+ DNSLookupDurationUnit = "s"
+ DNSLookupDurationDescription = "Measures the time taken to perform a DNS lookup."
+
+ // HTTPClientOpenConnections is the metric conforming to the
+ // "http.client.open_connections" semantic conventions. It represents the
+ // number of outbound HTTP connections that are currently active or idle on the
+ // client.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ HTTPClientOpenConnectionsName = "http.client.open_connections"
+ HTTPClientOpenConnectionsUnit = "{connection}"
+ HTTPClientOpenConnectionsDescription = "Number of outbound HTTP connections that are currently active or idle on the client."
+
+ // HTTPClientConnectionDuration is the metric conforming to the
+ // "http.client.connection.duration" semantic conventions. It represents the
+ // duration of the successfully established outbound HTTP connections.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ HTTPClientConnectionDurationName = "http.client.connection.duration"
+ HTTPClientConnectionDurationUnit = "s"
+ HTTPClientConnectionDurationDescription = "The duration of the successfully established outbound HTTP connections."
+
+ // HTTPClientActiveRequests is the metric conforming to the
+ // "http.client.active_requests" semantic conventions. It represents the number
+ // of active HTTP requests.
+ // Instrument: updowncounter
+ // Unit: {request}
+ // Stability: Experimental
+ HTTPClientActiveRequestsName = "http.client.active_requests"
+ HTTPClientActiveRequestsUnit = "{request}"
+ HTTPClientActiveRequestsDescription = "Number of active HTTP requests."
+
+ // HTTPClientRequestTimeInQueue is the metric conforming to the
+ // "http.client.request.time_in_queue" semantic conventions. It represents the
+ // amount of time requests spent on a queue waiting for an available
+ // connection.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ HTTPClientRequestTimeInQueueName = "http.client.request.time_in_queue"
+ HTTPClientRequestTimeInQueueUnit = "s"
+ HTTPClientRequestTimeInQueueDescription = "The amount of time requests spent on a queue waiting for an available connection."
+
+ // KestrelActiveConnections is the metric conforming to the
+ // "kestrel.active_connections" semantic conventions. It represents the number
+ // of connections that are currently active on the server.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ KestrelActiveConnectionsName = "kestrel.active_connections"
+ KestrelActiveConnectionsUnit = "{connection}"
+ KestrelActiveConnectionsDescription = "Number of connections that are currently active on the server."
+
+ // KestrelConnectionDuration is the metric conforming to the
+ // "kestrel.connection.duration" semantic conventions. It represents the
+ // duration of connections on the server.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ KestrelConnectionDurationName = "kestrel.connection.duration"
+ KestrelConnectionDurationUnit = "s"
+ KestrelConnectionDurationDescription = "The duration of connections on the server."
+
+ // KestrelRejectedConnections is the metric conforming to the
+ // "kestrel.rejected_connections" semantic conventions. It represents the
+ // number of connections rejected by the server.
+ // Instrument: counter
+ // Unit: {connection}
+ // Stability: Experimental
+ KestrelRejectedConnectionsName = "kestrel.rejected_connections"
+ KestrelRejectedConnectionsUnit = "{connection}"
+ KestrelRejectedConnectionsDescription = "Number of connections rejected by the server."
+
+ // KestrelQueuedConnections is the metric conforming to the
+ // "kestrel.queued_connections" semantic conventions. It represents the number
+ // of connections that are currently queued and are waiting to start.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ KestrelQueuedConnectionsName = "kestrel.queued_connections"
+ KestrelQueuedConnectionsUnit = "{connection}"
+ KestrelQueuedConnectionsDescription = "Number of connections that are currently queued and are waiting to start."
+
+ // KestrelQueuedRequests is the metric conforming to the
+ // "kestrel.queued_requests" semantic conventions. It represents the number of
+ // HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are
+ // currently queued and are waiting to start.
+ // Instrument: updowncounter
+ // Unit: {request}
+ // Stability: Experimental
+ KestrelQueuedRequestsName = "kestrel.queued_requests"
+ KestrelQueuedRequestsUnit = "{request}"
+ KestrelQueuedRequestsDescription = "Number of HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are currently queued and are waiting to start."
+
+ // KestrelUpgradedConnections is the metric conforming to the
+ // "kestrel.upgraded_connections" semantic conventions. It represents the
+	// number of connections that are currently upgraded (WebSockets).
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ KestrelUpgradedConnectionsName = "kestrel.upgraded_connections"
+ KestrelUpgradedConnectionsUnit = "{connection}"
+	KestrelUpgradedConnectionsDescription = "Number of connections that are currently upgraded (WebSockets)."
+
+ // KestrelTLSHandshakeDuration is the metric conforming to the
+ // "kestrel.tls_handshake.duration" semantic conventions. It represents the
+ // duration of TLS handshakes on the server.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ KestrelTLSHandshakeDurationName = "kestrel.tls_handshake.duration"
+ KestrelTLSHandshakeDurationUnit = "s"
+ KestrelTLSHandshakeDurationDescription = "The duration of TLS handshakes on the server."
+
+ // KestrelActiveTLSHandshakes is the metric conforming to the
+ // "kestrel.active_tls_handshakes" semantic conventions. It represents the
+ // number of TLS handshakes that are currently in progress on the server.
+ // Instrument: updowncounter
+ // Unit: {handshake}
+ // Stability: Experimental
+ KestrelActiveTLSHandshakesName = "kestrel.active_tls_handshakes"
+ KestrelActiveTLSHandshakesUnit = "{handshake}"
+ KestrelActiveTLSHandshakesDescription = "Number of TLS handshakes that are currently in progress on the server."
+
+ // SignalrServerConnectionDuration is the metric conforming to the
+ // "signalr.server.connection.duration" semantic conventions. It represents the
+ // duration of connections on the server.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ SignalrServerConnectionDurationName = "signalr.server.connection.duration"
+ SignalrServerConnectionDurationUnit = "s"
+ SignalrServerConnectionDurationDescription = "The duration of connections on the server."
+
+ // SignalrServerActiveConnections is the metric conforming to the
+ // "signalr.server.active_connections" semantic conventions. It represents the
+ // number of connections that are currently active on the server.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ SignalrServerActiveConnectionsName = "signalr.server.active_connections"
+ SignalrServerActiveConnectionsUnit = "{connection}"
+ SignalrServerActiveConnectionsDescription = "Number of connections that are currently active on the server."
+
+ // FaaSInvokeDuration is the metric conforming to the "faas.invoke_duration"
+	// semantic conventions. It measures the duration of the
+ // function's logic execution.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ FaaSInvokeDurationName = "faas.invoke_duration"
+ FaaSInvokeDurationUnit = "s"
+ FaaSInvokeDurationDescription = "Measures the duration of the function's logic execution"
+
+ // FaaSInitDuration is the metric conforming to the "faas.init_duration"
+	// semantic conventions. It measures the duration of the
+ // function's initialization, such as a cold start.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ FaaSInitDurationName = "faas.init_duration"
+ FaaSInitDurationUnit = "s"
+ FaaSInitDurationDescription = "Measures the duration of the function's initialization, such as a cold start"
+
+ // FaaSColdstarts is the metric conforming to the "faas.coldstarts" semantic
+ // conventions. It represents the number of invocation cold starts.
+ // Instrument: counter
+ // Unit: {coldstart}
+ // Stability: Experimental
+ FaaSColdstartsName = "faas.coldstarts"
+ FaaSColdstartsUnit = "{coldstart}"
+ FaaSColdstartsDescription = "Number of invocation cold starts"
+
+ // FaaSErrors is the metric conforming to the "faas.errors" semantic
+ // conventions. It represents the number of invocation errors.
+ // Instrument: counter
+ // Unit: {error}
+ // Stability: Experimental
+ FaaSErrorsName = "faas.errors"
+ FaaSErrorsUnit = "{error}"
+ FaaSErrorsDescription = "Number of invocation errors"
+
+ // FaaSInvocations is the metric conforming to the "faas.invocations" semantic
+ // conventions. It represents the number of successful invocations.
+ // Instrument: counter
+ // Unit: {invocation}
+ // Stability: Experimental
+ FaaSInvocationsName = "faas.invocations"
+ FaaSInvocationsUnit = "{invocation}"
+ FaaSInvocationsDescription = "Number of successful invocations"
+
+ // FaaSTimeouts is the metric conforming to the "faas.timeouts" semantic
+ // conventions. It represents the number of invocation timeouts.
+ // Instrument: counter
+ // Unit: {timeout}
+ // Stability: Experimental
+ FaaSTimeoutsName = "faas.timeouts"
+ FaaSTimeoutsUnit = "{timeout}"
+ FaaSTimeoutsDescription = "Number of invocation timeouts"
+
+ // FaaSMemUsage is the metric conforming to the "faas.mem_usage" semantic
+ // conventions. It represents the distribution of max memory usage per
+ // invocation.
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ FaaSMemUsageName = "faas.mem_usage"
+ FaaSMemUsageUnit = "By"
+ FaaSMemUsageDescription = "Distribution of max memory usage per invocation"
+
+ // FaaSCPUUsage is the metric conforming to the "faas.cpu_usage" semantic
+ // conventions. It represents the distribution of CPU usage per invocation.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ FaaSCPUUsageName = "faas.cpu_usage"
+ FaaSCPUUsageUnit = "s"
+ FaaSCPUUsageDescription = "Distribution of CPU usage per invocation"
+
+ // FaaSNetIo is the metric conforming to the "faas.net_io" semantic
+ // conventions. It represents the distribution of net I/O usage per invocation.
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ FaaSNetIoName = "faas.net_io"
+ FaaSNetIoUnit = "By"
+ FaaSNetIoDescription = "Distribution of net I/O usage per invocation"
+
+ // HTTPServerRequestDuration is the metric conforming to the
+ // "http.server.request.duration" semantic conventions. It represents the
+ // duration of HTTP server requests.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ HTTPServerRequestDurationName = "http.server.request.duration"
+ HTTPServerRequestDurationUnit = "s"
+ HTTPServerRequestDurationDescription = "Duration of HTTP server requests."
+
+ // HTTPServerActiveRequests is the metric conforming to the
+ // "http.server.active_requests" semantic conventions. It represents the number
+ // of active HTTP server requests.
+ // Instrument: updowncounter
+ // Unit: {request}
+ // Stability: Experimental
+ HTTPServerActiveRequestsName = "http.server.active_requests"
+ HTTPServerActiveRequestsUnit = "{request}"
+ HTTPServerActiveRequestsDescription = "Number of active HTTP server requests."
+
+ // HTTPServerRequestBodySize is the metric conforming to the
+ // "http.server.request.body.size" semantic conventions. It represents the size
+ // of HTTP server request bodies.
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ HTTPServerRequestBodySizeName = "http.server.request.body.size"
+ HTTPServerRequestBodySizeUnit = "By"
+ HTTPServerRequestBodySizeDescription = "Size of HTTP server request bodies."
+
+ // HTTPServerResponseBodySize is the metric conforming to the
+ // "http.server.response.body.size" semantic conventions. It represents the
+ // size of HTTP server response bodies.
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ HTTPServerResponseBodySizeName = "http.server.response.body.size"
+ HTTPServerResponseBodySizeUnit = "By"
+ HTTPServerResponseBodySizeDescription = "Size of HTTP server response bodies."
+
+ // HTTPClientRequestDuration is the metric conforming to the
+ // "http.client.request.duration" semantic conventions. It represents the
+ // duration of HTTP client requests.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ HTTPClientRequestDurationName = "http.client.request.duration"
+ HTTPClientRequestDurationUnit = "s"
+ HTTPClientRequestDurationDescription = "Duration of HTTP client requests."
+
+ // HTTPClientRequestBodySize is the metric conforming to the
+ // "http.client.request.body.size" semantic conventions. It represents the size
+ // of HTTP client request bodies.
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ HTTPClientRequestBodySizeName = "http.client.request.body.size"
+ HTTPClientRequestBodySizeUnit = "By"
+ HTTPClientRequestBodySizeDescription = "Size of HTTP client request bodies."
+
+ // HTTPClientResponseBodySize is the metric conforming to the
+ // "http.client.response.body.size" semantic conventions. It represents the
+ // size of HTTP client response bodies.
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ HTTPClientResponseBodySizeName = "http.client.response.body.size"
+ HTTPClientResponseBodySizeUnit = "By"
+ HTTPClientResponseBodySizeDescription = "Size of HTTP client response bodies."
+
+ // JvmMemoryInit is the metric conforming to the "jvm.memory.init" semantic
+ // conventions. It represents the measure of initial memory requested.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ JvmMemoryInitName = "jvm.memory.init"
+ JvmMemoryInitUnit = "By"
+ JvmMemoryInitDescription = "Measure of initial memory requested."
+
+ // JvmSystemCPUUtilization is the metric conforming to the
+ // "jvm.system.cpu.utilization" semantic conventions. It represents the recent
+ // CPU utilization for the whole system as reported by the JVM.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Experimental
+ JvmSystemCPUUtilizationName = "jvm.system.cpu.utilization"
+ JvmSystemCPUUtilizationUnit = "1"
+ JvmSystemCPUUtilizationDescription = "Recent CPU utilization for the whole system as reported by the JVM."
+
+ // JvmSystemCPULoad1m is the metric conforming to the "jvm.system.cpu.load_1m"
+ // semantic conventions. It represents the average CPU load of the whole system
+ // for the last minute as reported by the JVM.
+ // Instrument: gauge
+ // Unit: {run_queue_item}
+ // Stability: Experimental
+ JvmSystemCPULoad1mName = "jvm.system.cpu.load_1m"
+ JvmSystemCPULoad1mUnit = "{run_queue_item}"
+ JvmSystemCPULoad1mDescription = "Average CPU load of the whole system for the last minute as reported by the JVM."
+
+ // JvmBufferMemoryUsage is the metric conforming to the
+ // "jvm.buffer.memory.usage" semantic conventions. It represents the measure of
+ // memory used by buffers.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ JvmBufferMemoryUsageName = "jvm.buffer.memory.usage"
+ JvmBufferMemoryUsageUnit = "By"
+ JvmBufferMemoryUsageDescription = "Measure of memory used by buffers."
+
+ // JvmBufferMemoryLimit is the metric conforming to the
+ // "jvm.buffer.memory.limit" semantic conventions. It represents the measure of
+ // total memory capacity of buffers.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ JvmBufferMemoryLimitName = "jvm.buffer.memory.limit"
+ JvmBufferMemoryLimitUnit = "By"
+ JvmBufferMemoryLimitDescription = "Measure of total memory capacity of buffers."
+
+ // JvmBufferCount is the metric conforming to the "jvm.buffer.count" semantic
+ // conventions. It represents the number of buffers in the pool.
+ // Instrument: updowncounter
+ // Unit: {buffer}
+ // Stability: Experimental
+ JvmBufferCountName = "jvm.buffer.count"
+ JvmBufferCountUnit = "{buffer}"
+ JvmBufferCountDescription = "Number of buffers in the pool."
+
+ // JvmMemoryUsed is the metric conforming to the "jvm.memory.used" semantic
+ // conventions. It represents the measure of memory used.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Stable
+ JvmMemoryUsedName = "jvm.memory.used"
+ JvmMemoryUsedUnit = "By"
+ JvmMemoryUsedDescription = "Measure of memory used."
+
+ // JvmMemoryCommitted is the metric conforming to the "jvm.memory.committed"
+ // semantic conventions. It represents the measure of memory committed.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Stable
+ JvmMemoryCommittedName = "jvm.memory.committed"
+ JvmMemoryCommittedUnit = "By"
+ JvmMemoryCommittedDescription = "Measure of memory committed."
+
+ // JvmMemoryLimit is the metric conforming to the "jvm.memory.limit" semantic
+ // conventions. It represents the measure of max obtainable memory.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Stable
+ JvmMemoryLimitName = "jvm.memory.limit"
+ JvmMemoryLimitUnit = "By"
+ JvmMemoryLimitDescription = "Measure of max obtainable memory."
+
+ // JvmMemoryUsedAfterLastGc is the metric conforming to the
+ // "jvm.memory.used_after_last_gc" semantic conventions. It represents the
+ // measure of memory used, as measured after the most recent garbage collection
+ // event on this pool.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Stable
+ JvmMemoryUsedAfterLastGcName = "jvm.memory.used_after_last_gc"
+ JvmMemoryUsedAfterLastGcUnit = "By"
+ JvmMemoryUsedAfterLastGcDescription = "Measure of memory used, as measured after the most recent garbage collection event on this pool."
+
+ // JvmGcDuration is the metric conforming to the "jvm.gc.duration" semantic
+ // conventions. It represents the duration of JVM garbage collection actions.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ JvmGcDurationName = "jvm.gc.duration"
+ JvmGcDurationUnit = "s"
+ JvmGcDurationDescription = "Duration of JVM garbage collection actions."
+
+ // JvmThreadCount is the metric conforming to the "jvm.thread.count" semantic
+ // conventions. It represents the number of executing platform threads.
+ // Instrument: updowncounter
+ // Unit: {thread}
+ // Stability: Stable
+ JvmThreadCountName = "jvm.thread.count"
+ JvmThreadCountUnit = "{thread}"
+ JvmThreadCountDescription = "Number of executing platform threads."
+
+ // JvmClassLoaded is the metric conforming to the "jvm.class.loaded" semantic
+ // conventions. It represents the number of classes loaded since JVM start.
+ // Instrument: counter
+ // Unit: {class}
+ // Stability: Stable
+ JvmClassLoadedName = "jvm.class.loaded"
+ JvmClassLoadedUnit = "{class}"
+ JvmClassLoadedDescription = "Number of classes loaded since JVM start."
+
+ // JvmClassUnloaded is the metric conforming to the "jvm.class.unloaded"
+ // semantic conventions. It represents the number of classes unloaded since JVM
+ // start.
+ // Instrument: counter
+ // Unit: {class}
+ // Stability: Stable
+ JvmClassUnloadedName = "jvm.class.unloaded"
+ JvmClassUnloadedUnit = "{class}"
+ JvmClassUnloadedDescription = "Number of classes unloaded since JVM start."
+
+ // JvmClassCount is the metric conforming to the "jvm.class.count" semantic
+ // conventions. It represents the number of classes currently loaded.
+ // Instrument: updowncounter
+ // Unit: {class}
+ // Stability: Stable
+ JvmClassCountName = "jvm.class.count"
+ JvmClassCountUnit = "{class}"
+ JvmClassCountDescription = "Number of classes currently loaded."
+
+ // JvmCPUCount is the metric conforming to the "jvm.cpu.count" semantic
+ // conventions. It represents the number of processors available to the Java
+ // virtual machine.
+ // Instrument: updowncounter
+ // Unit: {cpu}
+ // Stability: Stable
+ JvmCPUCountName = "jvm.cpu.count"
+ JvmCPUCountUnit = "{cpu}"
+ JvmCPUCountDescription = "Number of processors available to the Java virtual machine."
+
+ // JvmCPUTime is the metric conforming to the "jvm.cpu.time" semantic
+	// conventions. It represents the CPU time used by the process as reported by
+ // the JVM.
+ // Instrument: counter
+ // Unit: s
+ // Stability: Stable
+ JvmCPUTimeName = "jvm.cpu.time"
+ JvmCPUTimeUnit = "s"
+ JvmCPUTimeDescription = "CPU time used by the process as reported by the JVM."
+
+ // JvmCPURecentUtilization is the metric conforming to the
+ // "jvm.cpu.recent_utilization" semantic conventions. It represents the recent
+ // CPU utilization for the process as reported by the JVM.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Stable
+ JvmCPURecentUtilizationName = "jvm.cpu.recent_utilization"
+ JvmCPURecentUtilizationUnit = "1"
+ JvmCPURecentUtilizationDescription = "Recent CPU utilization for the process as reported by the JVM."
+
+ // MessagingPublishDuration is the metric conforming to the
+ // "messaging.publish.duration" semantic conventions. It represents the
+	// duration of a publish operation.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ MessagingPublishDurationName = "messaging.publish.duration"
+ MessagingPublishDurationUnit = "s"
+ MessagingPublishDurationDescription = "Measures the duration of publish operation."
+
+ // MessagingReceiveDuration is the metric conforming to the
+ // "messaging.receive.duration" semantic conventions. It represents the
+	// duration of a receive operation.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ MessagingReceiveDurationName = "messaging.receive.duration"
+ MessagingReceiveDurationUnit = "s"
+ MessagingReceiveDurationDescription = "Measures the duration of receive operation."
+
+ // MessagingDeliverDuration is the metric conforming to the
+ // "messaging.deliver.duration" semantic conventions. It represents the
+	// duration of a deliver operation.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ MessagingDeliverDurationName = "messaging.deliver.duration"
+ MessagingDeliverDurationUnit = "s"
+ MessagingDeliverDurationDescription = "Measures the duration of deliver operation."
+
+ // MessagingPublishMessages is the metric conforming to the
+ // "messaging.publish.messages" semantic conventions. It represents the
+	// number of published messages.
+ // Instrument: counter
+ // Unit: {message}
+ // Stability: Experimental
+ MessagingPublishMessagesName = "messaging.publish.messages"
+ MessagingPublishMessagesUnit = "{message}"
+ MessagingPublishMessagesDescription = "Measures the number of published messages."
+
+ // MessagingReceiveMessages is the metric conforming to the
+ // "messaging.receive.messages" semantic conventions. It represents the
+	// number of received messages.
+ // Instrument: counter
+ // Unit: {message}
+ // Stability: Experimental
+ MessagingReceiveMessagesName = "messaging.receive.messages"
+ MessagingReceiveMessagesUnit = "{message}"
+ MessagingReceiveMessagesDescription = "Measures the number of received messages."
+
+ // MessagingDeliverMessages is the metric conforming to the
+ // "messaging.deliver.messages" semantic conventions. It represents the
+	// number of delivered messages.
+ // Instrument: counter
+ // Unit: {message}
+ // Stability: Experimental
+ MessagingDeliverMessagesName = "messaging.deliver.messages"
+ MessagingDeliverMessagesUnit = "{message}"
+ MessagingDeliverMessagesDescription = "Measures the number of delivered messages."
+
+ // RPCServerDuration is the metric conforming to the "rpc.server.duration"
+	// semantic conventions. It represents the duration of inbound RPC.
+ // Instrument: histogram
+ // Unit: ms
+ // Stability: Experimental
+ RPCServerDurationName = "rpc.server.duration"
+ RPCServerDurationUnit = "ms"
+ RPCServerDurationDescription = "Measures the duration of inbound RPC."
+
+ // RPCServerRequestSize is the metric conforming to the
+ // "rpc.server.request.size" semantic conventions. It represents the measures
+ // the size of RPC request messages (uncompressed).
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ RPCServerRequestSizeName = "rpc.server.request.size"
+ RPCServerRequestSizeUnit = "By"
+ RPCServerRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)."
+
+ // RPCServerResponseSize is the metric conforming to the
+ // "rpc.server.response.size" semantic conventions. It represents the measures
+ // the size of RPC response messages (uncompressed).
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ RPCServerResponseSizeName = "rpc.server.response.size"
+ RPCServerResponseSizeUnit = "By"
+ RPCServerResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)."
+
+ // RPCServerRequestsPerRPC is the metric conforming to the
+ // "rpc.server.requests_per_rpc" semantic conventions. It represents the
+	// number of messages received per RPC.
+ // Instrument: histogram
+ // Unit: {count}
+ // Stability: Experimental
+ RPCServerRequestsPerRPCName = "rpc.server.requests_per_rpc"
+ RPCServerRequestsPerRPCUnit = "{count}"
+ RPCServerRequestsPerRPCDescription = "Measures the number of messages received per RPC."
+
+ // RPCServerResponsesPerRPC is the metric conforming to the
+ // "rpc.server.responses_per_rpc" semantic conventions. It represents the
+	// number of messages sent per RPC.
+ // Instrument: histogram
+ // Unit: {count}
+ // Stability: Experimental
+ RPCServerResponsesPerRPCName = "rpc.server.responses_per_rpc"
+ RPCServerResponsesPerRPCUnit = "{count}"
+ RPCServerResponsesPerRPCDescription = "Measures the number of messages sent per RPC."
+
+ // RPCClientDuration is the metric conforming to the "rpc.client.duration"
+	// semantic conventions. It represents the duration of outbound RPC.
+ // Instrument: histogram
+ // Unit: ms
+ // Stability: Experimental
+ RPCClientDurationName = "rpc.client.duration"
+ RPCClientDurationUnit = "ms"
+ RPCClientDurationDescription = "Measures the duration of outbound RPC."
+
+ // RPCClientRequestSize is the metric conforming to the
+ // "rpc.client.request.size" semantic conventions. It represents the measures
+ // the size of RPC request messages (uncompressed).
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ RPCClientRequestSizeName = "rpc.client.request.size"
+ RPCClientRequestSizeUnit = "By"
+ RPCClientRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)."
+
+ // RPCClientResponseSize is the metric conforming to the
+ // "rpc.client.response.size" semantic conventions. It represents the measures
+ // the size of RPC response messages (uncompressed).
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ RPCClientResponseSizeName = "rpc.client.response.size"
+ RPCClientResponseSizeUnit = "By"
+ RPCClientResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)."
+
+ // RPCClientRequestsPerRPC is the metric conforming to the
+ // "rpc.client.requests_per_rpc" semantic conventions. It represents the
+	// number of messages received per RPC.
+ // Instrument: histogram
+ // Unit: {count}
+ // Stability: Experimental
+ RPCClientRequestsPerRPCName = "rpc.client.requests_per_rpc"
+ RPCClientRequestsPerRPCUnit = "{count}"
+ RPCClientRequestsPerRPCDescription = "Measures the number of messages received per RPC."
+
+ // RPCClientResponsesPerRPC is the metric conforming to the
+ // "rpc.client.responses_per_rpc" semantic conventions. It represents the
+	// number of messages sent per RPC.
+ // Instrument: histogram
+ // Unit: {count}
+ // Stability: Experimental
+ RPCClientResponsesPerRPCName = "rpc.client.responses_per_rpc"
+ RPCClientResponsesPerRPCUnit = "{count}"
+ RPCClientResponsesPerRPCDescription = "Measures the number of messages sent per RPC."
+
+ // SystemCPUTime is the metric conforming to the "system.cpu.time" semantic
+ // conventions. It represents the seconds each logical CPU spent on each mode.
+ // Instrument: counter
+ // Unit: s
+ // Stability: Experimental
+ SystemCPUTimeName = "system.cpu.time"
+ SystemCPUTimeUnit = "s"
+ SystemCPUTimeDescription = "Seconds each logical CPU spent on each mode"
+
+ // SystemCPUUtilization is the metric conforming to the
+ // "system.cpu.utilization" semantic conventions. It represents the difference
+ // in system.cpu.time since the last measurement, divided by the elapsed time
+ // and number of logical CPUs.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Experimental
+ SystemCPUUtilizationName = "system.cpu.utilization"
+ SystemCPUUtilizationUnit = "1"
+ SystemCPUUtilizationDescription = "Difference in system.cpu.time since the last measurement, divided by the elapsed time and number of logical CPUs"
+
+ // SystemCPUFrequency is the metric conforming to the "system.cpu.frequency"
+	// semantic conventions. It represents the current frequency of the CPU in
+	// Hz.
+ // Instrument: gauge
+ // Unit: {Hz}
+ // Stability: Experimental
+ SystemCPUFrequencyName = "system.cpu.frequency"
+ SystemCPUFrequencyUnit = "{Hz}"
+ SystemCPUFrequencyDescription = "Reports the current frequency of the CPU in Hz"
+
+ // SystemCPUPhysicalCount is the metric conforming to the
+ // "system.cpu.physical.count" semantic conventions. It represents the reports
+ // the number of actual physical processor cores on the hardware.
+ // Instrument: updowncounter
+ // Unit: {cpu}
+ // Stability: Experimental
+ SystemCPUPhysicalCountName = "system.cpu.physical.count"
+ SystemCPUPhysicalCountUnit = "{cpu}"
+ SystemCPUPhysicalCountDescription = "Reports the number of actual physical processor cores on the hardware"
+
+ // SystemCPULogicalCount is the metric conforming to the
+ // "system.cpu.logical.count" semantic conventions. It represents the reports
+ // the number of logical (virtual) processor cores created by the operating
+ // system to manage multitasking.
+ // Instrument: updowncounter
+ // Unit: {cpu}
+ // Stability: Experimental
+ SystemCPULogicalCountName = "system.cpu.logical.count"
+ SystemCPULogicalCountUnit = "{cpu}"
+ SystemCPULogicalCountDescription = "Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking"
+
+ // SystemMemoryUsage is the metric conforming to the "system.memory.usage"
+	// semantic conventions. It represents memory in use by state.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ SystemMemoryUsageName = "system.memory.usage"
+ SystemMemoryUsageUnit = "By"
+ SystemMemoryUsageDescription = "Reports memory in use by state."
+
+ // SystemMemoryLimit is the metric conforming to the "system.memory.limit"
+ // semantic conventions. It represents the total memory available in the
+ // system.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ SystemMemoryLimitName = "system.memory.limit"
+ SystemMemoryLimitUnit = "By"
+ SystemMemoryLimitDescription = "Total memory available in the system."
+
+ // SystemMemoryUtilization is the metric conforming to the
+ // "system.memory.utilization" semantic conventions.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemMemoryUtilizationName = "system.memory.utilization"
+ SystemMemoryUtilizationUnit = "1"
+
+ // SystemPagingUsage is the metric conforming to the "system.paging.usage"
+ // semantic conventions. It represents the unix swap or windows pagefile usage.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ SystemPagingUsageName = "system.paging.usage"
+ SystemPagingUsageUnit = "By"
+ SystemPagingUsageDescription = "Unix swap or windows pagefile usage"
+
+ // SystemPagingUtilization is the metric conforming to the
+ // "system.paging.utilization" semantic conventions.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemPagingUtilizationName = "system.paging.utilization"
+ SystemPagingUtilizationUnit = "1"
+
+ // SystemPagingFaults is the metric conforming to the "system.paging.faults"
+ // semantic conventions.
+ // Instrument: counter
+ // Unit: {fault}
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemPagingFaultsName = "system.paging.faults"
+ SystemPagingFaultsUnit = "{fault}"
+
+ // SystemPagingOperations is the metric conforming to the
+ // "system.paging.operations" semantic conventions.
+ // Instrument: counter
+ // Unit: {operation}
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemPagingOperationsName = "system.paging.operations"
+ SystemPagingOperationsUnit = "{operation}"
+
+ // SystemDiskIo is the metric conforming to the "system.disk.io" semantic
+ // conventions.
+ // Instrument: counter
+ // Unit: By
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemDiskIoName = "system.disk.io"
+ SystemDiskIoUnit = "By"
+
+ // SystemDiskOperations is the metric conforming to the
+ // "system.disk.operations" semantic conventions.
+ // Instrument: counter
+ // Unit: {operation}
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemDiskOperationsName = "system.disk.operations"
+ SystemDiskOperationsUnit = "{operation}"
+
+ // SystemDiskIoTime is the metric conforming to the "system.disk.io_time"
+ // semantic conventions. It represents the time disk spent activated.
+ // Instrument: counter
+ // Unit: s
+ // Stability: Experimental
+ SystemDiskIoTimeName = "system.disk.io_time"
+ SystemDiskIoTimeUnit = "s"
+ SystemDiskIoTimeDescription = "Time disk spent activated"
+
+ // SystemDiskOperationTime is the metric conforming to the
+ // "system.disk.operation_time" semantic conventions. It represents the sum of
+ // the time each operation took to complete.
+ // Instrument: counter
+ // Unit: s
+ // Stability: Experimental
+ SystemDiskOperationTimeName = "system.disk.operation_time"
+ SystemDiskOperationTimeUnit = "s"
+ SystemDiskOperationTimeDescription = "Sum of the time each operation took to complete"
+
+ // SystemDiskMerged is the metric conforming to the "system.disk.merged"
+ // semantic conventions.
+ // Instrument: counter
+ // Unit: {operation}
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemDiskMergedName = "system.disk.merged"
+ SystemDiskMergedUnit = "{operation}"
+
+ // SystemFilesystemUsage is the metric conforming to the
+ // "system.filesystem.usage" semantic conventions.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemFilesystemUsageName = "system.filesystem.usage"
+ SystemFilesystemUsageUnit = "By"
+
+ // SystemFilesystemUtilization is the metric conforming to the
+ // "system.filesystem.utilization" semantic conventions.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemFilesystemUtilizationName = "system.filesystem.utilization"
+ SystemFilesystemUtilizationUnit = "1"
+
+ // SystemNetworkDropped is the metric conforming to the
+ // "system.network.dropped" semantic conventions. It represents the count of
+ // packets that are dropped or discarded even though there was no error.
+ // Instrument: counter
+ // Unit: {packet}
+ // Stability: Experimental
+ SystemNetworkDroppedName = "system.network.dropped"
+ SystemNetworkDroppedUnit = "{packet}"
+ SystemNetworkDroppedDescription = "Count of packets that are dropped or discarded even though there was no error"
+
+ // SystemNetworkPackets is the metric conforming to the
+ // "system.network.packets" semantic conventions.
+ // Instrument: counter
+ // Unit: {packet}
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemNetworkPacketsName = "system.network.packets"
+ SystemNetworkPacketsUnit = "{packet}"
+
+ // SystemNetworkErrors is the metric conforming to the "system.network.errors"
+ // semantic conventions. It represents the count of network errors detected.
+ // Instrument: counter
+ // Unit: {error}
+ // Stability: Experimental
+ SystemNetworkErrorsName = "system.network.errors"
+ SystemNetworkErrorsUnit = "{error}"
+ SystemNetworkErrorsDescription = "Count of network errors detected"
+
+ // SystemNetworkIo is the metric conforming to the "system.network.io" semantic
+ // conventions.
+ // Instrument: counter
+ // Unit: By
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemNetworkIoName = "system.network.io"
+ SystemNetworkIoUnit = "By"
+
+ // SystemNetworkConnections is the metric conforming to the
+ // "system.network.connections" semantic conventions.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemNetworkConnectionsName = "system.network.connections"
+ SystemNetworkConnectionsUnit = "{connection}"
+
+ // SystemProcessesCount is the metric conforming to the
+ // "system.processes.count" semantic conventions. It represents the total
+ // number of processes in each state.
+ // Instrument: updowncounter
+ // Unit: {process}
+ // Stability: Experimental
+ SystemProcessesCountName = "system.processes.count"
+ SystemProcessesCountUnit = "{process}"
+ SystemProcessesCountDescription = "Total number of processes in each state"
+
+ // SystemProcessesCreated is the metric conforming to the
+ // "system.processes.created" semantic conventions. It represents the total
+ // number of processes created over uptime of the host.
+ // Instrument: counter
+ // Unit: {process}
+ // Stability: Experimental
+ SystemProcessesCreatedName = "system.processes.created"
+ SystemProcessesCreatedUnit = "{process}"
+ SystemProcessesCreatedDescription = "Total number of processes created over uptime of the host"
+
+ // SystemLinuxMemoryAvailable is the metric conforming to the
+ // "system.linux.memory.available" semantic conventions. It represents an
+ // estimate of how much memory is available for starting new applications,
+ // without causing swapping.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ SystemLinuxMemoryAvailableName = "system.linux.memory.available"
+ SystemLinuxMemoryAvailableUnit = "By"
+ SystemLinuxMemoryAvailableDescription = "An estimate of how much memory is available for starting new applications, without causing swapping"
+)
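
Each metric above is exported as a Name/Unit/Description triple that is meant to be
passed straight to the metric API when the corresponding instrument is created. A
minimal sketch of that wiring, assuming an application-side meter named "example"
(with no SDK configured, otel.Meter returns a no-op meter, so the sketch is safe to
run):

package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func main() {
	meter := otel.Meter("example")

	// http.server.active_requests is declared as an updowncounter above, so it
	// maps to an Int64UpDownCounter; the generated constants supply the
	// instrument's name, unit, and description.
	active, err := meter.Int64UpDownCounter(
		semconv.HTTPServerActiveRequestsName,
		metric.WithUnit(semconv.HTTPServerActiveRequestsUnit),
		metric.WithDescription(semconv.HTTPServerActiveRequestsDescription),
	)
	if err != nil {
		log.Fatal(err)
	}

	ctx := context.Background()
	active.Add(ctx, 1)  // request started
	active.Add(ctx, -1) // request finished
}
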
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go
new file mode 100644
index 0000000000000..d66bbe9c23df9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go
@@ -0,0 +1,2545 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// A cloud environment (e.g. GCP, Azure, AWS).
+const (
+ // CloudAccountIDKey is the attribute Key conforming to the
+ // "cloud.account.id" semantic conventions. It represents the cloud account
+ // ID the resource is assigned to.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '111111111111', 'opentelemetry'
+ CloudAccountIDKey = attribute.Key("cloud.account.id")
+
+ // CloudAvailabilityZoneKey is the attribute Key conforming to the
+ // "cloud.availability_zone" semantic conventions. It represents the cloud
+ // regions often have multiple, isolated locations known as zones to
+ // increase availability. Availability zone represents the zone where the
+ // resource is running.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'us-east-1c'
+ // Note: Availability zones are called "zones" on Alibaba Cloud and Google
+ // Cloud.
+ CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+
+ // CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
+ // semantic conventions. It represents the cloud platform in use.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: The prefix of the service SHOULD match the one specified in
+ // `cloud.provider`.
+ CloudPlatformKey = attribute.Key("cloud.platform")
+
+ // CloudProviderKey is the attribute Key conforming to the "cloud.provider"
+ // semantic conventions. It represents the name of the cloud provider.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ CloudProviderKey = attribute.Key("cloud.provider")
+
+ // CloudRegionKey is the attribute Key conforming to the "cloud.region"
+ // semantic conventions. It represents the geographical region the resource
+ // is running.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'us-central1', 'us-east-1'
+ // Note: Refer to your provider's docs to see the available regions, for
+ // example [Alibaba Cloud
+ // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS
+ // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/),
+ // [Azure
+ // regions](https://azure.microsoft.com/global-infrastructure/geographies/),
+ // [Google Cloud regions](https://cloud.google.com/about/locations), or
+ // [Tencent Cloud
+ // regions](https://www.tencentcloud.com/document/product/213/6091).
+ CloudRegionKey = attribute.Key("cloud.region")
+
+ // CloudResourceIDKey is the attribute Key conforming to the
+ // "cloud.resource_id" semantic conventions. It represents the cloud
+ // provider-specific native identifier of the monitored cloud resource
+ // (e.g. an
+ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+ // on AWS, a [fully qualified resource
+ // ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id)
+ // on Azure, a [full resource
+ // name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+ // on GCP)
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function',
+ // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID',
+	// '/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>'
+ // Note: On some cloud providers, it may not be possible to determine the
+ // full ID at startup,
+ // so it may be necessary to set `cloud.resource_id` as a span attribute
+ // instead.
+ //
+ // The exact value to use for `cloud.resource_id` depends on the cloud
+ // provider.
+ // The following well-known definitions MUST be used if you set this
+ // attribute and they apply:
+ //
+ // * **AWS Lambda:** The function
+ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
+ // Take care not to use the "invoked ARN" directly but replace any
+ // [alias
+ // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html)
+ // with the resolved function version, as the same runtime instance may
+ // be invokable with
+ // multiple different aliases.
+ // * **GCP:** The [URI of the
+ // resource](https://cloud.google.com/iam/docs/full-resource-names)
+ // * **Azure:** The [Fully Qualified Resource
+ // ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id)
+ // of the invoked function,
+ // *not* the function app, having the form
+	// `/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>`.
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider.
+ CloudResourceIDKey = attribute.Key("cloud.resource_id")
+)
+
+var (
+ // Alibaba Cloud Elastic Compute Service
+ CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
+ // Alibaba Cloud Function Compute
+ CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc")
+ // Red Hat OpenShift on Alibaba Cloud
+ CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift")
+ // AWS Elastic Compute Cloud
+ CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
+ // AWS Elastic Container Service
+ CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
+ // AWS Elastic Kubernetes Service
+ CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
+ // AWS Lambda
+ CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
+ // AWS Elastic Beanstalk
+ CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
+ // AWS App Runner
+ CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
+ // Red Hat OpenShift on AWS (ROSA)
+ CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift")
+ // Azure Virtual Machines
+ CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
+ // Azure Container Instances
+ CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
+ // Azure Kubernetes Service
+ CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
+ // Azure Functions
+ CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
+ // Azure App Service
+ CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
+ // Azure Red Hat OpenShift
+ CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift")
+ // Google Bare Metal Solution (BMS)
+ CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution")
+ // Google Cloud Compute Engine (GCE)
+ CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
+ // Google Cloud Run
+ CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
+ // Google Cloud Kubernetes Engine (GKE)
+ CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
+ // Google Cloud Functions (GCF)
+ CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
+ // Google Cloud App Engine (GAE)
+ CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
+ // Red Hat OpenShift on Google Cloud
+ CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift")
+ // Red Hat OpenShift on IBM Cloud
+ CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift")
+ // Tencent Cloud Cloud Virtual Machine (CVM)
+ CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm")
+ // Tencent Cloud Elastic Kubernetes Service (EKS)
+ CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
+ // Tencent Cloud Serverless Cloud Function (SCF)
+ CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf")
+)
+
+var (
+ // Alibaba Cloud
+ CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ CloudProviderAWS = CloudProviderKey.String("aws")
+ // Microsoft Azure
+ CloudProviderAzure = CloudProviderKey.String("azure")
+ // Google Cloud Platform
+ CloudProviderGCP = CloudProviderKey.String("gcp")
+ // Heroku Platform as a Service
+ CloudProviderHeroku = CloudProviderKey.String("heroku")
+ // IBM Cloud
+ CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud")
+ // Tencent Cloud
+ CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
+)
+
+// CloudAccountID returns an attribute KeyValue conforming to the
+// "cloud.account.id" semantic conventions. It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+ return CloudAccountIDKey.String(val)
+}
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. It represents the cloud
+// regions often have multiple, isolated locations known as zones to increase
+// availability. Availability zone represents the zone where the resource is
+// running.
+func CloudAvailabilityZone(val string) attribute.KeyValue {
+ return CloudAvailabilityZoneKey.String(val)
+}
+
+// CloudRegion returns an attribute KeyValue conforming to the
+// "cloud.region" semantic conventions. It represents the geographical region
+// the resource is running.
+func CloudRegion(val string) attribute.KeyValue {
+ return CloudRegionKey.String(val)
+}
+
+// CloudResourceID returns an attribute KeyValue conforming to the
+// "cloud.resource_id" semantic conventions. It represents the cloud
+// provider-specific native identifier of the monitored cloud resource (e.g. an
+// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+// on AWS, a [fully qualified resource
+// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) on
+// Azure, a [full resource
+// name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+// on GCP)
+func CloudResourceID(val string) attribute.KeyValue {
+ return CloudResourceIDKey.String(val)
+}
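
Each attribute group in this file follows the same generated pattern: a Key constant
per attribute, predefined KeyValues for enum members, and a constructor helper per
free-form value. A minimal sketch attaching the cloud attributes above to an SDK
resource; the region and account values are illustrative, and semconv.SchemaURL is
the schema constant defined alongside these declarations in the same package:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func main() {
	res := resource.NewWithAttributes(
		semconv.SchemaURL,                      // schema the attributes conform to
		semconv.CloudProviderAWS,               // enum member defined above
		semconv.CloudPlatformAWSEKS,            // enum member defined above
		semconv.CloudRegion("us-east-1"),       // constructor helper, illustrative value
		semconv.CloudAccountID("111111111111"), // constructor helper, illustrative value
	)
	fmt.Println(res)
}
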
+
+// A container instance.
+const (
+ // ContainerCommandKey is the attribute Key conforming to the
+ // "container.command" semantic conventions. It represents the command used
+ // to run the container (i.e. the command name).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'otelcontribcol'
+ // Note: If using embedded credentials or sensitive data, it is recommended
+ // to remove them to prevent potential leakage.
+ ContainerCommandKey = attribute.Key("container.command")
+
+ // ContainerCommandArgsKey is the attribute Key conforming to the
+ // "container.command_args" semantic conventions. It represents the all the
+ // command arguments (including the command/executable itself) run by the
+ // container. [2]
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'otelcontribcol, --config, config.yaml'
+ ContainerCommandArgsKey = attribute.Key("container.command_args")
+
+ // ContainerCommandLineKey is the attribute Key conforming to the
+ // "container.command_line" semantic conventions. It represents the full
+ // command run by the container as a single string representing the full
+ // command. [2]
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'otelcontribcol --config config.yaml'
+ ContainerCommandLineKey = attribute.Key("container.command_line")
+
+ // ContainerIDKey is the attribute Key conforming to the "container.id"
+ // semantic conventions. It represents the container ID. Usually a UUID, as
+ // for example used to [identify Docker
+ // containers](https://docs.docker.com/engine/reference/run/#container-identification).
+ // The UUID might be abbreviated.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'a3bf90e006b2'
+ ContainerIDKey = attribute.Key("container.id")
+
+ // ContainerImageIDKey is the attribute Key conforming to the
+ // "container.image.id" semantic conventions. It represents the runtime
+ // specific image identifier. Usually a hash algorithm followed by a UUID.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f'
+ // Note: Docker defines a sha256 of the image id; `container.image.id`
+ // corresponds to the `Image` field from the Docker container inspect
+ // [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect)
+ // endpoint.
+ // K8S defines a link to the container registry repository with digest
+ // `"imageID": "registry.azurecr.io
+ // /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`.
+	// The ID is assigned by the container runtime and can vary in different
+ // environments. Consider using `oci.manifest.digest` if it is important to
+ // identify the same image in different environments/runtimes.
+ ContainerImageIDKey = attribute.Key("container.image.id")
+
+ // ContainerImageNameKey is the attribute Key conforming to the
+ // "container.image.name" semantic conventions. It represents the name of
+ // the image the container was built on.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'gcr.io/opentelemetry/operator'
+ ContainerImageNameKey = attribute.Key("container.image.name")
+
+ // ContainerImageRepoDigestsKey is the attribute Key conforming to the
+ // "container.image.repo_digests" semantic conventions. It represents the
+ // repo digests of the container image as provided by the container
+ // runtime.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb',
+ // 'internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578'
+ // Note:
+ // [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect)
+ // and
+ // [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238)
+ // report those under the `RepoDigests` field.
+ ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests")
+
+ // ContainerImageTagsKey is the attribute Key conforming to the
+ // "container.image.tags" semantic conventions. It represents the container
+ // image tags. An example can be found in [Docker Image
+ // Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
+ // Should be only the `<tag>` section of the full name for example from
+ // `registry.example.com/my-org/my-image:<tag>`.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'v1.27.1', '3.5.7-0'
+ ContainerImageTagsKey = attribute.Key("container.image.tags")
+
+ // ContainerNameKey is the attribute Key conforming to the "container.name"
+ // semantic conventions. It represents the container name used by container
+ // runtime.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry-autoconf'
+ ContainerNameKey = attribute.Key("container.name")
+
+ // ContainerRuntimeKey is the attribute Key conforming to the
+ // "container.runtime" semantic conventions. It represents the container
+ // runtime managing this container.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'docker', 'containerd', 'rkt'
+ ContainerRuntimeKey = attribute.Key("container.runtime")
+)
+
+// ContainerCommand returns an attribute KeyValue conforming to the
+// "container.command" semantic conventions. It represents the command used to
+// run the container (i.e. the command name).
+func ContainerCommand(val string) attribute.KeyValue {
+ return ContainerCommandKey.String(val)
+}
+
+// ContainerCommandArgs returns an attribute KeyValue conforming to the
+// "container.command_args" semantic conventions. It represents the all the
+// command arguments (including the command/executable itself) run by the
+// container. [2]
+func ContainerCommandArgs(val ...string) attribute.KeyValue {
+ return ContainerCommandArgsKey.StringSlice(val)
+}
+
+// ContainerCommandLine returns an attribute KeyValue conforming to the
+// "container.command_line" semantic conventions. It represents the full
+// command run by the container as a single string representing the full
+// command. [2]
+func ContainerCommandLine(val string) attribute.KeyValue {
+ return ContainerCommandLineKey.String(val)
+}
+
+// ContainerID returns an attribute KeyValue conforming to the
+// "container.id" semantic conventions. It represents the container ID. Usually
+// a UUID, as for example used to [identify Docker
+// containers](https://docs.docker.com/engine/reference/run/#container-identification).
+// The UUID might be abbreviated.
+func ContainerID(val string) attribute.KeyValue {
+ return ContainerIDKey.String(val)
+}
+
+// ContainerImageID returns an attribute KeyValue conforming to the
+// "container.image.id" semantic conventions. It represents the runtime
+// specific image identifier. Usually a hash algorithm followed by a UUID.
+func ContainerImageID(val string) attribute.KeyValue {
+ return ContainerImageIDKey.String(val)
+}
+
+// ContainerImageName returns an attribute KeyValue conforming to the
+// "container.image.name" semantic conventions. It represents the name of the
+// image the container was built on.
+func ContainerImageName(val string) attribute.KeyValue {
+ return ContainerImageNameKey.String(val)
+}
+
+// ContainerImageRepoDigests returns an attribute KeyValue conforming to the
+// "container.image.repo_digests" semantic conventions. It represents the repo
+// digests of the container image as provided by the container runtime.
+func ContainerImageRepoDigests(val ...string) attribute.KeyValue {
+ return ContainerImageRepoDigestsKey.StringSlice(val)
+}
+
+// ContainerImageTags returns an attribute KeyValue conforming to the
+// "container.image.tags" semantic conventions. It represents the container
+// image tags. An example can be found in [Docker Image
+// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
+// Should be only the `<tag>` section of the full name for example from
+// `registry.example.com/my-org/my-image:<tag>`.
+func ContainerImageTags(val ...string) attribute.KeyValue {
+ return ContainerImageTagsKey.StringSlice(val)
+}
+
+// ContainerName returns an attribute KeyValue conforming to the
+// "container.name" semantic conventions. It represents the container name used
+// by container runtime.
+func ContainerName(val string) attribute.KeyValue {
+ return ContainerNameKey.String(val)
+}
+
+// ContainerRuntime returns an attribute KeyValue conforming to the
+// "container.runtime" semantic conventions. It represents the container
+// runtime managing this container.
+func ContainerRuntime(val string) attribute.KeyValue {
+ return ContainerRuntimeKey.String(val)
+}
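
A sketch of combining the container attributes above with the SDK's default
resource. The attribute values are lifted from the examples in the comments and are
illustrative only; resource.Merge gives the second resource precedence on
conflicting keys and returns an error when the two resources declare conflicting
schema URLs:

package main

import (
	"log"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func main() {
	containerRes := resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ContainerID("a3bf90e006b2"),
		semconv.ContainerName("opentelemetry-autoconf"),
		semconv.ContainerRuntime("docker"),
	)

	// Default() carries process/SDK attributes; the container resource is
	// layered on top of it.
	res, err := resource.Merge(resource.Default(), containerRes)
	if err != nil {
		log.Fatal(err)
	}
	_ = res
}
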
+
+// Describes device attributes.
+const (
+ // DeviceIDKey is the attribute Key conforming to the "device.id" semantic
+ // conventions. It represents a unique identifier representing the device
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092'
+ // Note: The device identifier MUST only be defined using the values
+ // outlined below. This value is not an advertising identifier and MUST NOT
+ // be used as such. On iOS (Swift or Objective-C), this value MUST be equal
+ // to the [vendor
+ // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor).
+ // On Android (Java or Kotlin), this value MUST be equal to the Firebase
+ // Installation ID or a globally unique UUID which is persisted across
+ // sessions in your application. More information can be found
+ // [here](https://developer.android.com/training/articles/user-data-ids) on
+ // best practices and exact implementation details. Caution should be taken
+ // when storing personal data or anything which can identify a user. GDPR
+ // and data protection laws may apply, ensure you do your own due
+ // diligence.
+ DeviceIDKey = attribute.Key("device.id")
+
+ // DeviceManufacturerKey is the attribute Key conforming to the
+ // "device.manufacturer" semantic conventions. It represents the name of
+ // the device manufacturer
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Apple', 'Samsung'
+ // Note: The Android OS provides this field via
+ // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER).
+ // iOS apps SHOULD hardcode the value `Apple`.
+ DeviceManufacturerKey = attribute.Key("device.manufacturer")
+
+ // DeviceModelIdentifierKey is the attribute Key conforming to the
+ // "device.model.identifier" semantic conventions. It represents the model
+ // identifier for the device
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'iPhone3,4', 'SM-G920F'
+ // Note: It's recommended this value represents a machine-readable version
+ // of the model identifier rather than the market or consumer-friendly name
+ // of the device.
+ DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
+
+ // DeviceModelNameKey is the attribute Key conforming to the
+ // "device.model.name" semantic conventions. It represents the marketing
+ // name for the device model
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6'
+ // Note: It's recommended this value represents a human-readable version of
+ // the device model rather than a machine-readable alternative.
+ DeviceModelNameKey = attribute.Key("device.model.name")
+)
+
+// DeviceID returns an attribute KeyValue conforming to the "device.id"
+// semantic conventions. It represents a unique identifier representing the
+// device
+func DeviceID(val string) attribute.KeyValue {
+ return DeviceIDKey.String(val)
+}
+
+// DeviceManufacturer returns an attribute KeyValue conforming to the
+// "device.manufacturer" semantic conventions. It represents the name of the
+// device manufacturer
+func DeviceManufacturer(val string) attribute.KeyValue {
+ return DeviceManufacturerKey.String(val)
+}
+
+// DeviceModelIdentifier returns an attribute KeyValue conforming to the
+// "device.model.identifier" semantic conventions. It represents the model
+// identifier for the device
+func DeviceModelIdentifier(val string) attribute.KeyValue {
+ return DeviceModelIdentifierKey.String(val)
+}
+
+// DeviceModelName returns an attribute KeyValue conforming to the
+// "device.model.name" semantic conventions. It represents the marketing name
+// for the device model
+func DeviceModelName(val string) attribute.KeyValue {
+ return DeviceModelNameKey.String(val)
+}
+
+// A host is defined as a computing instance. For example, physical servers,
+// virtual machines, switches, or disk arrays.
+const (
+ // HostArchKey is the attribute Key conforming to the "host.arch" semantic
+ // conventions. It represents the CPU architecture the host system is
+ // running on.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ HostArchKey = attribute.Key("host.arch")
+
+ // HostCPUCacheL2SizeKey is the attribute Key conforming to the
+ // "host.cpu.cache.l2.size" semantic conventions. It represents the amount
+ // of level 2 memory cache available to the processor (in Bytes).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 12288000
+ HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size")
+
+ // HostCPUFamilyKey is the attribute Key conforming to the
+ // "host.cpu.family" semantic conventions. It represents the family or
+ // generation of the CPU.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '6', 'PA-RISC 1.1e'
+ HostCPUFamilyKey = attribute.Key("host.cpu.family")
+
+ // HostCPUModelIDKey is the attribute Key conforming to the
+ // "host.cpu.model.id" semantic conventions. It represents the model
+ // identifier. It provides more granular information about the CPU,
+ // distinguishing it from other CPUs within the same family.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '6', '9000/778/B180L'
+ HostCPUModelIDKey = attribute.Key("host.cpu.model.id")
+
+ // HostCPUModelNameKey is the attribute Key conforming to the
+ // "host.cpu.model.name" semantic conventions. It represents the model
+ // designation of the processor.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz'
+ HostCPUModelNameKey = attribute.Key("host.cpu.model.name")
+
+ // HostCPUSteppingKey is the attribute Key conforming to the
+ // "host.cpu.stepping" semantic conventions. It represents the stepping or
+ // core revisions.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1
+ HostCPUSteppingKey = attribute.Key("host.cpu.stepping")
+
+ // HostCPUVendorIDKey is the attribute Key conforming to the
+ // "host.cpu.vendor.id" semantic conventions. It represents the processor
+ // manufacturer identifier. A maximum 12-character string.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'GenuineIntel'
+ // Note: [CPUID](https://wiki.osdev.org/CPUID) command returns the vendor
+ // ID string in EBX, EDX and ECX registers. Writing these to memory in this
+ // order results in a 12-character string.
+ HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id")
+
+ // HostIDKey is the attribute Key conforming to the "host.id" semantic
+ // conventions. It represents the unique host ID. For Cloud, this must be
+ // the instance_id assigned by the cloud provider. For non-containerized
+ // systems, this should be the `machine-id`. See the table below for the
+ // sources to use to determine the `machine-id` based on operating system.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'fdbf79e8af94cb7f9e8df36789187052'
+ HostIDKey = attribute.Key("host.id")
+
+ // HostImageIDKey is the attribute Key conforming to the "host.image.id"
+	// semantic conventions. It represents the VM image ID or host OS image ID.
+ // For Cloud, this value is from the provider.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'ami-07b06b442921831e5'
+ HostImageIDKey = attribute.Key("host.image.id")
+
+ // HostImageNameKey is the attribute Key conforming to the
+ // "host.image.name" semantic conventions. It represents the name of the VM
+ // image or OS install the host was instantiated from.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
+ HostImageNameKey = attribute.Key("host.image.name")
+
+ // HostImageVersionKey is the attribute Key conforming to the
+ // "host.image.version" semantic conventions. It represents the version
+ // string of the VM image or host OS as defined in [Version
+ // Attributes](/docs/resource/README.md#version-attributes).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '0.1'
+ HostImageVersionKey = attribute.Key("host.image.version")
+
+ // HostIPKey is the attribute Key conforming to the "host.ip" semantic
+ // conventions. It represents the available IP addresses of the host,
+ // excluding loopback interfaces.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e'
+ // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6
+ // addresses MUST be specified in the [RFC
+ // 5952](https://www.rfc-editor.org/rfc/rfc5952.html) format.
+ HostIPKey = attribute.Key("host.ip")
+
+ // HostMacKey is the attribute Key conforming to the "host.mac" semantic
+ // conventions. It represents the available MAC addresses of the host,
+ // excluding loopback interfaces.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F'
+ // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal
+ // form](https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf):
+ // as hyphen-separated octets in uppercase hexadecimal form from most to
+ // least significant.
+ HostMacKey = attribute.Key("host.mac")
+
+ // HostNameKey is the attribute Key conforming to the "host.name" semantic
+ // conventions. It represents the name of the host. On Unix systems, it may
+ // contain what the hostname command returns, or the fully qualified
+ // hostname, or another name specified by the user.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry-test'
+ HostNameKey = attribute.Key("host.name")
+
+ // HostTypeKey is the attribute Key conforming to the "host.type" semantic
+ // conventions. It represents the type of host. For Cloud, this must be the
+ // machine type.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'n1-standard-1'
+ HostTypeKey = attribute.Key("host.type")
+)
+
+var (
+ // AMD64
+ HostArchAMD64 = HostArchKey.String("amd64")
+ // ARM32
+ HostArchARM32 = HostArchKey.String("arm32")
+ // ARM64
+ HostArchARM64 = HostArchKey.String("arm64")
+ // Itanium
+ HostArchIA64 = HostArchKey.String("ia64")
+ // 32-bit PowerPC
+ HostArchPPC32 = HostArchKey.String("ppc32")
+ // 64-bit PowerPC
+ HostArchPPC64 = HostArchKey.String("ppc64")
+ // IBM z/Architecture
+ HostArchS390x = HostArchKey.String("s390x")
+ // 32-bit x86
+ HostArchX86 = HostArchKey.String("x86")
+)
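
Populating host.arch is left to resource detectors; a hedged sketch that maps Go's
runtime.GOARCH onto the enum members above. The mapping table is an assumption made
for illustration and is not part of the generated package:

package main

import (
	"fmt"
	"runtime"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// hostArch returns the host.arch KeyValue for the current process, reporting
// false when GOARCH has no counterpart in the enum.
func hostArch() (attribute.KeyValue, bool) {
	switch runtime.GOARCH {
	case "amd64":
		return semconv.HostArchAMD64, true
	case "arm":
		return semconv.HostArchARM32, true
	case "arm64":
		return semconv.HostArchARM64, true
	case "386":
		return semconv.HostArchX86, true
	case "ppc64", "ppc64le": // ppc64le folded into ppc64 for illustration
		return semconv.HostArchPPC64, true
	case "s390x":
		return semconv.HostArchS390x, true
	default:
		return attribute.KeyValue{}, false
	}
}

func main() {
	if kv, ok := hostArch(); ok {
		fmt.Println(kv.Key, kv.Value.AsString())
	}
}
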
+
+// HostCPUCacheL2Size returns an attribute KeyValue conforming to the
+// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of
+// level 2 memory cache available to the processor (in Bytes).
+func HostCPUCacheL2Size(val int) attribute.KeyValue {
+ return HostCPUCacheL2SizeKey.Int(val)
+}
+
+// HostCPUFamily returns an attribute KeyValue conforming to the
+// "host.cpu.family" semantic conventions. It represents the family or
+// generation of the CPU.
+func HostCPUFamily(val string) attribute.KeyValue {
+ return HostCPUFamilyKey.String(val)
+}
+
+// HostCPUModelID returns an attribute KeyValue conforming to the
+// "host.cpu.model.id" semantic conventions. It represents the model
+// identifier. It provides more granular information about the CPU,
+// distinguishing it from other CPUs within the same family.
+func HostCPUModelID(val string) attribute.KeyValue {
+ return HostCPUModelIDKey.String(val)
+}
+
+// HostCPUModelName returns an attribute KeyValue conforming to the
+// "host.cpu.model.name" semantic conventions. It represents the model
+// designation of the processor.
+func HostCPUModelName(val string) attribute.KeyValue {
+ return HostCPUModelNameKey.String(val)
+}
+
+// HostCPUStepping returns an attribute KeyValue conforming to the
+// "host.cpu.stepping" semantic conventions. It represents the stepping or core
+// revisions.
+func HostCPUStepping(val int) attribute.KeyValue {
+ return HostCPUSteppingKey.Int(val)
+}
+
+// HostCPUVendorID returns an attribute KeyValue conforming to the
+// "host.cpu.vendor.id" semantic conventions. It represents the processor
+// manufacturer identifier. A maximum 12-character string.
+func HostCPUVendorID(val string) attribute.KeyValue {
+ return HostCPUVendorIDKey.String(val)
+}
+
+// HostID returns an attribute KeyValue conforming to the "host.id" semantic
+// conventions. It represents the unique host ID. For Cloud, this must be the
+// instance_id assigned by the cloud provider. For non-containerized systems,
+// this should be the `machine-id`. See the table below for the sources to use
+// to determine the `machine-id` based on operating system.
+func HostID(val string) attribute.KeyValue {
+ return HostIDKey.String(val)
+}
+
+// HostImageID returns an attribute KeyValue conforming to the
+// "host.image.id" semantic conventions. It represents the vM image ID or host
+// OS image ID. For Cloud, this value is from the provider.
+func HostImageID(val string) attribute.KeyValue {
+ return HostImageIDKey.String(val)
+}
+
+// HostImageName returns an attribute KeyValue conforming to the
+// "host.image.name" semantic conventions. It represents the name of the VM
+// image or OS install the host was instantiated from.
+func HostImageName(val string) attribute.KeyValue {
+ return HostImageNameKey.String(val)
+}
+
+// HostImageVersion returns an attribute KeyValue conforming to the
+// "host.image.version" semantic conventions. It represents the version string
+// of the VM image or host OS as defined in [Version
+// Attributes](/docs/resource/README.md#version-attributes).
+func HostImageVersion(val string) attribute.KeyValue {
+ return HostImageVersionKey.String(val)
+}
+
+// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic
+// conventions. It represents the available IP addresses of the host, excluding
+// loopback interfaces.
+func HostIP(val ...string) attribute.KeyValue {
+ return HostIPKey.StringSlice(val)
+}
+
+// HostMac returns an attribute KeyValue conforming to the "host.mac"
+// semantic conventions. It represents the available MAC addresses of the host,
+// excluding loopback interfaces.
+func HostMac(val ...string) attribute.KeyValue {
+ return HostMacKey.StringSlice(val)
+}
+
+// HostName returns an attribute KeyValue conforming to the "host.name"
+// semantic conventions. It represents the name of the host. On Unix systems,
+// it may contain what the hostname command returns, or the fully qualified
+// hostname, or another name specified by the user.
+func HostName(val string) attribute.KeyValue {
+ return HostNameKey.String(val)
+}
+
+// HostType returns an attribute KeyValue conforming to the "host.type"
+// semantic conventions. It represents the type of host. For Cloud, this must
+// be the machine type.
+func HostType(val string) attribute.KeyValue {
+ return HostTypeKey.String(val)
+}
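+
+// A minimal usage sketch (illustrative, not generated from the conventions):
+// the host helpers above are typically combined into an SDK resource. It
+// assumes this package is imported as `semconv` and the OpenTelemetry SDK
+// resource package ("go.opentelemetry.io/otel/sdk/resource") as `resource`;
+// the values are illustrative.
+//
+//	res := resource.NewWithAttributes(
+//		semconv.SchemaURL,
+//		semconv.HostName("opentelemetry-test"),
+//		semconv.HostArchAMD64, // enum members are attribute.KeyValue and need no constructor
+//		semconv.HostIP("192.168.1.140", "fe80::abc2:4a28:737a:609e"),
+//	)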
+
+// Kubernetes resource attributes.
+const (
+ // K8SClusterNameKey is the attribute Key conforming to the
+ // "k8s.cluster.name" semantic conventions. It represents the name of the
+ // cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry-cluster'
+ K8SClusterNameKey = attribute.Key("k8s.cluster.name")
+
+ // K8SClusterUIDKey is the attribute Key conforming to the
+ // "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for
+ // the cluster, set to the UID of the `kube-system` namespace.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d'
+ // Note: K8S doesn't have support for obtaining a cluster ID. If this is
+ // ever
+ // added, we will recommend collecting the `k8s.cluster.uid` through the
+ // official APIs. In the meantime, we are able to use the `uid` of the
+ // `kube-system` namespace as a proxy for cluster ID. Read on for the
+ // rationale.
+ //
+ // Every object created in a K8S cluster is assigned a distinct UID. The
+ // `kube-system` namespace is used by Kubernetes itself and will exist
+ // for the lifetime of the cluster. Using the `uid` of the `kube-system`
+ // namespace is a reasonable proxy for the K8S ClusterID as it will only
+ // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are
+ // UUIDs as standardized by
+ // [ISO/IEC 9834-8 and ITU-T
+ // X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html).
+ // Which states:
+ //
+ // > If generated according to one of the mechanisms defined in Rec.
+ // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be
+ // different from all other UUIDs generated before 3603 A.D., or is
+ // extremely likely to be different (depending on the mechanism chosen).
+ //
+ // Therefore, UIDs between clusters should be extremely unlikely to
+ // conflict.
+ K8SClusterUIDKey = attribute.Key("k8s.cluster.uid")
+
+ // K8SContainerNameKey is the attribute Key conforming to the
+ // "k8s.container.name" semantic conventions. It represents the name of the
+ // Container from Pod specification, must be unique within a Pod. Container
+ // runtime usually uses different globally unique name (`container.name`).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'redis'
+ K8SContainerNameKey = attribute.Key("k8s.container.name")
+
+ // K8SContainerRestartCountKey is the attribute Key conforming to the
+ // "k8s.container.restart_count" semantic conventions. It represents the
+ // number of times the container was restarted. This attribute can be used
+ // to identify a particular container (running or stopped) within a
+ // container spec.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 0, 2
+ K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
+
+ // K8SCronJobNameKey is the attribute Key conforming to the
+ // "k8s.cronjob.name" semantic conventions. It represents the name of the
+ // CronJob.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
+
+ // K8SCronJobUIDKey is the attribute Key conforming to the
+ // "k8s.cronjob.uid" semantic conventions. It represents the UID of the
+ // CronJob.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
+
+ // K8SDaemonSetNameKey is the attribute Key conforming to the
+ // "k8s.daemonset.name" semantic conventions. It represents the name of the
+ // DaemonSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
+
+ // K8SDaemonSetUIDKey is the attribute Key conforming to the
+ // "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+ // DaemonSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
+
+ // K8SDeploymentNameKey is the attribute Key conforming to the
+ // "k8s.deployment.name" semantic conventions. It represents the name of
+ // the Deployment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
+
+ // K8SDeploymentUIDKey is the attribute Key conforming to the
+ // "k8s.deployment.uid" semantic conventions. It represents the UID of the
+ // Deployment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
+
+ // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name"
+ // semantic conventions. It represents the name of the Job.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ K8SJobNameKey = attribute.Key("k8s.job.name")
+
+ // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid"
+ // semantic conventions. It represents the UID of the Job.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SJobUIDKey = attribute.Key("k8s.job.uid")
+
+ // K8SNamespaceNameKey is the attribute Key conforming to the
+ // "k8s.namespace.name" semantic conventions. It represents the name of the
+ // namespace that the pod is running in.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'default'
+ K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
+
+ // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name"
+ // semantic conventions. It represents the name of the Node.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'node-1'
+ K8SNodeNameKey = attribute.Key("k8s.node.name")
+
+ // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid"
+ // semantic conventions. It represents the UID of the Node.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
+ K8SNodeUIDKey = attribute.Key("k8s.node.uid")
+
+ // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name"
+ // semantic conventions. It represents the name of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry-pod-autoconf'
+ K8SPodNameKey = attribute.Key("k8s.pod.name")
+
+ // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid"
+ // semantic conventions. It represents the UID of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SPodUIDKey = attribute.Key("k8s.pod.uid")
+
+ // K8SReplicaSetNameKey is the attribute Key conforming to the
+ // "k8s.replicaset.name" semantic conventions. It represents the name of
+ // the ReplicaSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
+
+ // K8SReplicaSetUIDKey is the attribute Key conforming to the
+ // "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+ // ReplicaSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
+
+ // K8SStatefulSetNameKey is the attribute Key conforming to the
+ // "k8s.statefulset.name" semantic conventions. It represents the name of
+ // the StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
+
+ // K8SStatefulSetUIDKey is the attribute Key conforming to the
+ // "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+ // StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
+)
+
+// K8SClusterName returns an attribute KeyValue conforming to the
+// "k8s.cluster.name" semantic conventions. It represents the name of the
+// cluster.
+func K8SClusterName(val string) attribute.KeyValue {
+ return K8SClusterNameKey.String(val)
+}
+
+// K8SClusterUID returns an attribute KeyValue conforming to the
+// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the
+// cluster, set to the UID of the `kube-system` namespace.
+func K8SClusterUID(val string) attribute.KeyValue {
+ return K8SClusterUIDKey.String(val)
+}
+
+// K8SContainerName returns an attribute KeyValue conforming to the
+// "k8s.container.name" semantic conventions. It represents the name of the
+// Container from Pod specification, must be unique within a Pod. Container
+// runtime usually uses different globally unique name (`container.name`).
+func K8SContainerName(val string) attribute.KeyValue {
+ return K8SContainerNameKey.String(val)
+}
+
+// K8SContainerRestartCount returns an attribute KeyValue conforming to the
+// "k8s.container.restart_count" semantic conventions. It represents the number
+// of times the container was restarted. This attribute can be used to identify
+// a particular container (running or stopped) within a container spec.
+func K8SContainerRestartCount(val int) attribute.KeyValue {
+ return K8SContainerRestartCountKey.Int(val)
+}
+
+// K8SCronJobName returns an attribute KeyValue conforming to the
+// "k8s.cronjob.name" semantic conventions. It represents the name of the
+// CronJob.
+func K8SCronJobName(val string) attribute.KeyValue {
+ return K8SCronJobNameKey.String(val)
+}
+
+// K8SCronJobUID returns an attribute KeyValue conforming to the
+// "k8s.cronjob.uid" semantic conventions. It represents the UID of the
+// CronJob.
+func K8SCronJobUID(val string) attribute.KeyValue {
+ return K8SCronJobUIDKey.String(val)
+}
+
+// K8SDaemonSetName returns an attribute KeyValue conforming to the
+// "k8s.daemonset.name" semantic conventions. It represents the name of the
+// DaemonSet.
+func K8SDaemonSetName(val string) attribute.KeyValue {
+ return K8SDaemonSetNameKey.String(val)
+}
+
+// K8SDaemonSetUID returns an attribute KeyValue conforming to the
+// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+// DaemonSet.
+func K8SDaemonSetUID(val string) attribute.KeyValue {
+ return K8SDaemonSetUIDKey.String(val)
+}
+
+// K8SDeploymentName returns an attribute KeyValue conforming to the
+// "k8s.deployment.name" semantic conventions. It represents the name of the
+// Deployment.
+func K8SDeploymentName(val string) attribute.KeyValue {
+ return K8SDeploymentNameKey.String(val)
+}
+
+// K8SDeploymentUID returns an attribute KeyValue conforming to the
+// "k8s.deployment.uid" semantic conventions. It represents the UID of the
+// Deployment.
+func K8SDeploymentUID(val string) attribute.KeyValue {
+ return K8SDeploymentUIDKey.String(val)
+}
+
+// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name"
+// semantic conventions. It represents the name of the Job.
+func K8SJobName(val string) attribute.KeyValue {
+ return K8SJobNameKey.String(val)
+}
+
+// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid"
+// semantic conventions. It represents the UID of the Job.
+func K8SJobUID(val string) attribute.KeyValue {
+ return K8SJobUIDKey.String(val)
+}
+
+// K8SNamespaceName returns an attribute KeyValue conforming to the
+// "k8s.namespace.name" semantic conventions. It represents the name of the
+// namespace that the pod is running in.
+func K8SNamespaceName(val string) attribute.KeyValue {
+ return K8SNamespaceNameKey.String(val)
+}
+
+// K8SNodeName returns an attribute KeyValue conforming to the
+// "k8s.node.name" semantic conventions. It represents the name of the Node.
+func K8SNodeName(val string) attribute.KeyValue {
+ return K8SNodeNameKey.String(val)
+}
+
+// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid"
+// semantic conventions. It represents the UID of the Node.
+func K8SNodeUID(val string) attribute.KeyValue {
+ return K8SNodeUIDKey.String(val)
+}
+
+// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name"
+// semantic conventions. It represents the name of the Pod.
+func K8SPodName(val string) attribute.KeyValue {
+ return K8SPodNameKey.String(val)
+}
+
+// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid"
+// semantic conventions. It represents the UID of the Pod.
+func K8SPodUID(val string) attribute.KeyValue {
+ return K8SPodUIDKey.String(val)
+}
+
+// K8SReplicaSetName returns an attribute KeyValue conforming to the
+// "k8s.replicaset.name" semantic conventions. It represents the name of the
+// ReplicaSet.
+func K8SReplicaSetName(val string) attribute.KeyValue {
+ return K8SReplicaSetNameKey.String(val)
+}
+
+// K8SReplicaSetUID returns an attribute KeyValue conforming to the
+// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+// ReplicaSet.
+func K8SReplicaSetUID(val string) attribute.KeyValue {
+ return K8SReplicaSetUIDKey.String(val)
+}
+
+// K8SStatefulSetName returns an attribute KeyValue conforming to the
+// "k8s.statefulset.name" semantic conventions. It represents the name of the
+// StatefulSet.
+func K8SStatefulSetName(val string) attribute.KeyValue {
+ return K8SStatefulSetNameKey.String(val)
+}
+
+// K8SStatefulSetUID returns an attribute KeyValue conforming to the
+// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+// StatefulSet.
+func K8SStatefulSetUID(val string) attribute.KeyValue {
+ return K8SStatefulSetUIDKey.String(val)
+}
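+
+// A usage sketch under the same assumptions as the host example above
+// (`semconv` and `resource` imports): describing the namespace, pod, and
+// container a process runs in. Values are illustrative.
+//
+//	res := resource.NewWithAttributes(
+//		semconv.SchemaURL,
+//		semconv.K8SNamespaceName("default"),
+//		semconv.K8SPodName("opentelemetry-pod-autoconf"),
+//		semconv.K8SContainerName("redis"),
+//		semconv.K8SContainerRestartCount(2),
+//	)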
+
+// An OCI image manifest.
+const (
+ // OciManifestDigestKey is the attribute Key conforming to the
+ // "oci.manifest.digest" semantic conventions. It represents the digest of
+ // the OCI image manifest. For container images specifically, this is the
+ // digest by which the container image is known.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4'
+ // Note: Follows [OCI Image Manifest
+ // Specification](https://github.com/opencontainers/image-spec/blob/main/manifest.md),
+ // and specifically the [Digest
+ // property](https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests).
+ // An example can be found in [Example Image
+ // Manifest](https://docs.docker.com/registry/spec/manifest-v2-2/#example-image-manifest).
+ OciManifestDigestKey = attribute.Key("oci.manifest.digest")
+)
+
+// OciManifestDigest returns an attribute KeyValue conforming to the
+// "oci.manifest.digest" semantic conventions. It represents the digest of the
+// OCI image manifest. For container images specifically, this is the digest by
+// which the container image is known.
+func OciManifestDigest(val string) attribute.KeyValue {
+ return OciManifestDigestKey.String(val)
+}
+
+// The operating system (OS) on which the process represented by this resource
+// is running.
+const (
+ // OSBuildIDKey is the attribute Key conforming to the "os.build_id"
+ // semantic conventions. It represents the unique identifier for a
+ // particular build or compilation of the operating system.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'TQ3C.230805.001.B2', '20E247', '22621'
+ OSBuildIDKey = attribute.Key("os.build_id")
+
+ // OSDescriptionKey is the attribute Key conforming to the "os.description"
+ // semantic conventions. It represents the human readable (not intended to
+ // be parsed) OS version information, like e.g. reported by `ver` or
+ // `lsb_release -a` commands.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1
+ // LTS'
+ OSDescriptionKey = attribute.Key("os.description")
+
+ // OSNameKey is the attribute Key conforming to the "os.name" semantic
+ // conventions. It represents the human readable operating system name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'iOS', 'Android', 'Ubuntu'
+ OSNameKey = attribute.Key("os.name")
+
+ // OSTypeKey is the attribute Key conforming to the "os.type" semantic
+ // conventions. It represents the operating system type.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ OSTypeKey = attribute.Key("os.type")
+
+ // OSVersionKey is the attribute Key conforming to the "os.version"
+ // semantic conventions. It represents the version string of the operating
+ // system as defined in [Version
+ // Attributes](/docs/resource/README.md#version-attributes).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '14.2.1', '18.04.1'
+ OSVersionKey = attribute.Key("os.version")
+)
+
+var (
+ // Microsoft Windows
+ OSTypeWindows = OSTypeKey.String("windows")
+ // Linux
+ OSTypeLinux = OSTypeKey.String("linux")
+ // Apple Darwin
+ OSTypeDarwin = OSTypeKey.String("darwin")
+ // FreeBSD
+ OSTypeFreeBSD = OSTypeKey.String("freebsd")
+ // NetBSD
+ OSTypeNetBSD = OSTypeKey.String("netbsd")
+ // OpenBSD
+ OSTypeOpenBSD = OSTypeKey.String("openbsd")
+ // DragonFly BSD
+ OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
+ // HP-UX (Hewlett Packard Unix)
+ OSTypeHPUX = OSTypeKey.String("hpux")
+ // AIX (Advanced Interactive eXecutive)
+ OSTypeAIX = OSTypeKey.String("aix")
+ // SunOS, Oracle Solaris
+ OSTypeSolaris = OSTypeKey.String("solaris")
+ // IBM z/OS
+ OSTypeZOS = OSTypeKey.String("z_os")
+)
+
+// OSBuildID returns an attribute KeyValue conforming to the "os.build_id"
+// semantic conventions. It represents the unique identifier for a particular
+// build or compilation of the operating system.
+func OSBuildID(val string) attribute.KeyValue {
+ return OSBuildIDKey.String(val)
+}
+
+// OSDescription returns an attribute KeyValue conforming to the
+// "os.description" semantic conventions. It represents the human readable (not
+// intended to be parsed) OS version information, like e.g. reported by `ver`
+// or `lsb_release -a` commands.
+func OSDescription(val string) attribute.KeyValue {
+ return OSDescriptionKey.String(val)
+}
+
+// OSName returns an attribute KeyValue conforming to the "os.name" semantic
+// conventions. It represents the human readable operating system name.
+func OSName(val string) attribute.KeyValue {
+ return OSNameKey.String(val)
+}
+
+// OSVersion returns an attribute KeyValue conforming to the "os.version"
+// semantic conventions. It represents the version string of the operating
+// system as defined in [Version
+// Attributes](/docs/resource/README.md#version-attributes).
+func OSVersion(val string) attribute.KeyValue {
+ return OSVersionKey.String(val)
+}
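+
+// A usage sketch (illustrative, not generated): mixing the `os.type` enum
+// members with the string helpers. The enum members above are plain
+// attribute.KeyValue values, so no constructor call is needed for them.
+//
+//	attrs := []attribute.KeyValue{
+//		semconv.OSTypeLinux,
+//		semconv.OSName("Ubuntu"),
+//		semconv.OSVersion("18.04.1"),
+//	}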
+
+// An operating system process.
+const (
+ // ProcessCommandKey is the attribute Key conforming to the
+ // "process.command" semantic conventions. It represents the command used
+ // to launch the process (i.e. the command name). On Linux based systems,
+ // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
+ // be set to the first parameter extracted from `GetCommandLineW`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'cmd/otelcol'
+ ProcessCommandKey = attribute.Key("process.command")
+
+ // ProcessCommandArgsKey is the attribute Key conforming to the
+ // "process.command_args" semantic conventions. It represents the all the
+ // command arguments (including the command/executable itself) as received
+ // by the process. On Linux-based systems (and some other Unixoid systems
+ // supporting procfs), can be set according to the list of null-delimited
+ // strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+ // this would be the full argv vector passed to `main`.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'cmd/otelcol', '--config=config.yaml'
+ ProcessCommandArgsKey = attribute.Key("process.command_args")
+
+ // ProcessCommandLineKey is the attribute Key conforming to the
+ // "process.command_line" semantic conventions. It represents the full
+ // command used to launch the process as a single string representing the
+ // full command. On Windows, can be set to the result of `GetCommandLineW`.
+ // Do not set this if you have to assemble it just for monitoring; use
+ // `process.command_args` instead.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"'
+ ProcessCommandLineKey = attribute.Key("process.command_line")
+
+ // ProcessExecutableNameKey is the attribute Key conforming to the
+ // "process.executable.name" semantic conventions. It represents the name
+ // of the process executable. On Linux based systems, can be set to the
+ // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name
+ // of `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'otelcol'
+ ProcessExecutableNameKey = attribute.Key("process.executable.name")
+
+ // ProcessExecutablePathKey is the attribute Key conforming to the
+ // "process.executable.path" semantic conventions. It represents the full
+ // path to the process executable. On Linux based systems, can be set to
+ // the target of `proc/[pid]/exe`. On Windows, can be set to the result of
+ // `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/usr/bin/cmd/otelcol'
+ ProcessExecutablePathKey = attribute.Key("process.executable.path")
+
+ // ProcessOwnerKey is the attribute Key conforming to the "process.owner"
+ // semantic conventions. It represents the username of the user that owns
+ // the process.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'root'
+ ProcessOwnerKey = attribute.Key("process.owner")
+
+ // ProcessParentPIDKey is the attribute Key conforming to the
+ // "process.parent_pid" semantic conventions. It represents the parent
+ // Process identifier (PPID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 111
+ ProcessParentPIDKey = attribute.Key("process.parent_pid")
+
+ // ProcessPIDKey is the attribute Key conforming to the "process.pid"
+ // semantic conventions. It represents the process identifier (PID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1234
+ ProcessPIDKey = attribute.Key("process.pid")
+
+ // ProcessRuntimeDescriptionKey is the attribute Key conforming to the
+ // "process.runtime.description" semantic conventions. It represents an
+ // additional description about the runtime of the process, for example a
+ // specific vendor customization of the runtime environment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0'
+ ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description")
+
+ // ProcessRuntimeNameKey is the attribute Key conforming to the
+ // "process.runtime.name" semantic conventions. It represents the name of
+ // the runtime of this process. For compiled native binaries, this SHOULD
+ // be the name of the compiler.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'OpenJDK Runtime Environment'
+ ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
+
+ // ProcessRuntimeVersionKey is the attribute Key conforming to the
+ // "process.runtime.version" semantic conventions. It represents the
+ // version of the runtime of this process, as returned by the runtime
+ // without modification.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '14.0.2'
+ ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
+)
+
+// ProcessCommand returns an attribute KeyValue conforming to the
+// "process.command" semantic conventions. It represents the command used to
+// launch the process (i.e. the command name). On Linux based systems, can be
+// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
+// the first parameter extracted from `GetCommandLineW`.
+func ProcessCommand(val string) attribute.KeyValue {
+ return ProcessCommandKey.String(val)
+}
+
+// ProcessCommandArgs returns an attribute KeyValue conforming to the
+// "process.command_args" semantic conventions. It represents the all the
+// command arguments (including the command/executable itself) as received by
+// the process. On Linux-based systems (and some other Unixoid systems
+// supporting procfs), can be set according to the list of null-delimited
+// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+// this would be the full argv vector passed to `main`.
+func ProcessCommandArgs(val ...string) attribute.KeyValue {
+ return ProcessCommandArgsKey.StringSlice(val)
+}
+
+// ProcessCommandLine returns an attribute KeyValue conforming to the
+// "process.command_line" semantic conventions. It represents the full command
+// used to launch the process as a single string representing the full command.
+// On Windows, can be set to the result of `GetCommandLineW`. Do not set this
+// if you have to assemble it just for monitoring; use `process.command_args`
+// instead.
+func ProcessCommandLine(val string) attribute.KeyValue {
+ return ProcessCommandLineKey.String(val)
+}
+
+// ProcessExecutableName returns an attribute KeyValue conforming to the
+// "process.executable.name" semantic conventions. It represents the name of
+// the process executable. On Linux based systems, can be set to the `Name` in
+// `proc/[pid]/status`. On Windows, can be set to the base name of
+// `GetProcessImageFileNameW`.
+func ProcessExecutableName(val string) attribute.KeyValue {
+ return ProcessExecutableNameKey.String(val)
+}
+
+// ProcessExecutablePath returns an attribute KeyValue conforming to the
+// "process.executable.path" semantic conventions. It represents the full path
+// to the process executable. On Linux based systems, can be set to the target
+// of `proc/[pid]/exe`. On Windows, can be set to the result of
+// `GetProcessImageFileNameW`.
+func ProcessExecutablePath(val string) attribute.KeyValue {
+ return ProcessExecutablePathKey.String(val)
+}
+
+// ProcessOwner returns an attribute KeyValue conforming to the
+// "process.owner" semantic conventions. It represents the username of the user
+// that owns the process.
+func ProcessOwner(val string) attribute.KeyValue {
+ return ProcessOwnerKey.String(val)
+}
+
+// ProcessParentPID returns an attribute KeyValue conforming to the
+// "process.parent_pid" semantic conventions. It represents the parent Process
+// identifier (PPID).
+func ProcessParentPID(val int) attribute.KeyValue {
+ return ProcessParentPIDKey.Int(val)
+}
+
+// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
+// semantic conventions. It represents the process identifier (PID).
+func ProcessPID(val int) attribute.KeyValue {
+ return ProcessPIDKey.Int(val)
+}
+
+// ProcessRuntimeDescription returns an attribute KeyValue conforming to the
+// "process.runtime.description" semantic conventions. It represents an
+// additional description about the runtime of the process, for example a
+// specific vendor customization of the runtime environment.
+func ProcessRuntimeDescription(val string) attribute.KeyValue {
+ return ProcessRuntimeDescriptionKey.String(val)
+}
+
+// ProcessRuntimeName returns an attribute KeyValue conforming to the
+// "process.runtime.name" semantic conventions. It represents the name of the
+// runtime of this process. For compiled native binaries, this SHOULD be the
+// name of the compiler.
+func ProcessRuntimeName(val string) attribute.KeyValue {
+ return ProcessRuntimeNameKey.String(val)
+}
+
+// ProcessRuntimeVersion returns an attribute KeyValue conforming to the
+// "process.runtime.version" semantic conventions. It represents the version of
+// the runtime of this process, as returned by the runtime without
+// modification.
+func ProcessRuntimeVersion(val string) attribute.KeyValue {
+ return ProcessRuntimeVersionKey.String(val)
+}
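+
+// A usage sketch (illustrative, not generated): populating process
+// attributes from the standard library at runtime. It assumes the "os"
+// package is imported; the owner value is taken from the examples above.
+//
+//	attrs := []attribute.KeyValue{
+//		semconv.ProcessPID(os.Getpid()),
+//		semconv.ProcessCommandArgs(os.Args...),
+//		semconv.ProcessOwner("root"),
+//	}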
+
+// The Android platform on which the Android application is running.
+const (
+ // AndroidOSAPILevelKey is the attribute Key conforming to the
+ // "android.os.api_level" semantic conventions. It represents the uniquely
+ // identifies the framework API revision offered by a version
+ // (`os.version`) of the android operating system. More information can be
+ // found
+ // [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '33', '32'
+ AndroidOSAPILevelKey = attribute.Key("android.os.api_level")
+)
+
+// AndroidOSAPILevel returns an attribute KeyValue conforming to the
+// "android.os.api_level" semantic conventions. It represents the uniquely
+// identifies the framework API revision offered by a version (`os.version`) of
+// the android operating system. More information can be found
+// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
+func AndroidOSAPILevel(val string) attribute.KeyValue {
+ return AndroidOSAPILevelKey.String(val)
+}
+
+// The web browser in which the application represented by the resource is
+// running. The `browser.*` attributes MUST be used only for resources that
+// represent applications running in a web browser (regardless of whether
+// running on a mobile or desktop device).
+const (
+ // BrowserBrandsKey is the attribute Key conforming to the "browser.brands"
+ // semantic conventions. It represents the array of brand name and version
+ // separated by a space
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99'
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.brands`).
+ BrowserBrandsKey = attribute.Key("browser.brands")
+
+ // BrowserLanguageKey is the attribute Key conforming to the
+ // "browser.language" semantic conventions. It represents the preferred
+ // language of the user using the browser
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'en', 'en-US', 'fr', 'fr-FR'
+ // Note: This value is intended to be taken from the Navigator API
+ // `navigator.language`.
+ BrowserLanguageKey = attribute.Key("browser.language")
+
+ // BrowserMobileKey is the attribute Key conforming to the "browser.mobile"
+ // semantic conventions. It represents a boolean that is true if the
+ // browser is running on a mobile device
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.mobile`). If unavailable, this attribute
+ // SHOULD be left unset.
+ BrowserMobileKey = attribute.Key("browser.mobile")
+
+ // BrowserPlatformKey is the attribute Key conforming to the
+ // "browser.platform" semantic conventions. It represents the platform on
+ // which the browser is running
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Windows', 'macOS', 'Android'
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.platform`). If unavailable, the legacy
+ // `navigator.platform` API SHOULD NOT be used instead and this attribute
+ // SHOULD be left unset in order for the values to be consistent.
+ // The list of possible values is defined in the [W3C User-Agent Client
+ // Hints
+ // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform).
+ // Note that some (but not all) of these values can overlap with values in
+ // the [`os.type` and `os.name` attributes](./os.md). However, for
+ // consistency, the values in the `browser.platform` attribute should
+ // capture the exact value that the user agent provides.
+ BrowserPlatformKey = attribute.Key("browser.platform")
+)
+
+// BrowserBrands returns an attribute KeyValue conforming to the
+// "browser.brands" semantic conventions. It represents the array of brand name
+// and version separated by a space
+func BrowserBrands(val ...string) attribute.KeyValue {
+ return BrowserBrandsKey.StringSlice(val)
+}
+
+// BrowserLanguage returns an attribute KeyValue conforming to the
+// "browser.language" semantic conventions. It represents the preferred
+// language of the user using the browser
+func BrowserLanguage(val string) attribute.KeyValue {
+ return BrowserLanguageKey.String(val)
+}
+
+// BrowserMobile returns an attribute KeyValue conforming to the
+// "browser.mobile" semantic conventions. It represents a boolean that is true
+// if the browser is running on a mobile device
+func BrowserMobile(val bool) attribute.KeyValue {
+ return BrowserMobileKey.Bool(val)
+}
+
+// BrowserPlatform returns an attribute KeyValue conforming to the
+// "browser.platform" semantic conventions. It represents the platform on which
+// the browser is running
+func BrowserPlatform(val string) attribute.KeyValue {
+ return BrowserPlatformKey.String(val)
+}
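+
+// A usage sketch (illustrative, not generated): browser attributes mix
+// slice-, string-, and bool-typed helpers. Per the notes above, the values
+// are intended to come from the UA client hints and Navigator APIs.
+//
+//	attrs := []attribute.KeyValue{
+//		semconv.BrowserBrands("Chromium 99", "Chrome 99"),
+//		semconv.BrowserLanguage("en-US"),
+//		semconv.BrowserMobile(false),
+//		semconv.BrowserPlatform("macOS"),
+//	}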
+
+// Resources used by AWS Elastic Container Service (ECS).
+const (
+ // AWSECSClusterARNKey is the attribute Key conforming to the
+ // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
+ // [ECS
+ // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+ AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
+
+ // AWSECSContainerARNKey is the attribute Key conforming to the
+ // "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+ // Resource Name (ARN) of an [ECS container
+ // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
+ AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
+
+ // AWSECSLaunchtypeKey is the attribute Key conforming to the
+ // "aws.ecs.launchtype" semantic conventions. It represents the [launch
+ // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html)
+ // for an ECS task.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
+
+ // AWSECSTaskARNKey is the attribute Key conforming to the
+ // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an
+ // [ECS task
+ // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b'
+ AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
+
+ // AWSECSTaskFamilyKey is the attribute Key conforming to the
+ // "aws.ecs.task.family" semantic conventions. It represents the task
+ // definition family this task definition is a member of.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry-family'
+ AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
+
+ // AWSECSTaskRevisionKey is the attribute Key conforming to the
+ // "aws.ecs.task.revision" semantic conventions. It represents the revision
+ // for this task definition.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '8', '26'
+ AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision")
+)
+
+var (
+ // ec2
+ AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2")
+ // fargate
+ AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate")
+)
+
+// AWSECSClusterARN returns an attribute KeyValue conforming to the
+// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS
+// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+func AWSECSClusterARN(val string) attribute.KeyValue {
+ return AWSECSClusterARNKey.String(val)
+}
+
+// AWSECSContainerARN returns an attribute KeyValue conforming to the
+// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+// Resource Name (ARN) of an [ECS container
+// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+func AWSECSContainerARN(val string) attribute.KeyValue {
+ return AWSECSContainerARNKey.String(val)
+}
+
+// AWSECSTaskARN returns an attribute KeyValue conforming to the
+// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS
+// task
+// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
+func AWSECSTaskARN(val string) attribute.KeyValue {
+ return AWSECSTaskARNKey.String(val)
+}
+
+// AWSECSTaskFamily returns an attribute KeyValue conforming to the
+// "aws.ecs.task.family" semantic conventions. It represents the task
+// definition family this task definition is a member of.
+func AWSECSTaskFamily(val string) attribute.KeyValue {
+ return AWSECSTaskFamilyKey.String(val)
+}
+
+// AWSECSTaskRevision returns an attribute KeyValue conforming to the
+// "aws.ecs.task.revision" semantic conventions. It represents the revision for
+// this task definition.
+func AWSECSTaskRevision(val string) attribute.KeyValue {
+ return AWSECSTaskRevisionKey.String(val)
+}
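+
+// A usage sketch under the same assumptions as the earlier examples
+// (`semconv`/`resource` imports): describing an ECS task, including the
+// launch-type enum. The ARN and values are illustrative.
+//
+//	res := resource.NewWithAttributes(
+//		semconv.SchemaURL,
+//		semconv.AWSECSClusterARN("arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster"),
+//		semconv.AWSECSLaunchtypeFargate,
+//		semconv.AWSECSTaskFamily("opentelemetry-family"),
+//		semconv.AWSECSTaskRevision("8"),
+//	)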
+
+// Resources used by AWS Elastic Kubernetes Service (EKS).
+const (
+ // AWSEKSClusterARNKey is the attribute Key conforming to the
+ // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an
+ // EKS cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+ AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
+)
+
+// AWSEKSClusterARN returns an attribute KeyValue conforming to the
+// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
+// cluster.
+func AWSEKSClusterARN(val string) attribute.KeyValue {
+ return AWSEKSClusterARNKey.String(val)
+}
+
+// Resources specific to Amazon Web Services.
+const (
+ // AWSLogGroupARNsKey is the attribute Key conforming to the
+ // "aws.log.group.arns" semantic conventions. It represents the Amazon
+ // Resource Name(s) (ARN) of the AWS log group(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*'
+ // Note: See the [log group ARN format
+ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+ AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
+
+ // AWSLogGroupNamesKey is the attribute Key conforming to the
+ // "aws.log.group.names" semantic conventions. It represents the name(s) of
+ // the AWS log group(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/aws/lambda/my-function', 'opentelemetry-service'
+ // Note: Multiple log groups must be supported for cases like
+ // multi-container applications, where a single application has sidecar
+ // containers, and each writes to its own log group.
+ AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
+
+ // AWSLogStreamARNsKey is the attribute Key conforming to the
+ // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of
+ // the AWS log stream(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+ // Note: See the [log stream ARN format
+ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+ // One log group can contain several log streams, so these ARNs necessarily
+ // identify both a log group and a log stream.
+ AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
+
+ // AWSLogStreamNamesKey is the attribute Key conforming to the
+ // "aws.log.stream.names" semantic conventions. It represents the name(s)
+ // of the AWS log stream(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+ AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
+)
+
+// AWSLogGroupARNs returns an attribute KeyValue conforming to the
+// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
+// Name(s) (ARN) of the AWS log group(s).
+func AWSLogGroupARNs(val ...string) attribute.KeyValue {
+ return AWSLogGroupARNsKey.StringSlice(val)
+}
+
+// AWSLogGroupNames returns an attribute KeyValue conforming to the
+// "aws.log.group.names" semantic conventions. It represents the name(s) of the
+// AWS log group(s) an application is writing to.
+func AWSLogGroupNames(val ...string) attribute.KeyValue {
+ return AWSLogGroupNamesKey.StringSlice(val)
+}
+
+// AWSLogStreamARNs returns an attribute KeyValue conforming to the
+// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
+// AWS log stream(s).
+func AWSLogStreamARNs(val ...string) attribute.KeyValue {
+ return AWSLogStreamARNsKey.StringSlice(val)
+}
+
+// AWSLogStreamNames returns an attribute KeyValue conforming to the
+// "aws.log.stream.names" semantic conventions. It represents the name(s) of
+// the AWS log stream(s) an application is writing to.
+func AWSLogStreamNames(val ...string) attribute.KeyValue {
+ return AWSLogStreamNamesKey.StringSlice(val)
+}
+
+// Resource used by Google Cloud Run.
+const (
+ // GCPCloudRunJobExecutionKey is the attribute Key conforming to the
+ // "gcp.cloud_run.job.execution" semantic conventions. It represents the
+ // name of the Cloud Run
+ // [execution](https://cloud.google.com/run/docs/managing/job-executions)
+ // being run for the Job, as set by the
+ // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
+ // environment variable.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'job-name-xxxx', 'sample-job-mdw84'
+ GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution")
+
+ // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the
+ // "gcp.cloud_run.job.task_index" semantic conventions. It represents the
+ // index for a task within an execution as provided by the
+ // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
+ // environment variable.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 0, 1
+ GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index")
+)
+
+// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the
+// "gcp.cloud_run.job.execution" semantic conventions. It represents the name
+// of the Cloud Run
+// [execution](https://cloud.google.com/run/docs/managing/job-executions) being
+// run for the Job, as set by the
+// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
+// environment variable.
+func GCPCloudRunJobExecution(val string) attribute.KeyValue {
+ return GCPCloudRunJobExecutionKey.String(val)
+}
+
+// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the
+// "gcp.cloud_run.job.task_index" semantic conventions. It represents the index
+// for a task within an execution as provided by the
+// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
+// environment variable.
+func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue {
+ return GCPCloudRunJobTaskIndexKey.Int(val)
+}
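+
+// A usage sketch (illustrative, not generated): per the notes above, both
+// values come from Cloud Run-provided environment variables. It assumes the
+// "os" and "strconv" packages are imported; error handling is elided.
+//
+//	execName := os.Getenv("CLOUD_RUN_EXECUTION")
+//	taskIdx, _ := strconv.Atoi(os.Getenv("CLOUD_RUN_TASK_INDEX"))
+//	attrs := []attribute.KeyValue{
+//		semconv.GCPCloudRunJobExecution(execName),
+//		semconv.GCPCloudRunJobTaskIndex(taskIdx),
+//	}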
+
+// Resources used by Google Compute Engine (GCE).
+const (
+ // GCPGceInstanceHostnameKey is the attribute Key conforming to the
+ // "gcp.gce.instance.hostname" semantic conventions. It represents the
+ // hostname of a GCE instance. This is the full value of the default or
+ // [custom
+ // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'my-host1234.example.com',
+ // 'sample-vm.us-west1-b.c.my-project.internal'
+ GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname")
+
+ // GCPGceInstanceNameKey is the attribute Key conforming to the
+ // "gcp.gce.instance.name" semantic conventions. It represents the instance
+ // name of a GCE instance. This is the value provided by `host.name`, the
+ // visible name of the instance in the Cloud Console UI, and the prefix for
+ // the default hostname of the instance as defined by the [default internal
+ // DNS
+ // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'instance-1', 'my-vm-name'
+ GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name")
+)
+
+// GCPGceInstanceHostname returns an attribute KeyValue conforming to the
+// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname
+// of a GCE instance. This is the full value of the default or [custom
+// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm).
+func GCPGceInstanceHostname(val string) attribute.KeyValue {
+ return GCPGceInstanceHostnameKey.String(val)
+}
+
+// GCPGceInstanceName returns an attribute KeyValue conforming to the
+// "gcp.gce.instance.name" semantic conventions. It represents the instance
+// name of a GCE instance. This is the value provided by `host.name`, the
+// visible name of the instance in the Cloud Console UI, and the prefix for the
+// default hostname of the instance as defined by the [default internal DNS
+// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names).
+func GCPGceInstanceName(val string) attribute.KeyValue {
+ return GCPGceInstanceNameKey.String(val)
+}
+
+// Heroku dyno metadata
+const (
+ // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id"
+ // semantic conventions. It represents the unique identifier for the
+ // application
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da'
+ HerokuAppIDKey = attribute.Key("heroku.app.id")
+
+ // HerokuReleaseCommitKey is the attribute Key conforming to the
+ // "heroku.release.commit" semantic conventions. It represents the commit
+ // hash for the current release
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec'
+ HerokuReleaseCommitKey = attribute.Key("heroku.release.commit")
+
+ // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the
+ // "heroku.release.creation_timestamp" semantic conventions. It represents
+ // the time and date the release was created
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2022-10-23T18:00:42Z'
+ HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp")
+)
+
+// HerokuAppID returns an attribute KeyValue conforming to the
+// "heroku.app.id" semantic conventions. It represents the unique identifier
+// for the application
+func HerokuAppID(val string) attribute.KeyValue {
+ return HerokuAppIDKey.String(val)
+}
+
+// HerokuReleaseCommit returns an attribute KeyValue conforming to the
+// "heroku.release.commit" semantic conventions. It represents the commit hash
+// for the current release
+func HerokuReleaseCommit(val string) attribute.KeyValue {
+ return HerokuReleaseCommitKey.String(val)
+}
+
+// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming
+// to the "heroku.release.creation_timestamp" semantic conventions. It
+// represents the time and date the release was created
+func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue {
+ return HerokuReleaseCreationTimestampKey.String(val)
+}
+
+// The software deployment.
+const (
+ // DeploymentEnvironmentKey is the attribute Key conforming to the
+ // "deployment.environment" semantic conventions. It represents the name of
+ // the [deployment
+ // environment](https://wikipedia.org/wiki/Deployment_environment) (aka
+ // deployment tier).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'staging', 'production'
+ // Note: `deployment.environment` does not affect the uniqueness
+ // constraints defined through
+ // the `service.namespace`, `service.name` and `service.instance.id`
+ // resource attributes.
+ // This implies that resources carrying the following attribute
+ // combinations MUST be
+ // considered to be identifying the same service:
+ //
+ // * `service.name=frontend`, `deployment.environment=production`
+ // * `service.name=frontend`, `deployment.environment=staging`.
+ DeploymentEnvironmentKey = attribute.Key("deployment.environment")
+)
+
+// DeploymentEnvironment returns an attribute KeyValue conforming to the
+// "deployment.environment" semantic conventions. It represents the name of the
+// [deployment environment](https://wikipedia.org/wiki/Deployment_environment)
+// (aka deployment tier).
+func DeploymentEnvironment(val string) attribute.KeyValue {
+ return DeploymentEnvironmentKey.String(val)
+}
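+
+// A usage sketch (illustrative, not generated) of the uniqueness note above:
+// the two resources below carry the same service identity, differing only in
+// deployment tier. It assumes the `resource` import; `ServiceName` is
+// defined later in this file.
+//
+//	prod := resource.NewWithAttributes(semconv.SchemaURL,
+//		semconv.ServiceName("frontend"),
+//		semconv.DeploymentEnvironment("production"),
+//	)
+//	staging := resource.NewWithAttributes(semconv.SchemaURL,
+//		semconv.ServiceName("frontend"),
+//		semconv.DeploymentEnvironment("staging"),
+//	)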
+
+// A serverless instance.
+const (
+ // FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
+ // semantic conventions. It represents the execution environment ID as a
+ // string, that will be potentially reused for other invocations to the
+ // same function/function version.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
+ // Note: * **AWS Lambda:** Use the (full) log stream name.
+ FaaSInstanceKey = attribute.Key("faas.instance")
+
+ // FaaSMaxMemoryKey is the attribute Key conforming to the
+ // "faas.max_memory" semantic conventions. It represents the amount of
+ // memory available to the serverless function converted to Bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 134217728
+ // Note: It's recommended to set this attribute since e.g. too little
+ // memory can easily stop a Java AWS Lambda function from working
+ // correctly. On AWS Lambda, the environment variable
+ // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must
+ // be multiplied by 1,048,576).
+ FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
+
+ // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
+ // conventions. It represents the name of the single function that this
+ // runtime instance executes.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Examples: 'my-function', 'myazurefunctionapp/some-function-name'
+ // Note: This is the name of the function as configured/deployed on the
+ // FaaS
+ // platform and is usually different from the name of the callback
+ // function (which may be stored in the
+ // [`code.namespace`/`code.function`](/docs/general/attributes.md#source-code-attributes)
+ // span attributes).
+ //
+ // For some cloud providers, the above definition is ambiguous. The
+ // following
+ // definition of function name MUST be used for this attribute
+ // (and consequently the span name) for the listed cloud
+ // providers/products:
+ //
+ // * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
+ // followed by a forward slash followed by the function name (this form
+ // can also be seen in the resource JSON for the function).
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider (see also the `cloud.resource_id` attribute).
+ FaaSNameKey = attribute.Key("faas.name")
+
+ // FaaSVersionKey is the attribute Key conforming to the "faas.version"
+ // semantic conventions. It represents the immutable version of the
+ // function being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '26', 'pinkfroid-00002'
+ // Note: Depending on the cloud provider and platform, use:
+ //
+ // * **AWS Lambda:** The [function
+ // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
+ // (an integer represented as a decimal string).
+ // * **Google Cloud Run (Services):** The
+ // [revision](https://cloud.google.com/run/docs/managing/revisions)
+ // (i.e., the function name plus the revision suffix).
+ // * **Google Cloud Functions:** The value of the
+ // [`K_REVISION` environment
+ // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
+ // * **Azure Functions:** Not applicable. Do not set this attribute.
+ FaaSVersionKey = attribute.Key("faas.version")
+)
+
+// FaaSInstance returns an attribute KeyValue conforming to the
+// "faas.instance" semantic conventions. It represents the execution
+// environment ID as a string, that will be potentially reused for other
+// invocations to the same function/function version.
+func FaaSInstance(val string) attribute.KeyValue {
+ return FaaSInstanceKey.String(val)
+}
+
+// FaaSMaxMemory returns an attribute KeyValue conforming to the
+// "faas.max_memory" semantic conventions. It represents the amount of memory
+// available to the serverless function converted to Bytes.
+func FaaSMaxMemory(val int) attribute.KeyValue {
+ return FaaSMaxMemoryKey.Int(val)
+}
+
+// FaaSName returns an attribute KeyValue conforming to the "faas.name"
+// semantic conventions. It represents the name of the single function that
+// this runtime instance executes.
+func FaaSName(val string) attribute.KeyValue {
+ return FaaSNameKey.String(val)
+}
+
+// FaaSVersion returns an attribute KeyValue conforming to the
+// "faas.version" semantic conventions. It represents the immutable version of
+// the function being executed.
+func FaaSVersion(val string) attribute.KeyValue {
+ return FaaSVersionKey.String(val)
+}
+
+// A service instance.
+const (
+ // ServiceNameKey is the attribute Key conforming to the "service.name"
+ // semantic conventions. It represents the logical name of the service.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Examples: 'shoppingcart'
+ // Note: MUST be the same for all instances of horizontally scaled
+ // services. If the value was not specified, SDKs MUST fallback to
+ // `unknown_service:` concatenated with
+ // [`process.executable.name`](process.md#process), e.g.
+ // `unknown_service:bash`. If `process.executable.name` is not available,
+ // the value MUST be set to `unknown_service`.
+ ServiceNameKey = attribute.Key("service.name")
+
+ // ServiceVersionKey is the attribute Key conforming to the
+ // "service.version" semantic conventions. It represents the version string
+ // of the service API or implementation. The format is not defined by these
+ // conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2.0.0', 'a01dbef8a'
+ ServiceVersionKey = attribute.Key("service.version")
+)
+
+// ServiceName returns an attribute KeyValue conforming to the
+// "service.name" semantic conventions. It represents the logical name of the
+// service.
+func ServiceName(val string) attribute.KeyValue {
+ return ServiceNameKey.String(val)
+}
+
+// ServiceVersion returns an attribute KeyValue conforming to the
+// "service.version" semantic conventions. It represents the version string of
+// the service API or implementation. The format is not defined by these
+// conventions.
+func ServiceVersion(val string) attribute.KeyValue {
+ return ServiceVersionKey.String(val)
+}
+
+// A service instance.
+const (
+ // ServiceInstanceIDKey is the attribute Key conforming to the
+ // "service.instance.id" semantic conventions. It represents the string ID
+ // of the service instance.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'my-k8s-pod-deployment-1',
+ // '627cc493-f310-47de-96bd-71410b7dec09'
+ // Note: MUST be unique for each instance of the same
+ // `service.namespace,service.name` pair (in other words
+ // `service.namespace,service.name,service.instance.id` triplet MUST be
+ // globally unique). The ID helps to distinguish instances of the same
+ // service that exist at the same time (e.g. instances of a horizontally
+ // scaled service). It is preferable for the ID to be persistent and stay
+ // the same for the lifetime of the service instance, however it is
+ // acceptable that the ID is ephemeral and changes during important
+ // lifetime events for the service (e.g. service restarts). If the service
+ // has no inherent unique ID that can be used as the value of this
+ // attribute it is recommended to generate a random Version 1 or Version 4
+ // RFC 4122 UUID (services aiming for reproducible UUIDs may also use
+ // Version 5, see RFC 4122 for more recommendations).
+ ServiceInstanceIDKey = attribute.Key("service.instance.id")
+
+ // ServiceNamespaceKey is the attribute Key conforming to the
+ // "service.namespace" semantic conventions. It represents a namespace for
+ // `service.name`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Shop'
+ // Note: A string value having a meaning that helps to distinguish a group
+ // of services, for example the team name that owns a group of services.
+ // `service.name` is expected to be unique within the same namespace. If
+ // `service.namespace` is not specified in the Resource then `service.name`
+ // is expected to be unique for all services that have no explicit
+ // namespace defined (so the empty/unspecified namespace is simply one more
+ // valid namespace). Zero-length namespace string is assumed equal to
+ // unspecified namespace.
+ ServiceNamespaceKey = attribute.Key("service.namespace")
+)
+
+// ServiceInstanceID returns an attribute KeyValue conforming to the
+// "service.instance.id" semantic conventions. It represents the string ID of
+// the service instance.
+func ServiceInstanceID(val string) attribute.KeyValue {
+ return ServiceInstanceIDKey.String(val)
+}
+
+// ServiceNamespace returns an attribute KeyValue conforming to the
+// "service.namespace" semantic conventions. It represents a namespace for
+// `service.name`.
+func ServiceNamespace(val string) attribute.KeyValue {
+ return ServiceNamespaceKey.String(val)
+}
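+
+// Taken together, `service.namespace`, `service.name` and
+// `service.instance.id` identify one service instance. A sketch
+// (illustrative only; assumes the go.opentelemetry.io/otel/sdk/resource
+// package):
+//
+//	res := resource.NewWithAttributes(SchemaURL,
+//		ServiceNamespace("Shop"),
+//		ServiceName("shoppingcart"),
+//		ServiceInstanceID("627cc493-f310-47de-96bd-71410b7dec09"),
+//	)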
+
+// The telemetry SDK used to capture data recorded by the instrumentation
+// libraries.
+const (
+ // TelemetrySDKLanguageKey is the attribute Key conforming to the
+ // "telemetry.sdk.language" semantic conventions. It represents the
+ // language of the telemetry SDK.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: experimental
+ TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
+
+ // TelemetrySDKNameKey is the attribute Key conforming to the
+ // "telemetry.sdk.name" semantic conventions. It represents the name of the
+ // telemetry SDK as defined above.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute
+ // to `opentelemetry`.
+ // If another SDK, like a fork or a vendor-provided implementation, is
+ // used, this SDK MUST set the
+ // `telemetry.sdk.name` attribute to the fully-qualified class or module
+ // name of this SDK's main entry point
+ // or another suitable identifier depending on the language.
+ // The identifier `opentelemetry` is reserved and MUST NOT be used in this
+ // case.
+ // All custom identifiers SHOULD be stable across different versions of an
+ // implementation.
+ TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name")
+
+ // TelemetrySDKVersionKey is the attribute Key conforming to the
+ // "telemetry.sdk.version" semantic conventions. It represents the version
+ // string of the telemetry SDK.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Examples: '1.2.3'
+ TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version")
+)
+
+var (
+ // cpp
+ TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp")
+ // dotnet
+ TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet")
+ // erlang
+ TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang")
+ // go
+ TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go")
+ // java
+ TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java")
+ // nodejs
+ TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs")
+ // php
+ TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php")
+ // python
+ TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python")
+ // ruby
+ TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby")
+ // rust
+ TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust")
+ // swift
+ TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift")
+ // webjs
+ TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs")
+)
+
+// TelemetrySDKName returns an attribute KeyValue conforming to the
+// "telemetry.sdk.name" semantic conventions. It represents the name of the
+// telemetry SDK as defined above.
+func TelemetrySDKName(val string) attribute.KeyValue {
+ return TelemetrySDKNameKey.String(val)
+}
+
+// TelemetrySDKVersion returns an attribute KeyValue conforming to the
+// "telemetry.sdk.version" semantic conventions. It represents the version
+// string of the telemetry SDK.
+func TelemetrySDKVersion(val string) attribute.KeyValue {
+ return TelemetrySDKVersionKey.String(val)
+}
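+
+// A sketch of the resource an SDK would populate with these Required
+// attributes (illustrative only; assumes the
+// go.opentelemetry.io/otel/sdk/resource package):
+//
+//	res := resource.NewWithAttributes(SchemaURL,
+//		TelemetrySDKName("opentelemetry"),
+//		TelemetrySDKLanguageGo,
+//		TelemetrySDKVersion("1.2.3"),
+//	)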
+
+// The auto instrumentation agent or distribution, if used, to capture the
+// recorded data.
+const (
+ // TelemetryDistroNameKey is the attribute Key conforming to the
+ // "telemetry.distro.name" semantic conventions. It represents the name of
+ // the auto instrumentation agent or distribution, if used.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'parts-unlimited-java'
+ // Note: Official auto instrumentation agents and distributions SHOULD set
+ // the `telemetry.distro.name` attribute to
+ // a string starting with `opentelemetry-`, e.g.
+ // `opentelemetry-java-instrumentation`.
+ TelemetryDistroNameKey = attribute.Key("telemetry.distro.name")
+
+ // TelemetryDistroVersionKey is the attribute Key conforming to the
+ // "telemetry.distro.version" semantic conventions. It represents the
+ // version string of the auto instrumentation agent or distribution, if
+ // used.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1.2.3'
+ TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version")
+)
+
+// TelemetryDistroName returns an attribute KeyValue conforming to the
+// "telemetry.distro.name" semantic conventions. It represents the name of the
+// auto instrumentation agent or distribution, if used.
+func TelemetryDistroName(val string) attribute.KeyValue {
+ return TelemetryDistroNameKey.String(val)
+}
+
+// TelemetryDistroVersion returns an attribute KeyValue conforming to the
+// "telemetry.distro.version" semantic conventions. It represents the version
+// string of the auto instrumentation agent or distribution, if used.
+func TelemetryDistroVersion(val string) attribute.KeyValue {
+ return TelemetryDistroVersionKey.String(val)
+}
+
+// Resource describing the packaged software running the application code. Web
+// engines are typically executed using process.runtime.
+const (
+ // WebEngineDescriptionKey is the attribute Key conforming to the
+ // "webengine.description" semantic conventions. It represents the
+ // additional description of the web engine (e.g. detailed version and
+ // edition information).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) -
+ // 2.2.2.Final'
+ WebEngineDescriptionKey = attribute.Key("webengine.description")
+
+ // WebEngineNameKey is the attribute Key conforming to the "webengine.name"
+ // semantic conventions. It represents the name of the web engine.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Examples: 'WildFly'
+ WebEngineNameKey = attribute.Key("webengine.name")
+
+ // WebEngineVersionKey is the attribute Key conforming to the
+ // "webengine.version" semantic conventions. It represents the version of
+ // the web engine.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '21.0.0'
+ WebEngineVersionKey = attribute.Key("webengine.version")
+)
+
+// WebEngineDescription returns an attribute KeyValue conforming to the
+// "webengine.description" semantic conventions. It represents the additional
+// description of the web engine (e.g. detailed version and edition
+// information).
+func WebEngineDescription(val string) attribute.KeyValue {
+ return WebEngineDescriptionKey.String(val)
+}
+
+// WebEngineName returns an attribute KeyValue conforming to the
+// "webengine.name" semantic conventions. It represents the name of the web
+// engine.
+func WebEngineName(val string) attribute.KeyValue {
+ return WebEngineNameKey.String(val)
+}
+
+// WebEngineVersion returns an attribute KeyValue conforming to the
+// "webengine.version" semantic conventions. It represents the version of the
+// web engine.
+func WebEngineVersion(val string) attribute.KeyValue {
+ return WebEngineVersionKey.String(val)
+}
+
+// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's
+// concepts.
+const (
+ // OTelScopeNameKey is the attribute Key conforming to the
+ // "otel.scope.name" semantic conventions. It represents the name of the
+ // instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'io.opentelemetry.contrib.mongodb'
+ OTelScopeNameKey = attribute.Key("otel.scope.name")
+
+ // OTelScopeVersionKey is the attribute Key conforming to the
+ // "otel.scope.version" semantic conventions. It represents the version of
+ // the instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1.0.0'
+ OTelScopeVersionKey = attribute.Key("otel.scope.version")
+)
+
+// OTelScopeName returns an attribute KeyValue conforming to the
+// "otel.scope.name" semantic conventions. It represents the name of the
+// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+func OTelScopeName(val string) attribute.KeyValue {
+ return OTelScopeNameKey.String(val)
+}
+
+// OTelScopeVersion returns an attribute KeyValue conforming to the
+// "otel.scope.version" semantic conventions. It represents the version of the
+// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+func OTelScopeVersion(val string) attribute.KeyValue {
+ return OTelScopeVersionKey.String(val)
+}
+
+// Span attributes used by non-OTLP exporters to represent OpenTelemetry
+// Scope's concepts.
+const (
+ // OTelLibraryNameKey is the attribute Key conforming to the
+ // "otel.library.name" semantic conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 'io.opentelemetry.contrib.mongodb'
+ // Deprecated: use the `otel.scope.name` attribute.
+ OTelLibraryNameKey = attribute.Key("otel.library.name")
+
+ // OTelLibraryVersionKey is the attribute Key conforming to the
+ // "otel.library.version" semantic conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: '1.0.0'
+ // Deprecated: use the `otel.scope.version` attribute.
+ OTelLibraryVersionKey = attribute.Key("otel.library.version")
+)
+
+// OTelLibraryName returns an attribute KeyValue conforming to the
+// "otel.library.name" semantic conventions.
+//
+// Deprecated: use the `otel.scope.name` attribute.
+func OTelLibraryName(val string) attribute.KeyValue {
+ return OTelLibraryNameKey.String(val)
+}
+
+// OTelLibraryVersion returns an attribute KeyValue conforming to the
+// "otel.library.version" semantic conventions.
+//
+// Deprecated: use the `otel.scope.version` attribute.
+func OTelLibraryVersion(val string) attribute.KeyValue {
+ return OTelLibraryVersionKey.String(val)
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go
new file mode 100644
index 0000000000000..fe80b1731d02f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
+
+// SchemaURL is the schema URL that matches the version of the semantic conventions
+// that this package defines. Semconv packages starting from v1.4.0 must declare
+// non-empty schema URL in the form https://opentelemetry.io/schemas/<version>
+const SchemaURL = "https://opentelemetry.io/schemas/1.24.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go
new file mode 100644
index 0000000000000..c1718234e5222
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go
@@ -0,0 +1,1323 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// Operations that access some remote service.
+const (
+ // PeerServiceKey is the attribute Key conforming to the "peer.service"
+ // semantic conventions. It represents the
+ // [`service.name`](/docs/resource/README.md#service) of the remote
+ // service. SHOULD be equal to the actual `service.name` resource attribute
+ // of the remote service if any.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'AuthTokenCache'
+ PeerServiceKey = attribute.Key("peer.service")
+)
+
+// PeerService returns an attribute KeyValue conforming to the
+// "peer.service" semantic conventions. It represents the
+// [`service.name`](/docs/resource/README.md#service) of the remote service.
+// SHOULD be equal to the actual `service.name` resource attribute of the
+// remote service if any.
+func PeerService(val string) attribute.KeyValue {
+ return PeerServiceKey.String(val)
+}
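+
+// A sketch of attaching this attribute to a client span (illustrative only;
+// assumes a trace.Span from go.opentelemetry.io/otel/trace):
+//
+//	span.SetAttributes(PeerService("AuthTokenCache"))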
+
+// These attributes may be used for any operation with an authenticated and/or
+// authorized enduser.
+const (
+ // EnduserIDKey is the attribute Key conforming to the "enduser.id"
+ // semantic conventions. It represents the username or client_id extracted
+ // from the access token or
+ // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header
+ // in the inbound request from outside the system.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'username'
+ EnduserIDKey = attribute.Key("enduser.id")
+
+ // EnduserRoleKey is the attribute Key conforming to the "enduser.role"
+ // semantic conventions. It represents the actual/assumed role the client
+ // is making the request under extracted from token or application security
+ // context.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'admin'
+ EnduserRoleKey = attribute.Key("enduser.role")
+
+ // EnduserScopeKey is the attribute Key conforming to the "enduser.scope"
+ // semantic conventions. It represents the scopes or granted authorities
+ // the client currently possesses extracted from token or application
+ // security context. The value would come from the scope associated with an
+ // [OAuth 2.0 Access
+ // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+ // value in a [SAML 2.0
+ // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'read:message, write:files'
+ EnduserScopeKey = attribute.Key("enduser.scope")
+)
+
+// EnduserID returns an attribute KeyValue conforming to the "enduser.id"
+// semantic conventions. It represents the username or client_id extracted from
+// the access token or
+// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in
+// the inbound request from outside the system.
+func EnduserID(val string) attribute.KeyValue {
+ return EnduserIDKey.String(val)
+}
+
+// EnduserRole returns an attribute KeyValue conforming to the
+// "enduser.role" semantic conventions. It represents the actual/assumed role
+// the client is making the request under extracted from token or application
+// security context.
+func EnduserRole(val string) attribute.KeyValue {
+ return EnduserRoleKey.String(val)
+}
+
+// EnduserScope returns an attribute KeyValue conforming to the
+// "enduser.scope" semantic conventions. It represents the scopes or granted
+// authorities the client currently possesses extracted from token or
+// application security context. The value would come from the scope associated
+// with an [OAuth 2.0 Access
+// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+// value in a [SAML 2.0
+// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+func EnduserScope(val string) attribute.KeyValue {
+ return EnduserScopeKey.String(val)
+}
+
+// These attributes allow to report this unit of code and therefore to provide
+// more context about the span.
+const (
+ // CodeColumnKey is the attribute Key conforming to the "code.column"
+ // semantic conventions. It represents the column number in `code.filepath`
+ // best representing the operation. It SHOULD point within the code unit
+ // named in `code.function`.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 16
+ CodeColumnKey = attribute.Key("code.column")
+
+ // CodeFilepathKey is the attribute Key conforming to the "code.filepath"
+ // semantic conventions. It represents the source code file name that
+ // identifies the code unit as uniquely as possible (preferably an absolute
+ // file path).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/usr/local/MyApplication/content_root/app/index.php'
+ CodeFilepathKey = attribute.Key("code.filepath")
+
+ // CodeFunctionKey is the attribute Key conforming to the "code.function"
+ // semantic conventions. It represents the method or function name, or
+ // equivalent (usually rightmost part of the code unit's name).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'serveRequest'
+ CodeFunctionKey = attribute.Key("code.function")
+
+ // CodeLineNumberKey is the attribute Key conforming to the "code.lineno"
+ // semantic conventions. It represents the line number in `code.filepath`
+ // best representing the operation. It SHOULD point within the code unit
+ // named in `code.function`.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 42
+ CodeLineNumberKey = attribute.Key("code.lineno")
+
+ // CodeNamespaceKey is the attribute Key conforming to the "code.namespace"
+ // semantic conventions. It represents the "namespace" within which
+ // `code.function` is defined. Usually the qualified class or module name,
+ // such that `code.namespace` + some separator + `code.function` form a
+ // unique identifier for the code unit.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'com.example.MyHTTPService'
+ CodeNamespaceKey = attribute.Key("code.namespace")
+
+ // CodeStacktraceKey is the attribute Key conforming to the
+ // "code.stacktrace" semantic conventions. It represents a stacktrace as a
+ // string in the natural representation for the language runtime. The
+ // representation is to be determined and documented by each language SIG.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'at
+ // com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
+ // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
+ // 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
+ CodeStacktraceKey = attribute.Key("code.stacktrace")
+)
+
+// CodeColumn returns an attribute KeyValue conforming to the "code.column"
+// semantic conventions. It represents the column number in `code.filepath`
+// best representing the operation. It SHOULD point within the code unit named
+// in `code.function`.
+func CodeColumn(val int) attribute.KeyValue {
+ return CodeColumnKey.Int(val)
+}
+
+// CodeFilepath returns an attribute KeyValue conforming to the
+// "code.filepath" semantic conventions. It represents the source code file
+// name that identifies the code unit as uniquely as possible (preferably an
+// absolute file path).
+func CodeFilepath(val string) attribute.KeyValue {
+ return CodeFilepathKey.String(val)
+}
+
+// CodeFunction returns an attribute KeyValue conforming to the
+// "code.function" semantic conventions. It represents the method or function
+// name, or equivalent (usually rightmost part of the code unit's name).
+func CodeFunction(val string) attribute.KeyValue {
+ return CodeFunctionKey.String(val)
+}
+
+// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno"
+// semantic conventions. It represents the line number in `code.filepath` best
+// representing the operation. It SHOULD point within the code unit named in
+// `code.function`.
+func CodeLineNumber(val int) attribute.KeyValue {
+ return CodeLineNumberKey.Int(val)
+}
+
+// CodeNamespace returns an attribute KeyValue conforming to the
+// "code.namespace" semantic conventions. It represents the "namespace" within
+// which `code.function` is defined. Usually the qualified class or module
+// name, such that `code.namespace` + some separator + `code.function` form a
+// unique identifier for the code unit.
+func CodeNamespace(val string) attribute.KeyValue {
+ return CodeNamespaceKey.String(val)
+}
+
+// CodeStacktrace returns an attribute KeyValue conforming to the
+// "code.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func CodeStacktrace(val string) attribute.KeyValue {
+ return CodeStacktraceKey.String(val)
+}
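+
+// A sketch of annotating a span with its code location (illustrative only;
+// assumes a trace.Span from go.opentelemetry.io/otel/trace):
+//
+//	span.SetAttributes(
+//		CodeNamespace("com.example.MyHTTPService"),
+//		CodeFunction("serveRequest"),
+//		CodeFilepath("/usr/local/MyApplication/content_root/app/index.php"),
+//		CodeLineNumber(42),
+//	)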
+
+// These attributes may be used for any operation to store information about a
+// thread that started a span.
+const (
+ // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
+ // conventions. It represents the current "managed" thread ID (as opposed
+ // to OS thread ID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 42
+ ThreadIDKey = attribute.Key("thread.id")
+
+ // ThreadNameKey is the attribute Key conforming to the "thread.name"
+ // semantic conventions. It represents the current thread name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'main'
+ ThreadNameKey = attribute.Key("thread.name")
+)
+
+// ThreadID returns an attribute KeyValue conforming to the "thread.id"
+// semantic conventions. It represents the current "managed" thread ID (as
+// opposed to OS thread ID).
+func ThreadID(val int) attribute.KeyValue {
+ return ThreadIDKey.Int(val)
+}
+
+// ThreadName returns an attribute KeyValue conforming to the "thread.name"
+// semantic conventions. It represents the current thread name.
+func ThreadName(val string) attribute.KeyValue {
+ return ThreadNameKey.String(val)
+}
+
+// Span attributes used by AWS Lambda (in addition to general `faas`
+// attributes).
+const (
+ // AWSLambdaInvokedARNKey is the attribute Key conforming to the
+ // "aws.lambda.invoked_arn" semantic conventions. It represents the full
+ // invoked ARN as provided on the `Context` passed to the function
+ // (`Lambda-Runtime-Invoked-Function-ARN` header on the
+	// `/runtime/invocation/next` endpoint, where applicable).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
+ // Note: This may be different from `cloud.resource_id` if an alias is
+ // involved.
+ AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
+)
+
+// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
+// "aws.lambda.invoked_arn" semantic conventions. It represents the full
+// invoked ARN as provided on the `Context` passed to the function
+// (`Lambda-Runtime-Invoked-Function-ARN` header on the
+// `/runtime/invocation/next` endpoint, where applicable).
+func AWSLambdaInvokedARN(val string) attribute.KeyValue {
+ return AWSLambdaInvokedARNKey.String(val)
+}
+
+// Attributes for CloudEvents. CloudEvents is a specification on how to define
+// event data in a standard way. These attributes can be attached to spans when
+// performing operations with CloudEvents, regardless of the protocol being
+// used.
+const (
+ // CloudeventsEventIDKey is the attribute Key conforming to the
+ // "cloudevents.event_id" semantic conventions. It represents the
+ // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
+	// that uniquely identifies the event.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
+ CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
+
+ // CloudeventsEventSourceKey is the attribute Key conforming to the
+ // "cloudevents.event_source" semantic conventions. It represents the
+ // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
+	// that identifies the context in which an event happened.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Examples: 'https://github.com/cloudevents',
+ // '/cloudevents/spec/pull/123', 'my-service'
+ CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
+
+ // CloudeventsEventSpecVersionKey is the attribute Key conforming to the
+ // "cloudevents.event_spec_version" semantic conventions. It represents the
+ // [version of the CloudEvents
+ // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+ // which the event uses.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1.0'
+ CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
+
+ // CloudeventsEventSubjectKey is the attribute Key conforming to the
+ // "cloudevents.event_subject" semantic conventions. It represents the
+ // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+ // of the event in the context of the event producer (identified by
+ // source).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'mynewfile.jpg'
+ CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
+
+ // CloudeventsEventTypeKey is the attribute Key conforming to the
+ // "cloudevents.event_type" semantic conventions. It represents the
+ // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
+	// that contains a value describing the type of event related to the originating
+ // occurrence.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'com.github.pull_request.opened',
+ // 'com.example.object.deleted.v2'
+ CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
+)
+
+// CloudeventsEventID returns an attribute KeyValue conforming to the
+// "cloudevents.event_id" semantic conventions. It represents the
+// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
+// that uniquely identifies the event.
+func CloudeventsEventID(val string) attribute.KeyValue {
+ return CloudeventsEventIDKey.String(val)
+}
+
+// CloudeventsEventSource returns an attribute KeyValue conforming to the
+// "cloudevents.event_source" semantic conventions. It represents the
+// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
+// that identifies the context in which an event happened.
+func CloudeventsEventSource(val string) attribute.KeyValue {
+ return CloudeventsEventSourceKey.String(val)
+}
+
+// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to
+// the "cloudevents.event_spec_version" semantic conventions. It represents the
+// [version of the CloudEvents
+// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+// which the event uses.
+func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
+ return CloudeventsEventSpecVersionKey.String(val)
+}
+
+// CloudeventsEventSubject returns an attribute KeyValue conforming to the
+// "cloudevents.event_subject" semantic conventions. It represents the
+// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+// of the event in the context of the event producer (identified by source).
+func CloudeventsEventSubject(val string) attribute.KeyValue {
+ return CloudeventsEventSubjectKey.String(val)
+}
+
+// CloudeventsEventType returns an attribute KeyValue conforming to the
+// "cloudevents.event_type" semantic conventions. It represents the
+// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
+// that contains a value describing the type of event related to the originating
+// occurrence.
+func CloudeventsEventType(val string) attribute.KeyValue {
+ return CloudeventsEventTypeKey.String(val)
+}
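+
+// A sketch of recording a CloudEvents operation on a span (illustrative only;
+// assumes a trace.Span from go.opentelemetry.io/otel/trace):
+//
+//	span.SetAttributes(
+//		CloudeventsEventID("123e4567-e89b-12d3-a456-426614174000"),
+//		CloudeventsEventSource("my-service"),
+//		CloudeventsEventType("com.example.object.deleted.v2"),
+//	)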
+
+// Semantic conventions for the OpenTracing Shim
+const (
+ // OpentracingRefTypeKey is the attribute Key conforming to the
+ // "opentracing.ref_type" semantic conventions. It represents the
+ // parent-child Reference type
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: The causal relationship between a child Span and a parent Span.
+ OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
+)
+
+var (
+ // The parent Span depends on the child Span in some capacity
+ OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
+ // The parent Span doesn't depend in any way on the result of the child Span
+ OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
+)
+
+// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's
+// concepts.
+const (
+ // OTelStatusCodeKey is the attribute Key conforming to the
+ // "otel.status_code" semantic conventions. It represents the name of the
+ // code, either "OK" or "ERROR". MUST NOT be set if the status code is
+ // UNSET.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ OTelStatusCodeKey = attribute.Key("otel.status_code")
+
+ // OTelStatusDescriptionKey is the attribute Key conforming to the
+ // "otel.status_description" semantic conventions. It represents the
+ // description of the Status if it has a value, otherwise not set.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'resource not found'
+ OTelStatusDescriptionKey = attribute.Key("otel.status_description")
+)
+
+var (
+ // The operation has been validated by an Application developer or Operator to have completed successfully
+ OTelStatusCodeOk = OTelStatusCodeKey.String("OK")
+ // The operation contains an error
+ OTelStatusCodeError = OTelStatusCodeKey.String("ERROR")
+)
+
+// OTelStatusDescription returns an attribute KeyValue conforming to the
+// "otel.status_description" semantic conventions. It represents the
+// description of the Status if it has a value, otherwise not set.
+func OTelStatusDescription(val string) attribute.KeyValue {
+ return OTelStatusDescriptionKey.String(val)
+}
+
+// This semantic convention describes an instance of a function that runs
+// without provisioning or managing of servers (also known as serverless
+// functions or Function as a Service (FaaS)) with spans.
+const (
+ // FaaSInvocationIDKey is the attribute Key conforming to the
+ // "faas.invocation_id" semantic conventions. It represents the invocation
+ // ID of the current function invocation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
+ FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
+)
+
+// FaaSInvocationID returns an attribute KeyValue conforming to the
+// "faas.invocation_id" semantic conventions. It represents the invocation ID
+// of the current function invocation.
+func FaaSInvocationID(val string) attribute.KeyValue {
+ return FaaSInvocationIDKey.String(val)
+}
+
+// Semantic Convention for FaaS triggered as a response to some data source
+// operation such as a database or filesystem read/write.
+const (
+ // FaaSDocumentCollectionKey is the attribute Key conforming to the
+ // "faas.document.collection" semantic conventions. It represents the name
+ // of the source on which the triggering operation was performed. For
+ // example, in Cloud Storage or S3 corresponds to the bucket name, and in
+ // Cosmos DB to the database name.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: experimental
+ // Examples: 'myBucketName', 'myDBName'
+ FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
+
+ // FaaSDocumentNameKey is the attribute Key conforming to the
+ // "faas.document.name" semantic conventions. It represents the document
+ // name/table subjected to the operation. For example, in Cloud Storage or
+ // S3 is the name of the file, and in Cosmos DB the table name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myFile.txt', 'myTableName'
+ FaaSDocumentNameKey = attribute.Key("faas.document.name")
+
+ // FaaSDocumentOperationKey is the attribute Key conforming to the
+ // "faas.document.operation" semantic conventions. It represents the
+	// type of the operation that was performed on the data.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: experimental
+ FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
+
+ // FaaSDocumentTimeKey is the attribute Key conforming to the
+ // "faas.document.time" semantic conventions. It represents a string
+ // containing the time when the data was accessed in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSDocumentTimeKey = attribute.Key("faas.document.time")
+)
+
+var (
+ // When a new object is created
+ FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
+ // When an object is modified
+ FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
+ // When an object is deleted
+ FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
+)
+
+// FaaSDocumentCollection returns an attribute KeyValue conforming to the
+// "faas.document.collection" semantic conventions. It represents the name of
+// the source on which the triggering operation was performed. For example, in
+// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the
+// database name.
+func FaaSDocumentCollection(val string) attribute.KeyValue {
+ return FaaSDocumentCollectionKey.String(val)
+}
+
+// FaaSDocumentName returns an attribute KeyValue conforming to the
+// "faas.document.name" semantic conventions. It represents the document
+// name/table subjected to the operation. For example, in Cloud Storage or S3
+// is the name of the file, and in Cosmos DB the table name.
+func FaaSDocumentName(val string) attribute.KeyValue {
+ return FaaSDocumentNameKey.String(val)
+}
+
+// FaaSDocumentTime returns an attribute KeyValue conforming to the
+// "faas.document.time" semantic conventions. It represents a string containing
+// the time when the data was accessed in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSDocumentTime(val string) attribute.KeyValue {
+ return FaaSDocumentTimeKey.String(val)
+}
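+
+// The operation type is reported with the enum values declared above rather
+// than a helper function. A sketch (illustrative only; assumes a trace.Span
+// from go.opentelemetry.io/otel/trace):
+//
+//	span.SetAttributes(
+//		FaaSDocumentCollection("myBucketName"),
+//		FaaSDocumentName("myFile.txt"),
+//		FaaSDocumentOperationInsert,
+//	)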
+
+// Semantic Convention for FaaS scheduled to be executed regularly.
+const (
+ // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
+ // conventions. It represents a string containing the schedule period as
+ // [Cron
+ // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '0/5 * * * ? *'
+ FaaSCronKey = attribute.Key("faas.cron")
+
+ // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
+ // conventions. It represents a string containing the function invocation
+ // time in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSTimeKey = attribute.Key("faas.time")
+)
+
+// FaaSCron returns an attribute KeyValue conforming to the "faas.cron"
+// semantic conventions. It represents a string containing the schedule period
+// as [Cron
+// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+func FaaSCron(val string) attribute.KeyValue {
+ return FaaSCronKey.String(val)
+}
+
+// FaaSTime returns an attribute KeyValue conforming to the "faas.time"
+// semantic conventions. It represents a string containing the function
+// invocation time in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSTime(val string) attribute.KeyValue {
+ return FaaSTimeKey.String(val)
+}
+
+// Contains additional attributes for incoming FaaS spans.
+const (
+ // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
+ // semantic conventions. It represents a boolean that is true if the
+ // serverless function is executed for the first time (aka cold-start).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ FaaSColdstartKey = attribute.Key("faas.coldstart")
+)
+
+// FaaSColdstart returns an attribute KeyValue conforming to the
+// "faas.coldstart" semantic conventions. It represents a boolean that is true
+// if the serverless function is executed for the first time (aka cold-start).
+func FaaSColdstart(val bool) attribute.KeyValue {
+ return FaaSColdstartKey.Bool(val)
+}
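+
+// A sketch for an incoming FaaS invocation span (illustrative only; assumes a
+// trace.Span from go.opentelemetry.io/otel/trace):
+//
+//	span.SetAttributes(
+//		FaaSInvocationID("af9d5aa4-a685-4c5f-a22b-444f80b3cc28"),
+//		FaaSColdstart(true),
+//	)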
+
+// The `aws` conventions apply to operations using the AWS SDK. They map
+// request or response parameters in AWS SDK API calls to attributes on a Span.
+// The conventions have been collected over time based on feedback from AWS
+// users of tracing and will continue to evolve as new interesting conventions
+// are found.
+// Some descriptions are also provided for populating general OpenTelemetry
+// semantic conventions based on these APIs.
+const (
+ // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id"
+ // semantic conventions. It represents the AWS request ID as returned in
+ // the response headers `x-amz-request-id` or `x-amz-requestid`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ'
+ AWSRequestIDKey = attribute.Key("aws.request_id")
+)
+
+// AWSRequestID returns an attribute KeyValue conforming to the
+// "aws.request_id" semantic conventions. It represents the AWS request ID as
+// returned in the response headers `x-amz-request-id` or `x-amz-requestid`.
+func AWSRequestID(val string) attribute.KeyValue {
+ return AWSRequestIDKey.String(val)
+}
+
+// Attributes that exist for multiple DynamoDB request types.
+const (
+ // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the
+ // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+ // value of the `AttributesToGet` request parameter.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'lives', 'id'
+ AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get")
+
+ // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the
+ // "aws.dynamodb.consistent_read" semantic conventions. It represents the
+ // value of the `ConsistentRead` request parameter.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read")
+
+ // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the
+ // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+ // JSON-serialized value of each item in the `ConsumedCapacity` response
+ // field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": {
+ // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" :
+ // { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table":
+ // { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number }, "TableName": "string",
+ // "WriteCapacityUnits": number }'
+ AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity")
+
+ // AWSDynamoDBIndexNameKey is the attribute Key conforming to the
+ // "aws.dynamodb.index_name" semantic conventions. It represents the value
+ // of the `IndexName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'name_to_group'
+ AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name")
+
+ // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to
+ // the "aws.dynamodb.item_collection_metrics" semantic conventions. It
+ // represents the JSON-serialized value of the `ItemCollectionMetrics`
+ // response field.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B":
+ // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": {
+ // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ],
+ // "NULL": boolean, "S": "string", "SS": [ "string" ] } },
+ // "SizeEstimateRangeGB": [ number ] } ] }'
+ AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics")
+
+ // AWSDynamoDBLimitKey is the attribute Key conforming to the
+ // "aws.dynamodb.limit" semantic conventions. It represents the value of
+ // the `Limit` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 10
+ AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit")
+
+ // AWSDynamoDBProjectionKey is the attribute Key conforming to the
+ // "aws.dynamodb.projection" semantic conventions. It represents the value
+ // of the `ProjectionExpression` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Title', 'Title, Price, Color', 'Title, Description,
+ // RelatedItems, ProductReviews'
+ AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection")
+
+ // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to
+ // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It
+ // represents the value of the `ProvisionedThroughput.ReadCapacityUnits`
+ // request parameter.
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity")
+
+ // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming
+ // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions.
+ // It represents the value of the
+ // `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity")
+
+ // AWSDynamoDBSelectKey is the attribute Key conforming to the
+ // "aws.dynamodb.select" semantic conventions. It represents the value of
+ // the `Select` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'ALL_ATTRIBUTES', 'COUNT'
+ AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select")
+
+ // AWSDynamoDBTableNamesKey is the attribute Key conforming to the
+ // "aws.dynamodb.table_names" semantic conventions. It represents the keys
+ // in the `RequestItems` object field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Users', 'Cats'
+ AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names")
+)
+
+// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to
+// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+// value of the `AttributesToGet` request parameter.
+func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributesToGetKey.StringSlice(val)
+}
+
+// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the
+// "aws.dynamodb.consistent_read" semantic conventions. It represents the value
+// of the `ConsistentRead` request parameter.
+func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue {
+ return AWSDynamoDBConsistentReadKey.Bool(val)
+}
+
+// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to
+// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+// JSON-serialized value of each item in the `ConsumedCapacity` response field.
+func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue {
+ return AWSDynamoDBConsumedCapacityKey.StringSlice(val)
+}
+
+// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the
+// "aws.dynamodb.index_name" semantic conventions. It represents the value of
+// the `IndexName` request parameter.
+func AWSDynamoDBIndexName(val string) attribute.KeyValue {
+ return AWSDynamoDBIndexNameKey.String(val)
+}
+
+// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming
+// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It
+// represents the JSON-serialized value of the `ItemCollectionMetrics` response
+// field.
+func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue {
+ return AWSDynamoDBItemCollectionMetricsKey.String(val)
+}
+
+// AWSDynamoDBLimit returns an attribute KeyValue conforming to the
+// "aws.dynamodb.limit" semantic conventions. It represents the value of the
+// `Limit` request parameter.
+func AWSDynamoDBLimit(val int) attribute.KeyValue {
+ return AWSDynamoDBLimitKey.Int(val)
+}
+
+// AWSDynamoDBProjection returns an attribute KeyValue conforming to the
+// "aws.dynamodb.projection" semantic conventions. It represents the value of
+// the `ProjectionExpression` request parameter.
+func AWSDynamoDBProjection(val string) attribute.KeyValue {
+ return AWSDynamoDBProjectionKey.String(val)
+}
+
+// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue
+// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic
+// conventions. It represents the value of the
+// `ProvisionedThroughput.ReadCapacityUnits` request parameter.
+func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue {
+ return AWSDynamoDBProvisionedReadCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue
+// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic
+// conventions. It represents the value of the
+// `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue {
+ return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBSelect returns an attribute KeyValue conforming to the
+// "aws.dynamodb.select" semantic conventions. It represents the value of the
+// `Select` request parameter.
+func AWSDynamoDBSelect(val string) attribute.KeyValue {
+ return AWSDynamoDBSelectKey.String(val)
+}
+
+// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_names" semantic conventions. It represents the keys in
+// the `RequestItems` object field.
+func AWSDynamoDBTableNames(val ...string) attribute.KeyValue {
+ return AWSDynamoDBTableNamesKey.StringSlice(val)
+}
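+
+// A sketch of annotating a DynamoDB client span (illustrative only; assumes a
+// trace.Span from go.opentelemetry.io/otel/trace):
+//
+//	span.SetAttributes(
+//		AWSDynamoDBTableNames("Users"),
+//		AWSDynamoDBConsistentRead(true),
+//		AWSDynamoDBLimit(10),
+//	)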
+
+// DynamoDB.CreateTable
+const (
+ // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to
+ // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It
+ // represents the JSON-serialized value of each item of the
+ // `GlobalSecondaryIndexes` request field
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName":
+ // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
+ // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
+ // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
+
+ // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to
+ // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+ // represents the JSON-serialized value of each item of the
+ // `LocalSecondaryIndexes` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "IndexARN": "string", "IndexName": "string",
+ // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }'
+ AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
+)
+
+// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_indexes" semantic
+// conventions. It represents the JSON-serialized value of each item of the
+// `GlobalSecondaryIndexes` request field
+func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
+}
+
+// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming
+// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+// represents the JSON-serialized value of each item of the
+// `LocalSecondaryIndexes` request field.
+func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
+}
+
+// DynamoDB.ListTables
+const (
+ // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
+ // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents
+ // the value of the `ExclusiveStartTableName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Users', 'CatsTable'
+ AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
+
+ // AWSDynamoDBTableCountKey is the attribute Key conforming to the
+	// "aws.dynamodb.table_count" semantic conventions. It represents the
+ // number of items in the `TableNames` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 20
+ AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
+)
+
+// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming
+// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It
+// represents the value of the `ExclusiveStartTableName` request parameter.
+func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
+ return AWSDynamoDBExclusiveStartTableKey.String(val)
+}
+
+// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_count" semantic conventions. It represents the the
+// number of items in the `TableNames` response parameter.
+func AWSDynamoDBTableCount(val int) attribute.KeyValue {
+ return AWSDynamoDBTableCountKey.Int(val)
+}
+
+// DynamoDB.Query
+const (
+ // AWSDynamoDBScanForwardKey is the attribute Key conforming to the
+ // "aws.dynamodb.scan_forward" semantic conventions. It represents the
+ // value of the `ScanIndexForward` request parameter.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
+)
+
+// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of
+// the `ScanIndexForward` request parameter.
+func AWSDynamoDBScanForward(val bool) attribute.KeyValue {
+ return AWSDynamoDBScanForwardKey.Bool(val)
+}
+
+// DynamoDB.Scan
+const (
+ // AWSDynamoDBCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.count" semantic conventions. It represents the value of
+ // the `Count` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 10
+ AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
+
+ // AWSDynamoDBScannedCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.scanned_count" semantic conventions. It represents the
+ // value of the `ScannedCount` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 50
+ AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
+
+ // AWSDynamoDBSegmentKey is the attribute Key conforming to the
+ // "aws.dynamodb.segment" semantic conventions. It represents the value of
+ // the `Segment` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 10
+ AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
+
+ // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the
+ // "aws.dynamodb.total_segments" semantic conventions. It represents the
+ // value of the `TotalSegments` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 100
+ AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
+)
+
+// AWSDynamoDBCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.count" semantic conventions. It represents the value of the
+// `Count` response parameter.
+func AWSDynamoDBCount(val int) attribute.KeyValue {
+ return AWSDynamoDBCountKey.Int(val)
+}
+
+// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scanned_count" semantic conventions. It represents the value
+// of the `ScannedCount` response parameter.
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
+ return AWSDynamoDBScannedCountKey.Int(val)
+}
+
+// AWSDynamoDBSegment returns an attribute KeyValue conforming to the
+// "aws.dynamodb.segment" semantic conventions. It represents the value of the
+// `Segment` request parameter.
+func AWSDynamoDBSegment(val int) attribute.KeyValue {
+ return AWSDynamoDBSegmentKey.Int(val)
+}
+
+// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the
+// "aws.dynamodb.total_segments" semantic conventions. It represents the value
+// of the `TotalSegments` request parameter.
+func AWSDynamoDBTotalSegments(val int) attribute.KeyValue {
+ return AWSDynamoDBTotalSegmentsKey.Int(val)
+}
+
+// DynamoDB.UpdateTable
+const (
+ // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to
+ // the "aws.dynamodb.attribute_definitions" semantic conventions. It
+ // represents the JSON-serialized value of each item in the
+ // `AttributeDefinitions` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
+ AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
+
+ // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key
+ // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+ // conventions. It represents the JSON-serialized value of each item in the
+ // `GlobalSecondaryIndexUpdates` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
+ // "ProvisionedThroughput": { "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
+)
+
+// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming
+// to the "aws.dynamodb.attribute_definitions" semantic conventions. It
+// represents the JSON-serialized value of each item in the
+// `AttributeDefinitions` request field.
+func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+// conventions. It represents the JSON-serialized value of each item in the
+// `GlobalSecondaryIndexUpdates` request field.
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
+}
+
+// Attributes that exist for S3 request types.
+const (
+ // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket"
+ // semantic conventions. It represents the S3 bucket name the request
+ // refers to. Corresponds to the `--bucket` parameter of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // operations.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'some-bucket-name'
+ // Note: The `bucket` attribute is applicable to all S3 operations that
+ // reference a bucket, i.e. that require the bucket name as a mandatory
+ // parameter.
+ // This applies to almost all S3 operations except `list-buckets`.
+ AWSS3BucketKey = attribute.Key("aws.s3.bucket")
+
+ // AWSS3CopySourceKey is the attribute Key conforming to the
+ // "aws.s3.copy_source" semantic conventions. It represents the source
+ // object (in the form `bucket`/`key`) for the copy operation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'someFile.yml'
+ // Note: The `copy_source` attribute applies to S3 copy operations and
+ // corresponds to the `--copy-source` parameter
+ // of the [copy-object operation within the S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html).
+ // This applies in particular to the following operations:
+ //
+ // -
+ // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
+ // -
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source")
+
+ // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete"
+ // semantic conventions. It represents the delete request container that
+ // specifies the objects to be deleted.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean'
+ // Note: The `delete` attribute is only applicable to the
+ // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
+ // operation.
+ // The `delete` attribute corresponds to the `--delete` parameter of the
+ // [delete-objects operation within the S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html).
+ AWSS3DeleteKey = attribute.Key("aws.s3.delete")
+
+ // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic
+ // conventions. It represents the S3 object key the request refers to.
+ // Corresponds to the `--key` parameter of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // operations.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'someFile.yml'
+ // Note: The `key` attribute is applicable to all object-related S3
+ // operations, i.e. that require the object key as a mandatory parameter.
+ // This applies in particular to the following operations:
+ //
+ // -
+ // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
+ // -
+ // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
+ // -
+ // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html)
+ // -
+ // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html)
+ // -
+ // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html)
+ // -
+ // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html)
+ // -
+ // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html)
+ // -
+ // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
+ // -
+ // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
+ // -
+ // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html)
+ // -
+ // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
+ // -
+ // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+ // -
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ AWSS3KeyKey = attribute.Key("aws.s3.key")
+
+ // AWSS3PartNumberKey is the attribute Key conforming to the
+ // "aws.s3.part_number" semantic conventions. It represents the part number
+ // of the part being uploaded in a multipart-upload operation. This is a
+ // positive integer between 1 and 10,000.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 3456
+ // Note: The `part_number` attribute is only applicable to the
+ // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+ // and
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ // operations.
+ // The `part_number` attribute corresponds to the `--part-number` parameter
+ // of the
+ // [upload-part operation within the S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html).
+ AWSS3PartNumberKey = attribute.Key("aws.s3.part_number")
+
+ // AWSS3UploadIDKey is the attribute Key conforming to the
+ // "aws.s3.upload_id" semantic conventions. It represents the upload ID
+ // that identifies the multipart upload.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ'
+ // Note: The `upload_id` attribute applies to S3 multipart-upload
+ // operations and corresponds to the `--upload-id` parameter
+ // of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // multipart operations.
+ // This applies in particular to the following operations:
+ //
+ // -
+ // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
+ // -
+ // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
+ // -
+ // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
+ // -
+ // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+ // -
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id")
+)
+
+// AWSS3Bucket returns an attribute KeyValue conforming to the
+// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the
+// request refers to. Corresponds to the `--bucket` parameter of the [S3
+// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+// operations.
+func AWSS3Bucket(val string) attribute.KeyValue {
+ return AWSS3BucketKey.String(val)
+}
+
+// AWSS3CopySource returns an attribute KeyValue conforming to the
+// "aws.s3.copy_source" semantic conventions. It represents the source object
+// (in the form `bucket`/`key`) for the copy operation.
+func AWSS3CopySource(val string) attribute.KeyValue {
+ return AWSS3CopySourceKey.String(val)
+}
+
+// AWSS3Delete returns an attribute KeyValue conforming to the
+// "aws.s3.delete" semantic conventions. It represents the delete request
+// container that specifies the objects to be deleted.
+func AWSS3Delete(val string) attribute.KeyValue {
+ return AWSS3DeleteKey.String(val)
+}
+
+// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key"
+// semantic conventions. It represents the S3 object key the request refers to.
+// Corresponds to the `--key` parameter of the [S3
+// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+// operations.
+func AWSS3Key(val string) attribute.KeyValue {
+ return AWSS3KeyKey.String(val)
+}
+
+// AWSS3PartNumber returns an attribute KeyValue conforming to the
+// "aws.s3.part_number" semantic conventions. It represents the part number of
+// the part being uploaded in a multipart-upload operation. This is a positive
+// integer between 1 and 10,000.
+func AWSS3PartNumber(val int) attribute.KeyValue {
+ return AWSS3PartNumberKey.Int(val)
+}
+
+// AWSS3UploadID returns an attribute KeyValue conforming to the
+// "aws.s3.upload_id" semantic conventions. It represents the upload ID that
+// identifies the multipart upload.
+func AWSS3UploadID(val string) attribute.KeyValue {
+ return AWSS3UploadIDKey.String(val)
+}
+
+// Semantic conventions to apply when instrumenting the GraphQL implementation.
+// They map GraphQL operations to attributes on a Span.
+const (
+ // GraphqlDocumentKey is the attribute Key conforming to the
+ // "graphql.document" semantic conventions. It represents the GraphQL
+ // document being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'query findBookByID { bookByID(id: ?) { name } }'
+ // Note: The value may be sanitized to exclude sensitive information.
+ GraphqlDocumentKey = attribute.Key("graphql.document")
+
+ // GraphqlOperationNameKey is the attribute Key conforming to the
+ // "graphql.operation.name" semantic conventions. It represents the name of
+ // the operation being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'findBookByID'
+ GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
+
+ // GraphqlOperationTypeKey is the attribute Key conforming to the
+ // "graphql.operation.type" semantic conventions. It represents the type of
+ // the operation being executed.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'query', 'mutation', 'subscription'
+ GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
+)
+
+var (
+ // GraphQL query
+ GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query")
+ // GraphQL mutation
+ GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation")
+ // GraphQL subscription
+ GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription")
+)
+
+// GraphqlDocument returns an attribute KeyValue conforming to the
+// "graphql.document" semantic conventions. It represents the GraphQL document
+// being executed.
+func GraphqlDocument(val string) attribute.KeyValue {
+ return GraphqlDocumentKey.String(val)
+}
+
+// GraphqlOperationName returns an attribute KeyValue conforming to the
+// "graphql.operation.name" semantic conventions. It represents the name of the
+// operation being executed.
+func GraphqlOperationName(val string) attribute.KeyValue {
+ return GraphqlOperationNameKey.String(val)
+}
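
The functions above are thin constructors over attribute.Key: they only build the KeyValue, and callers attach it to a span. A minimal sketch of how these helpers might be used (the tracer name, span name, and the semconv import version are illustrative assumptions; only the constructor names come from this file):

// Sketch: attaching the S3 semantic-convention attributes to a span.
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // version path assumed
)

func putObject(ctx context.Context) {
	_, span := otel.Tracer("example").Start(ctx, "S3.PutObject")
	defer span.End()

	span.SetAttributes(
		semconv.AWSS3Bucket("some-bucket-name"),
		semconv.AWSS3Key("someFile.yml"),
		semconv.AWSS3UploadID("dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ"),
	)
}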
diff --git a/vendor/golang.org/x/crypto/sha3/shake.go b/vendor/golang.org/x/crypto/sha3/shake.go
index 1ea9275b8b7ac..a01ef43577df1 100644
--- a/vendor/golang.org/x/crypto/sha3/shake.go
+++ b/vendor/golang.org/x/crypto/sha3/shake.go
@@ -85,9 +85,9 @@ func newCShake(N, S []byte, rate, outputLen int, dsbyte byte) ShakeHash {
// leftEncode returns max 9 bytes
c.initBlock = make([]byte, 0, 9*2+len(N)+len(S))
- c.initBlock = append(c.initBlock, leftEncode(uint64(len(N)*8))...)
+ c.initBlock = append(c.initBlock, leftEncode(uint64(len(N))*8)...)
c.initBlock = append(c.initBlock, N...)
- c.initBlock = append(c.initBlock, leftEncode(uint64(len(S)*8))...)
+ c.initBlock = append(c.initBlock, leftEncode(uint64(len(S))*8)...)
c.initBlock = append(c.initBlock, S...)
c.Write(bytepad(c.initBlock, c.rate))
return &c
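
The reordering above is not cosmetic: `uint64(len(N)*8)` multiplies in int before widening, which can wrap on 32-bit platforms for a sufficiently long function-name or customization string, while `uint64(len(N))*8` widens first and cannot. A tiny self-contained illustration (the length is exaggerated to force the wrap):

// Sketch of the overflow the sha3 fix avoids. int32 stands in for a
// 32-bit platform's int; 1<<29 bytes * 8 bits is exactly 2^32.
package main

import "fmt"

func main() {
	n := int32(1 << 29)

	overflowProne := uint64(n * 8) // wraps to 0 before the conversion
	correct := uint64(n) * 8       // widened first: exact

	fmt.Println(overflowProne, correct) // prints: 0 4294967296
}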
diff --git a/vendor/golang.org/x/net/http2/config.go b/vendor/golang.org/x/net/http2/config.go
new file mode 100644
index 0000000000000..de58dfb8dc492
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/config.go
@@ -0,0 +1,122 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "math"
+ "net/http"
+ "time"
+)
+
+// http2Config is a package-internal version of net/http.HTTP2Config.
+//
+// http.HTTP2Config was added in Go 1.24.
+// When running with a version of net/http that includes HTTP2Config,
+// we merge the configuration with the fields in Transport or Server
+// to produce an http2Config.
+//
+// Zero valued fields in http2Config are interpreted as in the
+// net/http.HTTP2Config documentation.
+//
+// Precedence order for reconciling configurations is:
+//
+// - Use the net/http.{Server,Transport}.HTTP2Config value, when non-zero.
+// - Otherwise use the http2.{Server,Transport} value.
+// - If the resulting value is zero or out of range, use a default.
+type http2Config struct {
+ MaxConcurrentStreams uint32
+ MaxDecoderHeaderTableSize uint32
+ MaxEncoderHeaderTableSize uint32
+ MaxReadFrameSize uint32
+ MaxUploadBufferPerConnection int32
+ MaxUploadBufferPerStream int32
+ SendPingTimeout time.Duration
+ PingTimeout time.Duration
+ WriteByteTimeout time.Duration
+ PermitProhibitedCipherSuites bool
+ CountError func(errType string)
+}
+
+// configFromServer merges configuration settings from
+// net/http.Server.HTTP2Config and http2.Server.
+func configFromServer(h1 *http.Server, h2 *Server) http2Config {
+ conf := http2Config{
+ MaxConcurrentStreams: h2.MaxConcurrentStreams,
+ MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize,
+ MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize,
+ MaxReadFrameSize: h2.MaxReadFrameSize,
+ MaxUploadBufferPerConnection: h2.MaxUploadBufferPerConnection,
+ MaxUploadBufferPerStream: h2.MaxUploadBufferPerStream,
+ SendPingTimeout: h2.ReadIdleTimeout,
+ PingTimeout: h2.PingTimeout,
+ WriteByteTimeout: h2.WriteByteTimeout,
+ PermitProhibitedCipherSuites: h2.PermitProhibitedCipherSuites,
+ CountError: h2.CountError,
+ }
+ fillNetHTTPServerConfig(&conf, h1)
+ setConfigDefaults(&conf, true)
+ return conf
+}
+
+// configFromTransport merges configuration settings from h2 and h2.t1.HTTP2
+// (the net/http Transport).
+func configFromTransport(h2 *Transport) http2Config {
+ conf := http2Config{
+ MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize,
+ MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize,
+ MaxReadFrameSize: h2.MaxReadFrameSize,
+ SendPingTimeout: h2.ReadIdleTimeout,
+ PingTimeout: h2.PingTimeout,
+ WriteByteTimeout: h2.WriteByteTimeout,
+ }
+
+ // Unlike most config fields, where out-of-range values revert to the default,
+ // Transport.MaxReadFrameSize clips.
+ if conf.MaxReadFrameSize < minMaxFrameSize {
+ conf.MaxReadFrameSize = minMaxFrameSize
+ } else if conf.MaxReadFrameSize > maxFrameSize {
+ conf.MaxReadFrameSize = maxFrameSize
+ }
+
+ if h2.t1 != nil {
+ fillNetHTTPTransportConfig(&conf, h2.t1)
+ }
+ setConfigDefaults(&conf, false)
+ return conf
+}
+
+func setDefault[T ~int | ~int32 | ~uint32 | ~int64](v *T, minval, maxval, defval T) {
+ if *v < minval || *v > maxval {
+ *v = defval
+ }
+}
+
+func setConfigDefaults(conf *http2Config, server bool) {
+ setDefault(&conf.MaxConcurrentStreams, 1, math.MaxUint32, defaultMaxStreams)
+ setDefault(&conf.MaxEncoderHeaderTableSize, 1, math.MaxUint32, initialHeaderTableSize)
+ setDefault(&conf.MaxDecoderHeaderTableSize, 1, math.MaxUint32, initialHeaderTableSize)
+ if server {
+ setDefault(&conf.MaxUploadBufferPerConnection, initialWindowSize, math.MaxInt32, 1<<20)
+ } else {
+ setDefault(&conf.MaxUploadBufferPerConnection, initialWindowSize, math.MaxInt32, transportDefaultConnFlow)
+ }
+ if server {
+ setDefault(&conf.MaxUploadBufferPerStream, 1, math.MaxInt32, 1<<20)
+ } else {
+ setDefault(&conf.MaxUploadBufferPerStream, 1, math.MaxInt32, transportDefaultStreamFlow)
+ }
+ setDefault(&conf.MaxReadFrameSize, minMaxFrameSize, maxFrameSize, defaultMaxReadFrameSize)
+ setDefault(&conf.PingTimeout, 1, math.MaxInt64, 15*time.Second)
+}
+
+// adjustHTTP1MaxHeaderSize converts a limit in bytes on the size of an HTTP/1 header
+// to an HTTP/2 MAX_HEADER_LIST_SIZE value.
+func adjustHTTP1MaxHeaderSize(n int64) int64 {
+ // http2's count is in a slightly different unit and includes 32 bytes per pair.
+ // So, take the net/http.Server value and pad it up a bit, assuming 10 headers.
+ const perFieldOverhead = 32 // per http2 spec
+ const typicalHeaders = 10 // conservative
+ return n + typicalHeaders*perFieldOverhead
+}
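
Worked example for adjustHTTP1MaxHeaderSize: a net/http MaxHeaderBytes of 1 MiB becomes 1048576 + 10*32 = 1048896 as an HTTP/2 MAX_HEADER_LIST_SIZE. The sketch below mirrors that padding and the clamp-to-default pattern of setDefault/setConfigDefaults (it reimplements the unexported helpers for illustration; it is not an import of them):

// Sketch mirroring the two helpers defined above.
package main

import "fmt"

func adjustHTTP1MaxHeaderSize(n int64) int64 {
	const perFieldOverhead = 32 // per http2 spec
	const typicalHeaders = 10   // conservative
	return n + typicalHeaders*perFieldOverhead
}

func setDefault[T ~int | ~int32 | ~uint32 | ~int64](v *T, minval, maxval, defval T) {
	if *v < minval || *v > maxval {
		*v = defval
	}
}

func main() {
	fmt.Println(adjustHTTP1MaxHeaderSize(1 << 20)) // 1048896

	var streams uint32 // zero: out of range, so the default applies
	setDefault(&streams, 1, 1<<32-1, 250)
	fmt.Println(streams) // 250
}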
diff --git a/vendor/golang.org/x/net/http2/config_go124.go b/vendor/golang.org/x/net/http2/config_go124.go
new file mode 100644
index 0000000000000..e3784123c81a6
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/config_go124.go
@@ -0,0 +1,61 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.24
+
+package http2
+
+import "net/http"
+
+// fillNetHTTPServerConfig sets fields in conf from srv.HTTP2.
+func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {
+ fillNetHTTPConfig(conf, srv.HTTP2)
+}
+
+// fillNetHTTPTransportConfig sets fields in conf from tr.HTTP2.
+func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {
+ fillNetHTTPConfig(conf, tr.HTTP2)
+}
+
+func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) {
+ if h2 == nil {
+ return
+ }
+ if h2.MaxConcurrentStreams != 0 {
+ conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
+ }
+ if h2.MaxEncoderHeaderTableSize != 0 {
+ conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize)
+ }
+ if h2.MaxDecoderHeaderTableSize != 0 {
+ conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize)
+ }
+ if h2.MaxReadFrameSize != 0 {
+ conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize)
+ }
+ if h2.MaxReceiveBufferPerConnection != 0 {
+ conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection)
+ }
+ if h2.MaxReceiveBufferPerStream != 0 {
+ conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream)
+ }
+ if h2.SendPingTimeout != 0 {
+ conf.SendPingTimeout = h2.SendPingTimeout
+ }
+ if h2.PingTimeout != 0 {
+ conf.PingTimeout = h2.PingTimeout
+ }
+ if h2.WriteByteTimeout != 0 {
+ conf.WriteByteTimeout = h2.WriteByteTimeout
+ }
+ if h2.PermitProhibitedCipherSuites {
+ conf.PermitProhibitedCipherSuites = true
+ }
+ if h2.CountError != nil {
+ conf.CountError = h2.CountError
+ }
+}
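
When built with Go 1.24, the merge above gives net/http's new HTTP2Config the first word. A hedged sketch of what supplying it might look like (the field names are exactly those read by fillNetHTTPConfig; the address and values are illustrative):

//go:build go1.24

// Sketch (requires Go 1.24): non-zero fields here override the
// corresponding http2.Server settings, per the precedence rules above.
package main

import (
	"net/http"
	"time"
)

func newServer() *http.Server {
	return &http.Server{
		Addr: ":8443", // illustrative
		HTTP2: &http.HTTP2Config{
			MaxConcurrentStreams: 100,
			PingTimeout:          30 * time.Second,
		},
	}
}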
diff --git a/vendor/golang.org/x/net/http2/config_pre_go124.go b/vendor/golang.org/x/net/http2/config_pre_go124.go
new file mode 100644
index 0000000000000..060fd6c64c6ca
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/config_pre_go124.go
@@ -0,0 +1,16 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.24
+
+package http2
+
+import "net/http"
+
+// Pre-Go 1.24 fallback.
+// The Server.HTTP2 and Transport.HTTP2 config fields were added in Go 1.24.
+
+func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {}
+
+func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {}
diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go
index 003e649f30c6c..7688c356b7cba 100644
--- a/vendor/golang.org/x/net/http2/http2.go
+++ b/vendor/golang.org/x/net/http2/http2.go
@@ -19,8 +19,9 @@ import (
"bufio"
"context"
"crypto/tls"
+ "errors"
"fmt"
- "io"
+ "net"
"net/http"
"os"
"sort"
@@ -237,13 +238,19 @@ func (cw closeWaiter) Wait() {
// Its buffered writer is lazily allocated as needed, to minimize
// idle memory usage with many connections.
type bufferedWriter struct {
- _ incomparable
- w io.Writer // immutable
- bw *bufio.Writer // non-nil when data is buffered
+ _ incomparable
+ group synctestGroupInterface // immutable
+ conn net.Conn // immutable
+ bw *bufio.Writer // non-nil when data is buffered
+ byteTimeout time.Duration // immutable, WriteByteTimeout
}
-func newBufferedWriter(w io.Writer) *bufferedWriter {
- return &bufferedWriter{w: w}
+func newBufferedWriter(group synctestGroupInterface, conn net.Conn, timeout time.Duration) *bufferedWriter {
+ return &bufferedWriter{
+ group: group,
+ conn: conn,
+ byteTimeout: timeout,
+ }
}
// bufWriterPoolBufferSize is the size of bufio.Writer's
@@ -270,7 +277,7 @@ func (w *bufferedWriter) Available() int {
func (w *bufferedWriter) Write(p []byte) (n int, err error) {
if w.bw == nil {
bw := bufWriterPool.Get().(*bufio.Writer)
- bw.Reset(w.w)
+ bw.Reset((*bufferedWriterTimeoutWriter)(w))
w.bw = bw
}
return w.bw.Write(p)
@@ -288,6 +295,38 @@ func (w *bufferedWriter) Flush() error {
return err
}
+type bufferedWriterTimeoutWriter bufferedWriter
+
+func (w *bufferedWriterTimeoutWriter) Write(p []byte) (n int, err error) {
+ return writeWithByteTimeout(w.group, w.conn, w.byteTimeout, p)
+}
+
+// writeWithByteTimeout writes to conn.
+// If more than timeout passes without any bytes being written to the connection,
+// the write fails.
+func writeWithByteTimeout(group synctestGroupInterface, conn net.Conn, timeout time.Duration, p []byte) (n int, err error) {
+ if timeout <= 0 {
+ return conn.Write(p)
+ }
+ for {
+ var now time.Time
+ if group == nil {
+ now = time.Now()
+ } else {
+ now = group.Now()
+ }
+ conn.SetWriteDeadline(now.Add(timeout))
+ nn, err := conn.Write(p[n:])
+ n += nn
+ if n == len(p) || nn == 0 || !errors.Is(err, os.ErrDeadlineExceeded) {
+ // Either we finished the write, made no progress, or got an error other than the deadline.
+ // Whichever it is, we're done now.
+ conn.SetWriteDeadline(time.Time{})
+ return n, err
+ }
+ }
+}
+
func mustUint31(v int32) uint32 {
if v < 0 || v > 2147483647 {
panic("out of range")
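
The key property of writeWithByteTimeout is that the deadline is re-armed after every partial write, so the timeout bounds time-without-progress rather than total write time. A standalone sketch of the same loop for an arbitrary net.Conn (the package-internal synctestGroupInterface test hook is dropped):

// Sketch mirroring the vendored byte-timeout write loop.
package main

import (
	"errors"
	"net"
	"os"
	"time"
)

func writeWithByteTimeout(conn net.Conn, timeout time.Duration, p []byte) (n int, err error) {
	if timeout <= 0 {
		return conn.Write(p)
	}
	for {
		// Progress resets the clock: each attempt gets a fresh deadline.
		conn.SetWriteDeadline(time.Now().Add(timeout))
		nn, err := conn.Write(p[n:])
		n += nn
		if n == len(p) || nn == 0 || !errors.Is(err, os.ErrDeadlineExceeded) {
			conn.SetWriteDeadline(time.Time{})
			return n, err
		}
	}
}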
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index 6c349f3ec6473..617b4a47623b2 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -29,6 +29,7 @@ import (
"bufio"
"bytes"
"context"
+ "crypto/rand"
"crypto/tls"
"errors"
"fmt"
@@ -52,10 +53,14 @@ import (
)
const (
- prefaceTimeout = 10 * time.Second
- firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway
- handlerChunkWriteSize = 4 << 10
- defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to?
+ prefaceTimeout = 10 * time.Second
+ firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway
+ handlerChunkWriteSize = 4 << 10
+ defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to?
+
+ // maxQueuedControlFrames is the maximum number of control frames like
+ // SETTINGS, PING and RST_STREAM that will be queued for writing before
+ // the connection is closed to prevent memory exhaustion attacks.
maxQueuedControlFrames = 10000
)
@@ -127,6 +132,22 @@ type Server struct {
// If zero or negative, there is no timeout.
IdleTimeout time.Duration
+ // ReadIdleTimeout is the timeout after which a health check using a ping
+ // frame will be carried out if no frame is received on the connection.
+ // If zero, no health check is performed.
+ ReadIdleTimeout time.Duration
+
+ // PingTimeout is the timeout after which the connection will be closed
+ // if a response to a ping is not received.
+ // If zero, a default of 15 seconds is used.
+ PingTimeout time.Duration
+
+ // WriteByteTimeout is the timeout after which a connection will be
+ // closed if no data can be written to it. The timeout begins when data is
+ // available to write, and is extended whenever any bytes are written.
+ // If zero or negative, there is no timeout.
+ WriteByteTimeout time.Duration
+
// MaxUploadBufferPerConnection is the size of the initial flow
// control window for each connections. The HTTP/2 spec does not
// allow this to be smaller than 65535 or larger than 2^32-1.
@@ -189,57 +210,6 @@ func (s *Server) afterFunc(d time.Duration, f func()) timer {
return timeTimer{time.AfterFunc(d, f)}
}
-func (s *Server) initialConnRecvWindowSize() int32 {
- if s.MaxUploadBufferPerConnection >= initialWindowSize {
- return s.MaxUploadBufferPerConnection
- }
- return 1 << 20
-}
-
-func (s *Server) initialStreamRecvWindowSize() int32 {
- if s.MaxUploadBufferPerStream > 0 {
- return s.MaxUploadBufferPerStream
- }
- return 1 << 20
-}
-
-func (s *Server) maxReadFrameSize() uint32 {
- if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize {
- return v
- }
- return defaultMaxReadFrameSize
-}
-
-func (s *Server) maxConcurrentStreams() uint32 {
- if v := s.MaxConcurrentStreams; v > 0 {
- return v
- }
- return defaultMaxStreams
-}
-
-func (s *Server) maxDecoderHeaderTableSize() uint32 {
- if v := s.MaxDecoderHeaderTableSize; v > 0 {
- return v
- }
- return initialHeaderTableSize
-}
-
-func (s *Server) maxEncoderHeaderTableSize() uint32 {
- if v := s.MaxEncoderHeaderTableSize; v > 0 {
- return v
- }
- return initialHeaderTableSize
-}
-
-// maxQueuedControlFrames is the maximum number of control frames like
-// SETTINGS, PING and RST_STREAM that will be queued for writing before
-// the connection is closed to prevent memory exhaustion attacks.
-func (s *Server) maxQueuedControlFrames() int {
- // TODO: if anybody asks, add a Server field, and remember to define the
- // behavior of negative values.
- return maxQueuedControlFrames
-}
-
type serverInternalState struct {
mu sync.Mutex
activeConns map[*serverConn]struct{}
@@ -440,13 +410,15 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon
baseCtx, cancel := serverConnBaseContext(c, opts)
defer cancel()
+ http1srv := opts.baseConfig()
+ conf := configFromServer(http1srv, s)
sc := &serverConn{
srv: s,
- hs: opts.baseConfig(),
+ hs: http1srv,
conn: c,
baseCtx: baseCtx,
remoteAddrStr: c.RemoteAddr().String(),
- bw: newBufferedWriter(c),
+ bw: newBufferedWriter(s.group, c, conf.WriteByteTimeout),
handler: opts.handler(),
streams: make(map[uint32]*stream),
readFrameCh: make(chan readFrameResult),
@@ -456,9 +428,12 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon
bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way
doneServing: make(chan struct{}),
clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value"
- advMaxStreams: s.maxConcurrentStreams(),
+ advMaxStreams: conf.MaxConcurrentStreams,
initialStreamSendWindowSize: initialWindowSize,
+ initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream,
maxFrameSize: initialMaxFrameSize,
+ pingTimeout: conf.PingTimeout,
+ countErrorFunc: conf.CountError,
serveG: newGoroutineLock(),
pushEnabled: true,
sawClientPreface: opts.SawClientPreface,
@@ -491,15 +466,15 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon
sc.flow.add(initialWindowSize)
sc.inflow.init(initialWindowSize)
sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
- sc.hpackEncoder.SetMaxDynamicTableSizeLimit(s.maxEncoderHeaderTableSize())
+ sc.hpackEncoder.SetMaxDynamicTableSizeLimit(conf.MaxEncoderHeaderTableSize)
fr := NewFramer(sc.bw, c)
- if s.CountError != nil {
- fr.countError = s.CountError
+ if conf.CountError != nil {
+ fr.countError = conf.CountError
}
- fr.ReadMetaHeaders = hpack.NewDecoder(s.maxDecoderHeaderTableSize(), nil)
+ fr.ReadMetaHeaders = hpack.NewDecoder(conf.MaxDecoderHeaderTableSize, nil)
fr.MaxHeaderListSize = sc.maxHeaderListSize()
- fr.SetMaxReadFrameSize(s.maxReadFrameSize())
+ fr.SetMaxReadFrameSize(conf.MaxReadFrameSize)
sc.framer = fr
if tc, ok := c.(connectionStater); ok {
@@ -532,7 +507,7 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon
// So for now, do nothing here again.
}
- if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) {
+ if !conf.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) {
// "Endpoints MAY choose to generate a connection error
// (Section 5.4.1) of type INADEQUATE_SECURITY if one of
// the prohibited cipher suites are negotiated."
@@ -569,7 +544,7 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon
opts.UpgradeRequest = nil
}
- sc.serve()
+ sc.serve(conf)
}
func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx context.Context, cancel func()) {
@@ -609,6 +584,7 @@ type serverConn struct {
tlsState *tls.ConnectionState // shared by all handlers, like net/http
remoteAddrStr string
writeSched WriteScheduler
+ countErrorFunc func(errType string)
// Everything following is owned by the serve loop; use serveG.check():
serveG goroutineLock // used to verify funcs are on serve()
@@ -628,6 +604,7 @@ type serverConn struct {
streams map[uint32]*stream
unstartedHandlers []unstartedHandler
initialStreamSendWindowSize int32
+ initialStreamRecvWindowSize int32
maxFrameSize int32
peerMaxHeaderListSize uint32 // zero means unknown (default)
canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case
@@ -638,9 +615,14 @@ type serverConn struct {
inGoAway bool // we've started to or sent GOAWAY
inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop
needToSendGoAway bool // we need to schedule a GOAWAY frame write
+ pingSent bool
+ sentPingData [8]byte
goAwayCode ErrCode
shutdownTimer timer // nil until used
idleTimer timer // nil if unused
+ readIdleTimeout time.Duration
+ pingTimeout time.Duration
+ readIdleTimer timer // nil if unused
// Owned by the writeFrameAsync goroutine:
headerWriteBuf bytes.Buffer
@@ -655,11 +637,7 @@ func (sc *serverConn) maxHeaderListSize() uint32 {
if n <= 0 {
n = http.DefaultMaxHeaderBytes
}
- // http2's count is in a slightly different unit and includes 32 bytes per pair.
- // So, take the net/http.Server value and pad it up a bit, assuming 10 headers.
- const perFieldOverhead = 32 // per http2 spec
- const typicalHeaders = 10 // conservative
- return uint32(n + typicalHeaders*perFieldOverhead)
+ return uint32(adjustHTTP1MaxHeaderSize(int64(n)))
}
func (sc *serverConn) curOpenStreams() uint32 {
@@ -923,7 +901,7 @@ func (sc *serverConn) notePanic() {
}
}
-func (sc *serverConn) serve() {
+func (sc *serverConn) serve(conf http2Config) {
sc.serveG.check()
defer sc.notePanic()
defer sc.conn.Close()
@@ -937,18 +915,18 @@ func (sc *serverConn) serve() {
sc.writeFrame(FrameWriteRequest{
write: writeSettings{
- {SettingMaxFrameSize, sc.srv.maxReadFrameSize()},
+ {SettingMaxFrameSize, conf.MaxReadFrameSize},
{SettingMaxConcurrentStreams, sc.advMaxStreams},
{SettingMaxHeaderListSize, sc.maxHeaderListSize()},
- {SettingHeaderTableSize, sc.srv.maxDecoderHeaderTableSize()},
- {SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())},
+ {SettingHeaderTableSize, conf.MaxDecoderHeaderTableSize},
+ {SettingInitialWindowSize, uint32(sc.initialStreamRecvWindowSize)},
},
})
sc.unackedSettings++
// Each connection starts with initialWindowSize inflow tokens.
// If a higher value is configured, we add more tokens.
- if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 {
+ if diff := conf.MaxUploadBufferPerConnection - initialWindowSize; diff > 0 {
sc.sendWindowUpdate(nil, int(diff))
}
@@ -968,11 +946,18 @@ func (sc *serverConn) serve() {
defer sc.idleTimer.Stop()
}
+ if conf.SendPingTimeout > 0 {
+ sc.readIdleTimeout = conf.SendPingTimeout
+ sc.readIdleTimer = sc.srv.afterFunc(conf.SendPingTimeout, sc.onReadIdleTimer)
+ defer sc.readIdleTimer.Stop()
+ }
+
go sc.readFrames() // closed by defer sc.conn.Close above
settingsTimer := sc.srv.afterFunc(firstSettingsTimeout, sc.onSettingsTimer)
defer settingsTimer.Stop()
+ lastFrameTime := sc.srv.now()
loopNum := 0
for {
loopNum++
@@ -986,6 +971,7 @@ func (sc *serverConn) serve() {
case res := <-sc.wroteFrameCh:
sc.wroteFrame(res)
case res := <-sc.readFrameCh:
+ lastFrameTime = sc.srv.now()
// Process any written frames before reading new frames from the client since a
// written frame could have triggered a new stream to be started.
if sc.writingFrameAsync {
@@ -1017,6 +1003,8 @@ func (sc *serverConn) serve() {
case idleTimerMsg:
sc.vlogf("connection is idle")
sc.goAway(ErrCodeNo)
+ case readIdleTimerMsg:
+ sc.handlePingTimer(lastFrameTime)
case shutdownTimerMsg:
sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr())
return
@@ -1039,7 +1027,7 @@ func (sc *serverConn) serve() {
// If the peer is causing us to generate a lot of control frames,
// but not reading them from us, assume they are trying to make us
// run out of memory.
- if sc.queuedControlFrames > sc.srv.maxQueuedControlFrames() {
+ if sc.queuedControlFrames > maxQueuedControlFrames {
sc.vlogf("http2: too many control frames in send queue, closing connection")
return
}
@@ -1055,12 +1043,39 @@ func (sc *serverConn) serve() {
}
}
+func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) {
+ if sc.pingSent {
+ sc.vlogf("timeout waiting for PING response")
+ sc.conn.Close()
+ return
+ }
+
+ pingAt := lastFrameReadTime.Add(sc.readIdleTimeout)
+ now := sc.srv.now()
+ if pingAt.After(now) {
+ // We received frames since arming the ping timer.
+ // Reset it for the next possible timeout.
+ sc.readIdleTimer.Reset(pingAt.Sub(now))
+ return
+ }
+
+ sc.pingSent = true
+ // Ignore crypto/rand.Read errors: It generally can't fail, and worst case if it does
+ // is we send a PING frame containing 0s.
+ _, _ = rand.Read(sc.sentPingData[:])
+ sc.writeFrame(FrameWriteRequest{
+ write: &writePing{data: sc.sentPingData},
+ })
+ sc.readIdleTimer.Reset(sc.pingTimeout)
+}
+
type serverMessage int
// Message values sent to serveMsgCh.
var (
settingsTimerMsg = new(serverMessage)
idleTimerMsg = new(serverMessage)
+ readIdleTimerMsg = new(serverMessage)
shutdownTimerMsg = new(serverMessage)
gracefulShutdownMsg = new(serverMessage)
handlerDoneMsg = new(serverMessage)
@@ -1068,6 +1083,7 @@ var (
func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) }
func (sc *serverConn) onIdleTimer() { sc.sendServeMsg(idleTimerMsg) }
+func (sc *serverConn) onReadIdleTimer() { sc.sendServeMsg(readIdleTimerMsg) }
func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) }
func (sc *serverConn) sendServeMsg(msg interface{}) {
@@ -1320,6 +1336,10 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) {
sc.writingFrame = false
sc.writingFrameAsync = false
+ if res.err != nil {
+ sc.conn.Close()
+ }
+
wr := res.wr
if writeEndsStream(wr.write) {
@@ -1594,6 +1614,11 @@ func (sc *serverConn) processFrame(f Frame) error {
func (sc *serverConn) processPing(f *PingFrame) error {
sc.serveG.check()
if f.IsAck() {
+ if sc.pingSent && sc.sentPingData == f.Data {
+ // This is a response to a PING we sent.
+ sc.pingSent = false
+ sc.readIdleTimer.Reset(sc.readIdleTimeout)
+ }
// 6.7 PING: " An endpoint MUST NOT respond to PING frames
// containing this flag."
return nil
@@ -2160,7 +2185,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream
st.cw.Init()
st.flow.conn = &sc.flow // link to conn-level counter
st.flow.add(sc.initialStreamSendWindowSize)
- st.inflow.init(sc.srv.initialStreamRecvWindowSize())
+ st.inflow.init(sc.initialStreamRecvWindowSize)
if sc.hs.WriteTimeout > 0 {
st.writeDeadline = sc.srv.afterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
}
@@ -3301,7 +3326,7 @@ func (sc *serverConn) countError(name string, err error) error {
if sc == nil || sc.srv == nil {
return err
}
- f := sc.srv.CountError
+ f := sc.countErrorFunc
if f == nil {
return err
}
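
Taken together, the server.go changes add a PING-based health check: after ReadIdleTimeout with no frames the server writes a PING carrying random bytes, and tears the connection down if the matching ACK does not arrive within PingTimeout. A hedged sketch of enabling it (ReadIdleTimeout, PingTimeout and WriteByteTimeout are the http2.Server fields added above; the handler and file paths are illustrative):

// Sketch: enabling the new server-side keepalive and write timeout.
package main

import (
	"log"
	"net/http"
	"time"

	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{Addr: ":8443", Handler: http.DefaultServeMux}
	if err := http2.ConfigureServer(srv, &http2.Server{
		ReadIdleTimeout:  30 * time.Second, // send a PING after 30s of silence
		PingTimeout:      15 * time.Second, // close if the PING ACK never arrives
		WriteByteTimeout: 10 * time.Second, // close if writes stall entirely
	}); err != nil {
		log.Fatal(err)
	}
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem")) // paths illustrative
}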
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index 61f511f97aa44..0c5f64aa8bef7 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -25,7 +25,6 @@ import (
"net/http"
"net/http/httptrace"
"net/textproto"
- "os"
"sort"
"strconv"
"strings"
@@ -227,40 +226,26 @@ func (t *Transport) contextWithTimeout(ctx context.Context, d time.Duration) (co
}
func (t *Transport) maxHeaderListSize() uint32 {
- if t.MaxHeaderListSize == 0 {
+ n := int64(t.MaxHeaderListSize)
+ if t.t1 != nil && t.t1.MaxResponseHeaderBytes != 0 {
+ n = t.t1.MaxResponseHeaderBytes
+ if n > 0 {
+ n = adjustHTTP1MaxHeaderSize(n)
+ }
+ }
+ if n <= 0 {
return 10 << 20
}
- if t.MaxHeaderListSize == 0xffffffff {
+ if n >= 0xffffffff {
return 0
}
- return t.MaxHeaderListSize
-}
-
-func (t *Transport) maxFrameReadSize() uint32 {
- if t.MaxReadFrameSize == 0 {
- return 0 // use the default provided by the peer
- }
- if t.MaxReadFrameSize < minMaxFrameSize {
- return minMaxFrameSize
- }
- if t.MaxReadFrameSize > maxFrameSize {
- return maxFrameSize
- }
- return t.MaxReadFrameSize
+ return uint32(n)
}
func (t *Transport) disableCompression() bool {
return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression)
}
-func (t *Transport) pingTimeout() time.Duration {
- if t.PingTimeout == 0 {
- return 15 * time.Second
- }
- return t.PingTimeout
-
-}
-
// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2.
// It returns an error if t1 has already been HTTP/2-enabled.
//
@@ -370,11 +355,14 @@ type ClientConn struct {
lastActive time.Time
lastIdle time.Time // time last idle
// Settings from peer: (also guarded by wmu)
- maxFrameSize uint32
- maxConcurrentStreams uint32
- peerMaxHeaderListSize uint64
- peerMaxHeaderTableSize uint32
- initialWindowSize uint32
+ maxFrameSize uint32
+ maxConcurrentStreams uint32
+ peerMaxHeaderListSize uint64
+ peerMaxHeaderTableSize uint32
+ initialWindowSize uint32
+ initialStreamRecvWindowSize int32
+ readIdleTimeout time.Duration
+ pingTimeout time.Duration
// reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests.
// Write to reqHeaderMu to lock it, read from it to unlock.
@@ -499,6 +487,7 @@ func (cs *clientStream) closeReqBodyLocked() {
}
type stickyErrWriter struct {
+ group synctestGroupInterface
conn net.Conn
timeout time.Duration
err *error
@@ -508,22 +497,9 @@ func (sew stickyErrWriter) Write(p []byte) (n int, err error) {
if *sew.err != nil {
return 0, *sew.err
}
- for {
- if sew.timeout != 0 {
- sew.conn.SetWriteDeadline(time.Now().Add(sew.timeout))
- }
- nn, err := sew.conn.Write(p[n:])
- n += nn
- if n < len(p) && nn > 0 && errors.Is(err, os.ErrDeadlineExceeded) {
- // Keep extending the deadline so long as we're making progress.
- continue
- }
- if sew.timeout != 0 {
- sew.conn.SetWriteDeadline(time.Time{})
- }
- *sew.err = err
- return n, err
- }
+ n, err = writeWithByteTimeout(sew.group, sew.conn, sew.timeout, p)
+ *sew.err = err
+ return n, err
}
// noCachedConnError is the concrete type of ErrNoCachedConn, which
@@ -758,44 +734,36 @@ func (t *Transport) expectContinueTimeout() time.Duration {
return t.t1.ExpectContinueTimeout
}
-func (t *Transport) maxDecoderHeaderTableSize() uint32 {
- if v := t.MaxDecoderHeaderTableSize; v > 0 {
- return v
- }
- return initialHeaderTableSize
-}
-
-func (t *Transport) maxEncoderHeaderTableSize() uint32 {
- if v := t.MaxEncoderHeaderTableSize; v > 0 {
- return v
- }
- return initialHeaderTableSize
-}
-
func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
return t.newClientConn(c, t.disableKeepAlives())
}
func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) {
+ conf := configFromTransport(t)
cc := &ClientConn{
- t: t,
- tconn: c,
- readerDone: make(chan struct{}),
- nextStreamID: 1,
- maxFrameSize: 16 << 10, // spec default
- initialWindowSize: 65535, // spec default
- maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings.
- peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead.
- streams: make(map[uint32]*clientStream),
- singleUse: singleUse,
- wantSettingsAck: true,
- pings: make(map[[8]byte]chan struct{}),
- reqHeaderMu: make(chan struct{}, 1),
- }
+ t: t,
+ tconn: c,
+ readerDone: make(chan struct{}),
+ nextStreamID: 1,
+ maxFrameSize: 16 << 10, // spec default
+ initialWindowSize: 65535, // spec default
+ initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream,
+ maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings.
+ peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead.
+ streams: make(map[uint32]*clientStream),
+ singleUse: singleUse,
+ wantSettingsAck: true,
+ readIdleTimeout: conf.SendPingTimeout,
+ pingTimeout: conf.PingTimeout,
+ pings: make(map[[8]byte]chan struct{}),
+ reqHeaderMu: make(chan struct{}, 1),
+ }
+ var group synctestGroupInterface
if t.transportTestHooks != nil {
t.markNewGoroutine()
t.transportTestHooks.newclientconn(cc)
c = cc.tconn
+ group = t.group
}
if VerboseLogs {
t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr())
@@ -807,24 +775,23 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
// TODO: adjust this writer size to account for frame size +
// MTU + crypto/tls record padding.
cc.bw = bufio.NewWriter(stickyErrWriter{
+ group: group,
conn: c,
- timeout: t.WriteByteTimeout,
+ timeout: conf.WriteByteTimeout,
err: &cc.werr,
})
cc.br = bufio.NewReader(c)
cc.fr = NewFramer(cc.bw, cc.br)
- if t.maxFrameReadSize() != 0 {
- cc.fr.SetMaxReadFrameSize(t.maxFrameReadSize())
- }
+ cc.fr.SetMaxReadFrameSize(conf.MaxReadFrameSize)
if t.CountError != nil {
cc.fr.countError = t.CountError
}
- maxHeaderTableSize := t.maxDecoderHeaderTableSize()
+ maxHeaderTableSize := conf.MaxDecoderHeaderTableSize
cc.fr.ReadMetaHeaders = hpack.NewDecoder(maxHeaderTableSize, nil)
cc.fr.MaxHeaderListSize = t.maxHeaderListSize()
cc.henc = hpack.NewEncoder(&cc.hbuf)
- cc.henc.SetMaxDynamicTableSizeLimit(t.maxEncoderHeaderTableSize())
+ cc.henc.SetMaxDynamicTableSizeLimit(conf.MaxEncoderHeaderTableSize)
cc.peerMaxHeaderTableSize = initialHeaderTableSize
if cs, ok := c.(connectionStater); ok {
@@ -834,11 +801,9 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
initialSettings := []Setting{
{ID: SettingEnablePush, Val: 0},
- {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow},
- }
- if max := t.maxFrameReadSize(); max != 0 {
- initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: max})
+ {ID: SettingInitialWindowSize, Val: uint32(cc.initialStreamRecvWindowSize)},
}
+ initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: conf.MaxReadFrameSize})
if max := t.maxHeaderListSize(); max != 0 {
initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max})
}
@@ -848,8 +813,8 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
cc.bw.Write(clientPreface)
cc.fr.WriteSettings(initialSettings...)
- cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow)
- cc.inflow.init(transportDefaultConnFlow + initialWindowSize)
+ cc.fr.WriteWindowUpdate(0, uint32(conf.MaxUploadBufferPerConnection))
+ cc.inflow.init(conf.MaxUploadBufferPerConnection + initialWindowSize)
cc.bw.Flush()
if cc.werr != nil {
cc.Close()
@@ -867,7 +832,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
}
func (cc *ClientConn) healthCheck() {
- pingTimeout := cc.t.pingTimeout()
+ pingTimeout := cc.pingTimeout
// We don't need to periodically ping in the health check, because the readLoop of ClientConn will
// trigger the healthCheck again if there is no frame received.
ctx, cancel := cc.t.contextWithTimeout(context.Background(), pingTimeout)
@@ -2199,7 +2164,7 @@ type resAndError struct {
func (cc *ClientConn) addStreamLocked(cs *clientStream) {
cs.flow.add(int32(cc.initialWindowSize))
cs.flow.setConnFlow(&cc.flow)
- cs.inflow.init(transportDefaultStreamFlow)
+ cs.inflow.init(cc.initialStreamRecvWindowSize)
cs.ID = cc.nextStreamID
cc.nextStreamID += 2
cc.streams[cs.ID] = cs
@@ -2345,7 +2310,7 @@ func (cc *ClientConn) countReadFrameError(err error) {
func (rl *clientConnReadLoop) run() error {
cc := rl.cc
gotSettings := false
- readIdleTimeout := cc.t.ReadIdleTimeout
+ readIdleTimeout := cc.readIdleTimeout
var t timer
if readIdleTimeout != 0 {
t = cc.t.afterFunc(readIdleTimeout, cc.healthCheck)
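
On the client side the same plumbing runs through configFromTransport, and maxHeaderListSize now also derives its advertised limit from a wrapped HTTP/1 transport's MaxResponseHeaderBytes. A minimal sketch (values illustrative; ConfigureTransports is the existing x/net/http2 entry point):

// Sketch: client-side counterpart of the server keepalive settings.
package main

import (
	"net/http"
	"time"

	"golang.org/x/net/http2"
)

func newClient() (*http.Client, error) {
	t1 := &http.Transport{MaxResponseHeaderBytes: 1 << 20} // now also caps MAX_HEADER_LIST_SIZE
	t2, err := http2.ConfigureTransports(t1)
	if err != nil {
		return nil, err
	}
	t2.ReadIdleTimeout = 30 * time.Second // health-check PING after 30s of silence
	t2.PingTimeout = 15 * time.Second     // close if the PING goes unanswered
	return &http.Client{Transport: t1}, nil
}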
diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go
index 33f61398a1236..6ff6bee7e9549 100644
--- a/vendor/golang.org/x/net/http2/write.go
+++ b/vendor/golang.org/x/net/http2/write.go
@@ -131,6 +131,16 @@ func (se StreamError) writeFrame(ctx writeContext) error {
func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }
+type writePing struct {
+ data [8]byte
+}
+
+func (w writePing) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WritePing(false, w.data)
+}
+
+func (w writePing) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.data) <= max }
+
type writePingAck struct{ pf *PingFrame }
func (w writePingAck) writeFrame(ctx writeContext) error {
diff --git a/vendor/golang.org/x/sys/unix/README.md b/vendor/golang.org/x/sys/unix/README.md
index 7d3c060e12213..6e08a76a716e9 100644
--- a/vendor/golang.org/x/sys/unix/README.md
+++ b/vendor/golang.org/x/sys/unix/README.md
@@ -156,7 +156,7 @@ from the generated architecture-specific files listed below, and merge these
into a common file for each OS.
The merge is performed in the following steps:
-1. Construct the set of common code that is idential in all architecture-specific files.
+1. Construct the set of common code that is identical in all architecture-specific files.
2. Write this common code to the merged file.
3. Remove the common code from all architecture-specific files.
diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh
index e14b766a32c5f..ac54ecaba0a4b 100644
--- a/vendor/golang.org/x/sys/unix/mkerrors.sh
+++ b/vendor/golang.org/x/sys/unix/mkerrors.sh
@@ -656,7 +656,7 @@ errors=$(
signals=$(
echo '#include <signal.h>' | $CC -x c - -E -dM $ccflags |
awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print $2 }' |
- grep -v 'SIGSTKSIZE\|SIGSTKSZ\|SIGRT\|SIGMAX64' |
+ grep -E -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' |
sort
)
@@ -666,7 +666,7 @@ echo '#include <errno.h>' | $CC -x c - -E -dM $ccflags |
sort >_error.grep
echo '#include <signal.h>' | $CC -x c - -E -dM $ccflags |
awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print "^\t" $2 "[ \t]*=" }' |
- grep -v 'SIGSTKSIZE\|SIGSTKSZ\|SIGRT\|SIGMAX64' |
+ grep -E -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' |
sort >_signal.grep
echo '// mkerrors.sh' "$@"
diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go
index 67ce6cef2d5c4..6f15ba1eaff65 100644
--- a/vendor/golang.org/x/sys/unix/syscall_aix.go
+++ b/vendor/golang.org/x/sys/unix/syscall_aix.go
@@ -360,7 +360,7 @@ func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int,
var status _C_int
var r Pid_t
err = ERESTART
- // AIX wait4 may return with ERESTART errno, while the processus is still
+ // AIX wait4 may return with ERESTART errno, while the process is still
// active.
for err == ERESTART {
r, err = wait4(Pid_t(pid), &status, options, rusage)
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go
index 3f1d3d4cb2560..f08abd434ff47 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux.go
@@ -1295,6 +1295,48 @@ func GetsockoptTCPInfo(fd, level, opt int) (*TCPInfo, error) {
return &value, err
}
+// GetsockoptTCPCCVegasInfo returns algorithm specific congestion control information for a socket using the "vegas"
+// algorithm.
+//
+// The socket's congestion control algorighm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option:
+//
+// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION)
+func GetsockoptTCPCCVegasInfo(fd, level, opt int) (*TCPVegasInfo, error) {
+ var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment
+ vallen := _Socklen(SizeofTCPCCInfo)
+ err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen)
+ out := (*TCPVegasInfo)(unsafe.Pointer(&value[0]))
+ return out, err
+}
+
+// GetsockoptTCPCCDCTCPInfo returns algorithm specific congestion control information for a socket using the "dctp"
+// algorithm.
+//
+// The socket's congestion control algorighm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option:
+//
+// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION)
+func GetsockoptTCPCCDCTCPInfo(fd, level, opt int) (*TCPDCTCPInfo, error) {
+ var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment
+ vallen := _Socklen(SizeofTCPCCInfo)
+ err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen)
+ out := (*TCPDCTCPInfo)(unsafe.Pointer(&value[0]))
+ return out, err
+}
+
+// GetsockoptTCPCCBBRInfo returns algorithm specific congestion control information for a socket using the "bbr"
+// algorithm.
+//
+// The socket's congestion control algorighm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option:
+//
+// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION)
+func GetsockoptTCPCCBBRInfo(fd, level, opt int) (*TCPBBRInfo, error) {
+ var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment
+ vallen := _Socklen(SizeofTCPCCInfo)
+ err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen)
+ out := (*TCPBBRInfo)(unsafe.Pointer(&value[0]))
+ return out, err
+}
+
// GetsockoptString returns the string value of the socket option opt for the
// socket associated with fd at the given socket level.
func GetsockoptString(fd, level, opt int) (string, error) {
@@ -1959,7 +2001,26 @@ func Getpgrp() (pid int) {
//sysnb Getpid() (pid int)
//sysnb Getppid() (ppid int)
//sys Getpriority(which int, who int) (prio int, err error)
-//sys Getrandom(buf []byte, flags int) (n int, err error)
+
+func Getrandom(buf []byte, flags int) (n int, err error) {
+ vdsoRet, supported := vgetrandom(buf, uint32(flags))
+ if supported {
+ if vdsoRet < 0 {
+ return 0, errnoErr(syscall.Errno(-vdsoRet))
+ }
+ return vdsoRet, nil
+ }
+ var p *byte
+ if len(buf) > 0 {
+ p = &buf[0]
+ }
+ r, _, e := Syscall(SYS_GETRANDOM, uintptr(unsafe.Pointer(p)), uintptr(len(buf)), uintptr(flags))
+ if e != 0 {
+ return 0, errnoErr(e)
+ }
+ return int(r), nil
+}
+
//sysnb Getrusage(who int, rusage *Rusage) (err error)
//sysnb Getsid(pid int) (sid int, err error)
//sysnb Gettid() (tid int)
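
Editor's note, not part of the patch: callers are unaffected by the change above; Getrandom keeps its signature and simply prefers the vDSO path (runtime.vgetrandom, wired up below for Linux with Go 1.24+) before falling back to the raw SYS_GETRANDOM syscall. A minimal sketch:

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	buf := make([]byte, 16)
	// Uses the vDSO fast path when available, the syscall otherwise.
	n, err := unix.Getrandom(buf, 0)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("read %d random bytes: %x\n", n, buf[:n])
}
```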
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
index cf2ee6c75ef3d..745e5c7e6c0d5 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
@@ -182,3 +182,5 @@ func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error
}
return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags)
}
+
+const SYS_FSTATAT = SYS_NEWFSTATAT
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go
index 3d0e98451f8a7..dd2262a40799a 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go
@@ -214,3 +214,5 @@ func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error
}
return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags)
}
+
+const SYS_FSTATAT = SYS_NEWFSTATAT
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go
index 6f5a288944dfe..8cf3670bda630 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go
@@ -187,3 +187,5 @@ func RISCVHWProbe(pairs []RISCVHWProbePairs, set *CPUSet, flags uint) (err error
}
return riscvHWProbe(pairs, setSize, set, flags)
}
+
+const SYS_FSTATAT = SYS_NEWFSTATAT
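
Editor's note, not part of the patch: the `SYS_FSTATAT = SYS_NEWFSTATAT` aliases added for arm64, loong64, and riscv64 keep raw-syscall callers that reference the old name compiling after the rename in the generated tables; both identifiers now resolve to syscall number 79 on those ports:

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Builds only on linux/arm64, linux/loong64, or linux/riscv64.
	fmt.Println(unix.SYS_FSTATAT == unix.SYS_NEWFSTATAT) // true
}
```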
diff --git a/vendor/golang.org/x/sys/unix/vgetrandom_linux.go b/vendor/golang.org/x/sys/unix/vgetrandom_linux.go
new file mode 100644
index 0000000000000..07ac8e09d1b70
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/vgetrandom_linux.go
@@ -0,0 +1,13 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux && go1.24
+
+package unix
+
+import _ "unsafe"
+
+//go:linkname vgetrandom runtime.vgetrandom
+//go:noescape
+func vgetrandom(p []byte, flags uint32) (ret int, supported bool)
diff --git a/vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go b/vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go
new file mode 100644
index 0000000000000..297e97bce92a6
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go
@@ -0,0 +1,11 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !linux || !go1.24
+
+package unix
+
+func vgetrandom(p []byte, flags uint32) (ret int, supported bool) {
+ return -1, false
+}
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go
index 01a70b24638e6..de3b462489c0b 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go
@@ -495,6 +495,7 @@ const (
BPF_F_TEST_REG_INVARIANTS = 0x80
BPF_F_TEST_RND_HI32 = 0x4
BPF_F_TEST_RUN_ON_CPU = 0x1
+ BPF_F_TEST_SKB_CHECKSUM_COMPLETE = 0x4
BPF_F_TEST_STATE_FREQ = 0x8
BPF_F_TEST_XDP_LIVE_FRAMES = 0x2
BPF_F_XDP_DEV_BOUND_ONLY = 0x40
@@ -1922,6 +1923,7 @@ const (
MNT_EXPIRE = 0x4
MNT_FORCE = 0x1
MNT_ID_REQ_SIZE_VER0 = 0x18
+ MNT_ID_REQ_SIZE_VER1 = 0x20
MODULE_INIT_COMPRESSED_FILE = 0x4
MODULE_INIT_IGNORE_MODVERSIONS = 0x1
MODULE_INIT_IGNORE_VERMAGIC = 0x2
@@ -2187,7 +2189,7 @@ const (
NFT_REG_SIZE = 0x10
NFT_REJECT_ICMPX_MAX = 0x3
NFT_RT_MAX = 0x4
- NFT_SECMARK_CTX_MAXLEN = 0x100
+ NFT_SECMARK_CTX_MAXLEN = 0x1000
NFT_SET_MAXNAMELEN = 0x100
NFT_SOCKET_MAX = 0x3
NFT_TABLE_F_MASK = 0x7
@@ -2356,9 +2358,11 @@ const (
PERF_MEM_LVLNUM_IO = 0xa
PERF_MEM_LVLNUM_L1 = 0x1
PERF_MEM_LVLNUM_L2 = 0x2
+ PERF_MEM_LVLNUM_L2_MHB = 0x5
PERF_MEM_LVLNUM_L3 = 0x3
PERF_MEM_LVLNUM_L4 = 0x4
PERF_MEM_LVLNUM_LFB = 0xc
+ PERF_MEM_LVLNUM_MSC = 0x6
PERF_MEM_LVLNUM_NA = 0xf
PERF_MEM_LVLNUM_PMEM = 0xe
PERF_MEM_LVLNUM_RAM = 0xd
@@ -2431,6 +2435,7 @@ const (
PRIO_PGRP = 0x1
PRIO_PROCESS = 0x0
PRIO_USER = 0x2
+ PROCFS_IOCTL_MAGIC = 'f'
PROC_SUPER_MAGIC = 0x9fa0
PROT_EXEC = 0x4
PROT_GROWSDOWN = 0x1000000
@@ -2933,11 +2938,12 @@ const (
RUSAGE_SELF = 0x0
RUSAGE_THREAD = 0x1
RWF_APPEND = 0x10
+ RWF_ATOMIC = 0x40
RWF_DSYNC = 0x2
RWF_HIPRI = 0x1
RWF_NOAPPEND = 0x20
RWF_NOWAIT = 0x8
- RWF_SUPPORTED = 0x3f
+ RWF_SUPPORTED = 0x7f
RWF_SYNC = 0x4
RWF_WRITE_LIFE_NOT_SET = 0x0
SCHED_BATCH = 0x3
@@ -3210,6 +3216,7 @@ const (
STATX_ATTR_MOUNT_ROOT = 0x2000
STATX_ATTR_NODUMP = 0x40
STATX_ATTR_VERITY = 0x100000
+ STATX_ATTR_WRITE_ATOMIC = 0x400000
STATX_BASIC_STATS = 0x7ff
STATX_BLOCKS = 0x400
STATX_BTIME = 0x800
@@ -3226,6 +3233,7 @@ const (
STATX_SUBVOL = 0x8000
STATX_TYPE = 0x1
STATX_UID = 0x8
+ STATX_WRITE_ATOMIC = 0x10000
STATX__RESERVED = 0x80000000
SYNC_FILE_RANGE_WAIT_AFTER = 0x4
SYNC_FILE_RANGE_WAIT_BEFORE = 0x1
@@ -3624,6 +3632,7 @@ const (
XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000
XDP_UMEM_PGOFF_FILL_RING = 0x100000000
XDP_UMEM_REG = 0x4
+ XDP_UMEM_TX_METADATA_LEN = 0x4
XDP_UMEM_TX_SW_CSUM = 0x2
XDP_UMEM_UNALIGNED_CHUNK_FLAG = 0x1
XDP_USE_NEED_WAKEUP = 0x8
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
index 684a5168dac4e..8aa6d77c0184e 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
@@ -153,9 +153,14 @@ const (
NFDBITS = 0x20
NLDLY = 0x100
NOFLSH = 0x80
+ NS_GET_MNTNS_ID = 0x8008b705
NS_GET_NSTYPE = 0xb703
NS_GET_OWNER_UID = 0xb704
NS_GET_PARENT = 0xb702
+ NS_GET_PID_FROM_PIDNS = 0x8004b706
+ NS_GET_PID_IN_PIDNS = 0x8004b708
+ NS_GET_TGID_FROM_PIDNS = 0x8004b707
+ NS_GET_TGID_IN_PIDNS = 0x8004b709
NS_GET_USERNS = 0xb701
OLCUC = 0x2
ONLCR = 0x4
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
index 61d74b592d686..da428f4253398 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
@@ -153,9 +153,14 @@ const (
NFDBITS = 0x40
NLDLY = 0x100
NOFLSH = 0x80
+ NS_GET_MNTNS_ID = 0x8008b705
NS_GET_NSTYPE = 0xb703
NS_GET_OWNER_UID = 0xb704
NS_GET_PARENT = 0xb702
+ NS_GET_PID_FROM_PIDNS = 0x8004b706
+ NS_GET_PID_IN_PIDNS = 0x8004b708
+ NS_GET_TGID_FROM_PIDNS = 0x8004b707
+ NS_GET_TGID_IN_PIDNS = 0x8004b709
NS_GET_USERNS = 0xb701
OLCUC = 0x2
ONLCR = 0x4
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
index a28c9e3e893ad..bf45bfec78a53 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
@@ -150,9 +150,14 @@ const (
NFDBITS = 0x20
NLDLY = 0x100
NOFLSH = 0x80
+ NS_GET_MNTNS_ID = 0x8008b705
NS_GET_NSTYPE = 0xb703
NS_GET_OWNER_UID = 0xb704
NS_GET_PARENT = 0xb702
+ NS_GET_PID_FROM_PIDNS = 0x8004b706
+ NS_GET_PID_IN_PIDNS = 0x8004b708
+ NS_GET_TGID_FROM_PIDNS = 0x8004b707
+ NS_GET_TGID_IN_PIDNS = 0x8004b709
NS_GET_USERNS = 0xb701
OLCUC = 0x2
ONLCR = 0x4
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
index ab5d1fe8ead78..71c67162b737e 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
@@ -154,9 +154,14 @@ const (
NFDBITS = 0x40
NLDLY = 0x100
NOFLSH = 0x80
+ NS_GET_MNTNS_ID = 0x8008b705
NS_GET_NSTYPE = 0xb703
NS_GET_OWNER_UID = 0xb704
NS_GET_PARENT = 0xb702
+ NS_GET_PID_FROM_PIDNS = 0x8004b706
+ NS_GET_PID_IN_PIDNS = 0x8004b708
+ NS_GET_TGID_FROM_PIDNS = 0x8004b707
+ NS_GET_TGID_IN_PIDNS = 0x8004b709
NS_GET_USERNS = 0xb701
OLCUC = 0x2
ONLCR = 0x4
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
index c523090e7c17e..9476628fa02b8 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
@@ -154,9 +154,14 @@ const (
NFDBITS = 0x40
NLDLY = 0x100
NOFLSH = 0x80
+ NS_GET_MNTNS_ID = 0x8008b705
NS_GET_NSTYPE = 0xb703
NS_GET_OWNER_UID = 0xb704
NS_GET_PARENT = 0xb702
+ NS_GET_PID_FROM_PIDNS = 0x8004b706
+ NS_GET_PID_IN_PIDNS = 0x8004b708
+ NS_GET_TGID_FROM_PIDNS = 0x8004b707
+ NS_GET_TGID_IN_PIDNS = 0x8004b709
NS_GET_USERNS = 0xb701
OLCUC = 0x2
ONLCR = 0x4
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
index 01e6ea7804b12..b9e85f3cf0c05 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
@@ -150,9 +150,14 @@ const (
NFDBITS = 0x20
NLDLY = 0x100
NOFLSH = 0x80
+ NS_GET_MNTNS_ID = 0x4008b705
NS_GET_NSTYPE = 0x2000b703
NS_GET_OWNER_UID = 0x2000b704
NS_GET_PARENT = 0x2000b702
+ NS_GET_PID_FROM_PIDNS = 0x4004b706
+ NS_GET_PID_IN_PIDNS = 0x4004b708
+ NS_GET_TGID_FROM_PIDNS = 0x4004b707
+ NS_GET_TGID_IN_PIDNS = 0x4004b709
NS_GET_USERNS = 0x2000b701
OLCUC = 0x2
ONLCR = 0x4
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
index 7aa610b1e717b..a48b68a7647ef 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
@@ -150,9 +150,14 @@ const (
NFDBITS = 0x40
NLDLY = 0x100
NOFLSH = 0x80
+ NS_GET_MNTNS_ID = 0x4008b705
NS_GET_NSTYPE = 0x2000b703
NS_GET_OWNER_UID = 0x2000b704
NS_GET_PARENT = 0x2000b702
+ NS_GET_PID_FROM_PIDNS = 0x4004b706
+ NS_GET_PID_IN_PIDNS = 0x4004b708
+ NS_GET_TGID_FROM_PIDNS = 0x4004b707
+ NS_GET_TGID_IN_PIDNS = 0x4004b709
NS_GET_USERNS = 0x2000b701
OLCUC = 0x2
ONLCR = 0x4
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
index 92af771b44a35..ea00e8522a159 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
@@ -150,9 +150,14 @@ const (
NFDBITS = 0x40
NLDLY = 0x100
NOFLSH = 0x80
+ NS_GET_MNTNS_ID = 0x4008b705
NS_GET_NSTYPE = 0x2000b703
NS_GET_OWNER_UID = 0x2000b704
NS_GET_PARENT = 0x2000b702
+ NS_GET_PID_FROM_PIDNS = 0x4004b706
+ NS_GET_PID_IN_PIDNS = 0x4004b708
+ NS_GET_TGID_FROM_PIDNS = 0x4004b707
+ NS_GET_TGID_IN_PIDNS = 0x4004b709
NS_GET_USERNS = 0x2000b701
OLCUC = 0x2
ONLCR = 0x4
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
index b27ef5e6f1195..91c64687176a9 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
@@ -150,9 +150,14 @@ const (
NFDBITS = 0x20
NLDLY = 0x100
NOFLSH = 0x80
+ NS_GET_MNTNS_ID = 0x4008b705
NS_GET_NSTYPE = 0x2000b703
NS_GET_OWNER_UID = 0x2000b704
NS_GET_PARENT = 0x2000b702
+ NS_GET_PID_FROM_PIDNS = 0x4004b706
+ NS_GET_PID_IN_PIDNS = 0x4004b708
+ NS_GET_TGID_FROM_PIDNS = 0x4004b707
+ NS_GET_TGID_IN_PIDNS = 0x4004b709
NS_GET_USERNS = 0x2000b701
OLCUC = 0x2
ONLCR = 0x4
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
index 237a2cefb3e5a..8cbf38d639016 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
@@ -152,9 +152,14 @@ const (
NL3 = 0x300
NLDLY = 0x300
NOFLSH = 0x80000000
+ NS_GET_MNTNS_ID = 0x4008b705
NS_GET_NSTYPE = 0x2000b703
NS_GET_OWNER_UID = 0x2000b704
NS_GET_PARENT = 0x2000b702
+ NS_GET_PID_FROM_PIDNS = 0x4004b706
+ NS_GET_PID_IN_PIDNS = 0x4004b708
+ NS_GET_TGID_FROM_PIDNS = 0x4004b707
+ NS_GET_TGID_IN_PIDNS = 0x4004b709
NS_GET_USERNS = 0x2000b701
OLCUC = 0x4
ONLCR = 0x2
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
index 4a5c555a36e2b..a2df7341917ec 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
@@ -152,9 +152,14 @@ const (
NL3 = 0x300
NLDLY = 0x300
NOFLSH = 0x80000000
+ NS_GET_MNTNS_ID = 0x4008b705
NS_GET_NSTYPE = 0x2000b703
NS_GET_OWNER_UID = 0x2000b704
NS_GET_PARENT = 0x2000b702
+ NS_GET_PID_FROM_PIDNS = 0x4004b706
+ NS_GET_PID_IN_PIDNS = 0x4004b708
+ NS_GET_TGID_FROM_PIDNS = 0x4004b707
+ NS_GET_TGID_IN_PIDNS = 0x4004b709
NS_GET_USERNS = 0x2000b701
OLCUC = 0x4
ONLCR = 0x2
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
index a02fb49a5f8ad..2479137923331 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
@@ -152,9 +152,14 @@ const (
NL3 = 0x300
NLDLY = 0x300
NOFLSH = 0x80000000
+ NS_GET_MNTNS_ID = 0x4008b705
NS_GET_NSTYPE = 0x2000b703
NS_GET_OWNER_UID = 0x2000b704
NS_GET_PARENT = 0x2000b702
+ NS_GET_PID_FROM_PIDNS = 0x4004b706
+ NS_GET_PID_IN_PIDNS = 0x4004b708
+ NS_GET_TGID_FROM_PIDNS = 0x4004b707
+ NS_GET_TGID_IN_PIDNS = 0x4004b709
NS_GET_USERNS = 0x2000b701
OLCUC = 0x4
ONLCR = 0x2
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
index e26a7c61b2b6f..d265f146ee016 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
@@ -150,9 +150,14 @@ const (
NFDBITS = 0x40
NLDLY = 0x100
NOFLSH = 0x80
+ NS_GET_MNTNS_ID = 0x8008b705
NS_GET_NSTYPE = 0xb703
NS_GET_OWNER_UID = 0xb704
NS_GET_PARENT = 0xb702
+ NS_GET_PID_FROM_PIDNS = 0x8004b706
+ NS_GET_PID_IN_PIDNS = 0x8004b708
+ NS_GET_TGID_FROM_PIDNS = 0x8004b707
+ NS_GET_TGID_IN_PIDNS = 0x8004b709
NS_GET_USERNS = 0xb701
OLCUC = 0x2
ONLCR = 0x4
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
index c48f7c2103b81..3f2d6443964ff 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
@@ -150,9 +150,14 @@ const (
NFDBITS = 0x40
NLDLY = 0x100
NOFLSH = 0x80
+ NS_GET_MNTNS_ID = 0x8008b705
NS_GET_NSTYPE = 0xb703
NS_GET_OWNER_UID = 0xb704
NS_GET_PARENT = 0xb702
+ NS_GET_PID_FROM_PIDNS = 0x8004b706
+ NS_GET_PID_IN_PIDNS = 0x8004b708
+ NS_GET_TGID_FROM_PIDNS = 0x8004b707
+ NS_GET_TGID_IN_PIDNS = 0x8004b709
NS_GET_USERNS = 0xb701
OLCUC = 0x2
ONLCR = 0x4
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
index ad4b9aace7bb6..5d8b727a1c837 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
@@ -155,9 +155,14 @@ const (
NFDBITS = 0x40
NLDLY = 0x100
NOFLSH = 0x80
+ NS_GET_MNTNS_ID = 0x4008b705
NS_GET_NSTYPE = 0x2000b703
NS_GET_OWNER_UID = 0x2000b704
NS_GET_PARENT = 0x2000b702
+ NS_GET_PID_FROM_PIDNS = 0x4004b706
+ NS_GET_PID_IN_PIDNS = 0x4004b708
+ NS_GET_TGID_FROM_PIDNS = 0x4004b707
+ NS_GET_TGID_IN_PIDNS = 0x4004b709
NS_GET_USERNS = 0x2000b701
OLCUC = 0x2
ONLCR = 0x4
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go
index 1bc1a5adb25fd..af30da5578031 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go
@@ -971,23 +971,6 @@ func Getpriority(which int, who int) (prio int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Getrandom(buf []byte, flags int) (n int, err error) {
- var _p0 unsafe.Pointer
- if len(buf) > 0 {
- _p0 = unsafe.Pointer(&buf[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func Getrusage(who int, rusage *Rusage) (err error) {
_, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0)
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
index d3e38f681ab03..f485dbf456567 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
@@ -341,6 +341,7 @@ const (
SYS_STATX = 332
SYS_IO_PGETEVENTS = 333
SYS_RSEQ = 334
+ SYS_URETPROBE = 335
SYS_PIDFD_SEND_SIGNAL = 424
SYS_IO_URING_SETUP = 425
SYS_IO_URING_ENTER = 426
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
index 6c778c23278f9..1893e2fe88404 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
@@ -85,7 +85,7 @@ const (
SYS_SPLICE = 76
SYS_TEE = 77
SYS_READLINKAT = 78
- SYS_FSTATAT = 79
+ SYS_NEWFSTATAT = 79
SYS_FSTAT = 80
SYS_SYNC = 81
SYS_FSYNC = 82
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go
index 37281cf51a80b..16a4017da0ab2 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go
@@ -84,6 +84,8 @@ const (
SYS_SPLICE = 76
SYS_TEE = 77
SYS_READLINKAT = 78
+ SYS_NEWFSTATAT = 79
+ SYS_FSTAT = 80
SYS_SYNC = 81
SYS_FSYNC = 82
SYS_FDATASYNC = 83
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
index 9889f6a5591b6..a5459e766f59d 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
@@ -84,7 +84,7 @@ const (
SYS_SPLICE = 76
SYS_TEE = 77
SYS_READLINKAT = 78
- SYS_FSTATAT = 79
+ SYS_NEWFSTATAT = 79
SYS_FSTAT = 80
SYS_SYNC = 81
SYS_FSYNC = 82
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go
index 9f2550dc3120d..3a69e45496268 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go
@@ -87,31 +87,35 @@ type StatxTimestamp struct {
}
type Statx_t struct {
- Mask uint32
- Blksize uint32
- Attributes uint64
- Nlink uint32
- Uid uint32
- Gid uint32
- Mode uint16
- _ [1]uint16
- Ino uint64
- Size uint64
- Blocks uint64
- Attributes_mask uint64
- Atime StatxTimestamp
- Btime StatxTimestamp
- Ctime StatxTimestamp
- Mtime StatxTimestamp
- Rdev_major uint32
- Rdev_minor uint32
- Dev_major uint32
- Dev_minor uint32
- Mnt_id uint64
- Dio_mem_align uint32
- Dio_offset_align uint32
- Subvol uint64
- _ [11]uint64
+ Mask uint32
+ Blksize uint32
+ Attributes uint64
+ Nlink uint32
+ Uid uint32
+ Gid uint32
+ Mode uint16
+ _ [1]uint16
+ Ino uint64
+ Size uint64
+ Blocks uint64
+ Attributes_mask uint64
+ Atime StatxTimestamp
+ Btime StatxTimestamp
+ Ctime StatxTimestamp
+ Mtime StatxTimestamp
+ Rdev_major uint32
+ Rdev_minor uint32
+ Dev_major uint32
+ Dev_minor uint32
+ Mnt_id uint64
+ Dio_mem_align uint32
+ Dio_offset_align uint32
+ Subvol uint64
+ Atomic_write_unit_min uint32
+ Atomic_write_unit_max uint32
+ Atomic_write_segments_max uint32
+ _ [1]uint32
+ _ [9]uint64
}
type Fsid struct {
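
Editor's note, not part of the patch: a sketch of reading the new atomic-write geometry fields added to Statx_t above. STATX_WRITE_ATOMIC is the new request bit from the zerrors change; kernels without atomic-write support (assumed to need roughly Linux 6.11+) simply leave the bit unset in the returned mask, so the result must be checked:

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	var stx unix.Statx_t
	mask := unix.STATX_BASIC_STATS | unix.STATX_WRITE_ATOMIC
	if err := unix.Statx(unix.AT_FDCWD, "/tmp", 0, mask, &stx); err != nil {
		log.Fatal(err)
	}
	if stx.Mask&unix.STATX_WRITE_ATOMIC != 0 {
		fmt.Printf("atomic write units: min=%d max=%d segments=%d\n",
			stx.Atomic_write_unit_min, stx.Atomic_write_unit_max,
			stx.Atomic_write_segments_max)
	} else {
		fmt.Println("kernel did not report atomic-write geometry")
	}
}
```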
@@ -516,6 +520,29 @@ type TCPInfo struct {
Total_rto_time uint32
}
+type TCPVegasInfo struct {
+ Enabled uint32
+ Rttcnt uint32
+ Rtt uint32
+ Minrtt uint32
+}
+
+type TCPDCTCPInfo struct {
+ Enabled uint16
+ Ce_state uint16
+ Alpha uint32
+ Ab_ecn uint32
+ Ab_tot uint32
+}
+
+type TCPBBRInfo struct {
+ Bw_lo uint32
+ Bw_hi uint32
+ Min_rtt uint32
+ Pacing_gain uint32
+ Cwnd_gain uint32
+}
+
type CanFilter struct {
Id uint32
Mask uint32
@@ -557,6 +584,7 @@ const (
SizeofICMPv6Filter = 0x20
SizeofUcred = 0xc
SizeofTCPInfo = 0xf8
+ SizeofTCPCCInfo = 0x14
SizeofCanFilter = 0x8
SizeofTCPRepairOpt = 0x8
)
@@ -3766,7 +3794,7 @@ const (
ETHTOOL_MSG_PSE_GET = 0x24
ETHTOOL_MSG_PSE_SET = 0x25
ETHTOOL_MSG_RSS_GET = 0x26
- ETHTOOL_MSG_USER_MAX = 0x2b
+ ETHTOOL_MSG_USER_MAX = 0x2c
ETHTOOL_MSG_KERNEL_NONE = 0x0
ETHTOOL_MSG_STRSET_GET_REPLY = 0x1
ETHTOOL_MSG_LINKINFO_GET_REPLY = 0x2
@@ -3806,7 +3834,7 @@ const (
ETHTOOL_MSG_MODULE_NTF = 0x24
ETHTOOL_MSG_PSE_GET_REPLY = 0x25
ETHTOOL_MSG_RSS_GET_REPLY = 0x26
- ETHTOOL_MSG_KERNEL_MAX = 0x2b
+ ETHTOOL_MSG_KERNEL_MAX = 0x2c
ETHTOOL_FLAG_COMPACT_BITSETS = 0x1
ETHTOOL_FLAG_OMIT_REPLY = 0x2
ETHTOOL_FLAG_STATS = 0x4
@@ -3951,7 +3979,7 @@ const (
ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL = 0x17
ETHTOOL_A_COALESCE_USE_CQE_MODE_TX = 0x18
ETHTOOL_A_COALESCE_USE_CQE_MODE_RX = 0x19
- ETHTOOL_A_COALESCE_MAX = 0x1c
+ ETHTOOL_A_COALESCE_MAX = 0x1e
ETHTOOL_A_PAUSE_UNSPEC = 0x0
ETHTOOL_A_PAUSE_HEADER = 0x1
ETHTOOL_A_PAUSE_AUTONEG = 0x2
@@ -4609,7 +4637,7 @@ const (
NL80211_ATTR_MAC_HINT = 0xc8
NL80211_ATTR_MAC_MASK = 0xd7
NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca
- NL80211_ATTR_MAX = 0x14a
+ NL80211_ATTR_MAX = 0x14c
NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4
NL80211_ATTR_MAX_CSA_COUNTERS = 0xce
NL80211_ATTR_MAX_MATCH_SETS = 0x85
@@ -5213,7 +5241,7 @@ const (
NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf
NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe
NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf
- NL80211_FREQUENCY_ATTR_MAX = 0x20
+ NL80211_FREQUENCY_ATTR_MAX = 0x21
NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6
NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11
NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc
diff --git a/vendor/golang.org/x/sys/windows/dll_windows.go b/vendor/golang.org/x/sys/windows/dll_windows.go
index 115341fba66da..4e613cf6335ce 100644
--- a/vendor/golang.org/x/sys/windows/dll_windows.go
+++ b/vendor/golang.org/x/sys/windows/dll_windows.go
@@ -65,7 +65,7 @@ func LoadDLL(name string) (dll *DLL, err error) {
return d, nil
}
-// MustLoadDLL is like LoadDLL but panics if load operation failes.
+// MustLoadDLL is like LoadDLL but panics if load operation fails.
func MustLoadDLL(name string) *DLL {
d, e := LoadDLL(name)
if e != nil {
diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go
index 8f6c7f493f8e9..93a798ab63704 100644
--- a/vendor/golang.org/x/time/rate/rate.go
+++ b/vendor/golang.org/x/time/rate/rate.go
@@ -99,8 +99,9 @@ func (lim *Limiter) Tokens() float64 {
// bursts of at most b tokens.
func NewLimiter(r Limit, b int) *Limiter {
return &Limiter{
- limit: r,
- burst: b,
+ limit: r,
+ burst: b,
+ tokens: float64(b),
}
}
@@ -344,18 +345,6 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration)
tokens: n,
timeToAct: t,
}
- } else if lim.limit == 0 {
- var ok bool
- if lim.burst >= n {
- ok = true
- lim.burst -= n
- }
- return Reservation{
- ok: ok,
- lim: lim,
- tokens: lim.burst,
- timeToAct: t,
- }
}
t, tokens := lim.advance(t)
diff --git a/vendor/google.golang.org/api/compute/v1/compute-api.json b/vendor/google.golang.org/api/compute/v1/compute-api.json
index 70bd1cc810f51..7ce5ff86ac875 100644
--- a/vendor/google.golang.org/api/compute/v1/compute-api.json
+++ b/vendor/google.golang.org/api/compute/v1/compute-api.json
@@ -1915,7 +1915,7 @@
]
},
"listUsable": {
- "description": "Retrieves an aggregated list of all usable backend services in the specified project.",
+ "description": "Retrieves a list of all usable backend services in the specified project.",
"flatPath": "projects/{project}/global/backendServices/listUsable",
"httpMethod": "GET",
"id": "compute.backendServices.listUsable",
@@ -4814,364 +4814,6 @@
}
}
},
- "futureReservations": {
- "methods": {
- "aggregatedList": {
- "description": "Retrieves an aggregated list of future reservations. To prevent failure, Google recommends that you set the `returnPartialSuccess` parameter to `true`.",
- "flatPath": "projects/{project}/aggregated/futureReservations",
- "httpMethod": "GET",
- "id": "compute.futureReservations.aggregatedList",
- "parameterOrder": [
- "project"
- ],
- "parameters": {
- "filter": {
- "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.",
- "location": "query",
- "type": "string"
- },
- "includeAllScopes": {
- "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.",
- "location": "query",
- "type": "boolean"
- },
- "maxResults": {
- "default": "500",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)",
- "format": "uint32",
- "location": "query",
- "minimum": "0",
- "type": "integer"
- },
- "orderBy": {
- "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.",
- "location": "query",
- "type": "string"
- },
- "pageToken": {
- "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.",
- "location": "query",
- "type": "string"
- },
- "project": {
- "description": "Project ID for this request.",
- "location": "path",
- "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))",
- "required": true,
- "type": "string"
- },
- "returnPartialSuccess": {
- "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false. For example, when partial success behavior is enabled, aggregatedList for a single zone scope either returns all resources in the zone or no resources, with an error code.",
- "location": "query",
- "type": "boolean"
- },
- "serviceProjectNumber": {
- "description": "The Shared VPC service project id or service project number for which aggregated list request is invoked for subnetworks list-usable api.",
- "format": "int64",
- "location": "query",
- "type": "string"
- }
- },
- "path": "projects/{project}/aggregated/futureReservations",
- "response": {
- "$ref": "FutureReservationsAggregatedListResponse"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/compute",
- "https://www.googleapis.com/auth/compute.readonly"
- ]
- },
- "cancel": {
- "description": "Cancel the specified future reservation.",
- "flatPath": "projects/{project}/zones/{zone}/futureReservations/{futureReservation}/cancel",
- "httpMethod": "POST",
- "id": "compute.futureReservations.cancel",
- "parameterOrder": [
- "project",
- "zone",
- "futureReservation"
- ],
- "parameters": {
- "futureReservation": {
- "description": "Name of the future reservation to retrieve. Name should conform to RFC1035.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "project": {
- "description": "Project ID for this request.",
- "location": "path",
- "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))",
- "required": true,
- "type": "string"
- },
- "requestId": {
- "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).",
- "location": "query",
- "type": "string"
- },
- "zone": {
- "description": "Name of the zone for this request. Name should conform to RFC1035.",
- "location": "path",
- "required": true,
- "type": "string"
- }
- },
- "path": "projects/{project}/zones/{zone}/futureReservations/{futureReservation}/cancel",
- "response": {
- "$ref": "Operation"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/compute"
- ]
- },
- "delete": {
- "description": "Deletes the specified future reservation.",
- "flatPath": "projects/{project}/zones/{zone}/futureReservations/{futureReservation}",
- "httpMethod": "DELETE",
- "id": "compute.futureReservations.delete",
- "parameterOrder": [
- "project",
- "zone",
- "futureReservation"
- ],
- "parameters": {
- "futureReservation": {
- "description": "Name of the future reservation to retrieve. Name should conform to RFC1035.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "project": {
- "description": "Project ID for this request.",
- "location": "path",
- "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))",
- "required": true,
- "type": "string"
- },
- "requestId": {
- "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).",
- "location": "query",
- "type": "string"
- },
- "zone": {
- "description": "Name of the zone for this request. Name should conform to RFC1035.",
- "location": "path",
- "required": true,
- "type": "string"
- }
- },
- "path": "projects/{project}/zones/{zone}/futureReservations/{futureReservation}",
- "response": {
- "$ref": "Operation"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/compute"
- ]
- },
- "get": {
- "description": "Retrieves information about the specified future reservation.",
- "flatPath": "projects/{project}/zones/{zone}/futureReservations/{futureReservation}",
- "httpMethod": "GET",
- "id": "compute.futureReservations.get",
- "parameterOrder": [
- "project",
- "zone",
- "futureReservation"
- ],
- "parameters": {
- "futureReservation": {
- "description": "Name of the future reservation to retrieve. Name should conform to RFC1035.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "project": {
- "description": "Project ID for this request.",
- "location": "path",
- "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))",
- "required": true,
- "type": "string"
- },
- "zone": {
- "description": "Name of the zone for this request. Name should conform to RFC1035.",
- "location": "path",
- "required": true,
- "type": "string"
- }
- },
- "path": "projects/{project}/zones/{zone}/futureReservations/{futureReservation}",
- "response": {
- "$ref": "FutureReservation"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/compute",
- "https://www.googleapis.com/auth/compute.readonly"
- ]
- },
- "insert": {
- "description": "Creates a new Future Reservation.",
- "flatPath": "projects/{project}/zones/{zone}/futureReservations",
- "httpMethod": "POST",
- "id": "compute.futureReservations.insert",
- "parameterOrder": [
- "project",
- "zone"
- ],
- "parameters": {
- "project": {
- "description": "Project ID for this request.",
- "location": "path",
- "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))",
- "required": true,
- "type": "string"
- },
- "requestId": {
- "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).",
- "location": "query",
- "type": "string"
- },
- "zone": {
- "description": "Name of the zone for this request. Name should conform to RFC1035.",
- "location": "path",
- "required": true,
- "type": "string"
- }
- },
- "path": "projects/{project}/zones/{zone}/futureReservations",
- "request": {
- "$ref": "FutureReservation"
- },
- "response": {
- "$ref": "Operation"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/compute"
- ]
- },
- "list": {
- "description": "A list of all the future reservations that have been configured for the specified project in specified zone.",
- "flatPath": "projects/{project}/zones/{zone}/futureReservations",
- "httpMethod": "GET",
- "id": "compute.futureReservations.list",
- "parameterOrder": [
- "project",
- "zone"
- ],
- "parameters": {
- "filter": {
- "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.",
- "location": "query",
- "type": "string"
- },
- "maxResults": {
- "default": "500",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)",
- "format": "uint32",
- "location": "query",
- "minimum": "0",
- "type": "integer"
- },
- "orderBy": {
- "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.",
- "location": "query",
- "type": "string"
- },
- "pageToken": {
- "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.",
- "location": "query",
- "type": "string"
- },
- "project": {
- "description": "Project ID for this request.",
- "location": "path",
- "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))",
- "required": true,
- "type": "string"
- },
- "returnPartialSuccess": {
- "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false. For example, when partial success behavior is enabled, aggregatedList for a single zone scope either returns all resources in the zone or no resources, with an error code.",
- "location": "query",
- "type": "boolean"
- },
- "zone": {
- "description": "Name of the zone for this request. Name should conform to RFC1035.",
- "location": "path",
- "required": true,
- "type": "string"
- }
- },
- "path": "projects/{project}/zones/{zone}/futureReservations",
- "response": {
- "$ref": "FutureReservationsListResponse"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/compute",
- "https://www.googleapis.com/auth/compute.readonly"
- ]
- },
- "update": {
- "description": "Updates the specified future reservation.",
- "flatPath": "projects/{project}/zones/{zone}/futureReservations/{futureReservation}",
- "httpMethod": "PATCH",
- "id": "compute.futureReservations.update",
- "parameterOrder": [
- "project",
- "zone",
- "futureReservation"
- ],
- "parameters": {
- "futureReservation": {
- "description": "Name of the reservation to update. Name should conform to RFC1035.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "project": {
- "description": "Project ID for this request.",
- "location": "path",
- "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))",
- "required": true,
- "type": "string"
- },
- "requestId": {
- "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).",
- "location": "query",
- "type": "string"
- },
- "updateMask": {
- "description": "update_mask indicates fields to be updated as part of this request.",
- "format": "google-fieldmask",
- "location": "query",
- "type": "string"
- },
- "zone": {
- "description": "Name of the zone for this request. Name should conform to RFC1035.",
- "location": "path",
- "required": true,
- "type": "string"
- }
- },
- "path": "projects/{project}/zones/{zone}/futureReservations/{futureReservation}",
- "request": {
- "$ref": "FutureReservation"
- },
- "response": {
- "$ref": "Operation"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/compute"
- ]
- }
- }
- },
"globalAddresses": {
"methods": {
"delete": {
@@ -20638,7 +20280,7 @@
]
},
"listUsable": {
- "description": "Retrieves an aggregated list of all usable backend services in the specified project in the given region.",
+ "description": "Retrieves a list of all usable backend services in the specified project in the given region.",
"flatPath": "projects/{project}/regions/{region}/backendServices/listUsable",
"httpMethod": "GET",
"id": "compute.regionBackendServices.listUsable",
@@ -37779,7 +37421,7 @@
}
}
},
- "revision": "20240827",
+ "revision": "20241001",
"rootUrl": "https://compute.googleapis.com/",
"schemas": {
"AWSV4Signature": {
@@ -39508,13 +39150,6 @@
},
"type": "array"
},
- "exemptedMembers": {
- "description": "This is deprecated and has no effect. Do not use.",
- "items": {
- "type": "string"
- },
- "type": "array"
- },
"service": {
"description": "Specifies a service that will be enabled for audit logging. For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. `allServices` is a special value that covers all services.",
"type": "string"
@@ -39533,10 +39168,6 @@
},
"type": "array"
},
- "ignoreChildExemptions": {
- "description": "This is deprecated and has no effect. Do not use.",
- "type": "boolean"
- },
"logType": {
"description": "The log type that this config enables.",
"enum": [
@@ -40488,6 +40119,13 @@
"selfLink": {
"description": "[Output Only] Server-defined URL for the resource.",
"type": "string"
+ },
+ "usedBy": {
+ "description": "[Output Only] List of resources referencing that backend bucket.",
+ "items": {
+ "$ref": "BackendBucketUsedBy"
+ },
+ "type": "array"
}
},
"type": "object"
@@ -40774,6 +40412,16 @@
},
"type": "object"
},
+ "BackendBucketUsedBy": {
+ "id": "BackendBucketUsedBy",
+ "properties": {
+ "reference": {
+ "description": "[Output Only] Server-defined URL for UrlMaps referencing that BackendBucket.",
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
"BackendService": {
"description": "Represents a Backend Service resource. A backend service defines how Google Cloud load balancers distribute traffic. The backend service configuration contains a set of values, such as the protocol used to connect to backends, various distribution and session settings, health checks, and timeouts. These settings provide fine-grained control over how your load balancer behaves. Most of the settings have default values that allow for easy configuration if you need to get started quickly. Backend services in Google Compute Engine can be either regionally or globally scoped. * [Global](https://cloud.google.com/compute/docs/reference/rest/v1/backendServices) * [Regional](https://cloud.google.com/compute/docs/reference/rest/v1/regionBackendServices) For more information, see Backend Services.",
"id": "BackendService",
@@ -40875,6 +40523,22 @@
"format": "uint64",
"type": "string"
},
+ "ipAddressSelectionPolicy": {
+ "description": "Specifies a preference for traffic sent from the proxy to the backend (or from the client to the backend for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv4 health checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv6 health checks are used to check the health of the backends. This field is applicable to either: - Advanced global external Application Load Balancer (load balancing scheme EXTERNAL_MANAGED), - Regional external Application Load Balancer, - Internal proxy Network Load Balancer (load balancing scheme INTERNAL_MANAGED), - Regional internal Application Load Balancer (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). ",
+ "enum": [
+ "IPV4_ONLY",
+ "IPV6_ONLY",
+ "IP_ADDRESS_SELECTION_POLICY_UNSPECIFIED",
+ "PREFER_IPV6"
+ ],
+ "enumDescriptions": [
+ "Only send IPv4 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv4 health-checks are used to check the health of the backends. This is the default setting.",
+ "Only send IPv6 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv6 health-checks are used to check the health of the backends.",
+        "Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address)."
+ "Prioritize the connection to the endpoints IPv6 address over its IPv4 address (provided there is a healthy IPv6 address)."
+ ],
+ "type": "string"
+ },
"kind": {
"default": "compute#backendService",
"description": "[Output Only] Type of resource. Always compute#backendService for backend services.",
@@ -42291,7 +41955,7 @@
},
"locationPolicy": {
"$ref": "LocationPolicy",
- "description": "Policy for chosing target zone. For more information, see Create VMs in bulk ."
+ "description": "Policy for choosing target zone. For more information, see Create VMs in bulk."
},
"minCount": {
"description": "The minimum number of instances to create. If no min_count is specified then count is used as the default value. If min_count instances cannot be created, then no instances will be created and instances already created will be deleted.",
@@ -42491,6 +42155,10 @@
"description": "[Output Only] Creation timestamp in RFC3339 text format.",
"type": "string"
},
+ "customEndTimestamp": {
+      "customEndTimestamp": {
+        "description": "[Input Only] Optional, specifies the CUD end time requested by the customer in RFC3339 text format. Needed when the customer wants the CUD's end date to be later than the start date + term duration.",
+ },
"description": {
"description": "An optional description of this resource. Provide this property when you create the resource.",
"type": "string"
@@ -42557,6 +42225,10 @@
},
"type": "array"
},
+ "resourceStatus": {
+ "$ref": "CommitmentResourceStatus",
+ "description": "[Output Only] Status information for Commitment resource."
+ },
"resources": {
"description": "A list of commitment amounts for particular resources. Note that VCPU and MEMORY resource commitments must occur together.",
"items": {
@@ -42960,6 +42632,17 @@
},
"type": "object"
},
+ "CommitmentResourceStatus": {
+ "description": "[Output Only] Contains output only fields.",
+ "id": "CommitmentResourceStatus",
+ "properties": {
+ "customTermEligibilityEndTimestamp": {
+        "description": "[Output Only] Indicates the end time of the customer's eligibility to send custom term requests, in RFC3339 text format. Term extension requests that are sent (not the end time in the request) after this time will be rejected.",
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
"CommitmentsScopedList": {
"id": "CommitmentsScopedList",
"properties": {
@@ -43183,12 +42866,14 @@
"enum": [
"CONFIDENTIAL_INSTANCE_TYPE_UNSPECIFIED",
"SEV",
- "SEV_SNP"
+ "SEV_SNP",
+ "TDX"
],
"enumDescriptions": [
"No type specified. Do not use this value.",
"AMD Secure Encrypted Virtualization.",
- "AMD Secure Encrypted Virtualization - Secure Nested Paging."
+ "AMD Secure Encrypted Virtualization - Secure Nested Paging.",
+ "Intel Trust Domain eXtension."
],
"type": "string"
},
@@ -46979,781 +46664,6 @@
},
"type": "object"
},
- "FutureReservation": {
- "id": "FutureReservation",
- "properties": {
- "autoCreatedReservationsDeleteTime": {
- "description": "Future timestamp when the FR auto-created reservations will be deleted by Compute Engine. Format of this field must be a valid href=\"https://www.ietf.org/rfc/rfc3339.txt\"\u003eRFC3339 value.",
- "type": "string"
- },
- "autoCreatedReservationsDuration": {
- "$ref": "Duration",
- "description": "Specifies the duration of auto-created reservations. It represents relative time to future reservation start_time when auto-created reservations will be automatically deleted by Compute Engine. Duration time unit is represented as a count of seconds and fractions of seconds at nanosecond resolution."
- },
- "autoDeleteAutoCreatedReservations": {
- "description": "Setting for enabling or disabling automatic deletion for auto-created reservation. If set to true, auto-created reservations will be deleted at Future Reservation's end time (default) or at user's defined timestamp if any of the [auto_created_reservations_delete_time, auto_created_reservations_duration] values is specified. For keeping auto-created reservation indefinitely, this value should be set to false.",
- "type": "boolean"
- },
- "creationTimestamp": {
- "description": "[Output Only] The creation timestamp for this future reservation in RFC3339 text format.",
- "type": "string"
- },
- "description": {
- "description": "An optional description of this resource. Provide this property when you create the future reservation.",
- "type": "string"
- },
- "id": {
- "description": "[Output Only] A unique identifier for this future reservation. The server defines this identifier.",
- "format": "uint64",
- "type": "string"
- },
- "kind": {
- "default": "compute#futureReservation",
- "description": "[Output Only] Type of the resource. Always compute#futureReservation for future reservations.",
- "type": "string"
- },
- "name": {
- "annotations": {
- "required": [
- "compute.instances.insert"
- ]
- },
- "description": "The name of the resource, provided by the client when initially creating the resource. The resource name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.",
- "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
- "type": "string"
- },
- "namePrefix": {
- "description": "Name prefix for the reservations to be created at the time of delivery. The name prefix must comply with RFC1035. Maximum allowed length for name prefix is 20. Automatically created reservations name format will be -date-####.",
- "type": "string"
- },
- "planningStatus": {
- "description": "Planning state before being submitted for evaluation",
- "enum": [
- "DRAFT",
- "PLANNING_STATUS_UNSPECIFIED",
- "SUBMITTED"
- ],
- "enumDescriptions": [
- "Future Reservation is being drafted.",
- "",
- "Future Reservation has been submitted for evaluation by GCP."
- ],
- "type": "string"
- },
- "selfLink": {
- "description": "[Output Only] Server-defined fully-qualified URL for this resource.",
- "type": "string"
- },
- "selfLinkWithId": {
- "description": "[Output Only] Server-defined URL for this resource with the resource id.",
- "type": "string"
- },
- "shareSettings": {
- "$ref": "ShareSettings",
- "description": "List of Projects/Folders to share with."
- },
- "specificSkuProperties": {
- "$ref": "FutureReservationSpecificSKUProperties",
- "description": "Future Reservation configuration to indicate instance properties and total count."
- },
- "status": {
- "$ref": "FutureReservationStatus",
- "description": "[Output only] Status of the Future Reservation"
- },
- "timeWindow": {
- "$ref": "FutureReservationTimeWindow",
- "description": "Time window for this Future Reservation."
- },
- "zone": {
- "description": "[Output Only] URL of the Zone where this future reservation resides.",
- "type": "string"
- }
- },
- "type": "object"
- },
- "FutureReservationSpecificSKUProperties": {
- "id": "FutureReservationSpecificSKUProperties",
- "properties": {
- "instanceProperties": {
- "$ref": "AllocationSpecificSKUAllocationReservedInstanceProperties",
- "description": "Properties of the SKU instances being reserved."
- },
- "sourceInstanceTemplate": {
- "description": "The instance template that will be used to populate the ReservedInstanceProperties of the future reservation",
- "type": "string"
- },
- "totalCount": {
- "description": "Total number of instances for which capacity assurance is requested at a future time period.",
- "format": "int64",
- "type": "string"
- }
- },
- "type": "object"
- },
- "FutureReservationStatus": {
- "description": "[Output only] Represents status related to the future reservation.",
- "id": "FutureReservationStatus",
- "properties": {
- "amendmentStatus": {
- "description": "[Output Only] The current status of the requested amendment.",
- "enum": [
- "AMENDMENT_APPROVED",
- "AMENDMENT_DECLINED",
- "AMENDMENT_IN_REVIEW",
- "AMENDMENT_STATUS_UNSPECIFIED"
- ],
- "enumDescriptions": [
- "The requested amendment to the Future Resevation has been approved and applied by GCP.",
- "The requested amendment to the Future Reservation has been declined by GCP and the original state was restored.",
- "The requested amendment to the Future Reservation is currently being reviewd by GCP.",
- ""
- ],
- "type": "string"
- },
- "autoCreatedReservations": {
- "description": "Fully qualified urls of the automatically created reservations at start_time.",
- "items": {
- "type": "string"
- },
- "type": "array"
- },
- "existingMatchingUsageInfo": {
- "$ref": "FutureReservationStatusExistingMatchingUsageInfo",
- "description": "[Output Only] Represents the existing matching usage for the future reservation."
- },
- "fulfilledCount": {
- "description": "This count indicates the fulfilled capacity so far. This is set during \"PROVISIONING\" state. This count also includes capacity delivered as part of existing matching reservations.",
- "format": "int64",
- "type": "string"
- },
- "lastKnownGoodState": {
- "$ref": "FutureReservationStatusLastKnownGoodState",
- "description": "[Output Only] This field represents the future reservation before an amendment was requested. If the amendment is declined, the Future Reservation will be reverted to the last known good state. The last known good state is not set when updating a future reservation whose Procurement Status is DRAFTING."
- },
- "lockTime": {
- "description": "Time when Future Reservation would become LOCKED, after which no modifications to Future Reservation will be allowed. Applicable only after the Future Reservation is in the APPROVED state. The lock_time is an RFC3339 string. The procurement_status will transition to PROCURING state at this time.",
- "type": "string"
- },
- "procurementStatus": {
- "description": "Current state of this Future Reservation",
- "enum": [
- "APPROVED",
- "CANCELLED",
- "COMMITTED",
- "DECLINED",
- "DRAFTING",
- "FAILED",
- "FAILED_PARTIALLY_FULFILLED",
- "FULFILLED",
- "PENDING_AMENDMENT_APPROVAL",
- "PENDING_APPROVAL",
- "PROCUREMENT_STATUS_UNSPECIFIED",
- "PROCURING",
- "PROVISIONING"
- ],
- "enumDescriptions": [
- "Future reservation is approved by GCP.",
- "Future reservation is cancelled by the customer.",
- "Future reservation is committed by the customer.",
- "Future reservation is rejected by GCP.",
- "Related status for PlanningStatus.Draft. Transitions to PENDING_APPROVAL upon user submitting FR.",
- "Future reservation failed. No additional reservations were provided.",
- "Future reservation is partially fulfilled. Additional reservations were provided but did not reach total_count reserved instance slots.",
- "Future reservation is fulfilled completely.",
- "An Amendment to the Future Reservation has been requested. If the Amendment is declined, the Future Reservation will be restored to the last known good state.",
- "Future reservation is pending approval by GCP.",
- "",
- "Future reservation is being procured by GCP. Beyond this point, Future reservation is locked and no further modifications are allowed.",
- "Future reservation capacity is being provisioned. This state will be entered after start_time, while reservations are being created to provide total_count reserved instance slots. This state will not persist past start_time + 24h."
- ],
- "type": "string"
- },
- "specificSkuProperties": {
- "$ref": "FutureReservationStatusSpecificSKUProperties"
- }
- },
- "type": "object"
- },
- "FutureReservationStatusExistingMatchingUsageInfo": {
- "description": "[Output Only] Represents the existing matching usage for the future reservation.",
- "id": "FutureReservationStatusExistingMatchingUsageInfo",
- "properties": {
- "count": {
- "description": "Count to represent min(FR total_count, matching_reserved_capacity+matching_unreserved_instances)",
- "format": "int64",
- "type": "string"
- },
- "timestamp": {
- "description": "Timestamp when the matching usage was calculated",
- "type": "string"
- }
- },
- "type": "object"
- },
- "FutureReservationStatusLastKnownGoodState": {
- "description": "The state that the future reservation will be reverted to should the amendment be declined.",
- "id": "FutureReservationStatusLastKnownGoodState",
- "properties": {
- "description": {
- "description": "[Output Only] The description of the FutureReservation before an amendment was requested.",
- "type": "string"
- },
- "existingMatchingUsageInfo": {
- "$ref": "FutureReservationStatusExistingMatchingUsageInfo",
- "description": "[Output Only] Represents the matching usage for the future reservation before an amendment was requested."
- },
- "futureReservationSpecs": {
- "$ref": "FutureReservationStatusLastKnownGoodStateFutureReservationSpecs"
- },
- "lockTime": {
- "description": "[Output Only] The lock time of the FutureReservation before an amendment was requested.",
- "type": "string"
- },
- "namePrefix": {
- "description": "[Output Only] The name prefix of the Future Reservation before an amendment was requested.",
- "type": "string"
- },
- "procurementStatus": {
- "description": "[Output Only] The status of the last known good state for the Future Reservation.",
- "enum": [
- "APPROVED",
- "CANCELLED",
- "COMMITTED",
- "DECLINED",
- "DRAFTING",
- "FAILED",
- "FAILED_PARTIALLY_FULFILLED",
- "FULFILLED",
- "PENDING_AMENDMENT_APPROVAL",
- "PENDING_APPROVAL",
- "PROCUREMENT_STATUS_UNSPECIFIED",
- "PROCURING",
- "PROVISIONING"
- ],
- "enumDescriptions": [
- "Future reservation is approved by GCP.",
- "Future reservation is cancelled by the customer.",
- "Future reservation is committed by the customer.",
- "Future reservation is rejected by GCP.",
- "Related status for PlanningStatus.Draft. Transitions to PENDING_APPROVAL upon user submitting FR.",
- "Future reservation failed. No additional reservations were provided.",
- "Future reservation is partially fulfilled. Additional reservations were provided but did not reach total_count reserved instance slots.",
- "Future reservation is fulfilled completely.",
- "An Amendment to the Future Reservation has been requested. If the Amendment is declined, the Future Reservation will be restored to the last known good state.",
- "Future reservation is pending approval by GCP.",
- "",
- "Future reservation is being procured by GCP. Beyond this point, Future reservation is locked and no further modifications are allowed.",
- "Future reservation capacity is being provisioned. This state will be entered after start_time, while reservations are being created to provide total_count reserved instance slots. This state will not persist past start_time + 24h."
- ],
- "type": "string"
- }
- },
- "type": "object"
- },
- "FutureReservationStatusLastKnownGoodStateFutureReservationSpecs": {
- "description": "The properties of the last known good state for the Future Reservation.",
- "id": "FutureReservationStatusLastKnownGoodStateFutureReservationSpecs",
- "properties": {
- "shareSettings": {
- "$ref": "ShareSettings",
- "description": "[Output Only] The previous share settings of the Future Reservation."
- },
- "specificSkuProperties": {
- "$ref": "FutureReservationSpecificSKUProperties",
- "description": "[Output Only] The previous instance related properties of the Future Reservation."
- },
- "timeWindow": {
- "$ref": "FutureReservationTimeWindow",
- "description": "[Output Only] The previous time window of the Future Reservation."
- }
- },
- "type": "object"
- },
- "FutureReservationStatusSpecificSKUProperties": {
- "description": "Properties to be set for the Future Reservation.",
- "id": "FutureReservationStatusSpecificSKUProperties",
- "properties": {
- "sourceInstanceTemplateId": {
- "description": "ID of the instance template used to populate the Future Reservation properties.",
- "type": "string"
- }
- },
- "type": "object"
- },
- "FutureReservationTimeWindow": {
- "id": "FutureReservationTimeWindow",
- "properties": {
- "duration": {
- "$ref": "Duration"
- },
- "endTime": {
- "type": "string"
- },
- "startTime": {
- "description": "Start time of the Future Reservation. The start_time is an RFC3339 string.",
- "type": "string"
- }
- },
- "type": "object"
- },
- "FutureReservationsAggregatedListResponse": {
- "description": "Contains a list of future reservations.",
- "id": "FutureReservationsAggregatedListResponse",
- "properties": {
- "etag": {
- "type": "string"
- },
- "id": {
- "description": "[Output Only] Unique identifier for the resource; defined by the server.",
- "type": "string"
- },
- "items": {
- "additionalProperties": {
- "$ref": "FutureReservationsScopedList",
- "description": "Name of the scope containing this set of future reservations."
- },
- "description": "A list of Future reservation resources.",
- "type": "object"
- },
- "kind": {
- "default": "compute#futureReservationsAggregatedListResponse",
- "description": "[Output Only] Type of resource. Always compute#futureReservationsAggregatedListResponse for future resevation aggregated list response.",
- "type": "string"
- },
- "nextPageToken": {
- "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.",
- "type": "string"
- },
- "selfLink": {
- "description": "[Output Only] Server-defined URL for this resource.",
- "type": "string"
- },
- "unreachables": {
- "description": "[Output Only] Unreachable resources.",
- "items": {
- "type": "string"
- },
- "type": "array"
- },
- "warning": {
- "description": "[Output Only] Informational warning message.",
- "properties": {
- "code": {
- "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.",
- "enum": [
- "CLEANUP_FAILED",
- "DEPRECATED_RESOURCE_USED",
- "DEPRECATED_TYPE_USED",
- "DISK_SIZE_LARGER_THAN_IMAGE_SIZE",
- "EXPERIMENTAL_TYPE_USED",
- "EXTERNAL_API_WARNING",
- "FIELD_VALUE_OVERRIDEN",
- "INJECTED_KERNELS_DEPRECATED",
- "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB",
- "LARGE_DEPLOYMENT_WARNING",
- "LIST_OVERHEAD_QUOTA_EXCEED",
- "MISSING_TYPE_DEPENDENCY",
- "NEXT_HOP_ADDRESS_NOT_ASSIGNED",
- "NEXT_HOP_CANNOT_IP_FORWARD",
- "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE",
- "NEXT_HOP_INSTANCE_NOT_FOUND",
- "NEXT_HOP_INSTANCE_NOT_ON_NETWORK",
- "NEXT_HOP_NOT_RUNNING",
- "NOT_CRITICAL_ERROR",
- "NO_RESULTS_ON_PAGE",
- "PARTIAL_SUCCESS",
- "REQUIRED_TOS_AGREEMENT",
- "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING",
- "RESOURCE_NOT_DELETED",
- "SCHEMA_VALIDATION_IGNORED",
- "SINGLE_INSTANCE_PROPERTY_TEMPLATE",
- "UNDECLARED_PROPERTIES",
- "UNREACHABLE"
- ],
- "enumDeprecated": [
- false,
- false,
- false,
- false,
- false,
- false,
- true,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false
- ],
- "enumDescriptions": [
- "Warning about failed cleanup of transient changes made by a failed operation.",
- "A link to a deprecated resource was created.",
- "When deploying and at least one of the resources has a type marked as deprecated",
- "The user created a boot disk that is larger than image size.",
- "When deploying and at least one of the resources has a type marked as experimental",
- "Warning that is present in an external api call",
- "Warning that value of a field has been overridden. Deprecated unused field.",
- "The operation involved use of an injected kernel, which is deprecated.",
- "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.",
- "When deploying a deployment with a exceedingly large number of resources",
- "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.",
- "A resource depends on a missing type",
- "The route's nextHopIp address is not assigned to an instance on the network.",
- "The route's next hop instance cannot ip forward.",
- "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.",
- "The route's nextHopInstance URL refers to an instance that does not exist.",
- "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.",
- "The route's next hop instance does not have a status of RUNNING.",
- "Error which is not critical. We decided to continue the process despite the mentioned error.",
- "No results are present on a particular list page.",
- "Success is reported, but some results may be missing due to errors",
- "The user attempted to use a resource that requires a TOS they have not accepted.",
- "Warning that a resource is in use.",
- "One or more of the resources set to auto-delete could not be deleted because they were in use.",
- "When a resource schema validation is ignored.",
- "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.",
- "When undeclared properties in the schema are present",
- "A given scope cannot be reached."
- ],
- "type": "string"
- },
- "data": {
- "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ",
- "items": {
- "properties": {
- "key": {
- "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).",
- "type": "string"
- },
- "value": {
- "description": "[Output Only] A warning data value corresponding to the key.",
- "type": "string"
- }
- },
- "type": "object"
- },
- "type": "array"
- },
- "message": {
- "description": "[Output Only] A human-readable description of the warning code.",
- "type": "string"
- }
- },
- "type": "object"
- }
- },
- "type": "object"
- },
- "FutureReservationsListResponse": {
- "id": "FutureReservationsListResponse",
- "properties": {
- "etag": {
- "type": "string"
- },
- "id": {
- "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.",
- "type": "string"
- },
- "items": {
- "description": "[Output Only] A list of future reservation resources.",
- "items": {
- "$ref": "FutureReservation"
- },
- "type": "array"
- },
- "kind": {
- "default": "compute#futureReservationsListResponse",
- "description": "[Output Only] Type of resource.Always compute#FutureReservationsListResponse for lists of reservations",
- "type": "string"
- },
- "nextPageToken": {
- "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.",
- "type": "string"
- },
- "selfLink": {
- "description": "[Output Only] Server-defined URL for this resource.",
- "type": "string"
- },
- "unreachables": {
- "description": "[Output Only] Unreachable resources.",
- "items": {
- "type": "string"
- },
- "type": "array"
- },
- "warning": {
- "description": "[Output Only] Informational warning message.",
- "properties": {
- "code": {
- "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.",
- "enum": [
- "CLEANUP_FAILED",
- "DEPRECATED_RESOURCE_USED",
- "DEPRECATED_TYPE_USED",
- "DISK_SIZE_LARGER_THAN_IMAGE_SIZE",
- "EXPERIMENTAL_TYPE_USED",
- "EXTERNAL_API_WARNING",
- "FIELD_VALUE_OVERRIDEN",
- "INJECTED_KERNELS_DEPRECATED",
- "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB",
- "LARGE_DEPLOYMENT_WARNING",
- "LIST_OVERHEAD_QUOTA_EXCEED",
- "MISSING_TYPE_DEPENDENCY",
- "NEXT_HOP_ADDRESS_NOT_ASSIGNED",
- "NEXT_HOP_CANNOT_IP_FORWARD",
- "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE",
- "NEXT_HOP_INSTANCE_NOT_FOUND",
- "NEXT_HOP_INSTANCE_NOT_ON_NETWORK",
- "NEXT_HOP_NOT_RUNNING",
- "NOT_CRITICAL_ERROR",
- "NO_RESULTS_ON_PAGE",
- "PARTIAL_SUCCESS",
- "REQUIRED_TOS_AGREEMENT",
- "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING",
- "RESOURCE_NOT_DELETED",
- "SCHEMA_VALIDATION_IGNORED",
- "SINGLE_INSTANCE_PROPERTY_TEMPLATE",
- "UNDECLARED_PROPERTIES",
- "UNREACHABLE"
- ],
- "enumDeprecated": [
- false,
- false,
- false,
- false,
- false,
- false,
- true,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false
- ],
- "enumDescriptions": [
- "Warning about failed cleanup of transient changes made by a failed operation.",
- "A link to a deprecated resource was created.",
- "When deploying and at least one of the resources has a type marked as deprecated",
- "The user created a boot disk that is larger than image size.",
- "When deploying and at least one of the resources has a type marked as experimental",
- "Warning that is present in an external api call",
- "Warning that value of a field has been overridden. Deprecated unused field.",
- "The operation involved use of an injected kernel, which is deprecated.",
- "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.",
- "When deploying a deployment with a exceedingly large number of resources",
- "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.",
- "A resource depends on a missing type",
- "The route's nextHopIp address is not assigned to an instance on the network.",
- "The route's next hop instance cannot ip forward.",
- "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.",
- "The route's nextHopInstance URL refers to an instance that does not exist.",
- "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.",
- "The route's next hop instance does not have a status of RUNNING.",
- "Error which is not critical. We decided to continue the process despite the mentioned error.",
- "No results are present on a particular list page.",
- "Success is reported, but some results may be missing due to errors",
- "The user attempted to use a resource that requires a TOS they have not accepted.",
- "Warning that a resource is in use.",
- "One or more of the resources set to auto-delete could not be deleted because they were in use.",
- "When a resource schema validation is ignored.",
- "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.",
- "When undeclared properties in the schema are present",
- "A given scope cannot be reached."
- ],
- "type": "string"
- },
- "data": {
- "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ",
- "items": {
- "properties": {
- "key": {
- "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).",
- "type": "string"
- },
- "value": {
- "description": "[Output Only] A warning data value corresponding to the key.",
- "type": "string"
- }
- },
- "type": "object"
- },
- "type": "array"
- },
- "message": {
- "description": "[Output Only] A human-readable description of the warning code.",
- "type": "string"
- }
- },
- "type": "object"
- }
- },
- "type": "object"
- },
- "FutureReservationsScopedList": {
- "id": "FutureReservationsScopedList",
- "properties": {
- "futureReservations": {
- "description": "A list of future reservations contained in this scope.",
- "items": {
- "$ref": "FutureReservation"
- },
- "type": "array"
- },
- "warning": {
- "description": "Informational warning which replaces the list of future reservations when the list is empty.",
- "properties": {
- "code": {
- "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.",
- "enum": [
- "CLEANUP_FAILED",
- "DEPRECATED_RESOURCE_USED",
- "DEPRECATED_TYPE_USED",
- "DISK_SIZE_LARGER_THAN_IMAGE_SIZE",
- "EXPERIMENTAL_TYPE_USED",
- "EXTERNAL_API_WARNING",
- "FIELD_VALUE_OVERRIDEN",
- "INJECTED_KERNELS_DEPRECATED",
- "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB",
- "LARGE_DEPLOYMENT_WARNING",
- "LIST_OVERHEAD_QUOTA_EXCEED",
- "MISSING_TYPE_DEPENDENCY",
- "NEXT_HOP_ADDRESS_NOT_ASSIGNED",
- "NEXT_HOP_CANNOT_IP_FORWARD",
- "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE",
- "NEXT_HOP_INSTANCE_NOT_FOUND",
- "NEXT_HOP_INSTANCE_NOT_ON_NETWORK",
- "NEXT_HOP_NOT_RUNNING",
- "NOT_CRITICAL_ERROR",
- "NO_RESULTS_ON_PAGE",
- "PARTIAL_SUCCESS",
- "REQUIRED_TOS_AGREEMENT",
- "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING",
- "RESOURCE_NOT_DELETED",
- "SCHEMA_VALIDATION_IGNORED",
- "SINGLE_INSTANCE_PROPERTY_TEMPLATE",
- "UNDECLARED_PROPERTIES",
- "UNREACHABLE"
- ],
- "enumDeprecated": [
- false,
- false,
- false,
- false,
- false,
- false,
- true,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false,
- false
- ],
- "enumDescriptions": [
- "Warning about failed cleanup of transient changes made by a failed operation.",
- "A link to a deprecated resource was created.",
- "When deploying and at least one of the resources has a type marked as deprecated",
- "The user created a boot disk that is larger than image size.",
- "When deploying and at least one of the resources has a type marked as experimental",
- "Warning that is present in an external api call",
- "Warning that value of a field has been overridden. Deprecated unused field.",
- "The operation involved use of an injected kernel, which is deprecated.",
- "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.",
- "When deploying a deployment with a exceedingly large number of resources",
- "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.",
- "A resource depends on a missing type",
- "The route's nextHopIp address is not assigned to an instance on the network.",
- "The route's next hop instance cannot ip forward.",
- "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.",
- "The route's nextHopInstance URL refers to an instance that does not exist.",
- "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.",
- "The route's next hop instance does not have a status of RUNNING.",
- "Error which is not critical. We decided to continue the process despite the mentioned error.",
- "No results are present on a particular list page.",
- "Success is reported, but some results may be missing due to errors",
- "The user attempted to use a resource that requires a TOS they have not accepted.",
- "Warning that a resource is in use.",
- "One or more of the resources set to auto-delete could not be deleted because they were in use.",
- "When a resource schema validation is ignored.",
- "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.",
- "When undeclared properties in the schema are present",
- "A given scope cannot be reached."
- ],
- "type": "string"
- },
- "data": {
- "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ",
- "items": {
- "properties": {
- "key": {
- "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).",
- "type": "string"
- },
- "value": {
- "description": "[Output Only] A warning data value corresponding to the key.",
- "type": "string"
- }
- },
- "type": "object"
- },
- "type": "array"
- },
- "message": {
- "description": "[Output Only] A human-readable description of the warning code.",
- "type": "string"
- }
- },
- "type": "object"
- }
- },
- "type": "object"
- },
"GRPCHealthCheck": {
"id": "GRPCHealthCheck",
"properties": {
@@ -49003,6 +47913,21 @@
"description": "For target pool based Network Load Balancing, it indicates the forwarding rule's IP address assigned to this instance. For other types of load balancing, the field indicates VM internal ip.",
"type": "string"
},
+ "ipv6Address": {
+ "type": "string"
+ },
+ "ipv6HealthState": {
+ "description": "Health state of the IPv6 address of the instance.",
+ "enum": [
+ "HEALTHY",
+ "UNHEALTHY"
+ ],
+ "enumDescriptions": [
+ "",
+ ""
+ ],
+ "type": "string"
+ },
"port": {
"description": "The named port of the instance group, not necessarily the port that is health-checked.",
"format": "int32",
@@ -55228,7 +54153,7 @@
"type": "boolean"
},
"availableFeatures": {
- "description": "[Output only] List of features available for this Interconnect connection, which can take one of the following values: - MACSEC If present then the Interconnect connection is provisioned on MACsec capable hardware ports. If not present then the Interconnect connection is provisioned on non-MACsec capable ports and MACsec isn't supported and enabling MACsec fails.",
+ "description": "[Output only] List of features available for this Interconnect connection, which can take one of the following values: - IF_MACSEC If present then the Interconnect connection is provisioned on MACsec capable hardware ports. If not present then the Interconnect connection is provisioned on non-MACsec capable ports and MACsec isn't supported and enabling MACsec fails.",
"items": {
"enum": [
"IF_MACSEC"
@@ -55381,7 +54306,7 @@
"type": "string"
},
"requestedFeatures": {
- "description": "Optional. List of features requested for this Interconnect connection, which can take one of the following values: - MACSEC If specified then the connection is created on MACsec capable hardware ports. If not specified, the default value is false, which allocates non-MACsec capable ports first if available. This parameter can be provided only with Interconnect INSERT. It isn't valid for Interconnect PATCH.",
+ "description": "Optional. List of features requested for this Interconnect connection, which can take one of the following values: - IF_MACSEC If specified then the connection is created on MACsec capable hardware ports. If not specified, the default value is false, which allocates non-MACsec capable ports first if available. This parameter can be provided only with Interconnect INSERT. It isn't valid for Interconnect PATCH.",
"items": {
"enum": [
"IF_MACSEC"
@@ -60665,6 +59590,11 @@
"description": "[Output Only] Address allocated from given subnetwork for PSC. This IP address acts as a VIP for a PSC NEG, allowing it to act as an endpoint in L7 PSC-XLB.",
"type": "string"
},
+ "producerPort": {
+ "description": "The psc producer port is used to connect PSC NEG with specific port on the PSC Producer side; should only be used for the PRIVATE_SERVICE_CONNECT NEG type",
+ "format": "int32",
+ "type": "integer"
+ },
"pscConnectionId": {
"description": "[Output Only] The PSC connection id of the PSC Network Endpoint Group Consumer.",
"format": "uint64",
@@ -61132,11 +60062,13 @@
"description": "The stack type for this network interface. To assign only IPv4 addresses, use IPV4_ONLY. To assign both IPv4 and IPv6 addresses, use IPV4_IPV6. If not specified, IPV4_ONLY is used. This field can be both set at instance creation and update network interface operations.",
"enum": [
"IPV4_IPV6",
- "IPV4_ONLY"
+ "IPV4_ONLY",
+ "IPV6_ONLY"
],
"enumDescriptions": [
"The network interface can have both IPv4 and IPv6 addresses.",
- "The network interface will be assigned IPv4 address."
+ "The network interface will only be assigned IPv4 addresses.",
+ "The network interface will only be assigned IPv6 addresses."
],
"type": "string"
},
@@ -63872,7 +62804,7 @@
"type": "string"
},
"targetLink": {
- "description": "[Output Only] The URL of the resource that the operation modifies. For operations related to creating a snapshot, this points to the persistent disk that the snapshot was created from.",
+ "description": "[Output Only] The URL of the resource that the operation modifies. For operations related to creating a snapshot, this points to the disk that the snapshot was created from.",
"type": "string"
},
"user": {
@@ -74017,6 +72949,11 @@
"description": "The URL of a forwarding rule with loadBalancingScheme INTERNAL* that is serving the endpoint identified by this service attachment.",
"type": "string"
},
+ "propagatedConnectionLimit": {
+ "description": "The number of consumer spokes that connected Private Service Connect endpoints can be propagated to through Network Connectivity Center. This limit lets the service producer limit how many propagated Private Service Connect connections can be established to this service attachment from a single consumer. If the connection preference of the service attachment is ACCEPT_MANUAL, the limit applies to each project or network that is listed in the consumer accept list. If the connection preference of the service attachment is ACCEPT_AUTOMATIC, the limit applies to each project that contains a connected endpoint. If unspecified, the default propagated connection limit is 250.",
+ "format": "uint32",
+ "type": "integer"
+ },
"pscServiceAttachmentId": {
"$ref": "Uint128",
"description": "[Output Only] An 128-bit global unique ID of the PSC service attachment."
@@ -74212,6 +73149,11 @@
"description": "The url of a connected endpoint.",
"type": "string"
},
+ "propagatedConnectionCount": {
+ "description": "The number of consumer Network Connectivity Center spokes that the connected Private Service Connect endpoint has propagated to.",
+ "format": "uint32",
+ "type": "integer"
+ },
"pscConnectionId": {
"description": "The PSC connection id of the connected endpoint.",
"format": "uint64",
@@ -78077,7 +77019,7 @@
"type": "string"
},
"internalIpv6Prefix": {
- "description": "[Output Only] The internal IPv6 address range that is assigned to this subnetwork.",
+ "description": "The internal IPv6 address range that is owned by this subnetwork.",
"type": "string"
},
"ipCidrRange": {
@@ -78193,11 +77135,13 @@
"description": "The stack type for the subnet. If set to IPV4_ONLY, new VMs in the subnet are assigned IPv4 addresses only. If set to IPV4_IPV6, new VMs in the subnet can be assigned both IPv4 and IPv6 addresses. If not specified, IPV4_ONLY is used. This field can be both set at resource creation time and updated using patch.",
"enum": [
"IPV4_IPV6",
- "IPV4_ONLY"
+ "IPV4_ONLY",
+ "IPV6_ONLY"
],
"enumDescriptions": [
"New VMs in this subnet can have both IPv4 and IPv6 addresses.",
- "New VMs in this subnet will only be assigned IPv4 addresses."
+ "New VMs in this subnet will only be assigned IPv4 addresses.",
+ "New VMs in this subnet will only be assigned IPv6 addresses."
],
"type": "string"
},
@@ -82622,7 +81566,7 @@
"type": "object"
},
"UrlMap": {
- "description": "Represents a URL Map resource. Compute Engine has two URL Map resources: * [Global](/compute/docs/reference/rest/v1/urlMaps) * [Regional](/compute/docs/reference/rest/v1/regionUrlMaps) A URL map resource is a component of certain types of cloud load balancers and Traffic Director: * urlMaps are used by global external Application Load Balancers, classic Application Load Balancers, and cross-region internal Application Load Balancers. * regionUrlMaps are used by internal Application Load Balancers, regional external Application Load Balancers and regional internal Application Load Balancers. For a list of supported URL map features by the load balancer type, see the Load balancing features: Routing and traffic management table. For a list of supported URL map features for Traffic Director, see the Traffic Director features: Routing and traffic management table. This resource defines mappings from hostnames and URL paths to either a backend service or a backend bucket. To use the global urlMaps resource, the backend service must have a loadBalancingScheme of either EXTERNAL or INTERNAL_SELF_MANAGED. To use the regionUrlMaps resource, the backend service must have a loadBalancingScheme of INTERNAL_MANAGED. For more information, read URL Map Concepts.",
+ "description": "Represents a URL Map resource. Compute Engine has two URL Map resources: * [Global](/compute/docs/reference/rest/v1/urlMaps) * [Regional](/compute/docs/reference/rest/v1/regionUrlMaps) A URL map resource is a component of certain types of cloud load balancers and Traffic Director: * urlMaps are used by global external Application Load Balancers, classic Application Load Balancers, and cross-region internal Application Load Balancers. * regionUrlMaps are used by internal Application Load Balancers, regional external Application Load Balancers and regional internal Application Load Balancers. For a list of supported URL map features by the load balancer type, see the Load balancing features: Routing and traffic management table. For a list of supported URL map features for Traffic Director, see the Traffic Director features: Routing and traffic management table. This resource defines mappings from hostnames and URL paths to either a backend service or a backend bucket. To use the global urlMaps resource, the backend service must have a loadBalancingScheme of either EXTERNAL, EXTERNAL_MANAGED, or INTERNAL_SELF_MANAGED. To use the regionUrlMaps resource, the backend service must have a loadBalancingScheme of INTERNAL_MANAGED. For more information, read URL Map Concepts.",
"id": "UrlMap",
"properties": {
"creationTimestamp": {
@@ -83372,11 +82316,13 @@
"description": "The stack type for the subnet. If set to IPV4_ONLY, new VMs in the subnet are assigned IPv4 addresses only. If set to IPV4_IPV6, new VMs in the subnet can be assigned both IPv4 and IPv6 addresses. If not specified, IPV4_ONLY is used. This field can be both set at resource creation time and updated using patch.",
"enum": [
"IPV4_IPV6",
- "IPV4_ONLY"
+ "IPV4_ONLY",
+ "IPV6_ONLY"
],
"enumDescriptions": [
"New VMs in this subnet can have both IPv4 and IPv6 addresses.",
- "New VMs in this subnet will only be assigned IPv4 addresses."
+ "New VMs in this subnet will only be assigned IPv4 addresses.",
+ "New VMs in this subnet will only be assigned IPv6 addresses."
],
"type": "string"
},
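
With IPV6_ONLY now accepted for stackType, a subnetwork can be declared IPv6-only. A minimal sketch using existing Subnetwork fields; the name is a placeholder, and ipv6AccessType is set on the assumption that IPv6-enabled subnets require one:

package example

import (
	compute "google.golang.org/api/compute/v1"
)

// ipv6OnlySubnet sketches a subnetwork using the IPV6_ONLY stack type
// added in this revision. Name and network URL are placeholders.
func ipv6OnlySubnet(networkURL string) *compute.Subnetwork {
	return &compute.Subnetwork{
		Name:           "v6-only-subnet", // hypothetical
		Network:        networkURL,
		StackType:      "IPV6_ONLY",
		Ipv6AccessType: "EXTERNAL", // assumed: an access type must accompany IPv6 stacks
	}
}
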
diff --git a/vendor/google.golang.org/api/compute/v1/compute-gen.go b/vendor/google.golang.org/api/compute/v1/compute-gen.go
index ca8f1ffd01f7e..245c3c6871457 100644
--- a/vendor/google.golang.org/api/compute/v1/compute-gen.go
+++ b/vendor/google.golang.org/api/compute/v1/compute-gen.go
@@ -173,7 +173,6 @@ func New(client *http.Client) (*Service, error) {
s.FirewallPolicies = NewFirewallPoliciesService(s)
s.Firewalls = NewFirewallsService(s)
s.ForwardingRules = NewForwardingRulesService(s)
- s.FutureReservations = NewFutureReservationsService(s)
s.GlobalAddresses = NewGlobalAddressesService(s)
s.GlobalForwardingRules = NewGlobalForwardingRulesService(s)
s.GlobalNetworkEndpointGroups = NewGlobalNetworkEndpointGroupsService(s)
@@ -293,8 +292,6 @@ type Service struct {
ForwardingRules *ForwardingRulesService
- FutureReservations *FutureReservationsService
-
GlobalAddresses *GlobalAddressesService
GlobalForwardingRules *GlobalForwardingRulesService
@@ -580,15 +577,6 @@ type ForwardingRulesService struct {
s *Service
}
-func NewFutureReservationsService(s *Service) *FutureReservationsService {
- rs := &FutureReservationsService{s: s}
- return rs
-}
-
-type FutureReservationsService struct {
- s *Service
-}
-
func NewGlobalAddressesService(s *Service) *GlobalAddressesService {
rs := &GlobalAddressesService{s: s}
return rs
@@ -3272,8 +3260,6 @@ func (s AttachedDiskInitializeParams) MarshalJSON() ([]byte, error) {
type AuditConfig struct {
// AuditLogConfigs: The configuration for logging of each type of permission.
AuditLogConfigs []*AuditLogConfig `json:"auditLogConfigs,omitempty"`
- // ExemptedMembers: This is deprecated and has no effect. Do not use.
- ExemptedMembers []string `json:"exemptedMembers,omitempty"`
// Service: Specifies a service that will be enabled for audit logging. For
// example, `storage.googleapis.com`, `cloudsql.googleapis.com`. `allServices`
// is a special value that covers all services.
@@ -3305,8 +3291,6 @@ type AuditLogConfig struct {
// ExemptedMembers: Specifies the identities that do not cause logging for this
// type of permission. Follows the same format of Binding.members.
ExemptedMembers []string `json:"exemptedMembers,omitempty"`
- // IgnoreChildExemptions: This is deprecated and has no effect. Do not use.
- IgnoreChildExemptions bool `json:"ignoreChildExemptions,omitempty"`
// LogType: The log type that this config enables.
//
// Possible values:
@@ -4492,6 +4476,8 @@ type BackendBucket struct {
Name string `json:"name,omitempty"`
// SelfLink: [Output Only] Server-defined URL for the resource.
SelfLink string `json:"selfLink,omitempty"`
+ // UsedBy: [Output Only] List of resources referencing that backend bucket.
+ UsedBy []*BackendBucketUsedBy `json:"usedBy,omitempty"`
// ServerResponse contains the HTTP response code and headers from the server.
googleapi.ServerResponse `json:"-"`
@@ -4881,6 +4867,28 @@ func (s BackendBucketListWarningData) MarshalJSON() ([]byte, error) {
return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
}
+type BackendBucketUsedBy struct {
+ // Reference: [Output Only] Server-defined URL for UrlMaps referencing that
+ // BackendBucket.
+ Reference string `json:"reference,omitempty"`
+ // ForceSendFields is a list of field names (e.g. "Reference") to
+ // unconditionally include in API requests. By default, fields with empty or
+ // default values are omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
+ // details.
+ ForceSendFields []string `json:"-"`
+ // NullFields is a list of field names (e.g. "Reference") to include in API
+ // requests with the JSON null value. By default, fields with empty values are
+ // omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
+ NullFields []string `json:"-"`
+}
+
+func (s BackendBucketUsedBy) MarshalJSON() ([]byte, error) {
+ type NoMethod BackendBucketUsedBy
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
+}
+
// BackendService: Represents a Backend Service resource. A backend service
// defines how Google Cloud load balancers distribute traffic. The backend
// service configuration contains a set of values, such as the protocol used to
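
The new UsedBy field gives callers a way to check references before deleting a bucket. A minimal usage sketch over the fields shown in this diff:

package example

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// printUsedBy lists the URL maps that still reference a backend bucket,
// e.g. as a pre-delete safety check. Both fields appear in this diff.
func printUsedBy(b *compute.BackendBucket) {
	for _, u := range b.UsedBy {
		fmt.Printf("backend bucket %s is referenced by %s\n", b.Name, u.Reference)
	}
}
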
@@ -4984,6 +4992,39 @@ type BackendService struct {
// Id: [Output Only] The unique identifier for the resource. This identifier is
// defined by the server.
Id uint64 `json:"id,omitempty,string"`
+ // IpAddressSelectionPolicy: Specifies a preference for traffic sent from the
+ // proxy to the backend (or from the client to the backend for proxyless gRPC).
+ // The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends
+ // of the backend service (Instance Group, Managed Instance Group, Network
+ // Endpoint Group), regardless of traffic from the client to the proxy. Only
+ // IPv4 health checks are used to check the health of the backends. This is the
+ // default setting. - PREFER_IPV6: Prioritize the connection to the endpoint's
+ // IPv6 address over its IPv4 address (provided there is a healthy IPv6
+ // address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend
+ // service (Instance Group, Managed Instance Group, Network Endpoint Group),
+ // regardless of traffic from the client to the proxy. Only IPv6 health checks
+ // are used to check the health of the backends. This field is applicable to
+ // either: - Advanced global external Application Load Balancer (load balancing
+ // scheme EXTERNAL_MANAGED), - Regional external Application Load Balancer, -
+ // Internal proxy Network Load Balancer (load balancing scheme
+ // INTERNAL_MANAGED), - Regional internal Application Load Balancer (load
+ // balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies
+ // and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED).
+ //
+ // Possible values:
+ // "IPV4_ONLY" - Only send IPv4 traffic to the backends of the Backend
+ // Service (Instance Group, Managed Instance Group, Network Endpoint Group)
+ // regardless of traffic from the client to the proxy. Only IPv4 health-checks
+ // are used to check the health of the backends. This is the default setting.
+ // "IPV6_ONLY" - Only send IPv6 traffic to the backends of the Backend
+ // Service (Instance Group, Managed Instance Group, Network Endpoint Group)
+ // regardless of traffic from the client to the proxy. Only IPv6 health-checks
+ // are used to check the health of the backends.
+ // "IP_ADDRESS_SELECTION_POLICY_UNSPECIFIED" - Unspecified IP address
+ // selection policy.
+ // "PREFER_IPV6" - Prioritize the connection to the endpoints IPv6 address
+ // over its IPv4 address (provided there is a healthy IPv6 address).
+ IpAddressSelectionPolicy string `json:"ipAddressSelectionPolicy,omitempty"`
// Kind: [Output Only] Type of resource. Always compute#backendService for
// backend services.
Kind string `json:"kind,omitempty"`
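
A one-line sketch of opting a backend service into the new IPv6-first policy; the field itself is added just above, and the rest of the backend service configuration is assumed to exist:

package example

import (
	compute "google.golang.org/api/compute/v1"
)

// preferIPv6 opts a backend service into IPv6-first endpoint selection
// using the IpAddressSelectionPolicy field added in this diff.
func preferIPv6(bs *compute.BackendService) {
	bs.IpAddressSelectionPolicy = "PREFER_IPV6"
}
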
@@ -6795,8 +6836,8 @@ type BulkInsertInstanceResource struct {
// InstanceProperties: The instance properties defining the VM instances to be
// created. Required if sourceInstanceTemplate is not provided.
InstanceProperties *InstanceProperties `json:"instanceProperties,omitempty"`
- // LocationPolicy: Policy for chosing target zone. For more information, see
- // Create VMs in bulk .
+ // LocationPolicy: Policy for choosing target zone. For more information, see
+ // Create VMs in bulk.
LocationPolicy *LocationPolicy `json:"locationPolicy,omitempty"`
// MinCount: The minimum number of instances to create. If no min_count is
// specified then count is used as the default value. If min_count instances
@@ -7050,6 +7091,10 @@ type Commitment struct {
Category string `json:"category,omitempty"`
// CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text format.
CreationTimestamp string `json:"creationTimestamp,omitempty"`
+ // CustomEndTimestamp: [Input Only] Optional, specifies the CUD end time
+ // requested by the customer in RFC3339 text format. Needed when the customer
+	// wants the CUD's end date to be later than the start date + term duration.
+ CustomEndTimestamp string `json:"customEndTimestamp,omitempty"`
// Description: An optional description of this resource. Provide this property
// when you create the resource.
Description string `json:"description,omitempty"`
@@ -7095,6 +7140,8 @@ type Commitment struct {
Region string `json:"region,omitempty"`
// Reservations: List of create-on-create reservations for this commitment.
Reservations []*Reservation `json:"reservations,omitempty"`
+ // ResourceStatus: [Output Only] Status information for Commitment resource.
+ ResourceStatus *CommitmentResourceStatus `json:"resourceStatus,omitempty"`
// Resources: A list of commitment amounts for particular resources. Note that
// VCPU and MEMORY resource commitments must occur together.
Resources []*ResourceCommitment `json:"resources,omitempty"`
@@ -7478,6 +7525,33 @@ func (s CommitmentListWarningData) MarshalJSON() ([]byte, error) {
return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
}
+// CommitmentResourceStatus: [Output Only] Contains output only fields.
+type CommitmentResourceStatus struct {
+ // CustomTermEligibilityEndTimestamp: [Output Only] Indicates the end time of
+ // customer's eligibility to send custom term requests in RFC3339 text format.
+	// Term extension requests received after this time (not the end time in the
+	// request) will be rejected.
+ CustomTermEligibilityEndTimestamp string `json:"customTermEligibilityEndTimestamp,omitempty"`
+ // ForceSendFields is a list of field names (e.g.
+ // "CustomTermEligibilityEndTimestamp") to unconditionally include in API
+ // requests. By default, fields with empty or default values are omitted from
+ // API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
+ // details.
+ ForceSendFields []string `json:"-"`
+ // NullFields is a list of field names (e.g.
+ // "CustomTermEligibilityEndTimestamp") to include in API requests with the
+ // JSON null value. By default, fields with empty values are omitted from API
+ // requests. See https://pkg.go.dev/google.golang.org/api#hdr-NullFields for
+ // more details.
+ NullFields []string `json:"-"`
+}
+
+func (s CommitmentResourceStatus) MarshalJSON() ([]byte, error) {
+ type NoMethod CommitmentResourceStatus
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
+}
+
type CommitmentsScopedList struct {
// Commitments: [Output Only] A list of commitments contained in this scope.
Commitments []*Commitment `json:"commitments,omitempty"`
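
Together with CustomEndTimestamp above, the new ResourceStatus lets callers check how long custom term requests remain possible. A minimal sketch over the fields shown in this diff:

package example

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// customTermWindow reports the deadline for custom term requests, if the
// server populated the new ResourceStatus field on the commitment.
func customTermWindow(c *compute.Commitment) {
	if c.ResourceStatus == nil {
		return
	}
	fmt.Printf("commitment %s accepts custom term requests until %s\n",
		c.Name, c.ResourceStatus.CustomTermEligibilityEndTimestamp)
}
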
@@ -7682,6 +7756,7 @@ type ConfidentialInstanceConfig struct {
// this value.
// "SEV" - AMD Secure Encrypted Virtualization.
// "SEV_SNP" - AMD Secure Encrypted Virtualization - Secure Nested Paging.
+ // "TDX" - Intel Trust Domain eXtension.
ConfidentialInstanceType string `json:"confidentialInstanceType,omitempty"`
// EnableConfidentialCompute: Defines whether the instance should have
// confidential compute enabled.
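
The enum gains Intel TDX alongside SEV and SEV_SNP. A minimal sketch of requesting it; whether EnableConfidentialCompute must also be set for TDX is not stated in this diff, so it is left untouched:

package example

import (
	compute "google.golang.org/api/compute/v1"
)

// tdxConfig requests the newly listed Intel TDX confidential instance type.
func tdxConfig() *compute.ConfidentialInstanceConfig {
	return &compute.ConfidentialInstanceConfig{
		ConfidentialInstanceType: "TDX",
	}
}
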
@@ -12133,832 +12208,6 @@ func (s ForwardingRulesScopedListWarningData) MarshalJSON() ([]byte, error) {
return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
}
-type FutureReservation struct {
- // AutoCreatedReservationsDeleteTime: Future timestamp when the FR auto-created
- // reservations will be deleted by Compute Engine. Format of this field must be
-	// a valid RFC3339 (https://www.ietf.org/rfc/rfc3339.txt) value.
- AutoCreatedReservationsDeleteTime string `json:"autoCreatedReservationsDeleteTime,omitempty"`
- // AutoCreatedReservationsDuration: Specifies the duration of auto-created
- // reservations. It represents relative time to future reservation start_time
- // when auto-created reservations will be automatically deleted by Compute
- // Engine. Duration time unit is represented as a count of seconds and
- // fractions of seconds at nanosecond resolution.
- AutoCreatedReservationsDuration *Duration `json:"autoCreatedReservationsDuration,omitempty"`
- // AutoDeleteAutoCreatedReservations: Setting for enabling or disabling
- // automatic deletion for auto-created reservation. If set to true,
- // auto-created reservations will be deleted at Future Reservation's end time
- // (default) or at user's defined timestamp if any of the
- // [auto_created_reservations_delete_time, auto_created_reservations_duration]
- // values is specified. For keeping auto-created reservation indefinitely, this
- // value should be set to false.
- AutoDeleteAutoCreatedReservations bool `json:"autoDeleteAutoCreatedReservations,omitempty"`
- // CreationTimestamp: [Output Only] The creation timestamp for this future
- // reservation in RFC3339 text format.
- CreationTimestamp string `json:"creationTimestamp,omitempty"`
- // Description: An optional description of this resource. Provide this property
- // when you create the future reservation.
- Description string `json:"description,omitempty"`
- // Id: [Output Only] A unique identifier for this future reservation. The
- // server defines this identifier.
- Id uint64 `json:"id,omitempty,string"`
- // Kind: [Output Only] Type of the resource. Always compute#futureReservation
- // for future reservations.
- Kind string `json:"kind,omitempty"`
- // Name: The name of the resource, provided by the client when initially
- // creating the resource. The resource name must be 1-63 characters long, and
- // comply with RFC1035. Specifically, the name must be 1-63 characters long and
- // match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the
- // first character must be a lowercase letter, and all following characters
- // must be a dash, lowercase letter, or digit, except the last character, which
- // cannot be a dash.
- Name string `json:"name,omitempty"`
- // NamePrefix: Name prefix for the reservations to be created at the time of
- // delivery. The name prefix must comply with RFC1035. Maximum allowed length
- // for name prefix is 20. Automatically created reservations name format will
- // be -date-####.
- NamePrefix string `json:"namePrefix,omitempty"`
- // PlanningStatus: Planning state before being submitted for evaluation
- //
- // Possible values:
- // "DRAFT" - Future Reservation is being drafted.
- // "PLANNING_STATUS_UNSPECIFIED"
- // "SUBMITTED" - Future Reservation has been submitted for evaluation by GCP.
- PlanningStatus string `json:"planningStatus,omitempty"`
- // SelfLink: [Output Only] Server-defined fully-qualified URL for this
- // resource.
- SelfLink string `json:"selfLink,omitempty"`
- // SelfLinkWithId: [Output Only] Server-defined URL for this resource with the
- // resource id.
- SelfLinkWithId string `json:"selfLinkWithId,omitempty"`
- // ShareSettings: List of Projects/Folders to share with.
- ShareSettings *ShareSettings `json:"shareSettings,omitempty"`
- // SpecificSkuProperties: Future Reservation configuration to indicate instance
- // properties and total count.
- SpecificSkuProperties *FutureReservationSpecificSKUProperties `json:"specificSkuProperties,omitempty"`
- // Status: [Output only] Status of the Future Reservation
- Status *FutureReservationStatus `json:"status,omitempty"`
- // TimeWindow: Time window for this Future Reservation.
- TimeWindow *FutureReservationTimeWindow `json:"timeWindow,omitempty"`
- // Zone: [Output Only] URL of the Zone where this future reservation resides.
- Zone string `json:"zone,omitempty"`
-
- // ServerResponse contains the HTTP response code and headers from the server.
- googleapi.ServerResponse `json:"-"`
- // ForceSendFields is a list of field names (e.g.
- // "AutoCreatedReservationsDeleteTime") to unconditionally include in API
- // requests. By default, fields with empty or default values are omitted from
- // API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
- // details.
- ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g.
- // "AutoCreatedReservationsDeleteTime") to include in API requests with the
- // JSON null value. By default, fields with empty values are omitted from API
- // requests. See https://pkg.go.dev/google.golang.org/api#hdr-NullFields for
- // more details.
- NullFields []string `json:"-"`
-}
-
-func (s FutureReservation) MarshalJSON() ([]byte, error) {
- type NoMethod FutureReservation
- return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
-}
-
-type FutureReservationSpecificSKUProperties struct {
- // InstanceProperties: Properties of the SKU instances being reserved.
- InstanceProperties *AllocationSpecificSKUAllocationReservedInstanceProperties `json:"instanceProperties,omitempty"`
- // SourceInstanceTemplate: The instance template that will be used to populate
- // the ReservedInstanceProperties of the future reservation
- SourceInstanceTemplate string `json:"sourceInstanceTemplate,omitempty"`
- // TotalCount: Total number of instances for which capacity assurance is
- // requested at a future time period.
- TotalCount int64 `json:"totalCount,omitempty,string"`
- // ForceSendFields is a list of field names (e.g. "InstanceProperties") to
- // unconditionally include in API requests. By default, fields with empty or
- // default values are omitted from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
- // details.
- ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "InstanceProperties") to include
- // in API requests with the JSON null value. By default, fields with empty
- // values are omitted from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
- NullFields []string `json:"-"`
-}
-
-func (s FutureReservationSpecificSKUProperties) MarshalJSON() ([]byte, error) {
- type NoMethod FutureReservationSpecificSKUProperties
- return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
-}
-
-// FutureReservationStatus: [Output only] Represents status related to the
-// future reservation.
-type FutureReservationStatus struct {
- // AmendmentStatus: [Output Only] The current status of the requested
- // amendment.
- //
- // Possible values:
- // "AMENDMENT_APPROVED" - The requested amendment to the Future Resevation
- // has been approved and applied by GCP.
- // "AMENDMENT_DECLINED" - The requested amendment to the Future Reservation
- // has been declined by GCP and the original state was restored.
- // "AMENDMENT_IN_REVIEW" - The requested amendment to the Future Reservation
-	// is currently being reviewed by GCP.
- // "AMENDMENT_STATUS_UNSPECIFIED"
- AmendmentStatus string `json:"amendmentStatus,omitempty"`
- // AutoCreatedReservations: Fully qualified urls of the automatically created
- // reservations at start_time.
- AutoCreatedReservations []string `json:"autoCreatedReservations,omitempty"`
- // ExistingMatchingUsageInfo: [Output Only] Represents the existing matching
- // usage for the future reservation.
- ExistingMatchingUsageInfo *FutureReservationStatusExistingMatchingUsageInfo `json:"existingMatchingUsageInfo,omitempty"`
- // FulfilledCount: This count indicates the fulfilled capacity so far. This is
- // set during "PROVISIONING" state. This count also includes capacity delivered
- // as part of existing matching reservations.
- FulfilledCount int64 `json:"fulfilledCount,omitempty,string"`
- // LastKnownGoodState: [Output Only] This field represents the future
- // reservation before an amendment was requested. If the amendment is declined,
- // the Future Reservation will be reverted to the last known good state. The
- // last known good state is not set when updating a future reservation whose
- // Procurement Status is DRAFTING.
- LastKnownGoodState *FutureReservationStatusLastKnownGoodState `json:"lastKnownGoodState,omitempty"`
- // LockTime: Time when Future Reservation would become LOCKED, after which no
- // modifications to Future Reservation will be allowed. Applicable only after
- // the Future Reservation is in the APPROVED state. The lock_time is an RFC3339
- // string. The procurement_status will transition to PROCURING state at this
- // time.
- LockTime string `json:"lockTime,omitempty"`
- // ProcurementStatus: Current state of this Future Reservation
- //
- // Possible values:
- // "APPROVED" - Future reservation is approved by GCP.
- // "CANCELLED" - Future reservation is cancelled by the customer.
- // "COMMITTED" - Future reservation is committed by the customer.
- // "DECLINED" - Future reservation is rejected by GCP.
- // "DRAFTING" - Related status for PlanningStatus.Draft. Transitions to
- // PENDING_APPROVAL upon user submitting FR.
- // "FAILED" - Future reservation failed. No additional reservations were
- // provided.
- // "FAILED_PARTIALLY_FULFILLED" - Future reservation is partially fulfilled.
- // Additional reservations were provided but did not reach total_count reserved
- // instance slots.
- // "FULFILLED" - Future reservation is fulfilled completely.
- // "PENDING_AMENDMENT_APPROVAL" - An Amendment to the Future Reservation has
- // been requested. If the Amendment is declined, the Future Reservation will be
- // restored to the last known good state.
- // "PENDING_APPROVAL" - Future reservation is pending approval by GCP.
- // "PROCUREMENT_STATUS_UNSPECIFIED"
- // "PROCURING" - Future reservation is being procured by GCP. Beyond this
- // point, Future reservation is locked and no further modifications are
- // allowed.
- // "PROVISIONING" - Future reservation capacity is being provisioned. This
- // state will be entered after start_time, while reservations are being created
- // to provide total_count reserved instance slots. This state will not persist
- // past start_time + 24h.
- ProcurementStatus string `json:"procurementStatus,omitempty"`
- SpecificSkuProperties *FutureReservationStatusSpecificSKUProperties `json:"specificSkuProperties,omitempty"`
- // ForceSendFields is a list of field names (e.g. "AmendmentStatus") to
- // unconditionally include in API requests. By default, fields with empty or
- // default values are omitted from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
- // details.
- ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "AmendmentStatus") to include in
- // API requests with the JSON null value. By default, fields with empty values
- // are omitted from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
- NullFields []string `json:"-"`
-}
-
-func (s FutureReservationStatus) MarshalJSON() ([]byte, error) {
- type NoMethod FutureReservationStatus
- return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
-}
-
-// FutureReservationStatusExistingMatchingUsageInfo: [Output Only] Represents
-// the existing matching usage for the future reservation.
-type FutureReservationStatusExistingMatchingUsageInfo struct {
- // Count: Count to represent min(FR total_count,
- // matching_reserved_capacity+matching_unreserved_instances)
- Count int64 `json:"count,omitempty,string"`
- // Timestamp: Timestamp when the matching usage was calculated
- Timestamp string `json:"timestamp,omitempty"`
- // ForceSendFields is a list of field names (e.g. "Count") to unconditionally
- // include in API requests. By default, fields with empty or default values are
- // omitted from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
- // details.
- ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "Count") to include in API
- // requests with the JSON null value. By default, fields with empty values are
- // omitted from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
- NullFields []string `json:"-"`
-}
-
-func (s FutureReservationStatusExistingMatchingUsageInfo) MarshalJSON() ([]byte, error) {
- type NoMethod FutureReservationStatusExistingMatchingUsageInfo
- return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
-}
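// Editor's note: a minimal sketch, not part of the vendored diff, showing the
// ForceSendFields behavior documented above. The payload type is the real one
// from this file; the calling code and output comments are illustrative and
// assume gensupport's usual marshaling (with the `,string` tag, the forced
// zero Count serializes as a JSON string). Assumes fmt is imported.
func exampleForceSendFields() {
	info := &FutureReservationStatusExistingMatchingUsageInfo{Count: 0}
	b, _ := info.MarshalJSON()
	fmt.Println(string(b)) // {} (the zero value is omitted by default)

	// Listing the field name makes the empty value explicit in the request.
	info.ForceSendFields = []string{"Count"}
	b, _ = info.MarshalJSON()
	fmt.Println(string(b)) // {"count":"0"}
}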
-
-// FutureReservationStatusLastKnownGoodState: The state that the future
-// reservation will be reverted to should the amendment be declined.
-type FutureReservationStatusLastKnownGoodState struct {
- // Description: [Output Only] The description of the FutureReservation before
- // an amendment was requested.
- Description string `json:"description,omitempty"`
- // ExistingMatchingUsageInfo: [Output Only] Represents the matching usage for
- // the future reservation before an amendment was requested.
- ExistingMatchingUsageInfo *FutureReservationStatusExistingMatchingUsageInfo `json:"existingMatchingUsageInfo,omitempty"`
- FutureReservationSpecs *FutureReservationStatusLastKnownGoodStateFutureReservationSpecs `json:"futureReservationSpecs,omitempty"`
- // LockTime: [Output Only] The lock time of the FutureReservation before an
- // amendment was requested.
- LockTime string `json:"lockTime,omitempty"`
- // NamePrefix: [Output Only] The name prefix of the Future Reservation before
- // an amendment was requested.
- NamePrefix string `json:"namePrefix,omitempty"`
- // ProcurementStatus: [Output Only] The status of the last known good state for
- // the Future Reservation.
- //
- // Possible values:
- // "APPROVED" - Future reservation is approved by GCP.
- // "CANCELLED" - Future reservation is cancelled by the customer.
- // "COMMITTED" - Future reservation is committed by the customer.
- // "DECLINED" - Future reservation is rejected by GCP.
- // "DRAFTING" - Related status for PlanningStatus.Draft. Transitions to
- // PENDING_APPROVAL upon user submitting FR.
- // "FAILED" - Future reservation failed. No additional reservations were
- // provided.
- // "FAILED_PARTIALLY_FULFILLED" - Future reservation is partially fulfilled.
- // Additional reservations were provided but did not reach total_count reserved
- // instance slots.
- // "FULFILLED" - Future reservation is fulfilled completely.
- // "PENDING_AMENDMENT_APPROVAL" - An Amendment to the Future Reservation has
- // been requested. If the Amendment is declined, the Future Reservation will be
- // restored to the last known good state.
- // "PENDING_APPROVAL" - Future reservation is pending approval by GCP.
- // "PROCUREMENT_STATUS_UNSPECIFIED"
- // "PROCURING" - Future reservation is being procured by GCP. Beyond this
- // point, Future reservation is locked and no further modifications are
- // allowed.
- // "PROVISIONING" - Future reservation capacity is being provisioned. This
- // state will be entered after start_time, while reservations are being created
- // to provide total_count reserved instance slots. This state will not persist
- // past start_time + 24h.
- ProcurementStatus string `json:"procurementStatus,omitempty"`
- // ForceSendFields is a list of field names (e.g. "Description") to
- // unconditionally include in API requests. By default, fields with empty or
- // default values are omitted from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
- // details.
- ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "Description") to include in API
- // requests with the JSON null value. By default, fields with empty values are
- // omitted from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
- NullFields []string `json:"-"`
-}
-
-func (s FutureReservationStatusLastKnownGoodState) MarshalJSON() ([]byte, error) {
- type NoMethod FutureReservationStatusLastKnownGoodState
- return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
-}
-
-// FutureReservationStatusLastKnownGoodStateFutureReservationSpecs: The
-// properties of the last known good state for the Future Reservation.
-type FutureReservationStatusLastKnownGoodStateFutureReservationSpecs struct {
- // ShareSettings: [Output Only] The previous share settings of the Future
- // Reservation.
- ShareSettings *ShareSettings `json:"shareSettings,omitempty"`
- // SpecificSkuProperties: [Output Only] The previous instance related
- // properties of the Future Reservation.
- SpecificSkuProperties *FutureReservationSpecificSKUProperties `json:"specificSkuProperties,omitempty"`
- // TimeWindow: [Output Only] The previous time window of the Future
- // Reservation.
- TimeWindow *FutureReservationTimeWindow `json:"timeWindow,omitempty"`
- // ForceSendFields is a list of field names (e.g. "ShareSettings") to
- // unconditionally include in API requests. By default, fields with empty or
- // default values are omitted from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
- // details.
- ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "ShareSettings") to include in API
- // requests with the JSON null value. By default, fields with empty values are
- // omitted from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
- NullFields []string `json:"-"`
-}
-
-func (s FutureReservationStatusLastKnownGoodStateFutureReservationSpecs) MarshalJSON() ([]byte, error) {
- type NoMethod FutureReservationStatusLastKnownGoodStateFutureReservationSpecs
- return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
-}
-
-// FutureReservationStatusSpecificSKUProperties: Properties to be set for the
-// Future Reservation.
-type FutureReservationStatusSpecificSKUProperties struct {
- // SourceInstanceTemplateId: ID of the instance template used to populate the
- // Future Reservation properties.
- SourceInstanceTemplateId string `json:"sourceInstanceTemplateId,omitempty"`
- // ForceSendFields is a list of field names (e.g. "SourceInstanceTemplateId")
- // to unconditionally include in API requests. By default, fields with empty or
- // default values are omitted from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
- // details.
- ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "SourceInstanceTemplateId") to
- // include in API requests with the JSON null value. By default, fields with
- // empty values are omitted from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
- NullFields []string `json:"-"`
-}
-
-func (s FutureReservationStatusSpecificSKUProperties) MarshalJSON() ([]byte, error) {
- type NoMethod FutureReservationStatusSpecificSKUProperties
- return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
-}
-
-type FutureReservationTimeWindow struct {
- Duration *Duration `json:"duration,omitempty"`
- EndTime string `json:"endTime,omitempty"`
- // StartTime: Start time of the Future Reservation. The start_time is an
- // RFC3339 string.
- StartTime string `json:"startTime,omitempty"`
- // ForceSendFields is a list of field names (e.g. "Duration") to
- // unconditionally include in API requests. By default, fields with empty or
- // default values are omitted from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
- // details.
- ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "Duration") to include in API
- // requests with the JSON null value. By default, fields with empty values are
- // omitted from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
- NullFields []string `json:"-"`
-}
-
-func (s FutureReservationTimeWindow) MarshalJSON() ([]byte, error) {
- type NoMethod FutureReservationTimeWindow
- return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
-}
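// Editor's note: a hedged sketch, not part of the vendored diff, of filling
// the RFC3339 StartTime described above. It assumes this package's Duration
// type carries whole seconds in a Seconds field and that time is imported.
func exampleTimeWindow() *FutureReservationTimeWindow {
	return &FutureReservationTimeWindow{
		// Start 30 days out, formatted as RFC3339 per the field docs.
		StartTime: time.Now().Add(30 * 24 * time.Hour).UTC().Format(time.RFC3339),
		Duration:  &Duration{Seconds: 7 * 24 * 60 * 60}, // one week
	}
}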
-
-// FutureReservationsAggregatedListResponse: Contains a list of future
-// reservations.
-type FutureReservationsAggregatedListResponse struct {
- Etag string `json:"etag,omitempty"`
- // Id: [Output Only] Unique identifier for the resource; defined by the server.
- Id string `json:"id,omitempty"`
- // Items: A list of Future reservation resources.
- Items map[string]FutureReservationsScopedList `json:"items,omitempty"`
- // Kind: [Output Only] Type of resource. Always
- // compute#futureReservationsAggregatedListResponse for future reservation
- // aggregated list response.
- Kind string `json:"kind,omitempty"`
- // NextPageToken: [Output Only] This token allows you to get the next page of
- // results for list requests. If the number of results is larger than
- // maxResults, use the nextPageToken as a value for the query parameter
- // pageToken in the next list request. Subsequent list requests will have their
- // own nextPageToken to continue paging through the results.
- NextPageToken string `json:"nextPageToken,omitempty"`
- // SelfLink: [Output Only] Server-defined URL for this resource.
- SelfLink string `json:"selfLink,omitempty"`
- // Unreachables: [Output Only] Unreachable resources.
- Unreachables []string `json:"unreachables,omitempty"`
- // Warning: [Output Only] Informational warning message.
- Warning *FutureReservationsAggregatedListResponseWarning `json:"warning,omitempty"`
-
- // ServerResponse contains the HTTP response code and headers from the server.
- googleapi.ServerResponse `json:"-"`
- // ForceSendFields is a list of field names (e.g. "Etag") to unconditionally
- // include in API requests. By default, fields with empty or default values are
- // omitted from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
- // details.
- ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "Etag") to include in API requests
- // with the JSON null value. By default, fields with empty values are omitted
- // from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
- NullFields []string `json:"-"`
-}
-
-func (s FutureReservationsAggregatedListResponse) MarshalJSON() ([]byte, error) {
- type NoMethod FutureReservationsAggregatedListResponse
- return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
-}
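// Editor's note: a sketch, not part of the vendored diff, of the
// nextPageToken contract described above. svc and the project ID are
// hypothetical; every List call in this package pages the same way. Assumes
// context is imported.
func listAllFutureReservations(ctx context.Context, svc *Service) error {
	pageToken := ""
	for {
		call := svc.FutureReservations.AggregatedList("my-project").Context(ctx)
		if pageToken != "" {
			call = call.PageToken(pageToken)
		}
		resp, err := call.Do()
		if err != nil {
			return err
		}
		// Consume resp.Items here.
		if resp.NextPageToken == "" {
			return nil // last page reached
		}
		pageToken = resp.NextPageToken
	}
}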
-
-// FutureReservationsAggregatedListResponseWarning: [Output Only] Informational
-// warning message.
-type FutureReservationsAggregatedListResponseWarning struct {
- // Code: [Output Only] A warning code, if applicable. For example, Compute
- // Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.
- //
- // Possible values:
- // "CLEANUP_FAILED" - Warning about failed cleanup of transient changes made
- // by a failed operation.
- // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was created.
- // "DEPRECATED_TYPE_USED" - When deploying and at least one of the resources
- // has a type marked as deprecated
- // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk that is
- // larger than image size.
- // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the
- // resources has a type marked as experimental
- // "EXTERNAL_API_WARNING" - Warning that is present in an external api call
- //   "FIELD_VALUE_OVERRIDEN" - Warning that the value of a field has been
- // overridden. Deprecated unused field.
- // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an injected
- // kernel, which is deprecated.
- // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV backend
- // service is associated with a health check that is not of type
- // HTTP/HTTPS/HTTP2.
- //   "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with an
- // exceedingly large number of resources
- //   "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved because the
- // list overhead quota was exceeded; this quota captures the number of
- // resources filtered out by the user-defined list filter.
- // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type
- // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is not
- // assigned to an instance on the network.
- // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot ip
- // forward.
- // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's nextHopInstance
- // URL refers to an instance that does not have an ipv6 interface on the same
- // network as the route.
- // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL refers to
- // an instance that does not exist.
- // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance URL
- // refers to an instance that is not on the same network as the route.
- // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not have a
- // status of RUNNING.
- // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to continue
- // the process despite the mentioned error.
- // "NO_RESULTS_ON_PAGE" - No results are present on a particular list page.
- // "PARTIAL_SUCCESS" - Success is reported, but some results may be missing
- // due to errors
- // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource that
- // requires a TOS they have not accepted.
- // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a resource is
- // in use.
- // "RESOURCE_NOT_DELETED" - One or more of the resources set to auto-delete
- // could not be deleted because they were in use.
- // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is
- // ignored.
- // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in instance
- // group manager is valid as such, but its application does not make a lot of
- // sense, because it allows only single instance in instance group.
- // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema are
- // present
- // "UNREACHABLE" - A given scope cannot be reached.
- Code string `json:"code,omitempty"`
- // Data: [Output Only] Metadata about this warning in key: value format. For
- // example: "data": [ { "key": "scope", "value": "zones/us-east1-d" } ]
- Data []*FutureReservationsAggregatedListResponseWarningData `json:"data,omitempty"`
- // Message: [Output Only] A human-readable description of the warning code.
- Message string `json:"message,omitempty"`
- // ForceSendFields is a list of field names (e.g. "Code") to unconditionally
- // include in API requests. By default, fields with empty or default values are
- // omitted from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
- // details.
- ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "Code") to include in API requests
- // with the JSON null value. By default, fields with empty values are omitted
- // from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
- NullFields []string `json:"-"`
-}
-
-func (s FutureReservationsAggregatedListResponseWarning) MarshalJSON() ([]byte, error) {
- type NoMethod FutureReservationsAggregatedListResponseWarning
- return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
-}
-
-type FutureReservationsAggregatedListResponseWarningData struct {
- // Key: [Output Only] A key that provides more detail on the warning being
- // returned. For example, for warnings where there are no results in a list
- // request for a particular zone, this key might be scope and the key value
- // might be the zone name. Other examples might be a key indicating a
- // deprecated resource and a suggested replacement, or a warning about invalid
- // network settings (for example, if an instance attempts to perform IP
- // forwarding but is not enabled for IP forwarding).
- Key string `json:"key,omitempty"`
- // Value: [Output Only] A warning data value corresponding to the key.
- Value string `json:"value,omitempty"`
- // ForceSendFields is a list of field names (e.g. "Key") to unconditionally
- // include in API requests. By default, fields with empty or default values are
- // omitted from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
- // details.
- ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "Key") to include in API requests
- // with the JSON null value. By default, fields with empty values are omitted
- // from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
- NullFields []string `json:"-"`
-}
-
-func (s FutureReservationsAggregatedListResponseWarningData) MarshalJSON() ([]byte, error) {
- type NoMethod FutureReservationsAggregatedListResponseWarningData
- return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
-}
-
-type FutureReservationsListResponse struct {
- Etag string `json:"etag,omitempty"`
- // Id: [Output Only] The unique identifier for the resource. This identifier is
- // defined by the server.
- Id string `json:"id,omitempty"`
- // Items: [Output Only] A list of future reservation resources.
- Items []*FutureReservation `json:"items,omitempty"`
- // Kind: [Output Only] Type of resource. Always
- // compute#FutureReservationsListResponse for lists of reservations.
- Kind string `json:"kind,omitempty"`
- // NextPageToken: [Output Only] This token allows you to get the next page of
- // results for list requests. If the number of results is larger than
- // maxResults, use the nextPageToken as a value for the query parameter
- // pageToken in the next list request. Subsequent list requests will have their
- // own nextPageToken to continue paging through the results.
- NextPageToken string `json:"nextPageToken,omitempty"`
- // SelfLink: [Output Only] Server-defined URL for this resource.
- SelfLink string `json:"selfLink,omitempty"`
- // Unreachables: [Output Only] Unreachable resources.
- Unreachables []string `json:"unreachables,omitempty"`
- // Warning: [Output Only] Informational warning message.
- Warning *FutureReservationsListResponseWarning `json:"warning,omitempty"`
-
- // ServerResponse contains the HTTP response code and headers from the server.
- googleapi.ServerResponse `json:"-"`
- // ForceSendFields is a list of field names (e.g. "Etag") to unconditionally
- // include in API requests. By default, fields with empty or default values are
- // omitted from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
- // details.
- ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "Etag") to include in API requests
- // with the JSON null value. By default, fields with empty values are omitted
- // from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
- NullFields []string `json:"-"`
-}
-
-func (s FutureReservationsListResponse) MarshalJSON() ([]byte, error) {
- type NoMethod FutureReservationsListResponse
- return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
-}
-
-// FutureReservationsListResponseWarning: [Output Only] Informational warning
-// message.
-type FutureReservationsListResponseWarning struct {
- // Code: [Output Only] A warning code, if applicable. For example, Compute
- // Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.
- //
- // Possible values:
- // "CLEANUP_FAILED" - Warning about failed cleanup of transient changes made
- // by a failed operation.
- // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was created.
- // "DEPRECATED_TYPE_USED" - When deploying and at least one of the resources
- // has a type marked as deprecated
- // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk that is
- // larger than image size.
- // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the
- // resources has a type marked as experimental
- // "EXTERNAL_API_WARNING" - Warning that is present in an external api call
- //   "FIELD_VALUE_OVERRIDEN" - Warning that the value of a field has been
- // overridden. Deprecated unused field.
- // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an injected
- // kernel, which is deprecated.
- // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV backend
- // service is associated with a health check that is not of type
- // HTTP/HTTPS/HTTP2.
- //   "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with an
- // exceedingly large number of resources
- //   "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved because the
- // list overhead quota was exceeded; this quota captures the number of
- // resources filtered out by the user-defined list filter.
- // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type
- // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is not
- // assigned to an instance on the network.
- // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot ip
- // forward.
- // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's nextHopInstance
- // URL refers to an instance that does not have an ipv6 interface on the same
- // network as the route.
- // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL refers to
- // an instance that does not exist.
- // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance URL
- // refers to an instance that is not on the same network as the route.
- // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not have a
- // status of RUNNING.
- // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to continue
- // the process despite the mentioned error.
- // "NO_RESULTS_ON_PAGE" - No results are present on a particular list page.
- // "PARTIAL_SUCCESS" - Success is reported, but some results may be missing
- // due to errors
- // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource that
- // requires a TOS they have not accepted.
- // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a resource is
- // in use.
- // "RESOURCE_NOT_DELETED" - One or more of the resources set to auto-delete
- // could not be deleted because they were in use.
- // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is
- // ignored.
- // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in instance
- // group manager is valid as such, but its application does not make a lot of
- // sense, because it allows only single instance in instance group.
- // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema are
- // present
- // "UNREACHABLE" - A given scope cannot be reached.
- Code string `json:"code,omitempty"`
- // Data: [Output Only] Metadata about this warning in key: value format. For
- // example: "data": [ { "key": "scope", "value": "zones/us-east1-d" } ]
- Data []*FutureReservationsListResponseWarningData `json:"data,omitempty"`
- // Message: [Output Only] A human-readable description of the warning code.
- Message string `json:"message,omitempty"`
- // ForceSendFields is a list of field names (e.g. "Code") to unconditionally
- // include in API requests. By default, fields with empty or default values are
- // omitted from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
- // details.
- ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "Code") to include in API requests
- // with the JSON null value. By default, fields with empty values are omitted
- // from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
- NullFields []string `json:"-"`
-}
-
-func (s FutureReservationsListResponseWarning) MarshalJSON() ([]byte, error) {
- type NoMethod FutureReservationsListResponseWarning
- return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
-}
-
-type FutureReservationsListResponseWarningData struct {
- // Key: [Output Only] A key that provides more detail on the warning being
- // returned. For example, for warnings where there are no results in a list
- // request for a particular zone, this key might be scope and the key value
- // might be the zone name. Other examples might be a key indicating a
- // deprecated resource and a suggested replacement, or a warning about invalid
- // network settings (for example, if an instance attempts to perform IP
- // forwarding but is not enabled for IP forwarding).
- Key string `json:"key,omitempty"`
- // Value: [Output Only] A warning data value corresponding to the key.
- Value string `json:"value,omitempty"`
- // ForceSendFields is a list of field names (e.g. "Key") to unconditionally
- // include in API requests. By default, fields with empty or default values are
- // omitted from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
- // details.
- ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "Key") to include in API requests
- // with the JSON null value. By default, fields with empty values are omitted
- // from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
- NullFields []string `json:"-"`
-}
-
-func (s FutureReservationsListResponseWarningData) MarshalJSON() ([]byte, error) {
- type NoMethod FutureReservationsListResponseWarningData
- return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
-}
-
-type FutureReservationsScopedList struct {
- // FutureReservations: A list of future reservations contained in this scope.
- FutureReservations []*FutureReservation `json:"futureReservations,omitempty"`
- // Warning: Informational warning which replaces the list of future
- // reservations when the list is empty.
- Warning *FutureReservationsScopedListWarning `json:"warning,omitempty"`
- // ForceSendFields is a list of field names (e.g. "FutureReservations") to
- // unconditionally include in API requests. By default, fields with empty or
- // default values are omitted from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
- // details.
- ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "FutureReservations") to include
- // in API requests with the JSON null value. By default, fields with empty
- // values are omitted from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
- NullFields []string `json:"-"`
-}
-
-func (s FutureReservationsScopedList) MarshalJSON() ([]byte, error) {
- type NoMethod FutureReservationsScopedList
- return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
-}
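// Editor's note: a sketch, not part of the vendored diff, of walking an
// aggregated response. Items is keyed by scope, and per the docs above an
// empty scope carries a Warning in place of its list. resp is assumed to come
// from a prior AggregatedList call; fmt is assumed imported.
func printScopedReservations(resp *FutureReservationsAggregatedListResponse) {
	for scope, scoped := range resp.Items {
		if scoped.Warning != nil {
			// For example NO_RESULTS_ON_PAGE when a scope has nothing to list.
			fmt.Printf("%s: %s\n", scope, scoped.Warning.Message)
			continue
		}
		for _, fr := range scoped.FutureReservations {
			fmt.Printf("%s: %+v\n", scope, fr)
		}
	}
}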
-
-// FutureReservationsScopedListWarning: Informational warning which replaces
-// the list of future reservations when the list is empty.
-type FutureReservationsScopedListWarning struct {
- // Code: [Output Only] A warning code, if applicable. For example, Compute
- // Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.
- //
- // Possible values:
- // "CLEANUP_FAILED" - Warning about failed cleanup of transient changes made
- // by a failed operation.
- // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was created.
- // "DEPRECATED_TYPE_USED" - When deploying and at least one of the resources
- // has a type marked as deprecated
- // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk that is
- // larger than image size.
- // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the
- // resources has a type marked as experimental
- // "EXTERNAL_API_WARNING" - Warning that is present in an external api call
- //   "FIELD_VALUE_OVERRIDEN" - Warning that the value of a field has been
- // overridden. Deprecated unused field.
- // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an injected
- // kernel, which is deprecated.
- // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV backend
- // service is associated with a health check that is not of type
- // HTTP/HTTPS/HTTP2.
- //   "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with an
- // exceedingly large number of resources
- //   "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved because the
- // list overhead quota was exceeded; this quota captures the number of
- // resources filtered out by the user-defined list filter.
- // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type
- // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is not
- // assigned to an instance on the network.
- // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot ip
- // forward.
- // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's nextHopInstance
- // URL refers to an instance that does not have an ipv6 interface on the same
- // network as the route.
- // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL refers to
- // an instance that does not exist.
- // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance URL
- // refers to an instance that is not on the same network as the route.
- // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not have a
- // status of RUNNING.
- // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to continue
- // the process despite the mentioned error.
- // "NO_RESULTS_ON_PAGE" - No results are present on a particular list page.
- // "PARTIAL_SUCCESS" - Success is reported, but some results may be missing
- // due to errors
- // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource that
- // requires a TOS they have not accepted.
- // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a resource is
- // in use.
- // "RESOURCE_NOT_DELETED" - One or more of the resources set to auto-delete
- // could not be deleted because they were in use.
- // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is
- // ignored.
- // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in instance
- // group manager is valid as such, but its application does not make a lot of
- // sense, because it allows only single instance in instance group.
- // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema are
- // present
- // "UNREACHABLE" - A given scope cannot be reached.
- Code string `json:"code,omitempty"`
- // Data: [Output Only] Metadata about this warning in key: value format. For
- // example: "data": [ { "key": "scope", "value": "zones/us-east1-d" } ]
- Data []*FutureReservationsScopedListWarningData `json:"data,omitempty"`
- // Message: [Output Only] A human-readable description of the warning code.
- Message string `json:"message,omitempty"`
- // ForceSendFields is a list of field names (e.g. "Code") to unconditionally
- // include in API requests. By default, fields with empty or default values are
- // omitted from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
- // details.
- ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "Code") to include in API requests
- // with the JSON null value. By default, fields with empty values are omitted
- // from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
- NullFields []string `json:"-"`
-}
-
-func (s FutureReservationsScopedListWarning) MarshalJSON() ([]byte, error) {
- type NoMethod FutureReservationsScopedListWarning
- return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
-}
-
-type FutureReservationsScopedListWarningData struct {
- // Key: [Output Only] A key that provides more detail on the warning being
- // returned. For example, for warnings where there are no results in a list
- // request for a particular zone, this key might be scope and the key value
- // might be the zone name. Other examples might be a key indicating a
- // deprecated resource and a suggested replacement, or a warning about invalid
- // network settings (for example, if an instance attempts to perform IP
- // forwarding but is not enabled for IP forwarding).
- Key string `json:"key,omitempty"`
- // Value: [Output Only] A warning data value corresponding to the key.
- Value string `json:"value,omitempty"`
- // ForceSendFields is a list of field names (e.g. "Key") to unconditionally
- // include in API requests. By default, fields with empty or default values are
- // omitted from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
- // details.
- ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "Key") to include in API requests
- // with the JSON null value. By default, fields with empty values are omitted
- // from API requests. See
- // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
- NullFields []string `json:"-"`
-}
-
-func (s FutureReservationsScopedListWarningData) MarshalJSON() ([]byte, error) {
- type NoMethod FutureReservationsScopedListWarningData
- return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
-}
-
type GRPCHealthCheck struct {
// GrpcServiceName: The gRPC service name for the health check. This field is
// optional. The value of grpc_service_name has the following meanings by
@@ -14430,7 +13679,14 @@ type HealthStatus struct {
// IpAddress: For target pool based Network Load Balancing, it indicates the
// forwarding rule's IP address assigned to this instance. For other types of
// load balancing, the field indicates VM internal ip.
- IpAddress string `json:"ipAddress,omitempty"`
+ IpAddress string `json:"ipAddress,omitempty"`
+ Ipv6Address string `json:"ipv6Address,omitempty"`
+ // Ipv6HealthState: Health state of the IPv6 address of the instance.
+ //
+ // Possible values:
+ // "HEALTHY"
+ // "UNHEALTHY"
+ Ipv6HealthState string `json:"ipv6HealthState,omitempty"`
// Port: The named port of the instance group, not necessarily the port that is
// health-checked.
Port int64 `json:"port,omitempty"`
@@ -21680,10 +20936,10 @@ type Interconnect struct {
AdminEnabled bool `json:"adminEnabled,omitempty"`
// AvailableFeatures: [Output only] List of features available for this
// Interconnect connection, which can take one of the following values: -
- // MACSEC If present then the Interconnect connection is provisioned on MACsec
- // capable hardware ports. If not present then the Interconnect connection is
- // provisioned on non-MACsec capable ports and MACsec isn't supported and
- // enabling MACsec fails.
+ // IF_MACSEC If present then the Interconnect connection is provisioned on
+ // MACsec capable hardware ports. If not present then the Interconnect
+ // connection is provisioned on non-MACsec capable ports and MACsec isn't
+ // supported and enabling MACsec fails.
//
// Possible values:
// "IF_MACSEC" - Media Access Control security (MACsec)
@@ -21807,7 +21063,7 @@ type Interconnect struct {
RemoteLocation string `json:"remoteLocation,omitempty"`
// RequestedFeatures: Optional. List of features requested for this
// Interconnect connection, which can take one of the following values: -
- // MACSEC If specified then the connection is created on MACsec capable
+ // IF_MACSEC If specified then the connection is created on MACsec capable
// hardware ports. If not specified, the default value is false, which
// allocates non-MACsec capable ports first if available. This parameter can be
// provided only with Interconnect INSERT. It isn't valid for Interconnect
@@ -27686,6 +26942,10 @@ type NetworkEndpointGroupPscData struct {
// for PSC. This IP address acts as a VIP for a PSC NEG, allowing it to act as
// an endpoint in L7 PSC-XLB.
ConsumerPscAddress string `json:"consumerPscAddress,omitempty"`
+ // ProducerPort: The psc producer port is used to connect PSC NEG with specific
+ // port on the PSC Producer side; should only be used for the
+ // PRIVATE_SERVICE_CONNECT NEG type
+ ProducerPort int64 `json:"producerPort,omitempty"`
// PscConnectionId: [Output Only] The PSC connection id of the PSC Network
// Endpoint Group Consumer.
PscConnectionId uint64 `json:"pscConnectionId,omitempty,string"`
@@ -28192,7 +27452,8 @@ type NetworkInterface struct {
//
// Possible values:
// "IPV4_IPV6" - The network interface can have both IPv4 and IPv6 addresses.
- // "IPV4_ONLY" - The network interface will be assigned IPv4 address.
+ // "IPV4_ONLY" - The network interface will only be assigned IPv4 addresses.
+ // "IPV6_ONLY" - The network interface will only be assigned IPv6 addresses.
StackType string `json:"stackType,omitempty"`
// Subnetwork: The URL of the Subnetwork resource for this instance. If the
// network resource is in legacy mode, do not specify this field. If the
@@ -31000,7 +30261,7 @@ type Operation struct {
TargetId uint64 `json:"targetId,omitempty,string"`
// TargetLink: [Output Only] The URL of the resource that the operation
// modifies. For operations related to creating a snapshot, this points to the
- // persistent disk that the snapshot was created from.
+ // disk that the snapshot was created from.
TargetLink string `json:"targetLink,omitempty"`
// User: [Output Only] User who requested the operation, for example:
// `[email protected]` or `alice_smith_identifier
@@ -42185,6 +41446,17 @@ type ServiceAttachment struct {
// loadBalancingScheme INTERNAL* that is serving the endpoint identified by
// this service attachment.
ProducerForwardingRule string `json:"producerForwardingRule,omitempty"`
+ // PropagatedConnectionLimit: The number of consumer spokes that connected
+ // Private Service Connect endpoints can be propagated to through Network
+ // Connectivity Center. This limit lets the service producer limit how many
+ // propagated Private Service Connect connections can be established to this
+ // service attachment from a single consumer. If the connection preference of
+ // the service attachment is ACCEPT_MANUAL, the limit applies to each project
+ // or network that is listed in the consumer accept list. If the connection
+ // preference of the service attachment is ACCEPT_AUTOMATIC, the limit applies
+ // to each project that contains a connected endpoint. If unspecified, the
+ // default propagated connection limit is 250.
+ PropagatedConnectionLimit int64 `json:"propagatedConnectionLimit,omitempty"`
// PscServiceAttachmentId: [Output Only] An 128-bit global unique ID of the PSC
// service attachment.
PscServiceAttachmentId *Uint128 `json:"pscServiceAttachmentId,omitempty"`
@@ -42393,6 +41665,10 @@ type ServiceAttachmentConnectedEndpoint struct {
ConsumerNetwork string `json:"consumerNetwork,omitempty"`
// Endpoint: The url of a connected endpoint.
Endpoint string `json:"endpoint,omitempty"`
+ // PropagatedConnectionCount: The number of consumer Network Connectivity
+ // Center spokes that the connected Private Service Connect endpoint has
+ // propagated to.
+ PropagatedConnectionCount int64 `json:"propagatedConnectionCount,omitempty"`
// PscConnectionId: The PSC connection id of the connected endpoint.
PscConnectionId uint64 `json:"pscConnectionId,omitempty,string"`
// Status: The status of a connected endpoint to this service attachment.
@@ -46388,8 +45664,8 @@ type Subnetwork struct {
// Id: [Output Only] The unique identifier for the resource. This identifier is
// defined by the server.
Id uint64 `json:"id,omitempty,string"`
- // InternalIpv6Prefix: [Output Only] The internal IPv6 address range that is
- // assigned to this subnetwork.
+ // InternalIpv6Prefix: The internal IPv6 address range that is owned by this
+ // subnetwork.
InternalIpv6Prefix string `json:"internalIpv6Prefix,omitempty"`
// IpCidrRange: The range of internal addresses that are owned by this
// subnetwork. Provide this property when you create the subnetwork. For
@@ -46503,6 +45779,7 @@ type Subnetwork struct {
// "IPV4_IPV6" - New VMs in this subnet can have both IPv4 and IPv6
// addresses.
// "IPV4_ONLY" - New VMs in this subnet will only be assigned IPv4 addresses.
+ // "IPV6_ONLY" - New VMs in this subnet will only be assigned IPv6 addresses.
StackType string `json:"stackType,omitempty"`
// State: [Output Only] The state of the subnetwork, which can be one of the
// following values: READY: Subnetwork is created and ready to use DRAINING:
@@ -51446,10 +50723,10 @@ func (s UpcomingMaintenance) MarshalJSON() ([]byte, error) {
// Director, see the Traffic Director features: Routing and traffic management
// table. This resource defines mappings from hostnames and URL paths to either
// a backend service or a backend bucket. To use the global urlMaps resource,
-// the backend service must have a loadBalancingScheme of either EXTERNAL or
-// INTERNAL_SELF_MANAGED. To use the regionUrlMaps resource, the backend
-// service must have a loadBalancingScheme of INTERNAL_MANAGED. For more
-// information, read URL Map Concepts.
+// the backend service must have a loadBalancingScheme of either EXTERNAL,
+// EXTERNAL_MANAGED, or INTERNAL_SELF_MANAGED. To use the regionUrlMaps
+// resource, the backend service must have a loadBalancingScheme of
+// INTERNAL_MANAGED. For more information, read URL Map Concepts.
type UrlMap struct {
// CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text format.
CreationTimestamp string `json:"creationTimestamp,omitempty"`
@@ -52328,6 +51605,7 @@ type UsableSubnetwork struct {
// "IPV4_IPV6" - New VMs in this subnet can have both IPv4 and IPv6
// addresses.
// "IPV4_ONLY" - New VMs in this subnet will only be assigned IPv4 addresses.
+ // "IPV6_ONLY" - New VMs in this subnet will only be assigned IPv6 addresses.
StackType string `json:"stackType,omitempty"`
// Subnetwork: Subnetwork URL.
Subnetwork string `json:"subnetwork,omitempty"`
diff --git a/vendor/google.golang.org/api/compute/v1/compute2-gen.go b/vendor/google.golang.org/api/compute/v1/compute2-gen.go
index 8b9c7f13e7445..f66a5b58a344c 100644
--- a/vendor/google.golang.org/api/compute/v1/compute2-gen.go
+++ b/vendor/google.golang.org/api/compute/v1/compute2-gen.go
@@ -5413,8 +5413,8 @@ type BackendServicesListUsableCall struct {
header_ http.Header
}
-// ListUsable: Retrieves an aggregated list of all usable backend services in
-// the specified project.
+// ListUsable: Retrieves a list of all usable backend services in the specified
+// project.
//
// - project: Project ID for this request.
func (r *BackendServicesService) ListUsable(project string) *BackendServicesListUsableCall {
@@ -14168,129 +14168,145 @@ func (c *ForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operat
return ret, nil
}
-type FutureReservationsAggregatedListCall struct {
- s *Service
- project string
- urlParams_ gensupport.URLParams
- ifNoneMatch_ string
- ctx_ context.Context
- header_ http.Header
+type GlobalAddressesDeleteCall struct {
+ s *Service
+ project string
+ address string
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+ header_ http.Header
}
-// AggregatedList: Retrieves an aggregated list of future reservations. To
-// prevent failure, Google recommends that you set the `returnPartialSuccess`
-// parameter to `true`.
+// Delete: Deletes the specified address resource.
//
+// - address: Name of the address resource to delete.
// - project: Project ID for this request.
-func (r *FutureReservationsService) AggregatedList(project string) *FutureReservationsAggregatedListCall {
- c := &FutureReservationsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+func (r *GlobalAddressesService) Delete(project string, address string) *GlobalAddressesDeleteCall {
+ c := &GlobalAddressesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.project = project
+ c.address = address
return c
}
-// Filter sets the optional parameter "filter": A filter expression that
-// filters resources listed in the response. Most Compute resources support two
-// types of filter expressions: expressions that support regular expressions
-// and expressions that follow API improvement proposal AIP-160. These two
-// types of filter expressions cannot be mixed in one request. If you want to
-// use AIP-160, your expression must specify the field name, an operator, and
-// the value that you want to use for filtering. The value must be a string, a
-// number, or a boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`,
-// `>=` or `:`. For example, if you are filtering Compute Engine instances, you
-// can exclude instances named `example-instance` by specifying `name !=
-// example-instance`. The `:*` comparison can be used to test whether a key has
-// been defined. For example, to find all objects with `owner` label use: ```
-// labels.owner:* ``` You can also filter nested fields. For example, you could
-// specify `scheduling.automaticRestart = false` to include instances only if
-// they are not scheduled for automatic restarts. You can use filtering on
-// nested fields to filter based on resource labels. To filter on multiple
-// expressions, provide each separate expression within parentheses. For
-// example: ``` (scheduling.automaticRestart = true) (cpuPlatform = "Intel
-// Skylake") ``` By default, each expression is an `AND` expression. However,
-// you can include `AND` and `OR` expressions explicitly. For example: ```
-// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND
-// (scheduling.automaticRestart = true) ``` If you want to use a regular
-// expression, use the `eq` (equal) or `ne` (not equal) operator against a
-// single un-parenthesized expression with or without quotes or against
-// multiple parenthesized expressions. Examples: `fieldname eq unquoted
-// literal` `fieldname eq 'single quoted literal'` `fieldname eq "double quoted
-// literal" `(fieldname1 eq literal) (fieldname2 ne "literal")` The literal
-// value is interpreted as a regular expression using Google RE2 library
-// syntax. The literal value must match the entire field. For example, to
-// filter for instances that do not end with name "instance", you would use
-// `name ne .*instance`. You cannot combine constraints on multiple fields
-// using regular expressions.
-func (c *FutureReservationsAggregatedListCall) Filter(filter string) *FutureReservationsAggregatedListCall {
- c.urlParams_.Set("filter", filter)
+// RequestId sets the optional parameter "requestId": An optional request ID to
+// identify requests. Specify a unique request ID so that if you must retry
+// your request, the server will know to ignore the request if it has already
+// been completed. For example, consider a situation where you make an initial
+// request and the request times out. If you make the request again with the
+// same request ID, the server can check if original operation with the same
+// request ID was received, and if so, will ignore the second request. This
+// prevents clients from accidentally creating duplicate commitments. The
+// request ID must be a valid UUID with the exception that zero UUID is not
+// supported ( 00000000-0000-0000-0000-000000000000).
+func (c *GlobalAddressesDeleteCall) RequestId(requestId string) *GlobalAddressesDeleteCall {
+ c.urlParams_.Set("requestId", requestId)
return c
}
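// Editor's note: a sketch, not part of the vendored diff, of the idempotent
// retry pattern the RequestId doc above describes. The uuid helper
// (github.com/google/uuid) and the project/address names are assumptions.
func deleteAddressIdempotently(ctx context.Context, svc *Service) (*Operation, error) {
	reqID := uuid.NewString()
	// Reusing reqID on a retry is safe: the server detects the duplicate and
	// ignores the second request rather than acting twice.
	return svc.GlobalAddresses.Delete("my-project", "my-address").
		RequestId(reqID).
		Context(ctx).
		Do()
}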
-// IncludeAllScopes sets the optional parameter "includeAllScopes": Indicates
-// whether every visible scope for each scope type (zone, region, global)
-// should be included in the response. For new resource types added after this
-// field, the flag has no effect as new resource types will always include
-// every visible scope for each scope type in response. For resource types
-// which predate this field, if this flag is omitted or false, only scopes of
-// the scope types where the resource type is expected to be found will be
-// included.
-func (c *FutureReservationsAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *FutureReservationsAggregatedListCall {
- c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes))
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
+// details.
+func (c *GlobalAddressesDeleteCall) Fields(s ...googleapi.Field) *GlobalAddressesDeleteCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
-// MaxResults sets the optional parameter "maxResults": The maximum number of
-// results per page that should be returned. If the number of available results
-// is larger than `maxResults`, Compute Engine returns a `nextPageToken` that
-// can be used to get the next page of results in subsequent list requests.
-// Acceptable values are `0` to `500`, inclusive. (Default: `500`)
-func (c *FutureReservationsAggregatedListCall) MaxResults(maxResults int64) *FutureReservationsAggregatedListCall {
- c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+// Context sets the context to be used in this call's Do method.
+func (c *GlobalAddressesDeleteCall) Context(ctx context.Context) *GlobalAddressesDeleteCall {
+ c.ctx_ = ctx
return c
}
-// OrderBy sets the optional parameter "orderBy": Sorts list results by a
-// certain order. By default, results are returned in alphanumerical order
-// based on the resource name. You can also sort results in descending order
-// based on the creation timestamp using `orderBy="creationTimestamp desc".
-// This sorts results based on the `creationTimestamp` field in reverse
-// chronological order (newest result first). Use this to sort resources like
-// operations so that the newest operation is returned first. Currently, only
-// sorting by `name` or `creationTimestamp desc` is supported.
-func (c *FutureReservationsAggregatedListCall) OrderBy(orderBy string) *FutureReservationsAggregatedListCall {
- c.urlParams_.Set("orderBy", orderBy)
- return c
+// Header returns a http.Header that can be modified by the caller to add
+// headers to the request.
+func (c *GlobalAddressesDeleteCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
}
-// PageToken sets the optional parameter "pageToken": Specifies a page token to
-// use. Set `pageToken` to the `nextPageToken` returned by a previous list
-// request to get the next page of results.
-func (c *FutureReservationsAggregatedListCall) PageToken(pageToken string) *FutureReservationsAggregatedListCall {
- c.urlParams_.Set("pageToken", pageToken)
- return c
+func (c *GlobalAddressesDeleteCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ c.urlParams_.Set("prettyPrint", "false")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/addresses/{address}")
+ urls += "?" + c.urlParams_.Encode()
+ req, err := http.NewRequest("DELETE", urls, body)
+ if err != nil {
+ return nil, err
+ }
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "address": c.address,
+ })
+ return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
-// ReturnPartialSuccess sets the optional parameter "returnPartialSuccess":
-// Opt-in for partial success behavior which provides partial results in case
-// of failure. The default value is false. For example, when partial success
-// behavior is enabled, aggregatedList for a single zone scope either returns
-// all resources in the zone or no resources, with an error code.
-func (c *FutureReservationsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *FutureReservationsAggregatedListCall {
- c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess))
- return c
+// Do executes the "compute.globalAddresses.delete" call.
+// Any non-2xx status code is an error. Response headers are in either
+// *Operation.ServerResponse.Header or (if a response was returned at all) in
+// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
+// whether the returned error was because http.StatusNotModified was returned.
+func (c *GlobalAddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, gensupport.WrapError(&googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ })
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, gensupport.WrapError(err)
+ }
+ ret := &Operation{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ target := &ret
+ if err := gensupport.DecodeResponse(target, res); err != nil {
+ return nil, err
+ }
+ return ret, nil
}
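// Editor's note: a sketch, not part of the vendored diff, of the error
// contract the Do doc above describes: any non-2xx status surfaces as a
// *googleapi.Error. Assumes errors and log are imported.
func logDeleteFailure(err error) {
	var gErr *googleapi.Error
	if errors.As(err, &gErr) {
		log.Printf("delete failed: HTTP %d: %s", gErr.Code, gErr.Message)
	}
}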
-// ServiceProjectNumber sets the optional parameter "serviceProjectNumber": The
-// Shared VPC service project id or service project number for which aggregated
-// list request is invoked for subnetworks list-usable api.
-func (c *FutureReservationsAggregatedListCall) ServiceProjectNumber(serviceProjectNumber int64) *FutureReservationsAggregatedListCall {
- c.urlParams_.Set("serviceProjectNumber", fmt.Sprint(serviceProjectNumber))
+type GlobalAddressesGetCall struct {
+ s *Service
+ project string
+ address string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+ header_ http.Header
+}
+
+// Get: Returns the specified address resource.
+//
+// - address: Name of the address resource to return.
+// - project: Project ID for this request.
+func (r *GlobalAddressesService) Get(project string, address string) *GlobalAddressesGetCall {
+ c := &GlobalAddressesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ c.address = address
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
// details.
-func (c *FutureReservationsAggregatedListCall) Fields(s ...googleapi.Field) *FutureReservationsAggregatedListCall {
+func (c *GlobalAddressesGetCall) Fields(s ...googleapi.Field) *GlobalAddressesGetCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
@@ -14298,27 +14314,27 @@ func (c *FutureReservationsAggregatedListCall) Fields(s ...googleapi.Field) *Fut
// IfNoneMatch sets an optional parameter which makes the operation fail if the
// object's ETag matches the given value. This is useful for getting updates
// only after the object has changed since the last request.
-func (c *FutureReservationsAggregatedListCall) IfNoneMatch(entityTag string) *FutureReservationsAggregatedListCall {
+func (c *GlobalAddressesGetCall) IfNoneMatch(entityTag string) *GlobalAddressesGetCall {
c.ifNoneMatch_ = entityTag
return c
}
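
`Fields` and `IfNoneMatch` compose naturally on read calls: the first trims the response to the listed JSON fields, the second turns the request into a conditional GET that returns 304 while the ETag still matches. A sketch of the resulting caching pattern, using only methods shown in this hunk plus the `googleapi.IsNotModified` check the `Do` comment recommends:

```go
import (
	"context"

	compute "google.golang.org/api/compute/v1" // adjust to the vendored version
	"google.golang.org/api/googleapi"
)

// getAddressIfChanged refetches a global address only when its ETag changed;
// cached (and lastETag) come from a previous successful call.
func getAddressIfChanged(ctx context.Context, svc *compute.Service, project, name, lastETag string, cached *compute.Address) (*compute.Address, error) {
	addr, err := svc.GlobalAddresses.Get(project, name).
		Fields("name", "address", "status"). // partial response, see Fields above
		IfNoneMatch(lastETag).               // conditional GET, see IfNoneMatch above
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		return cached, nil // HTTP 304: the cached copy is still current
	}
	if err != nil {
		return nil, err
	}
	return addr, nil
}
```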
// Context sets the context to be used in this call's Do method.
-func (c *FutureReservationsAggregatedListCall) Context(ctx context.Context) *FutureReservationsAggregatedListCall {
+func (c *GlobalAddressesGetCall) Context(ctx context.Context) *GlobalAddressesGetCall {
c.ctx_ = ctx
return c
}
// Header returns a http.Header that can be modified by the caller to add
// headers to the request.
-func (c *FutureReservationsAggregatedListCall) Header() http.Header {
+func (c *GlobalAddressesGetCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
-func (c *FutureReservationsAggregatedListCall) doRequest(alt string) (*http.Response, error) {
+func (c *GlobalAddressesGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
@@ -14326,7 +14342,7 @@ func (c *FutureReservationsAggregatedListCall) doRequest(alt string) (*http.Resp
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/futureReservations")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/addresses/{address}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
@@ -14335,17 +14351,17 @@ func (c *FutureReservationsAggregatedListCall) doRequest(alt string) (*http.Resp
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"project": c.project,
+ "address": c.address,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
-// Do executes the "compute.futureReservations.aggregatedList" call.
+// Do executes the "compute.globalAddresses.get" call.
// Any non-2xx status code is an error. Response headers are in either
-// *FutureReservationsAggregatedListResponse.ServerResponse.Header or (if a
-// response was returned at all) in error.(*googleapi.Error).Header. Use
-// googleapi.IsNotModified to check whether the returned error was because
-// http.StatusNotModified was returned.
-func (c *FutureReservationsAggregatedListCall) Do(opts ...googleapi.CallOption) (*FutureReservationsAggregatedListResponse, error) {
+// *Address.ServerResponse.Header or (if a response was returned at all) in
+// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
+// whether the returned error was because http.StatusNotModified was returned.
+func (c *GlobalAddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
@@ -14364,7 +14380,7 @@ func (c *FutureReservationsAggregatedListCall) Do(opts ...googleapi.CallOption)
if err := googleapi.CheckResponse(res); err != nil {
return nil, gensupport.WrapError(err)
}
- ret := &FutureReservationsAggregatedListResponse{
+ ret := &Address{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
@@ -14377,48 +14393,23 @@ func (c *FutureReservationsAggregatedListCall) Do(opts ...googleapi.CallOption)
return ret, nil
}
-// Pages invokes f for each page of results.
-// A non-nil error returned from f will halt the iteration.
-// The provided context supersedes any context provided to the Context method.
-func (c *FutureReservationsAggregatedListCall) Pages(ctx context.Context, f func(*FutureReservationsAggregatedListResponse) error) error {
- c.ctx_ = ctx
- defer c.PageToken(c.urlParams_.Get("pageToken"))
- for {
- x, err := c.Do()
- if err != nil {
- return err
- }
- if err := f(x); err != nil {
- return err
- }
- if x.NextPageToken == "" {
- return nil
- }
- c.PageToken(x.NextPageToken)
- }
-}
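
The removed `Pages` helper above is the generator's pagination wrapper: it calls `Do` repeatedly, hands each page to the callback, and chains `NextPageToken` until it is empty. A shape-only sketch of driving it, written against the pre-change call purely for illustration; the `AggregatedList` constructor and the response's `Items` map follow the generator's usual conventions and are assumptions here, not shown in this hunk:

```go
import (
	"context"

	compute "google.golang.org/api/compute/v1" // pre-change vendored surface
)

// walkFutureReservations visits every page of the aggregated list.
func walkFutureReservations(ctx context.Context, svc *compute.Service, project string) error {
	return svc.FutureReservations.AggregatedList(project).Pages(ctx,
		func(page *compute.FutureReservationsAggregatedListResponse) error {
			for scope := range page.Items {
				_ = scope // e.g. "zones/us-central1-a"
			}
			return nil // a non-nil error here halts the iteration
		})
}
```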
-
-type FutureReservationsCancelCall struct {
- s *Service
- project string
- zone string
- futureReservation string
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
+type GlobalAddressesInsertCall struct {
+ s *Service
+ project string
+ address *Address
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+ header_ http.Header
}
-// Cancel: Cancel the specified future reservation.
+// Insert: Creates an address resource in the specified project by using the
+// data included in the request.
//
-// - futureReservation: Name of the future reservation to retrieve. Name should
-// conform to RFC1035.
-// - project: Project ID for this request.
-// - zone: Name of the zone for this request. Name should conform to RFC1035.
-func (r *FutureReservationsService) Cancel(project string, zone string, futureReservation string) *FutureReservationsCancelCall {
- c := &FutureReservationsCancelCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+// - project: Project ID for this request.
+func (r *GlobalAddressesService) Insert(project string, address *Address) *GlobalAddressesInsertCall {
+ c := &GlobalAddressesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.project = project
- c.zone = zone
- c.futureReservation = futureReservation
+ c.address = address
return c
}
@@ -14432,7 +14423,7 @@ func (r *FutureReservationsService) Cancel(project string, zone string, futureRe
// prevents clients from accidentally creating duplicate commitments. The
// request ID must be a valid UUID with the exception that zero UUID is not
// supported ( 00000000-0000-0000-0000-000000000000).
-func (c *FutureReservationsCancelCall) RequestId(requestId string) *FutureReservationsCancelCall {
+func (c *GlobalAddressesInsertCall) RequestId(requestId string) *GlobalAddressesInsertCall {
c.urlParams_.Set("requestId", requestId)
return c
}
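
The `RequestId` parameter documented above is the idempotency hook for mutations: reusing one UUID across retries lets the server recognize and drop the duplicate POST. A sketch combining it with `Insert`; the `github.com/google/uuid` dependency and the `Address` field values are illustrative assumptions:

```go
import (
	"context"

	"github.com/google/uuid" // illustrative dependency for generating request IDs
	compute "google.golang.org/api/compute/v1"
)

// insertGlobalAddress creates a global address idempotently: retrying with
// the same request ID makes the server ignore the duplicate request.
func insertGlobalAddress(ctx context.Context, svc *compute.Service, project string) (*compute.Operation, error) {
	addr := &compute.Address{
		Name:        "example-address", // illustrative values
		AddressType: "EXTERNAL",
		IpVersion:   "IPV4",
	}
	reqID := uuid.New().String() // must be a valid, non-zero UUID, per the comment above
	return svc.GlobalAddresses.Insert(project, addr).
		RequestId(reqID).
		Context(ctx).
		Do()
}
```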
@@ -14440,32 +14431,36 @@ func (c *FutureReservationsCancelCall) RequestId(requestId string) *FutureReserv
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
// details.
-func (c *FutureReservationsCancelCall) Fields(s ...googleapi.Field) *FutureReservationsCancelCall {
+func (c *GlobalAddressesInsertCall) Fields(s ...googleapi.Field) *GlobalAddressesInsertCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method.
-func (c *FutureReservationsCancelCall) Context(ctx context.Context) *FutureReservationsCancelCall {
+func (c *GlobalAddressesInsertCall) Context(ctx context.Context) *GlobalAddressesInsertCall {
c.ctx_ = ctx
return c
}
// Header returns a http.Header that can be modified by the caller to add
// headers to the request.
-func (c *FutureReservationsCancelCall) Header() http.Header {
+func (c *GlobalAddressesInsertCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
-func (c *FutureReservationsCancelCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
+func (c *GlobalAddressesInsertCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.address)
+ if err != nil {
+ return nil, err
+ }
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/futureReservations/{futureReservation}/cancel")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/addresses")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
@@ -14473,1061 +14468,17 @@ func (c *FutureReservationsCancelCall) doRequest(alt string) (*http.Response, er
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
- "zone": c.zone,
- "futureReservation": c.futureReservation,
+ "project": c.project,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
-// Do executes the "compute.futureReservations.cancel" call.
+// Do executes the "compute.globalAddresses.insert" call.
// Any non-2xx status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at all) in
// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
// whether the returned error was because http.StatusNotModified was returned.
-func (c *FutureReservationsCancelCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, gensupport.WrapError(&googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- })
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, gensupport.WrapError(err)
- }
- ret := &Operation{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-type FutureReservationsDeleteCall struct {
- s *Service
- project string
- zone string
- futureReservation string
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
-}
-
-// Delete: Deletes the specified future reservation.
-//
-// - futureReservation: Name of the future reservation to retrieve. Name should
-// conform to RFC1035.
-// - project: Project ID for this request.
-// - zone: Name of the zone for this request. Name should conform to RFC1035.
-func (r *FutureReservationsService) Delete(project string, zone string, futureReservation string) *FutureReservationsDeleteCall {
- c := &FutureReservationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.project = project
- c.zone = zone
- c.futureReservation = futureReservation
- return c
-}
-
-// RequestId sets the optional parameter "requestId": An optional request ID to
-// identify requests. Specify a unique request ID so that if you must retry
-// your request, the server will know to ignore the request if it has already
-// been completed. For example, consider a situation where you make an initial
-// request and the request times out. If you make the request again with the
-// same request ID, the server can check if original operation with the same
-// request ID was received, and if so, will ignore the second request. This
-// prevents clients from accidentally creating duplicate commitments. The
-// request ID must be a valid UUID with the exception that zero UUID is not
-// supported ( 00000000-0000-0000-0000-000000000000).
-func (c *FutureReservationsDeleteCall) RequestId(requestId string) *FutureReservationsDeleteCall {
- c.urlParams_.Set("requestId", requestId)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
-// details.
-func (c *FutureReservationsDeleteCall) Fields(s ...googleapi.Field) *FutureReservationsDeleteCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method.
-func (c *FutureReservationsDeleteCall) Context(ctx context.Context) *FutureReservationsDeleteCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns a http.Header that can be modified by the caller to add
-// headers to the request.
-func (c *FutureReservationsDeleteCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *FutureReservationsDeleteCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/futureReservations/{futureReservation}")
- urls += "?" + c.urlParams_.Encode()
- req, err := http.NewRequest("DELETE", urls, body)
- if err != nil {
- return nil, err
- }
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
- "zone": c.zone,
- "futureReservation": c.futureReservation,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "compute.futureReservations.delete" call.
-// Any non-2xx status code is an error. Response headers are in either
-// *Operation.ServerResponse.Header or (if a response was returned at all) in
-// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
-// whether the returned error was because http.StatusNotModified was returned.
-func (c *FutureReservationsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, gensupport.WrapError(&googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- })
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, gensupport.WrapError(err)
- }
- ret := &Operation{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-type FutureReservationsGetCall struct {
- s *Service
- project string
- zone string
- futureReservation string
- urlParams_ gensupport.URLParams
- ifNoneMatch_ string
- ctx_ context.Context
- header_ http.Header
-}
-
-// Get: Retrieves information about the specified future reservation.
-//
-// - futureReservation: Name of the future reservation to retrieve. Name should
-// conform to RFC1035.
-// - project: Project ID for this request.
-// - zone: Name of the zone for this request. Name should conform to RFC1035.
-func (r *FutureReservationsService) Get(project string, zone string, futureReservation string) *FutureReservationsGetCall {
- c := &FutureReservationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.project = project
- c.zone = zone
- c.futureReservation = futureReservation
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
-// details.
-func (c *FutureReservationsGetCall) Fields(s ...googleapi.Field) *FutureReservationsGetCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// IfNoneMatch sets an optional parameter which makes the operation fail if the
-// object's ETag matches the given value. This is useful for getting updates
-// only after the object has changed since the last request.
-func (c *FutureReservationsGetCall) IfNoneMatch(entityTag string) *FutureReservationsGetCall {
- c.ifNoneMatch_ = entityTag
- return c
-}
-
-// Context sets the context to be used in this call's Do method.
-func (c *FutureReservationsGetCall) Context(ctx context.Context) *FutureReservationsGetCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns a http.Header that can be modified by the caller to add
-// headers to the request.
-func (c *FutureReservationsGetCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *FutureReservationsGetCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
- if c.ifNoneMatch_ != "" {
- reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
- }
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/futureReservations/{futureReservation}")
- urls += "?" + c.urlParams_.Encode()
- req, err := http.NewRequest("GET", urls, body)
- if err != nil {
- return nil, err
- }
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
- "zone": c.zone,
- "futureReservation": c.futureReservation,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "compute.futureReservations.get" call.
-// Any non-2xx status code is an error. Response headers are in either
-// *FutureReservation.ServerResponse.Header or (if a response was returned at
-// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
-// check whether the returned error was because http.StatusNotModified was
-// returned.
-func (c *FutureReservationsGetCall) Do(opts ...googleapi.CallOption) (*FutureReservation, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, gensupport.WrapError(&googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- })
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, gensupport.WrapError(err)
- }
- ret := &FutureReservation{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-type FutureReservationsInsertCall struct {
- s *Service
- project string
- zone string
- futurereservation *FutureReservation
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
-}
-
-// Insert: Creates a new Future Reservation.
-//
-// - project: Project ID for this request.
-// - zone: Name of the zone for this request. Name should conform to RFC1035.
-func (r *FutureReservationsService) Insert(project string, zone string, futurereservation *FutureReservation) *FutureReservationsInsertCall {
- c := &FutureReservationsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.project = project
- c.zone = zone
- c.futurereservation = futurereservation
- return c
-}
-
-// RequestId sets the optional parameter "requestId": An optional request ID to
-// identify requests. Specify a unique request ID so that if you must retry
-// your request, the server will know to ignore the request if it has already
-// been completed. For example, consider a situation where you make an initial
-// request and the request times out. If you make the request again with the
-// same request ID, the server can check if original operation with the same
-// request ID was received, and if so, will ignore the second request. This
-// prevents clients from accidentally creating duplicate commitments. The
-// request ID must be a valid UUID with the exception that zero UUID is not
-// supported ( 00000000-0000-0000-0000-000000000000).
-func (c *FutureReservationsInsertCall) RequestId(requestId string) *FutureReservationsInsertCall {
- c.urlParams_.Set("requestId", requestId)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
-// details.
-func (c *FutureReservationsInsertCall) Fields(s ...googleapi.Field) *FutureReservationsInsertCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method.
-func (c *FutureReservationsInsertCall) Context(ctx context.Context) *FutureReservationsInsertCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns a http.Header that can be modified by the caller to add
-// headers to the request.
-func (c *FutureReservationsInsertCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *FutureReservationsInsertCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.futurereservation)
- if err != nil {
- return nil, err
- }
- c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/futureReservations")
- urls += "?" + c.urlParams_.Encode()
- req, err := http.NewRequest("POST", urls, body)
- if err != nil {
- return nil, err
- }
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
- "zone": c.zone,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "compute.futureReservations.insert" call.
-// Any non-2xx status code is an error. Response headers are in either
-// *Operation.ServerResponse.Header or (if a response was returned at all) in
-// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
-// whether the returned error was because http.StatusNotModified was returned.
-func (c *FutureReservationsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, gensupport.WrapError(&googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- })
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, gensupport.WrapError(err)
- }
- ret := &Operation{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-type FutureReservationsListCall struct {
- s *Service
- project string
- zone string
- urlParams_ gensupport.URLParams
- ifNoneMatch_ string
- ctx_ context.Context
- header_ http.Header
-}
-
-// List: A list of all the future reservations that have been configured for
-// the specified project in specified zone.
-//
-// - project: Project ID for this request.
-// - zone: Name of the zone for this request. Name should conform to RFC1035.
-func (r *FutureReservationsService) List(project string, zone string) *FutureReservationsListCall {
- c := &FutureReservationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.project = project
- c.zone = zone
- return c
-}
-
-// Filter sets the optional parameter "filter": A filter expression that
-// filters resources listed in the response. Most Compute resources support two
-// types of filter expressions: expressions that support regular expressions
-// and expressions that follow API improvement proposal AIP-160. These two
-// types of filter expressions cannot be mixed in one request. If you want to
-// use AIP-160, your expression must specify the field name, an operator, and
-// the value that you want to use for filtering. The value must be a string, a
-// number, or a boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`,
-// `>=` or `:`. For example, if you are filtering Compute Engine instances, you
-// can exclude instances named `example-instance` by specifying `name !=
-// example-instance`. The `:*` comparison can be used to test whether a key has
-// been defined. For example, to find all objects with `owner` label use: ```
-// labels.owner:* ``` You can also filter nested fields. For example, you could
-// specify `scheduling.automaticRestart = false` to include instances only if
-// they are not scheduled for automatic restarts. You can use filtering on
-// nested fields to filter based on resource labels. To filter on multiple
-// expressions, provide each separate expression within parentheses. For
-// example: ``` (scheduling.automaticRestart = true) (cpuPlatform = "Intel
-// Skylake") ``` By default, each expression is an `AND` expression. However,
-// you can include `AND` and `OR` expressions explicitly. For example: ```
-// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND
-// (scheduling.automaticRestart = true) ``` If you want to use a regular
-// expression, use the `eq` (equal) or `ne` (not equal) operator against a
-// single un-parenthesized expression with or without quotes or against
-// multiple parenthesized expressions. Examples: `fieldname eq unquoted
-// literal` `fieldname eq 'single quoted literal'` `fieldname eq "double quoted
-// literal" `(fieldname1 eq literal) (fieldname2 ne "literal")` The literal
-// value is interpreted as a regular expression using Google RE2 library
-// syntax. The literal value must match the entire field. For example, to
-// filter for instances that do not end with name "instance", you would use
-// `name ne .*instance`. You cannot combine constraints on multiple fields
-// using regular expressions.
-func (c *FutureReservationsListCall) Filter(filter string) *FutureReservationsListCall {
- c.urlParams_.Set("filter", filter)
- return c
-}
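
The filter grammar documented above accepts either AIP-160 expressions or `eq`/`ne` regular expressions, but never both in one request. A sketch of each style against the (pre-change) `List` call shown in this hunk:

```go
import (
	"context"

	compute "google.golang.org/api/compute/v1" // pre-change vendored surface
)

// listFilteredReservations uses the AIP-160 style; the two styles cannot be
// mixed in a single request, per the Filter comment above.
func listFilteredReservations(ctx context.Context, svc *compute.Service, project, zone string) (*compute.FutureReservationsListResponse, error) {
	return svc.FutureReservations.List(project, zone).
		// Exclude one name and require an `owner` label to be set.
		Filter(`name != "example-reservation" AND labels.owner:*`).
		// The regex style would instead look like: Filter(`name ne ".*-scratch"`)
		Context(ctx).
		Do()
}
```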
-
-// MaxResults sets the optional parameter "maxResults": The maximum number of
-// results per page that should be returned. If the number of available results
-// is larger than `maxResults`, Compute Engine returns a `nextPageToken` that
-// can be used to get the next page of results in subsequent list requests.
-// Acceptable values are `0` to `500`, inclusive. (Default: `500`)
-func (c *FutureReservationsListCall) MaxResults(maxResults int64) *FutureReservationsListCall {
- c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
- return c
-}
-
-// OrderBy sets the optional parameter "orderBy": Sorts list results by a
-// certain order. By default, results are returned in alphanumerical order
-// based on the resource name. You can also sort results in descending order
-// based on the creation timestamp using `orderBy="creationTimestamp desc".
-// This sorts results based on the `creationTimestamp` field in reverse
-// chronological order (newest result first). Use this to sort resources like
-// operations so that the newest operation is returned first. Currently, only
-// sorting by `name` or `creationTimestamp desc` is supported.
-func (c *FutureReservationsListCall) OrderBy(orderBy string) *FutureReservationsListCall {
- c.urlParams_.Set("orderBy", orderBy)
- return c
-}
-
-// PageToken sets the optional parameter "pageToken": Specifies a page token to
-// use. Set `pageToken` to the `nextPageToken` returned by a previous list
-// request to get the next page of results.
-func (c *FutureReservationsListCall) PageToken(pageToken string) *FutureReservationsListCall {
- c.urlParams_.Set("pageToken", pageToken)
- return c
-}
-
-// ReturnPartialSuccess sets the optional parameter "returnPartialSuccess":
-// Opt-in for partial success behavior which provides partial results in case
-// of failure. The default value is false. For example, when partial success
-// behavior is enabled, aggregatedList for a single zone scope either returns
-// all resources in the zone or no resources, with an error code.
-func (c *FutureReservationsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *FutureReservationsListCall {
- c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess))
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
-// details.
-func (c *FutureReservationsListCall) Fields(s ...googleapi.Field) *FutureReservationsListCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// IfNoneMatch sets an optional parameter which makes the operation fail if the
-// object's ETag matches the given value. This is useful for getting updates
-// only after the object has changed since the last request.
-func (c *FutureReservationsListCall) IfNoneMatch(entityTag string) *FutureReservationsListCall {
- c.ifNoneMatch_ = entityTag
- return c
-}
-
-// Context sets the context to be used in this call's Do method.
-func (c *FutureReservationsListCall) Context(ctx context.Context) *FutureReservationsListCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns a http.Header that can be modified by the caller to add
-// headers to the request.
-func (c *FutureReservationsListCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *FutureReservationsListCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
- if c.ifNoneMatch_ != "" {
- reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
- }
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/futureReservations")
- urls += "?" + c.urlParams_.Encode()
- req, err := http.NewRequest("GET", urls, body)
- if err != nil {
- return nil, err
- }
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
- "zone": c.zone,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "compute.futureReservations.list" call.
-// Any non-2xx status code is an error. Response headers are in either
-// *FutureReservationsListResponse.ServerResponse.Header or (if a response was
-// returned at all) in error.(*googleapi.Error).Header. Use
-// googleapi.IsNotModified to check whether the returned error was because
-// http.StatusNotModified was returned.
-func (c *FutureReservationsListCall) Do(opts ...googleapi.CallOption) (*FutureReservationsListResponse, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, gensupport.WrapError(&googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- })
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, gensupport.WrapError(err)
- }
- ret := &FutureReservationsListResponse{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// Pages invokes f for each page of results.
-// A non-nil error returned from f will halt the iteration.
-// The provided context supersedes any context provided to the Context method.
-func (c *FutureReservationsListCall) Pages(ctx context.Context, f func(*FutureReservationsListResponse) error) error {
- c.ctx_ = ctx
- defer c.PageToken(c.urlParams_.Get("pageToken"))
- for {
- x, err := c.Do()
- if err != nil {
- return err
- }
- if err := f(x); err != nil {
- return err
- }
- if x.NextPageToken == "" {
- return nil
- }
- c.PageToken(x.NextPageToken)
- }
-}
-
-type FutureReservationsUpdateCall struct {
- s *Service
- project string
- zone string
- futureReservation string
- futurereservation *FutureReservation
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
-}
-
-// Update: Updates the specified future reservation.
-//
-// - futureReservation: Name of the reservation to update. Name should conform
-// to RFC1035.
-// - project: Project ID for this request.
-// - zone: Name of the zone for this request. Name should conform to RFC1035.
-func (r *FutureReservationsService) Update(project string, zone string, futureReservation string, futurereservation *FutureReservation) *FutureReservationsUpdateCall {
- c := &FutureReservationsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.project = project
- c.zone = zone
- c.futureReservation = futureReservation
- c.futurereservation = futurereservation
- return c
-}
-
-// RequestId sets the optional parameter "requestId": An optional request ID to
-// identify requests. Specify a unique request ID so that if you must retry
-// your request, the server will know to ignore the request if it has already
-// been completed. For example, consider a situation where you make an initial
-// request and the request times out. If you make the request again with the
-// same request ID, the server can check if original operation with the same
-// request ID was received, and if so, will ignore the second request. This
-// prevents clients from accidentally creating duplicate commitments. The
-// request ID must be a valid UUID with the exception that zero UUID is not
-// supported ( 00000000-0000-0000-0000-000000000000).
-func (c *FutureReservationsUpdateCall) RequestId(requestId string) *FutureReservationsUpdateCall {
- c.urlParams_.Set("requestId", requestId)
- return c
-}
-
-// UpdateMask sets the optional parameter "updateMask": update_mask indicates
-// fields to be updated as part of this request.
-func (c *FutureReservationsUpdateCall) UpdateMask(updateMask string) *FutureReservationsUpdateCall {
- c.urlParams_.Set("updateMask", updateMask)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
-// details.
-func (c *FutureReservationsUpdateCall) Fields(s ...googleapi.Field) *FutureReservationsUpdateCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method.
-func (c *FutureReservationsUpdateCall) Context(ctx context.Context) *FutureReservationsUpdateCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns a http.Header that can be modified by the caller to add
-// headers to the request.
-func (c *FutureReservationsUpdateCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *FutureReservationsUpdateCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.futurereservation)
- if err != nil {
- return nil, err
- }
- c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/futureReservations/{futureReservation}")
- urls += "?" + c.urlParams_.Encode()
- req, err := http.NewRequest("PATCH", urls, body)
- if err != nil {
- return nil, err
- }
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
- "zone": c.zone,
- "futureReservation": c.futureReservation,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "compute.futureReservations.update" call.
-// Any non-2xx status code is an error. Response headers are in either
-// *Operation.ServerResponse.Header or (if a response was returned at all) in
-// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
-// whether the returned error was because http.StatusNotModified was returned.
-func (c *FutureReservationsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, gensupport.WrapError(&googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- })
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, gensupport.WrapError(err)
- }
- ret := &Operation{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-type GlobalAddressesDeleteCall struct {
- s *Service
- project string
- address string
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
-}
-
-// Delete: Deletes the specified address resource.
-//
-// - address: Name of the address resource to delete.
-// - project: Project ID for this request.
-func (r *GlobalAddressesService) Delete(project string, address string) *GlobalAddressesDeleteCall {
- c := &GlobalAddressesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.project = project
- c.address = address
- return c
-}
-
-// RequestId sets the optional parameter "requestId": An optional request ID to
-// identify requests. Specify a unique request ID so that if you must retry
-// your request, the server will know to ignore the request if it has already
-// been completed. For example, consider a situation where you make an initial
-// request and the request times out. If you make the request again with the
-// same request ID, the server can check if original operation with the same
-// request ID was received, and if so, will ignore the second request. This
-// prevents clients from accidentally creating duplicate commitments. The
-// request ID must be a valid UUID with the exception that zero UUID is not
-// supported ( 00000000-0000-0000-0000-000000000000).
-func (c *GlobalAddressesDeleteCall) RequestId(requestId string) *GlobalAddressesDeleteCall {
- c.urlParams_.Set("requestId", requestId)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
-// details.
-func (c *GlobalAddressesDeleteCall) Fields(s ...googleapi.Field) *GlobalAddressesDeleteCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method.
-func (c *GlobalAddressesDeleteCall) Context(ctx context.Context) *GlobalAddressesDeleteCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns a http.Header that can be modified by the caller to add
-// headers to the request.
-func (c *GlobalAddressesDeleteCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *GlobalAddressesDeleteCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/addresses/{address}")
- urls += "?" + c.urlParams_.Encode()
- req, err := http.NewRequest("DELETE", urls, body)
- if err != nil {
- return nil, err
- }
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
- "address": c.address,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "compute.globalAddresses.delete" call.
-// Any non-2xx status code is an error. Response headers are in either
-// *Operation.ServerResponse.Header or (if a response was returned at all) in
-// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
-// whether the returned error was because http.StatusNotModified was returned.
-func (c *GlobalAddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, gensupport.WrapError(&googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- })
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, gensupport.WrapError(err)
- }
- ret := &Operation{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-type GlobalAddressesGetCall struct {
- s *Service
- project string
- address string
- urlParams_ gensupport.URLParams
- ifNoneMatch_ string
- ctx_ context.Context
- header_ http.Header
-}
-
-// Get: Returns the specified address resource.
-//
-// - address: Name of the address resource to return.
-// - project: Project ID for this request.
-func (r *GlobalAddressesService) Get(project string, address string) *GlobalAddressesGetCall {
- c := &GlobalAddressesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.project = project
- c.address = address
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
-// details.
-func (c *GlobalAddressesGetCall) Fields(s ...googleapi.Field) *GlobalAddressesGetCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// IfNoneMatch sets an optional parameter which makes the operation fail if the
-// object's ETag matches the given value. This is useful for getting updates
-// only after the object has changed since the last request.
-func (c *GlobalAddressesGetCall) IfNoneMatch(entityTag string) *GlobalAddressesGetCall {
- c.ifNoneMatch_ = entityTag
- return c
-}
-
-// Context sets the context to be used in this call's Do method.
-func (c *GlobalAddressesGetCall) Context(ctx context.Context) *GlobalAddressesGetCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns a http.Header that can be modified by the caller to add
-// headers to the request.
-func (c *GlobalAddressesGetCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *GlobalAddressesGetCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
- if c.ifNoneMatch_ != "" {
- reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
- }
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/addresses/{address}")
- urls += "?" + c.urlParams_.Encode()
- req, err := http.NewRequest("GET", urls, body)
- if err != nil {
- return nil, err
- }
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
- "address": c.address,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "compute.globalAddresses.get" call.
-// Any non-2xx status code is an error. Response headers are in either
-// *Address.ServerResponse.Header or (if a response was returned at all) in
-// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
-// whether the returned error was because http.StatusNotModified was returned.
-func (c *GlobalAddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, gensupport.WrapError(&googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- })
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, gensupport.WrapError(err)
- }
- ret := &Address{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-type GlobalAddressesInsertCall struct {
- s *Service
- project string
- address *Address
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
-}
-
-// Insert: Creates an address resource in the specified project by using the
-// data included in the request.
-//
-// - project: Project ID for this request.
-func (r *GlobalAddressesService) Insert(project string, address *Address) *GlobalAddressesInsertCall {
- c := &GlobalAddressesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.project = project
- c.address = address
- return c
-}
-
-// RequestId sets the optional parameter "requestId": An optional request ID to
-// identify requests. Specify a unique request ID so that if you must retry
-// your request, the server will know to ignore the request if it has already
-// been completed. For example, consider a situation where you make an initial
-// request and the request times out. If you make the request again with the
-// same request ID, the server can check if original operation with the same
-// request ID was received, and if so, will ignore the second request. This
-// prevents clients from accidentally creating duplicate commitments. The
-// request ID must be a valid UUID with the exception that zero UUID is not
-// supported ( 00000000-0000-0000-0000-000000000000).
-func (c *GlobalAddressesInsertCall) RequestId(requestId string) *GlobalAddressesInsertCall {
- c.urlParams_.Set("requestId", requestId)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
-// details.
-func (c *GlobalAddressesInsertCall) Fields(s ...googleapi.Field) *GlobalAddressesInsertCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method.
-func (c *GlobalAddressesInsertCall) Context(ctx context.Context) *GlobalAddressesInsertCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns a http.Header that can be modified by the caller to add
-// headers to the request.
-func (c *GlobalAddressesInsertCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *GlobalAddressesInsertCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.address)
- if err != nil {
- return nil, err
- }
- c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/addresses")
- urls += "?" + c.urlParams_.Encode()
- req, err := http.NewRequest("POST", urls, body)
- if err != nil {
- return nil, err
- }
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "compute.globalAddresses.insert" call.
-// Any non-2xx status code is an error. Response headers are in either
-// *Operation.ServerResponse.Header or (if a response was returned at all) in
-// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
-// whether the returned error was because http.StatusNotModified was returned.
-func (c *GlobalAddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
+func (c *GlobalAddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
@@ -55959,28 +54910,382 @@ func (c *ProjectsMoveDiskCall) Do(opts ...googleapi.CallOption) (*Operation, err
return ret, nil
}
-type ProjectsMoveInstanceCall struct {
- s *Service
- project string
- instancemoverequest *InstanceMoveRequest
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
+type ProjectsMoveInstanceCall struct {
+ s *Service
+ project string
+ instancemoverequest *InstanceMoveRequest
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+ header_ http.Header
+}
+
+// MoveInstance: Moves an instance and its attached persistent disks from one
+// zone to another. *Note*: Moving VMs or disks by using this method might
+// cause unexpected behavior. For more information, see the known issue
+// (/compute/docs/troubleshooting/known-issues#moving_vms_or_disks_using_the_mov
+// einstance_api_or_the_causes_unexpected_behavior). [Deprecated] This method
+// is deprecated. See moving instance across zones
+// (/compute/docs/instances/moving-instance-across-zones) instead.
+//
+// - project: Project ID for this request.
+func (r *ProjectsService) MoveInstance(project string, instancemoverequest *InstanceMoveRequest) *ProjectsMoveInstanceCall {
+ c := &ProjectsMoveInstanceCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ c.instancemoverequest = instancemoverequest
+ return c
+}
+
+// RequestId sets the optional parameter "requestId": An optional request ID to
+// identify requests. Specify a unique request ID so that if you must retry
+// your request, the server will know to ignore the request if it has already
+// been completed. For example, consider a situation where you make an initial
+// request and the request times out. If you make the request again with the
+// same request ID, the server can check if original operation with the same
+// request ID was received, and if so, will ignore the second request. This
+// prevents clients from accidentally creating duplicate commitments. The
+// request ID must be a valid UUID with the exception that zero UUID is not
+// supported ( 00000000-0000-0000-0000-000000000000).
+func (c *ProjectsMoveInstanceCall) RequestId(requestId string) *ProjectsMoveInstanceCall {
+ c.urlParams_.Set("requestId", requestId)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
+// details.
+func (c *ProjectsMoveInstanceCall) Fields(s ...googleapi.Field) *ProjectsMoveInstanceCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do method.
+func (c *ProjectsMoveInstanceCall) Context(ctx context.Context) *ProjectsMoveInstanceCall {
+ c.ctx_ = ctx
+ return c
+}
+
+// Header returns a http.Header that can be modified by the caller to add
+// headers to the request.
+func (c *ProjectsMoveInstanceCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
+func (c *ProjectsMoveInstanceCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancemoverequest)
+ if err != nil {
+ return nil, err
+ }
+ c.urlParams_.Set("alt", alt)
+ c.urlParams_.Set("prettyPrint", "false")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/moveInstance")
+ urls += "?" + c.urlParams_.Encode()
+ req, err := http.NewRequest("POST", urls, body)
+ if err != nil {
+ return nil, err
+ }
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "compute.projects.moveInstance" call.
+// Any non-2xx status code is an error. Response headers are in either
+// *Operation.ServerResponse.Header or (if a response was returned at all) in
+// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
+// whether the returned error was because http.StatusNotModified was returned.
+func (c *ProjectsMoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, gensupport.WrapError(&googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ })
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, gensupport.WrapError(err)
+ }
+ ret := &Operation{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ target := &ret
+ if err := gensupport.DecodeResponse(target, res); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+type ProjectsSetCloudArmorTierCall struct {
+ s *Service
+ project string
+ projectssetcloudarmortierrequest *ProjectsSetCloudArmorTierRequest
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+ header_ http.Header
+}
+
+// SetCloudArmorTier: Sets the Cloud Armor tier of the project. To set
+// ENTERPRISE or above the billing account of the project must be subscribed to
+// Cloud Armor Enterprise. See Subscribing to Cloud Armor Enterprise for more
+// information.
+//
+// - project: Project ID for this request.
+func (r *ProjectsService) SetCloudArmorTier(project string, projectssetcloudarmortierrequest *ProjectsSetCloudArmorTierRequest) *ProjectsSetCloudArmorTierCall {
+ c := &ProjectsSetCloudArmorTierCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ c.projectssetcloudarmortierrequest = projectssetcloudarmortierrequest
+ return c
+}
+
+// RequestId sets the optional parameter "requestId": An optional request ID to
+// identify requests. Specify a unique request ID so that if you must retry
+// your request, the server will know to ignore the request if it has already
+// been completed. For example, consider a situation where you make an initial
+// request and the request times out. If you make the request again with the
+// same request ID, the server can check if original operation with the same
+// request ID was received, and if so, will ignore the second request. This
+// prevents clients from accidentally creating duplicate commitments. The
+// request ID must be a valid UUID with the exception that zero UUID is not
+// supported ( 00000000-0000-0000-0000-000000000000).
+func (c *ProjectsSetCloudArmorTierCall) RequestId(requestId string) *ProjectsSetCloudArmorTierCall {
+ c.urlParams_.Set("requestId", requestId)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
+// details.
+func (c *ProjectsSetCloudArmorTierCall) Fields(s ...googleapi.Field) *ProjectsSetCloudArmorTierCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do method.
+func (c *ProjectsSetCloudArmorTierCall) Context(ctx context.Context) *ProjectsSetCloudArmorTierCall {
+ c.ctx_ = ctx
+ return c
+}
+
+// Header returns a http.Header that can be modified by the caller to add
+// headers to the request.
+func (c *ProjectsSetCloudArmorTierCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
+func (c *ProjectsSetCloudArmorTierCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectssetcloudarmortierrequest)
+ if err != nil {
+ return nil, err
+ }
+ c.urlParams_.Set("alt", alt)
+ c.urlParams_.Set("prettyPrint", "false")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/setCloudArmorTier")
+ urls += "?" + c.urlParams_.Encode()
+ req, err := http.NewRequest("POST", urls, body)
+ if err != nil {
+ return nil, err
+ }
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "compute.projects.setCloudArmorTier" call.
+// Any non-2xx status code is an error. Response headers are in either
+// *Operation.ServerResponse.Header or (if a response was returned at all) in
+// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
+// whether the returned error was because http.StatusNotModified was returned.
+func (c *ProjectsSetCloudArmorTierCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, gensupport.WrapError(&googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ })
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, gensupport.WrapError(err)
+ }
+ ret := &Operation{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ target := &ret
+ if err := gensupport.DecodeResponse(target, res); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
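
// Illustrative sketch, not part of the generated file: a minimal caller for
// the new compute.projects.setCloudArmorTier method, reusing one requestId so
// a retried request is deduplicated server side as the comments above
// describe. The project ID, tier value, and UUID literal are assumptions.
//
//	package main
//
//	import (
//		"context"
//		"log"
//
//		compute "google.golang.org/api/compute/v1"
//	)
//
//	func main() {
//		ctx := context.Background()
//		svc, err := compute.NewService(ctx) // Application Default Credentials
//		if err != nil {
//			log.Fatal(err)
//		}
//		req := &compute.ProjectsSetCloudArmorTierRequest{CloudArmorTier: "CA_STANDARD"}
//		op, err := svc.Projects.SetCloudArmorTier("my-project", req).
//			RequestId("1b9d6bcd-bbfd-4b2d-9b5d-ab8dfbbd4bed"). // reuse this exact value on retry
//			Context(ctx).
//			Do()
//		if err != nil {
//			log.Fatal(err)
//		}
//		log.Printf("started operation %s", op.Name)
//	}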
+
+type ProjectsSetCommonInstanceMetadataCall struct {
+ s *Service
+ project string
+ metadata *Metadata
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+ header_ http.Header
+}
+
+// SetCommonInstanceMetadata: Sets metadata common to all instances within the
+// specified project using the data included in the request.
+//
+// - project: Project ID for this request.
+func (r *ProjectsService) SetCommonInstanceMetadata(project string, metadata *Metadata) *ProjectsSetCommonInstanceMetadataCall {
+ c := &ProjectsSetCommonInstanceMetadataCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ c.metadata = metadata
+ return c
+}
+
+// RequestId sets the optional parameter "requestId": An optional request ID to
+// identify requests. Specify a unique request ID so that if you must retry
+// your request, the server will know to ignore the request if it has already
+// been completed. For example, consider a situation where you make an initial
+// request and the request times out. If you make the request again with the
+// same request ID, the server can check if original operation with the same
+// request ID was received, and if so, will ignore the second request. This
+// prevents clients from accidentally creating duplicate commitments. The
+// request ID must be a valid UUID with the exception that zero UUID is not
+// supported ( 00000000-0000-0000-0000-000000000000).
+func (c *ProjectsSetCommonInstanceMetadataCall) RequestId(requestId string) *ProjectsSetCommonInstanceMetadataCall {
+ c.urlParams_.Set("requestId", requestId)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
+// details.
+func (c *ProjectsSetCommonInstanceMetadataCall) Fields(s ...googleapi.Field) *ProjectsSetCommonInstanceMetadataCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do method.
+func (c *ProjectsSetCommonInstanceMetadataCall) Context(ctx context.Context) *ProjectsSetCommonInstanceMetadataCall {
+ c.ctx_ = ctx
+ return c
+}
+
+// Header returns a http.Header that can be modified by the caller to add
+// headers to the request.
+func (c *ProjectsSetCommonInstanceMetadataCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
+func (c *ProjectsSetCommonInstanceMetadataCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.metadata)
+ if err != nil {
+ return nil, err
+ }
+ c.urlParams_.Set("alt", alt)
+ c.urlParams_.Set("prettyPrint", "false")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/setCommonInstanceMetadata")
+ urls += "?" + c.urlParams_.Encode()
+ req, err := http.NewRequest("POST", urls, body)
+ if err != nil {
+ return nil, err
+ }
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "compute.projects.setCommonInstanceMetadata" call.
+// Any non-2xx status code is an error. Response headers are in either
+// *Operation.ServerResponse.Header or (if a response was returned at all) in
+// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
+// whether the returned error was because http.StatusNotModified was returned.
+func (c *ProjectsSetCommonInstanceMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, gensupport.WrapError(&googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ })
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, gensupport.WrapError(err)
+ }
+ ret := &Operation{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ target := &ret
+ if err := gensupport.DecodeResponse(target, res); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
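
// Illustrative sketch under the same assumptions as the previous example
// (svc is a *compute.Service): setting project-wide instance metadata and
// requesting a partial Operation via Fields, per the doc comments above. The
// key/value pair is made up; real callers typically Get the project first and
// carry over Metadata.Fingerprint to avoid conflicting concurrent updates.
//
//	func setCommonMetadata(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
//		env := "staging"
//		md := &compute.Metadata{
//			Items: []*compute.MetadataItems{{Key: "environment", Value: &env}},
//		}
//		return svc.Projects.SetCommonInstanceMetadata("my-project", md).
//			Fields("name", "status"). // only these Operation fields in the response
//			Context(ctx).
//			Do()
//	}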
+
+type ProjectsSetDefaultNetworkTierCall struct {
+ s *Service
+ project string
+ projectssetdefaultnetworktierrequest *ProjectsSetDefaultNetworkTierRequest
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+ header_ http.Header
}
-// MoveInstance: Moves an instance and its attached persistent disks from one
-// zone to another. *Note*: Moving VMs or disks by using this method might
-// cause unexpected behavior. For more information, see the known issue
-// (/compute/docs/troubleshooting/known-issues#moving_vms_or_disks_using_the_mov
-// einstance_api_or_the_causes_unexpected_behavior). [Deprecated] This method
-// is deprecated. See moving instance across zones
-// (/compute/docs/instances/moving-instance-across-zones) instead.
+// SetDefaultNetworkTier: Sets the default network tier of the project. The
+// default network tier is used when an address/forwardingRule/instance is
+// created without specifying the network tier field.
//
// - project: Project ID for this request.
-func (r *ProjectsService) MoveInstance(project string, instancemoverequest *InstanceMoveRequest) *ProjectsMoveInstanceCall {
- c := &ProjectsMoveInstanceCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+func (r *ProjectsService) SetDefaultNetworkTier(project string, projectssetdefaultnetworktierrequest *ProjectsSetDefaultNetworkTierRequest) *ProjectsSetDefaultNetworkTierCall {
+ c := &ProjectsSetDefaultNetworkTierCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.project = project
- c.instancemoverequest = instancemoverequest
+ c.projectssetdefaultnetworktierrequest = projectssetdefaultnetworktierrequest
return c
}
@@ -55994,7 +55299,7 @@ func (r *ProjectsService) MoveInstance(project string, instancemoverequest *Inst
// prevents clients from accidentally creating duplicate commitments. The
// request ID must be a valid UUID with the exception that zero UUID is not
// supported ( 00000000-0000-0000-0000-000000000000).
-func (c *ProjectsMoveInstanceCall) RequestId(requestId string) *ProjectsMoveInstanceCall {
+func (c *ProjectsSetDefaultNetworkTierCall) RequestId(requestId string) *ProjectsSetDefaultNetworkTierCall {
c.urlParams_.Set("requestId", requestId)
return c
}
@@ -56002,36 +55307,36 @@ func (c *ProjectsMoveInstanceCall) RequestId(requestId string) *ProjectsMoveInst
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
// details.
-func (c *ProjectsMoveInstanceCall) Fields(s ...googleapi.Field) *ProjectsMoveInstanceCall {
+func (c *ProjectsSetDefaultNetworkTierCall) Fields(s ...googleapi.Field) *ProjectsSetDefaultNetworkTierCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method.
-func (c *ProjectsMoveInstanceCall) Context(ctx context.Context) *ProjectsMoveInstanceCall {
+func (c *ProjectsSetDefaultNetworkTierCall) Context(ctx context.Context) *ProjectsSetDefaultNetworkTierCall {
c.ctx_ = ctx
return c
}
// Header returns a http.Header that can be modified by the caller to add
// headers to the request.
-func (c *ProjectsMoveInstanceCall) Header() http.Header {
+func (c *ProjectsSetDefaultNetworkTierCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
-func (c *ProjectsMoveInstanceCall) doRequest(alt string) (*http.Response, error) {
+func (c *ProjectsSetDefaultNetworkTierCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancemoverequest)
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectssetdefaultnetworktierrequest)
if err != nil {
return nil, err
}
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/moveInstance")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/setDefaultNetworkTier")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
@@ -56044,12 +55349,12 @@ func (c *ProjectsMoveInstanceCall) doRequest(alt string) (*http.Response, error)
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
-// Do executes the "compute.projects.moveInstance" call.
+// Do executes the "compute.projects.setDefaultNetworkTier" call.
// Any non-2xx status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at all) in
// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
// whether the returned error was because http.StatusNotModified was returned.
-func (c *ProjectsMoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
+func (c *ProjectsSetDefaultNetworkTierCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
@@ -56081,25 +55386,24 @@ func (c *ProjectsMoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation,
return ret, nil
}
-type ProjectsSetCloudArmorTierCall struct {
- s *Service
- project string
- projectssetcloudarmortierrequest *ProjectsSetCloudArmorTierRequest
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
+type ProjectsSetUsageExportBucketCall struct {
+ s *Service
+ project string
+ usageexportlocation *UsageExportLocation
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+ header_ http.Header
}
-// SetCloudArmorTier: Sets the Cloud Armor tier of the project. To set
-// ENTERPRISE or above the billing account of the project must be subscribed to
-// Cloud Armor Enterprise. See Subscribing to Cloud Armor Enterprise for more
-// information.
+// SetUsageExportBucket: Enables the usage export feature and sets the usage
+// export bucket where reports are stored. If you provide an empty request body
+// using this method, the usage export feature will be disabled.
//
// - project: Project ID for this request.
-func (r *ProjectsService) SetCloudArmorTier(project string, projectssetcloudarmortierrequest *ProjectsSetCloudArmorTierRequest) *ProjectsSetCloudArmorTierCall {
- c := &ProjectsSetCloudArmorTierCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+func (r *ProjectsService) SetUsageExportBucket(project string, usageexportlocation *UsageExportLocation) *ProjectsSetUsageExportBucketCall {
+ c := &ProjectsSetUsageExportBucketCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.project = project
- c.projectssetcloudarmortierrequest = projectssetcloudarmortierrequest
+ c.usageexportlocation = usageexportlocation
return c
}
@@ -56113,7 +55417,7 @@ func (r *ProjectsService) SetCloudArmorTier(project string, projectssetcloudarmo
// prevents clients from accidentally creating duplicate commitments. The
// request ID must be a valid UUID with the exception that zero UUID is not
// supported ( 00000000-0000-0000-0000-000000000000).
-func (c *ProjectsSetCloudArmorTierCall) RequestId(requestId string) *ProjectsSetCloudArmorTierCall {
+func (c *ProjectsSetUsageExportBucketCall) RequestId(requestId string) *ProjectsSetUsageExportBucketCall {
c.urlParams_.Set("requestId", requestId)
return c
}
@@ -56121,36 +55425,36 @@ func (c *ProjectsSetCloudArmorTierCall) RequestId(requestId string) *ProjectsSet
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
// details.
-func (c *ProjectsSetCloudArmorTierCall) Fields(s ...googleapi.Field) *ProjectsSetCloudArmorTierCall {
+func (c *ProjectsSetUsageExportBucketCall) Fields(s ...googleapi.Field) *ProjectsSetUsageExportBucketCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method.
-func (c *ProjectsSetCloudArmorTierCall) Context(ctx context.Context) *ProjectsSetCloudArmorTierCall {
+func (c *ProjectsSetUsageExportBucketCall) Context(ctx context.Context) *ProjectsSetUsageExportBucketCall {
c.ctx_ = ctx
return c
}
// Header returns a http.Header that can be modified by the caller to add
// headers to the request.
-func (c *ProjectsSetCloudArmorTierCall) Header() http.Header {
+func (c *ProjectsSetUsageExportBucketCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
-func (c *ProjectsSetCloudArmorTierCall) doRequest(alt string) (*http.Response, error) {
+func (c *ProjectsSetUsageExportBucketCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectssetcloudarmortierrequest)
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.usageexportlocation)
if err != nil {
return nil, err
}
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/setCloudArmorTier")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/setUsageExportBucket")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
@@ -56163,12 +55467,12 @@ func (c *ProjectsSetCloudArmorTierCall) doRequest(alt string) (*http.Response, e
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
-// Do executes the "compute.projects.setCloudArmorTier" call.
+// Do executes the "compute.projects.setUsageExportBucket" call.
// Any non-2xx status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at all) in
// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
// whether the returned error was because http.StatusNotModified was returned.
-func (c *ProjectsSetCloudArmorTierCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
+func (c *ProjectsSetUsageExportBucketCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
@@ -56200,23 +55504,24 @@ func (c *ProjectsSetCloudArmorTierCall) Do(opts ...googleapi.CallOption) (*Opera
return ret, nil
}
-type ProjectsSetCommonInstanceMetadataCall struct {
- s *Service
- project string
- metadata *Metadata
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
+type PublicAdvertisedPrefixesAnnounceCall struct {
+ s *Service
+ project string
+ publicAdvertisedPrefix string
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+ header_ http.Header
}
-// SetCommonInstanceMetadata: Sets metadata common to all instances within the
-// specified project using the data included in the request.
+// Announce: Announces the specified PublicAdvertisedPrefix
//
-// - project: Project ID for this request.
-func (r *ProjectsService) SetCommonInstanceMetadata(project string, metadata *Metadata) *ProjectsSetCommonInstanceMetadataCall {
- c := &ProjectsSetCommonInstanceMetadataCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+// - project: Project ID for this request.
+// - publicAdvertisedPrefix: The name of the public advertised prefix. It
+// should comply with RFC1035.
+func (r *PublicAdvertisedPrefixesService) Announce(project string, publicAdvertisedPrefix string) *PublicAdvertisedPrefixesAnnounceCall {
+ c := &PublicAdvertisedPrefixesAnnounceCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.project = project
- c.metadata = metadata
+ c.publicAdvertisedPrefix = publicAdvertisedPrefix
return c
}
@@ -56230,7 +55535,7 @@ func (r *ProjectsService) SetCommonInstanceMetadata(project string, metadata *Me
// prevents clients from accidentally creating duplicate commitments. The
// request ID must be a valid UUID with the exception that zero UUID is not
// supported ( 00000000-0000-0000-0000-000000000000).
-func (c *ProjectsSetCommonInstanceMetadataCall) RequestId(requestId string) *ProjectsSetCommonInstanceMetadataCall {
+func (c *PublicAdvertisedPrefixesAnnounceCall) RequestId(requestId string) *PublicAdvertisedPrefixesAnnounceCall {
c.urlParams_.Set("requestId", requestId)
return c
}
@@ -56238,36 +55543,32 @@ func (c *ProjectsSetCommonInstanceMetadataCall) RequestId(requestId string) *Pro
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
// details.
-func (c *ProjectsSetCommonInstanceMetadataCall) Fields(s ...googleapi.Field) *ProjectsSetCommonInstanceMetadataCall {
+func (c *PublicAdvertisedPrefixesAnnounceCall) Fields(s ...googleapi.Field) *PublicAdvertisedPrefixesAnnounceCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method.
-func (c *ProjectsSetCommonInstanceMetadataCall) Context(ctx context.Context) *ProjectsSetCommonInstanceMetadataCall {
+func (c *PublicAdvertisedPrefixesAnnounceCall) Context(ctx context.Context) *PublicAdvertisedPrefixesAnnounceCall {
c.ctx_ = ctx
return c
}
// Header returns a http.Header that can be modified by the caller to add
// headers to the request.
-func (c *ProjectsSetCommonInstanceMetadataCall) Header() http.Header {
+func (c *PublicAdvertisedPrefixesAnnounceCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
-func (c *ProjectsSetCommonInstanceMetadataCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
+func (c *PublicAdvertisedPrefixesAnnounceCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.metadata)
- if err != nil {
- return nil, err
- }
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/setCommonInstanceMetadata")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}/announce")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
@@ -56275,17 +55576,18 @@ func (c *ProjectsSetCommonInstanceMetadataCall) doRequest(alt string) (*http.Res
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
+ "project": c.project,
+ "publicAdvertisedPrefix": c.publicAdvertisedPrefix,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
-// Do executes the "compute.projects.setCommonInstanceMetadata" call.
+// Do executes the "compute.publicAdvertisedPrefixes.announce" call.
// Any non-2xx status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at all) in
// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
// whether the returned error was because http.StatusNotModified was returned.
-func (c *ProjectsSetCommonInstanceMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
+func (c *PublicAdvertisedPrefixesAnnounceCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
@@ -56317,24 +55619,24 @@ func (c *ProjectsSetCommonInstanceMetadataCall) Do(opts ...googleapi.CallOption)
return ret, nil
}
-type ProjectsSetDefaultNetworkTierCall struct {
- s *Service
- project string
- projectssetdefaultnetworktierrequest *ProjectsSetDefaultNetworkTierRequest
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
+type PublicAdvertisedPrefixesDeleteCall struct {
+ s *Service
+ project string
+ publicAdvertisedPrefix string
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+ header_ http.Header
}
-// SetDefaultNetworkTier: Sets the default network tier of the project. The
-// default network tier is used when an address/forwardingRule/instance is
-// created without specifying the network tier field.
+// Delete: Deletes the specified PublicAdvertisedPrefix
//
-// - project: Project ID for this request.
-func (r *ProjectsService) SetDefaultNetworkTier(project string, projectssetdefaultnetworktierrequest *ProjectsSetDefaultNetworkTierRequest) *ProjectsSetDefaultNetworkTierCall {
- c := &ProjectsSetDefaultNetworkTierCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+// - project: Project ID for this request.
+// - publicAdvertisedPrefix: Name of the PublicAdvertisedPrefix resource to
+// delete.
+func (r *PublicAdvertisedPrefixesService) Delete(project string, publicAdvertisedPrefix string) *PublicAdvertisedPrefixesDeleteCall {
+ c := &PublicAdvertisedPrefixesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.project = project
- c.projectssetdefaultnetworktierrequest = projectssetdefaultnetworktierrequest
+ c.publicAdvertisedPrefix = publicAdvertisedPrefix
return c
}
@@ -56348,7 +55650,7 @@ func (r *ProjectsService) SetDefaultNetworkTier(project string, projectssetdefau
// prevents clients from accidentally creating duplicate commitments. The
// request ID must be a valid UUID with the exception that zero UUID is not
// supported ( 00000000-0000-0000-0000-000000000000).
-func (c *ProjectsSetDefaultNetworkTierCall) RequestId(requestId string) *ProjectsSetDefaultNetworkTierCall {
+func (c *PublicAdvertisedPrefixesDeleteCall) RequestId(requestId string) *PublicAdvertisedPrefixesDeleteCall {
c.urlParams_.Set("requestId", requestId)
return c
}
@@ -56356,54 +55658,51 @@ func (c *ProjectsSetDefaultNetworkTierCall) RequestId(requestId string) *Project
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
// details.
-func (c *ProjectsSetDefaultNetworkTierCall) Fields(s ...googleapi.Field) *ProjectsSetDefaultNetworkTierCall {
+func (c *PublicAdvertisedPrefixesDeleteCall) Fields(s ...googleapi.Field) *PublicAdvertisedPrefixesDeleteCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method.
-func (c *ProjectsSetDefaultNetworkTierCall) Context(ctx context.Context) *ProjectsSetDefaultNetworkTierCall {
+func (c *PublicAdvertisedPrefixesDeleteCall) Context(ctx context.Context) *PublicAdvertisedPrefixesDeleteCall {
c.ctx_ = ctx
return c
}
// Header returns a http.Header that can be modified by the caller to add
// headers to the request.
-func (c *ProjectsSetDefaultNetworkTierCall) Header() http.Header {
+func (c *PublicAdvertisedPrefixesDeleteCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
-func (c *ProjectsSetDefaultNetworkTierCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
+func (c *PublicAdvertisedPrefixesDeleteCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.projectssetdefaultnetworktierrequest)
- if err != nil {
- return nil, err
- }
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/setDefaultNetworkTier")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}")
urls += "?" + c.urlParams_.Encode()
- req, err := http.NewRequest("POST", urls, body)
+ req, err := http.NewRequest("DELETE", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
+ "project": c.project,
+ "publicAdvertisedPrefix": c.publicAdvertisedPrefix,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
-// Do executes the "compute.projects.setDefaultNetworkTier" call.
+// Do executes the "compute.publicAdvertisedPrefixes.delete" call.
// Any non-2xx status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at all) in
// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
// whether the returned error was because http.StatusNotModified was returned.
-func (c *ProjectsSetDefaultNetworkTierCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
+func (c *PublicAdvertisedPrefixesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
@@ -56435,93 +55734,88 @@ func (c *ProjectsSetDefaultNetworkTierCall) Do(opts ...googleapi.CallOption) (*O
return ret, nil
}
-type ProjectsSetUsageExportBucketCall struct {
- s *Service
- project string
- usageexportlocation *UsageExportLocation
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
+type PublicAdvertisedPrefixesGetCall struct {
+ s *Service
+ project string
+ publicAdvertisedPrefix string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+ header_ http.Header
}
-// SetUsageExportBucket: Enables the usage export feature and sets the usage
-// export bucket where reports are stored. If you provide an empty request body
-// using this method, the usage export feature will be disabled.
+// Get: Returns the specified PublicAdvertisedPrefix resource.
//
-// - project: Project ID for this request.
-func (r *ProjectsService) SetUsageExportBucket(project string, usageexportlocation *UsageExportLocation) *ProjectsSetUsageExportBucketCall {
- c := &ProjectsSetUsageExportBucketCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+// - project: Project ID for this request.
+// - publicAdvertisedPrefix: Name of the PublicAdvertisedPrefix resource to
+// return.
+func (r *PublicAdvertisedPrefixesService) Get(project string, publicAdvertisedPrefix string) *PublicAdvertisedPrefixesGetCall {
+ c := &PublicAdvertisedPrefixesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.project = project
- c.usageexportlocation = usageexportlocation
- return c
-}
-
-// RequestId sets the optional parameter "requestId": An optional request ID to
-// identify requests. Specify a unique request ID so that if you must retry
-// your request, the server will know to ignore the request if it has already
-// been completed. For example, consider a situation where you make an initial
-// request and the request times out. If you make the request again with the
-// same request ID, the server can check if original operation with the same
-// request ID was received, and if so, will ignore the second request. This
-// prevents clients from accidentally creating duplicate commitments. The
-// request ID must be a valid UUID with the exception that zero UUID is not
-// supported ( 00000000-0000-0000-0000-000000000000).
-func (c *ProjectsSetUsageExportBucketCall) RequestId(requestId string) *ProjectsSetUsageExportBucketCall {
- c.urlParams_.Set("requestId", requestId)
+ c.publicAdvertisedPrefix = publicAdvertisedPrefix
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
// details.
-func (c *ProjectsSetUsageExportBucketCall) Fields(s ...googleapi.Field) *ProjectsSetUsageExportBucketCall {
+func (c *PublicAdvertisedPrefixesGetCall) Fields(s ...googleapi.Field) *PublicAdvertisedPrefixesGetCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
+// IfNoneMatch sets an optional parameter which makes the operation fail if the
+// object's ETag matches the given value. This is useful for getting updates
+// only after the object has changed since the last request.
+func (c *PublicAdvertisedPrefixesGetCall) IfNoneMatch(entityTag string) *PublicAdvertisedPrefixesGetCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
// Context sets the context to be used in this call's Do method.
-func (c *ProjectsSetUsageExportBucketCall) Context(ctx context.Context) *ProjectsSetUsageExportBucketCall {
+func (c *PublicAdvertisedPrefixesGetCall) Context(ctx context.Context) *PublicAdvertisedPrefixesGetCall {
c.ctx_ = ctx
return c
}
// Header returns a http.Header that can be modified by the caller to add
// headers to the request.
-func (c *ProjectsSetUsageExportBucketCall) Header() http.Header {
+func (c *PublicAdvertisedPrefixesGetCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
-func (c *ProjectsSetUsageExportBucketCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.usageexportlocation)
- if err != nil {
- return nil, err
+func (c *PublicAdvertisedPrefixesGetCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
+ if c.ifNoneMatch_ != "" {
+ reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
+ var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/setUsageExportBucket")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}")
urls += "?" + c.urlParams_.Encode()
- req, err := http.NewRequest("POST", urls, body)
+ req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
+ "project": c.project,
+ "publicAdvertisedPrefix": c.publicAdvertisedPrefix,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
-// Do executes the "compute.projects.setUsageExportBucket" call.
+// Do executes the "compute.publicAdvertisedPrefixes.get" call.
// Any non-2xx status code is an error. Response headers are in either
-// *Operation.ServerResponse.Header or (if a response was returned at all) in
-// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
-// whether the returned error was because http.StatusNotModified was returned.
-func (c *ProjectsSetUsageExportBucketCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
+// *PublicAdvertisedPrefix.ServerResponse.Header or (if a response was returned
+// at all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified was
+// returned.
+func (c *PublicAdvertisedPrefixesGetCall) Do(opts ...googleapi.CallOption) (*PublicAdvertisedPrefix, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
@@ -56540,7 +55834,7 @@ func (c *ProjectsSetUsageExportBucketCall) Do(opts ...googleapi.CallOption) (*Op
if err := googleapi.CheckResponse(res); err != nil {
return nil, gensupport.WrapError(err)
}
- ret := &Operation{
+ ret := &PublicAdvertisedPrefix{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
@@ -56553,24 +55847,23 @@ func (c *ProjectsSetUsageExportBucketCall) Do(opts ...googleapi.CallOption) (*Op
return ret, nil
}
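
// Illustrative sketch (same assumptions as the earlier examples): a
// conditional read with IfNoneMatch. When the stored ETag still matches, Do
// returns an error that googleapi.IsNotModified reports as a 304, so the
// cached copy can be reused. Needs "google.golang.org/api/googleapi".
//
//	func getPrefixIfChanged(ctx context.Context, svc *compute.Service, etag string) (*compute.PublicAdvertisedPrefix, error) {
//		pap, err := svc.PublicAdvertisedPrefixes.Get("my-project", "my-prefix").
//			IfNoneMatch(etag).
//			Context(ctx).
//			Do()
//		if googleapi.IsNotModified(err) {
//			return nil, nil // unchanged since the last fetch; keep the cached value
//		}
//		return pap, err
//	}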
-type PublicAdvertisedPrefixesAnnounceCall struct {
+type PublicAdvertisedPrefixesInsertCall struct {
s *Service
project string
- publicAdvertisedPrefix string
+ publicadvertisedprefix *PublicAdvertisedPrefix
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
-// Announce: Announces the specified PublicAdvertisedPrefix
+// Insert: Creates a PublicAdvertisedPrefix in the specified project using the
+// parameters that are included in the request.
//
-// - project: Project ID for this request.
-// - publicAdvertisedPrefix: The name of the public advertised prefix. It
-// should comply with RFC1035.
-func (r *PublicAdvertisedPrefixesService) Announce(project string, publicAdvertisedPrefix string) *PublicAdvertisedPrefixesAnnounceCall {
- c := &PublicAdvertisedPrefixesAnnounceCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+// - project: Project ID for this request.
+func (r *PublicAdvertisedPrefixesService) Insert(project string, publicadvertisedprefix *PublicAdvertisedPrefix) *PublicAdvertisedPrefixesInsertCall {
+ c := &PublicAdvertisedPrefixesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.project = project
- c.publicAdvertisedPrefix = publicAdvertisedPrefix
+ c.publicadvertisedprefix = publicadvertisedprefix
return c
}
@@ -56584,7 +55877,7 @@ func (r *PublicAdvertisedPrefixesService) Announce(project string, publicAdverti
// prevents clients from accidentally creating duplicate commitments. The
// request ID must be a valid UUID with the exception that zero UUID is not
// supported ( 00000000-0000-0000-0000-000000000000).
-func (c *PublicAdvertisedPrefixesAnnounceCall) RequestId(requestId string) *PublicAdvertisedPrefixesAnnounceCall {
+func (c *PublicAdvertisedPrefixesInsertCall) RequestId(requestId string) *PublicAdvertisedPrefixesInsertCall {
c.urlParams_.Set("requestId", requestId)
return c
}
@@ -56592,32 +55885,36 @@ func (c *PublicAdvertisedPrefixesAnnounceCall) RequestId(requestId string) *Publ
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
// details.
-func (c *PublicAdvertisedPrefixesAnnounceCall) Fields(s ...googleapi.Field) *PublicAdvertisedPrefixesAnnounceCall {
+func (c *PublicAdvertisedPrefixesInsertCall) Fields(s ...googleapi.Field) *PublicAdvertisedPrefixesInsertCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method.
-func (c *PublicAdvertisedPrefixesAnnounceCall) Context(ctx context.Context) *PublicAdvertisedPrefixesAnnounceCall {
+func (c *PublicAdvertisedPrefixesInsertCall) Context(ctx context.Context) *PublicAdvertisedPrefixesInsertCall {
c.ctx_ = ctx
return c
}
// Header returns a http.Header that can be modified by the caller to add
// headers to the request.
-func (c *PublicAdvertisedPrefixesAnnounceCall) Header() http.Header {
+func (c *PublicAdvertisedPrefixesInsertCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
-func (c *PublicAdvertisedPrefixesAnnounceCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
+func (c *PublicAdvertisedPrefixesInsertCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.publicadvertisedprefix)
+ if err != nil {
+ return nil, err
+ }
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}/announce")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicAdvertisedPrefixes")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
@@ -56625,18 +55922,17 @@ func (c *PublicAdvertisedPrefixesAnnounceCall) doRequest(alt string) (*http.Resp
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
- "publicAdvertisedPrefix": c.publicAdvertisedPrefix,
+ "project": c.project,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
-// Do executes the "compute.publicAdvertisedPrefixes.announce" call.
+// Do executes the "compute.publicAdvertisedPrefixes.insert" call.
// Any non-2xx status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at all) in
// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
// whether the returned error was because http.StatusNotModified was returned.
-func (c *PublicAdvertisedPrefixesAnnounceCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
+func (c *PublicAdvertisedPrefixesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
@@ -56668,90 +55964,161 @@ func (c *PublicAdvertisedPrefixesAnnounceCall) Do(opts ...googleapi.CallOption)
return ret, nil
}
-type PublicAdvertisedPrefixesDeleteCall struct {
- s *Service
- project string
- publicAdvertisedPrefix string
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
+type PublicAdvertisedPrefixesListCall struct {
+ s *Service
+ project string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+ header_ http.Header
}
-// Delete: Deletes the specified PublicAdvertisedPrefix
+// List: Lists the PublicAdvertisedPrefixes for a project.
//
-// - project: Project ID for this request.
-// - publicAdvertisedPrefix: Name of the PublicAdvertisedPrefix resource to
-// delete.
-func (r *PublicAdvertisedPrefixesService) Delete(project string, publicAdvertisedPrefix string) *PublicAdvertisedPrefixesDeleteCall {
- c := &PublicAdvertisedPrefixesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+// - project: Project ID for this request.
+func (r *PublicAdvertisedPrefixesService) List(project string) *PublicAdvertisedPrefixesListCall {
+ c := &PublicAdvertisedPrefixesListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.project = project
- c.publicAdvertisedPrefix = publicAdvertisedPrefix
return c
}
-// RequestId sets the optional parameter "requestId": An optional request ID to
-// identify requests. Specify a unique request ID so that if you must retry
-// your request, the server will know to ignore the request if it has already
-// been completed. For example, consider a situation where you make an initial
-// request and the request times out. If you make the request again with the
-// same request ID, the server can check if original operation with the same
-// request ID was received, and if so, will ignore the second request. This
-// prevents clients from accidentally creating duplicate commitments. The
-// request ID must be a valid UUID with the exception that zero UUID is not
-// supported ( 00000000-0000-0000-0000-000000000000).
-func (c *PublicAdvertisedPrefixesDeleteCall) RequestId(requestId string) *PublicAdvertisedPrefixesDeleteCall {
- c.urlParams_.Set("requestId", requestId)
+// Filter sets the optional parameter "filter": A filter expression that
+// filters resources listed in the response. Most Compute resources support two
+// types of filter expressions: expressions that support regular expressions
+// and expressions that follow API improvement proposal AIP-160. These two
+// types of filter expressions cannot be mixed in one request. If you want to
+// use AIP-160, your expression must specify the field name, an operator, and
+// the value that you want to use for filtering. The value must be a string, a
+// number, or a boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`,
+// `>=` or `:`. For example, if you are filtering Compute Engine instances, you
+// can exclude instances named `example-instance` by specifying `name !=
+// example-instance`. The `:*` comparison can be used to test whether a key has
+// been defined. For example, to find all objects with `owner` label use: ```
+// labels.owner:* ``` You can also filter nested fields. For example, you could
+// specify `scheduling.automaticRestart = false` to include instances only if
+// they are not scheduled for automatic restarts. You can use filtering on
+// nested fields to filter based on resource labels. To filter on multiple
+// expressions, provide each separate expression within parentheses. For
+// example: ``` (scheduling.automaticRestart = true) (cpuPlatform = "Intel
+// Skylake") ``` By default, each expression is an `AND` expression. However,
+// you can include `AND` and `OR` expressions explicitly. For example: ```
+// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND
+// (scheduling.automaticRestart = true) ``` If you want to use a regular
+// expression, use the `eq` (equal) or `ne` (not equal) operator against a
+// single un-parenthesized expression with or without quotes or against
+// multiple parenthesized expressions. Examples: `fieldname eq unquoted
+// literal` `fieldname eq 'single quoted literal'` `fieldname eq "double quoted
+// literal" `(fieldname1 eq literal) (fieldname2 ne "literal")` The literal
+// value is interpreted as a regular expression using Google RE2 library
+// syntax. The literal value must match the entire field. For example, to
+// filter for instances that do not end with name "instance", you would use
+// `name ne .*instance`. You cannot combine constraints on multiple fields
+// using regular expressions.
+func (c *PublicAdvertisedPrefixesListCall) Filter(filter string) *PublicAdvertisedPrefixesListCall {
+ c.urlParams_.Set("filter", filter)
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": The maximum number of
+// results per page that should be returned. If the number of available results
+// is larger than `maxResults`, Compute Engine returns a `nextPageToken` that
+// can be used to get the next page of results in subsequent list requests.
+// Acceptable values are `0` to `500`, inclusive. (Default: `500`)
+func (c *PublicAdvertisedPrefixesListCall) MaxResults(maxResults int64) *PublicAdvertisedPrefixesListCall {
+ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+ return c
+}
+
+// OrderBy sets the optional parameter "orderBy": Sorts list results by a
+// certain order. By default, results are returned in alphanumerical order
+// based on the resource name. You can also sort results in descending order
+// based on the creation timestamp using `orderBy="creationTimestamp desc".
+// This sorts results based on the `creationTimestamp` field in reverse
+// chronological order (newest result first). Use this to sort resources like
+// operations so that the newest operation is returned first. Currently, only
+// sorting by `name` or `creationTimestamp desc` is supported.
+func (c *PublicAdvertisedPrefixesListCall) OrderBy(orderBy string) *PublicAdvertisedPrefixesListCall {
+ c.urlParams_.Set("orderBy", orderBy)
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Specifies a page token to
+// use. Set `pageToken` to the `nextPageToken` returned by a previous list
+// request to get the next page of results.
+func (c *PublicAdvertisedPrefixesListCall) PageToken(pageToken string) *PublicAdvertisedPrefixesListCall {
+ c.urlParams_.Set("pageToken", pageToken)
+ return c
+}
+
+// ReturnPartialSuccess sets the optional parameter "returnPartialSuccess":
+// Opt-in for partial success behavior which provides partial results in case
+// of failure. The default value is false. For example, when partial success
+// behavior is enabled, aggregatedList for a single zone scope either returns
+// all resources in the zone or no resources, with an error code.
+func (c *PublicAdvertisedPrefixesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *PublicAdvertisedPrefixesListCall {
+ c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess))
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
// details.
-func (c *PublicAdvertisedPrefixesDeleteCall) Fields(s ...googleapi.Field) *PublicAdvertisedPrefixesDeleteCall {
+func (c *PublicAdvertisedPrefixesListCall) Fields(s ...googleapi.Field) *PublicAdvertisedPrefixesListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
+// IfNoneMatch sets an optional parameter which makes the operation fail if the
+// object's ETag matches the given value. This is useful for getting updates
+// only after the object has changed since the last request.
+func (c *PublicAdvertisedPrefixesListCall) IfNoneMatch(entityTag string) *PublicAdvertisedPrefixesListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
// Context sets the context to be used in this call's Do method.
-func (c *PublicAdvertisedPrefixesDeleteCall) Context(ctx context.Context) *PublicAdvertisedPrefixesDeleteCall {
+func (c *PublicAdvertisedPrefixesListCall) Context(ctx context.Context) *PublicAdvertisedPrefixesListCall {
c.ctx_ = ctx
return c
}
// Header returns a http.Header that can be modified by the caller to add
// headers to the request.
-func (c *PublicAdvertisedPrefixesDeleteCall) Header() http.Header {
+func (c *PublicAdvertisedPrefixesListCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
-func (c *PublicAdvertisedPrefixesDeleteCall) doRequest(alt string) (*http.Response, error) {
+func (c *PublicAdvertisedPrefixesListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
+ if c.ifNoneMatch_ != "" {
+ reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+ }
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicAdvertisedPrefixes")
urls += "?" + c.urlParams_.Encode()
- req, err := http.NewRequest("DELETE", urls, body)
+ req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
- "publicAdvertisedPrefix": c.publicAdvertisedPrefix,
+ "project": c.project,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
-// Do executes the "compute.publicAdvertisedPrefixes.delete" call.
+// Do executes the "compute.publicAdvertisedPrefixes.list" call.
// Any non-2xx status code is an error. Response headers are in either
-// *Operation.ServerResponse.Header or (if a response was returned at all) in
-// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
-// whether the returned error was because http.StatusNotModified was returned.
-func (c *PublicAdvertisedPrefixesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
+// *PublicAdvertisedPrefixList.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was because
+// http.StatusNotModified was returned.
+func (c *PublicAdvertisedPrefixesListCall) Do(opts ...googleapi.CallOption) (*PublicAdvertisedPrefixList, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
@@ -56770,7 +56137,7 @@ func (c *PublicAdvertisedPrefixesDeleteCall) Do(opts ...googleapi.CallOption) (*
if err := googleapi.CheckResponse(res); err != nil {
return nil, gensupport.WrapError(err)
}
- ret := &Operation{
+ ret := &PublicAdvertisedPrefixList{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
@@ -56783,70 +56150,102 @@ func (c *PublicAdvertisedPrefixesDeleteCall) Do(opts ...googleapi.CallOption) (*
return ret, nil
}
-type PublicAdvertisedPrefixesGetCall struct {
+// Pages invokes f for each page of results.
+// A non-nil error returned from f will halt the iteration.
+// The provided context supersedes any context provided to the Context method.
+func (c *PublicAdvertisedPrefixesListCall) Pages(ctx context.Context, f func(*PublicAdvertisedPrefixList) error) error {
+ c.ctx_ = ctx
+ defer c.PageToken(c.urlParams_.Get("pageToken"))
+ for {
+ x, err := c.Do()
+ if err != nil {
+ return err
+ }
+ if err := f(x); err != nil {
+ return err
+ }
+ if x.NextPageToken == "" {
+ return nil
+ }
+ c.PageToken(x.NextPageToken)
+ }
+}
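
// Illustrative sketch (same assumptions as the earlier examples): combining
// the List options documented above. The filter expression and page size are
// made-up values; Pages follows nextPageToken internally and halts if the
// callback returns a non-nil error.
//
//	func listPrefixes(ctx context.Context, svc *compute.Service) error {
//		call := svc.PublicAdvertisedPrefixes.List("my-project").
//			Filter(`name != "example-prefix"`). // AIP-160 style, per the Filter doc
//			MaxResults(100)
//		return call.Pages(ctx, func(page *compute.PublicAdvertisedPrefixList) error {
//			for _, p := range page.Items {
//				log.Printf("prefix %s: %s", p.Name, p.IpCidrRange)
//			}
//			return nil
//		})
//	}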
+
+type PublicAdvertisedPrefixesPatchCall struct {
s *Service
project string
publicAdvertisedPrefix string
+ publicadvertisedprefix *PublicAdvertisedPrefix
urlParams_ gensupport.URLParams
- ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
-// Get: Returns the specified PublicAdvertisedPrefix resource.
+// Patch: Patches the specified PublicAdvertisedPrefix resource with the data
+// included in the request. This method supports PATCH semantics and uses JSON
+// merge patch format and processing rules.
//
// - project: Project ID for this request.
// - publicAdvertisedPrefix: Name of the PublicAdvertisedPrefix resource to
-// return.
-func (r *PublicAdvertisedPrefixesService) Get(project string, publicAdvertisedPrefix string) *PublicAdvertisedPrefixesGetCall {
- c := &PublicAdvertisedPrefixesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+// patch.
+func (r *PublicAdvertisedPrefixesService) Patch(project string, publicAdvertisedPrefix string, publicadvertisedprefix *PublicAdvertisedPrefix) *PublicAdvertisedPrefixesPatchCall {
+ c := &PublicAdvertisedPrefixesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.project = project
c.publicAdvertisedPrefix = publicAdvertisedPrefix
+ c.publicadvertisedprefix = publicadvertisedprefix
+ return c
+}
+
+// RequestId sets the optional parameter "requestId": An optional request ID to
+// identify requests. Specify a unique request ID so that if you must retry
+// your request, the server will know to ignore the request if it has already
+// been completed. For example, consider a situation where you make an initial
+// request and the request times out. If you make the request again with the
+// same request ID, the server can check if original operation with the same
+// request ID was received, and if so, will ignore the second request. This
+// prevents clients from accidentally creating duplicate commitments. The
+// request ID must be a valid UUID with the exception that zero UUID is not
+// supported ( 00000000-0000-0000-0000-000000000000).
+func (c *PublicAdvertisedPrefixesPatchCall) RequestId(requestId string) *PublicAdvertisedPrefixesPatchCall {
+ c.urlParams_.Set("requestId", requestId)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
// details.
-func (c *PublicAdvertisedPrefixesGetCall) Fields(s ...googleapi.Field) *PublicAdvertisedPrefixesGetCall {
+func (c *PublicAdvertisedPrefixesPatchCall) Fields(s ...googleapi.Field) *PublicAdvertisedPrefixesPatchCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
-// IfNoneMatch sets an optional parameter which makes the operation fail if the
-// object's ETag matches the given value. This is useful for getting updates
-// only after the object has changed since the last request.
-func (c *PublicAdvertisedPrefixesGetCall) IfNoneMatch(entityTag string) *PublicAdvertisedPrefixesGetCall {
- c.ifNoneMatch_ = entityTag
- return c
-}
-
// Context sets the context to be used in this call's Do method.
-func (c *PublicAdvertisedPrefixesGetCall) Context(ctx context.Context) *PublicAdvertisedPrefixesGetCall {
+func (c *PublicAdvertisedPrefixesPatchCall) Context(ctx context.Context) *PublicAdvertisedPrefixesPatchCall {
c.ctx_ = ctx
return c
}
// Header returns a http.Header that can be modified by the caller to add
// headers to the request.
-func (c *PublicAdvertisedPrefixesGetCall) Header() http.Header {
+func (c *PublicAdvertisedPrefixesPatchCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
-func (c *PublicAdvertisedPrefixesGetCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
- if c.ifNoneMatch_ != "" {
- reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
- }
+func (c *PublicAdvertisedPrefixesPatchCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.publicadvertisedprefix)
+ if err != nil {
+ return nil, err
+ }
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}")
urls += "?" + c.urlParams_.Encode()
- req, err := http.NewRequest("GET", urls, body)
+ req, err := http.NewRequest("PATCH", urls, body)
if err != nil {
return nil, err
}
@@ -56858,13 +56257,12 @@ func (c *PublicAdvertisedPrefixesGetCall) doRequest(alt string) (*http.Response,
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
-// Do executes the "compute.publicAdvertisedPrefixes.get" call.
+// Do executes the "compute.publicAdvertisedPrefixes.patch" call.
// Any non-2xx status code is an error. Response headers are in either
-// *PublicAdvertisedPrefix.ServerResponse.Header or (if a response was returned
-// at all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
-// check whether the returned error was because http.StatusNotModified was
-// returned.
-func (c *PublicAdvertisedPrefixesGetCall) Do(opts ...googleapi.CallOption) (*PublicAdvertisedPrefix, error) {
+// *Operation.ServerResponse.Header or (if a response was returned at all) in
+// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
+// whether the returned error was because http.StatusNotModified was returned.
+func (c *PublicAdvertisedPrefixesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
@@ -56883,7 +56281,7 @@ func (c *PublicAdvertisedPrefixesGetCall) Do(opts ...googleapi.CallOption) (*Pub
if err := googleapi.CheckResponse(res); err != nil {
return nil, gensupport.WrapError(err)
}
- ret := &PublicAdvertisedPrefix{
+ ret := &Operation{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
@@ -56896,23 +56294,24 @@ func (c *PublicAdvertisedPrefixesGetCall) Do(opts ...googleapi.CallOption) (*Pub
return ret, nil
}
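
// Illustrative sketch (same assumptions as the earlier examples): a JSON
// merge patch per the Patch doc comment, sending only the fields to change.
// The field values are assumptions; carrying over the current fingerprint
// from a prior Get is the usual guard against concurrent modification.
//
//	func updatePrefixDescription(ctx context.Context, svc *compute.Service, fingerprint string) (*compute.Operation, error) {
//		patch := &compute.PublicAdvertisedPrefix{
//			Description: "updated by example",
//			Fingerprint: fingerprint, // from a prior Get of the same resource
//		}
//		return svc.PublicAdvertisedPrefixes.Patch("my-project", "my-prefix", patch).
//			Context(ctx).
//			Do()
//	}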
-type PublicAdvertisedPrefixesInsertCall struct {
+type PublicAdvertisedPrefixesWithdrawCall struct {
s *Service
project string
- publicadvertisedprefix *PublicAdvertisedPrefix
+ publicAdvertisedPrefix string
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
-// Insert: Creates a PublicAdvertisedPrefix in the specified project using the
-// parameters that are included in the request.
+// Withdraw: Withdraws the specified PublicAdvertisedPrefix
//
-// - project: Project ID for this request.
-func (r *PublicAdvertisedPrefixesService) Insert(project string, publicadvertisedprefix *PublicAdvertisedPrefix) *PublicAdvertisedPrefixesInsertCall {
- c := &PublicAdvertisedPrefixesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+// - project: Project ID for this request.
+// - publicAdvertisedPrefix: The name of the public advertised prefix. It
+// should comply with RFC1035.
+func (r *PublicAdvertisedPrefixesService) Withdraw(project string, publicAdvertisedPrefix string) *PublicAdvertisedPrefixesWithdrawCall {
+ c := &PublicAdvertisedPrefixesWithdrawCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.project = project
- c.publicadvertisedprefix = publicadvertisedprefix
+ c.publicAdvertisedPrefix = publicAdvertisedPrefix
return c
}
@@ -56926,7 +56325,7 @@ func (r *PublicAdvertisedPrefixesService) Insert(project string, publicadvertise
// prevents clients from accidentally creating duplicate commitments. The
// request ID must be a valid UUID with the exception that zero UUID is not
// supported ( 00000000-0000-0000-0000-000000000000).
-func (c *PublicAdvertisedPrefixesInsertCall) RequestId(requestId string) *PublicAdvertisedPrefixesInsertCall {
+func (c *PublicAdvertisedPrefixesWithdrawCall) RequestId(requestId string) *PublicAdvertisedPrefixesWithdrawCall {
c.urlParams_.Set("requestId", requestId)
return c
}
@@ -56934,36 +56333,32 @@ func (c *PublicAdvertisedPrefixesInsertCall) RequestId(requestId string) *Public
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
// details.
-func (c *PublicAdvertisedPrefixesInsertCall) Fields(s ...googleapi.Field) *PublicAdvertisedPrefixesInsertCall {
+func (c *PublicAdvertisedPrefixesWithdrawCall) Fields(s ...googleapi.Field) *PublicAdvertisedPrefixesWithdrawCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method.
-func (c *PublicAdvertisedPrefixesInsertCall) Context(ctx context.Context) *PublicAdvertisedPrefixesInsertCall {
+func (c *PublicAdvertisedPrefixesWithdrawCall) Context(ctx context.Context) *PublicAdvertisedPrefixesWithdrawCall {
c.ctx_ = ctx
return c
}
// Header returns a http.Header that can be modified by the caller to add
// headers to the request.
-func (c *PublicAdvertisedPrefixesInsertCall) Header() http.Header {
+func (c *PublicAdvertisedPrefixesWithdrawCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
-func (c *PublicAdvertisedPrefixesInsertCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
+func (c *PublicAdvertisedPrefixesWithdrawCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.publicadvertisedprefix)
- if err != nil {
- return nil, err
- }
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicAdvertisedPrefixes")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}/withdraw")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
@@ -56971,17 +56366,18 @@ func (c *PublicAdvertisedPrefixesInsertCall) doRequest(alt string) (*http.Respon
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
+ "project": c.project,
+ "publicAdvertisedPrefix": c.publicAdvertisedPrefix,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
-// Do executes the "compute.publicAdvertisedPrefixes.insert" call.
+// Do executes the "compute.publicAdvertisedPrefixes.withdraw" call.
// Any non-2xx status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at all) in
// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
// whether the returned error was because http.StatusNotModified was returned.
-func (c *PublicAdvertisedPrefixesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
+func (c *PublicAdvertisedPrefixesWithdrawCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
@@ -57013,7 +56409,7 @@ func (c *PublicAdvertisedPrefixesInsertCall) Do(opts ...googleapi.CallOption) (*
return ret, nil
}
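
As a reading aid for this generated surface, here is a minimal caller-side sketch of the new advertised-prefix withdraw method. It assumes the GA google.golang.org/api/compute/v1 import path and Application Default Credentials; the project name, prefix name, and UUID are placeholders for illustration, not values from this change.

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // authenticates via Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}
	op, err := svc.PublicAdvertisedPrefixes.
		Withdraw("my-project", "my-advertised-prefix").
		// Reusing one UUID across retries makes the mutation idempotent,
		// per the RequestId documentation above.
		RequestId("9a8c6d2e-1f3b-4c5d-8e7f-0123456789ab").
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("started operation:", op.Name) // long-running; poll the operation to completion
}

The returned *Operation mirrors every other mutation in this file, so existing operation-polling helpers apply unchanged.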
-type PublicAdvertisedPrefixesListCall struct {
+type PublicDelegatedPrefixesAggregatedListCall struct {
s *Service
project string
urlParams_ gensupport.URLParams
@@ -57022,11 +56418,13 @@ type PublicAdvertisedPrefixesListCall struct {
header_ http.Header
}
-// List: Lists the PublicAdvertisedPrefixes for a project.
+// AggregatedList: Lists all PublicDelegatedPrefix resources owned by the
+// specific project across all scopes. To prevent failure, Google recommends
+// that you set the `returnPartialSuccess` parameter to `true`.
//
-// - project: Project ID for this request.
-func (r *PublicAdvertisedPrefixesService) List(project string) *PublicAdvertisedPrefixesListCall {
- c := &PublicAdvertisedPrefixesListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+// - project: Name of the project scoping this request.
+func (r *PublicDelegatedPrefixesService) AggregatedList(project string) *PublicDelegatedPrefixesAggregatedListCall {
+ c := &PublicDelegatedPrefixesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.project = project
return c
}
@@ -57063,17 +56461,30 @@ func (r *PublicAdvertisedPrefixesService) List(project string) *PublicAdvertised
// filter for instances that do not end with name "instance", you would use
// `name ne .*instance`. You cannot combine constraints on multiple fields
// using regular expressions.
-func (c *PublicAdvertisedPrefixesListCall) Filter(filter string) *PublicAdvertisedPrefixesListCall {
+func (c *PublicDelegatedPrefixesAggregatedListCall) Filter(filter string) *PublicDelegatedPrefixesAggregatedListCall {
c.urlParams_.Set("filter", filter)
return c
}
+// IncludeAllScopes sets the optional parameter "includeAllScopes": Indicates
+// whether every visible scope for each scope type (zone, region, global)
+// should be included in the response. For new resource types added after this
+// field, the flag has no effect as new resource types will always include
+// every visible scope for each scope type in response. For resource types
+// which predate this field, if this flag is omitted or false, only scopes of
+// the scope types where the resource type is expected to be found will be
+// included.
+func (c *PublicDelegatedPrefixesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *PublicDelegatedPrefixesAggregatedListCall {
+ c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes))
+ return c
+}
+
// MaxResults sets the optional parameter "maxResults": The maximum number of
// results per page that should be returned. If the number of available results
// is larger than `maxResults`, Compute Engine returns a `nextPageToken` that
// can be used to get the next page of results in subsequent list requests.
// Acceptable values are `0` to `500`, inclusive. (Default: `500`)
-func (c *PublicAdvertisedPrefixesListCall) MaxResults(maxResults int64) *PublicAdvertisedPrefixesListCall {
+func (c *PublicDelegatedPrefixesAggregatedListCall) MaxResults(maxResults int64) *PublicDelegatedPrefixesAggregatedListCall {
c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
return c
}
@@ -57086,7 +56497,7 @@ func (c *PublicAdvertisedPrefixesListCall) MaxResults(maxResults int64) *PublicA
// chronological order (newest result first). Use this to sort resources like
// operations so that the newest operation is returned first. Currently, only
// sorting by `name` or `creationTimestamp desc` is supported.
-func (c *PublicAdvertisedPrefixesListCall) OrderBy(orderBy string) *PublicAdvertisedPrefixesListCall {
+func (c *PublicDelegatedPrefixesAggregatedListCall) OrderBy(orderBy string) *PublicDelegatedPrefixesAggregatedListCall {
c.urlParams_.Set("orderBy", orderBy)
return c
}
@@ -57094,7 +56505,7 @@ func (c *PublicAdvertisedPrefixesListCall) OrderBy(orderBy string) *PublicAdvert
// PageToken sets the optional parameter "pageToken": Specifies a page token to
// use. Set `pageToken` to the `nextPageToken` returned by a previous list
// request to get the next page of results.
-func (c *PublicAdvertisedPrefixesListCall) PageToken(pageToken string) *PublicAdvertisedPrefixesListCall {
+func (c *PublicDelegatedPrefixesAggregatedListCall) PageToken(pageToken string) *PublicDelegatedPrefixesAggregatedListCall {
c.urlParams_.Set("pageToken", pageToken)
return c
}
@@ -57104,15 +56515,23 @@ func (c *PublicAdvertisedPrefixesListCall) PageToken(pageToken string) *PublicAd
// of failure. The default value is false. For example, when partial success
// behavior is enabled, aggregatedList for a single zone scope either returns
// all resources in the zone or no resources, with an error code.
-func (c *PublicAdvertisedPrefixesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *PublicAdvertisedPrefixesListCall {
+func (c *PublicDelegatedPrefixesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *PublicDelegatedPrefixesAggregatedListCall {
c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess))
return c
}
+// ServiceProjectNumber sets the optional parameter "serviceProjectNumber": The
+// Shared VPC service project ID or service project number for which the
+// aggregated list request is invoked for the subnetworks list-usable API.
+func (c *PublicDelegatedPrefixesAggregatedListCall) ServiceProjectNumber(serviceProjectNumber int64) *PublicDelegatedPrefixesAggregatedListCall {
+ c.urlParams_.Set("serviceProjectNumber", fmt.Sprint(serviceProjectNumber))
+ return c
+}
+
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
// details.
-func (c *PublicAdvertisedPrefixesListCall) Fields(s ...googleapi.Field) *PublicAdvertisedPrefixesListCall {
+func (c *PublicDelegatedPrefixesAggregatedListCall) Fields(s ...googleapi.Field) *PublicDelegatedPrefixesAggregatedListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
@@ -57120,54 +56539,195 @@ func (c *PublicAdvertisedPrefixesListCall) Fields(s ...googleapi.Field) *PublicA
// IfNoneMatch sets an optional parameter which makes the operation fail if the
// object's ETag matches the given value. This is useful for getting updates
// only after the object has changed since the last request.
-func (c *PublicAdvertisedPrefixesListCall) IfNoneMatch(entityTag string) *PublicAdvertisedPrefixesListCall {
+func (c *PublicDelegatedPrefixesAggregatedListCall) IfNoneMatch(entityTag string) *PublicDelegatedPrefixesAggregatedListCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method.
-func (c *PublicAdvertisedPrefixesListCall) Context(ctx context.Context) *PublicAdvertisedPrefixesListCall {
+func (c *PublicDelegatedPrefixesAggregatedListCall) Context(ctx context.Context) *PublicDelegatedPrefixesAggregatedListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+// Header returns a http.Header that can be modified by the caller to add
+// headers to the request.
+func (c *PublicDelegatedPrefixesAggregatedListCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
+func (c *PublicDelegatedPrefixesAggregatedListCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
+ if c.ifNoneMatch_ != "" {
+ reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ c.urlParams_.Set("prettyPrint", "false")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/publicDelegatedPrefixes")
+ urls += "?" + c.urlParams_.Encode()
+ req, err := http.NewRequest("GET", urls, body)
+ if err != nil {
+ return nil, err
+ }
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "compute.publicDelegatedPrefixes.aggregatedList" call.
+// Any non-2xx status code is an error. Response headers are in either
+// *PublicDelegatedPrefixAggregatedList.ServerResponse.Header or (if a response
+// was returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was because
+// http.StatusNotModified was returned.
+func (c *PublicDelegatedPrefixesAggregatedListCall) Do(opts ...googleapi.CallOption) (*PublicDelegatedPrefixAggregatedList, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, gensupport.WrapError(&googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ })
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, gensupport.WrapError(err)
+ }
+ ret := &PublicDelegatedPrefixAggregatedList{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ target := &ret
+ if err := gensupport.DecodeResponse(target, res); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// Pages invokes f for each page of results.
+// A non-nil error returned from f will halt the iteration.
+// The provided context supersedes any context provided to the Context method.
+func (c *PublicDelegatedPrefixesAggregatedListCall) Pages(ctx context.Context, f func(*PublicDelegatedPrefixAggregatedList) error) error {
+ c.ctx_ = ctx
+ defer c.PageToken(c.urlParams_.Get("pageToken"))
+ for {
+ x, err := c.Do()
+ if err != nil {
+ return err
+ }
+ if err := f(x); err != nil {
+ return err
+ }
+ if x.NextPageToken == "" {
+ return nil
+ }
+ c.PageToken(x.NextPageToken)
+ }
+}
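+
Since the aggregated-list variant is new in this change, a short sketch of paging through it may help. The Items map and its PublicDelegatedPrefixes field are assumptions based on the usual scoped-list shape of aggregated results, not types shown in this diff; scope keys look like "regions/us-central1".

package prefixes

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// listAllDelegatedPrefixes walks every page of the aggregated list.
func listAllDelegatedPrefixes(ctx context.Context, svc *compute.Service, project string) error {
	call := svc.PublicDelegatedPrefixes.AggregatedList(project).
		ReturnPartialSuccess(true) // recommended by the method documentation above
	return call.Pages(ctx, func(page *compute.PublicDelegatedPrefixAggregatedList) error {
		for scope, scoped := range page.Items { // assumed field: map of scope -> scoped list
			for _, p := range scoped.PublicDelegatedPrefixes {
				fmt.Printf("%s: %s\n", scope, p.Name)
			}
		}
		return nil // Pages handles nextPageToken internally
	})
}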
+
+type PublicDelegatedPrefixesAnnounceCall struct {
+ s *Service
+ project string
+ region string
+ publicDelegatedPrefix string
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+ header_ http.Header
+}
+
+// Announce: Announces the specified PublicDelegatedPrefix in the given region.
+//
+// - project: Project ID for this request.
+// - publicDelegatedPrefix: The name of the public delegated prefix. It should
+// comply with RFC1035.
+// - region: The name of the region where the public delegated prefix is
+// located. It should comply with RFC1035.
+func (r *PublicDelegatedPrefixesService) Announce(project string, region string, publicDelegatedPrefix string) *PublicDelegatedPrefixesAnnounceCall {
+ c := &PublicDelegatedPrefixesAnnounceCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ c.region = region
+ c.publicDelegatedPrefix = publicDelegatedPrefix
+ return c
+}
+
+// RequestId sets the optional parameter "requestId": An optional request ID to
+// identify requests. Specify a unique request ID so that if you must retry
+// your request, the server will know to ignore the request if it has already
+// been completed. For example, consider a situation where you make an initial
+// request and the request times out. If you make the request again with the
+// same request ID, the server can check if original operation with the same
+// request ID was received, and if so, will ignore the second request. This
+// prevents clients from accidentally creating duplicate commitments. The
+// request ID must be a valid UUID with the exception that zero UUID is not
+// supported ( 00000000-0000-0000-0000-000000000000).
+func (c *PublicDelegatedPrefixesAnnounceCall) RequestId(requestId string) *PublicDelegatedPrefixesAnnounceCall {
+ c.urlParams_.Set("requestId", requestId)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
+// details.
+func (c *PublicDelegatedPrefixesAnnounceCall) Fields(s ...googleapi.Field) *PublicDelegatedPrefixesAnnounceCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do method.
+func (c *PublicDelegatedPrefixesAnnounceCall) Context(ctx context.Context) *PublicDelegatedPrefixesAnnounceCall {
c.ctx_ = ctx
return c
}
// Header returns a http.Header that can be modified by the caller to add
// headers to the request.
-func (c *PublicAdvertisedPrefixesListCall) Header() http.Header {
+func (c *PublicDelegatedPrefixesAnnounceCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
-func (c *PublicAdvertisedPrefixesListCall) doRequest(alt string) (*http.Response, error) {
+func (c *PublicDelegatedPrefixesAnnounceCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
- if c.ifNoneMatch_ != "" {
- reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
- }
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicAdvertisedPrefixes")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}/announce")
urls += "?" + c.urlParams_.Encode()
- req, err := http.NewRequest("GET", urls, body)
+ req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
+ "project": c.project,
+ "region": c.region,
+ "publicDelegatedPrefix": c.publicDelegatedPrefix,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
-// Do executes the "compute.publicAdvertisedPrefixes.list" call.
+// Do executes the "compute.publicDelegatedPrefixes.announce" call.
// Any non-2xx status code is an error. Response headers are in either
-// *PublicAdvertisedPrefixList.ServerResponse.Header or (if a response was
-// returned at all) in error.(*googleapi.Error).Header. Use
-// googleapi.IsNotModified to check whether the returned error was because
-// http.StatusNotModified was returned.
-func (c *PublicAdvertisedPrefixesListCall) Do(opts ...googleapi.CallOption) (*PublicAdvertisedPrefixList, error) {
+// *Operation.ServerResponse.Header or (if a response was returned at all) in
+// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
+// whether the returned error was because http.StatusNotModified was returned.
+func (c *PublicDelegatedPrefixesAnnounceCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
@@ -57186,7 +56746,7 @@ func (c *PublicAdvertisedPrefixesListCall) Do(opts ...googleapi.CallOption) (*Pu
if err := googleapi.CheckResponse(res); err != nil {
return nil, gensupport.WrapError(err)
}
- ret := &PublicAdvertisedPrefixList{
+ ret := &Operation{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
@@ -57199,49 +56759,27 @@ func (c *PublicAdvertisedPrefixesListCall) Do(opts ...googleapi.CallOption) (*Pu
return ret, nil
}
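
The announce call returns a long-running *Operation like the other mutations here. A hedged sketch of waiting for it, assuming the regionOperations.wait method generated elsewhere in this package; identifiers are placeholders:

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	op, err := svc.PublicDelegatedPrefixes.
		Announce("my-project", "us-central1", "my-delegated-prefix").
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	// Block until the regional operation settles (assumed helper:
	// RegionOperations.Wait, the generated regionOperations.wait call).
	if _, err := svc.RegionOperations.Wait("my-project", "us-central1", op.Name).Context(ctx).Do(); err != nil {
		log.Fatal(err)
	}
}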
-// Pages invokes f for each page of results.
-// A non-nil error returned from f will halt the iteration.
-// The provided context supersedes any context provided to the Context method.
-func (c *PublicAdvertisedPrefixesListCall) Pages(ctx context.Context, f func(*PublicAdvertisedPrefixList) error) error {
- c.ctx_ = ctx
- defer c.PageToken(c.urlParams_.Get("pageToken"))
- for {
- x, err := c.Do()
- if err != nil {
- return err
- }
- if err := f(x); err != nil {
- return err
- }
- if x.NextPageToken == "" {
- return nil
- }
- c.PageToken(x.NextPageToken)
- }
-}
-
-type PublicAdvertisedPrefixesPatchCall struct {
- s *Service
- project string
- publicAdvertisedPrefix string
- publicadvertisedprefix *PublicAdvertisedPrefix
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
+type PublicDelegatedPrefixesDeleteCall struct {
+ s *Service
+ project string
+ region string
+ publicDelegatedPrefix string
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+ header_ http.Header
}
-// Patch: Patches the specified Router resource with the data included in the
-// request. This method supports PATCH semantics and uses JSON merge patch
-// format and processing rules.
+// Delete: Deletes the specified PublicDelegatedPrefix in the given region.
//
// - project: Project ID for this request.
-// - publicAdvertisedPrefix: Name of the PublicAdvertisedPrefix resource to
-// patch.
-func (r *PublicAdvertisedPrefixesService) Patch(project string, publicAdvertisedPrefix string, publicadvertisedprefix *PublicAdvertisedPrefix) *PublicAdvertisedPrefixesPatchCall {
- c := &PublicAdvertisedPrefixesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource to
+// delete.
+// - region: Name of the region of this request.
+func (r *PublicDelegatedPrefixesService) Delete(project string, region string, publicDelegatedPrefix string) *PublicDelegatedPrefixesDeleteCall {
+ c := &PublicDelegatedPrefixesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.project = project
- c.publicAdvertisedPrefix = publicAdvertisedPrefix
- c.publicadvertisedprefix = publicadvertisedprefix
+ c.region = region
+ c.publicDelegatedPrefix = publicDelegatedPrefix
return c
}
@@ -57255,7 +56793,7 @@ func (r *PublicAdvertisedPrefixesService) Patch(project string, publicAdvertised
// prevents clients from accidentally creating duplicate commitments. The
// request ID must be a valid UUID with the exception that zero UUID is not
// supported ( 00000000-0000-0000-0000-000000000000).
-func (c *PublicAdvertisedPrefixesPatchCall) RequestId(requestId string) *PublicAdvertisedPrefixesPatchCall {
+func (c *PublicDelegatedPrefixesDeleteCall) RequestId(requestId string) *PublicDelegatedPrefixesDeleteCall {
c.urlParams_.Set("requestId", requestId)
return c
}
@@ -57263,55 +56801,52 @@ func (c *PublicAdvertisedPrefixesPatchCall) RequestId(requestId string) *PublicA
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
// details.
-func (c *PublicAdvertisedPrefixesPatchCall) Fields(s ...googleapi.Field) *PublicAdvertisedPrefixesPatchCall {
+func (c *PublicDelegatedPrefixesDeleteCall) Fields(s ...googleapi.Field) *PublicDelegatedPrefixesDeleteCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method.
-func (c *PublicAdvertisedPrefixesPatchCall) Context(ctx context.Context) *PublicAdvertisedPrefixesPatchCall {
+func (c *PublicDelegatedPrefixesDeleteCall) Context(ctx context.Context) *PublicDelegatedPrefixesDeleteCall {
c.ctx_ = ctx
return c
}
// Header returns a http.Header that can be modified by the caller to add
// headers to the request.
-func (c *PublicAdvertisedPrefixesPatchCall) Header() http.Header {
+func (c *PublicDelegatedPrefixesDeleteCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
-func (c *PublicAdvertisedPrefixesPatchCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
+func (c *PublicDelegatedPrefixesDeleteCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.publicadvertisedprefix)
- if err != nil {
- return nil, err
- }
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}")
urls += "?" + c.urlParams_.Encode()
- req, err := http.NewRequest("PATCH", urls, body)
+ req, err := http.NewRequest("DELETE", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
- "publicAdvertisedPrefix": c.publicAdvertisedPrefix,
+ "project": c.project,
+ "region": c.region,
+ "publicDelegatedPrefix": c.publicDelegatedPrefix,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
-// Do executes the "compute.publicAdvertisedPrefixes.patch" call.
+// Do executes the "compute.publicDelegatedPrefixes.delete" call.
// Any non-2xx status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at all) in
// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
// whether the returned error was because http.StatusNotModified was returned.
-func (c *PublicAdvertisedPrefixesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
+func (c *PublicDelegatedPrefixesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
@@ -57343,24 +56878,144 @@ func (c *PublicAdvertisedPrefixesPatchCall) Do(opts ...googleapi.CallOption) (*O
return ret, nil
}
-type PublicAdvertisedPrefixesWithdrawCall struct {
- s *Service
- project string
- publicAdvertisedPrefix string
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
+type PublicDelegatedPrefixesGetCall struct {
+ s *Service
+ project string
+ region string
+ publicDelegatedPrefix string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+ header_ http.Header
}
-// Withdraw: Withdraws the specified PublicAdvertisedPrefix
+// Get: Returns the specified PublicDelegatedPrefix resource in the given
+// region.
//
// - project: Project ID for this request.
-// - publicAdvertisedPrefix: The name of the public advertised prefix. It
-// should comply with RFC1035.
-func (r *PublicAdvertisedPrefixesService) Withdraw(project string, publicAdvertisedPrefix string) *PublicAdvertisedPrefixesWithdrawCall {
- c := &PublicAdvertisedPrefixesWithdrawCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource to
+// return.
+// - region: Name of the region of this request.
+func (r *PublicDelegatedPrefixesService) Get(project string, region string, publicDelegatedPrefix string) *PublicDelegatedPrefixesGetCall {
+ c := &PublicDelegatedPrefixesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.project = project
- c.publicAdvertisedPrefix = publicAdvertisedPrefix
+ c.region = region
+ c.publicDelegatedPrefix = publicDelegatedPrefix
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
+// details.
+func (c *PublicDelegatedPrefixesGetCall) Fields(s ...googleapi.Field) *PublicDelegatedPrefixesGetCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets an optional parameter which makes the operation fail if the
+// object's ETag matches the given value. This is useful for getting updates
+// only after the object has changed since the last request.
+func (c *PublicDelegatedPrefixesGetCall) IfNoneMatch(entityTag string) *PublicDelegatedPrefixesGetCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method.
+func (c *PublicDelegatedPrefixesGetCall) Context(ctx context.Context) *PublicDelegatedPrefixesGetCall {
+ c.ctx_ = ctx
+ return c
+}
+
+// Header returns a http.Header that can be modified by the caller to add
+// headers to the request.
+func (c *PublicDelegatedPrefixesGetCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
+func (c *PublicDelegatedPrefixesGetCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
+ if c.ifNoneMatch_ != "" {
+ reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ c.urlParams_.Set("prettyPrint", "false")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}")
+ urls += "?" + c.urlParams_.Encode()
+ req, err := http.NewRequest("GET", urls, body)
+ if err != nil {
+ return nil, err
+ }
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "region": c.region,
+ "publicDelegatedPrefix": c.publicDelegatedPrefix,
+ })
+ return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "compute.publicDelegatedPrefixes.get" call.
+// Any non-2xx status code is an error. Response headers are in either
+// *PublicDelegatedPrefix.ServerResponse.Header or (if a response was returned
+// at all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified was
+// returned.
+func (c *PublicDelegatedPrefixesGetCall) Do(opts ...googleapi.CallOption) (*PublicDelegatedPrefix, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, gensupport.WrapError(&googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ })
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, gensupport.WrapError(err)
+ }
+ ret := &PublicDelegatedPrefix{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ target := &ret
+ if err := gensupport.DecodeResponse(target, res); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
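+
The get call supports conditional fetches through IfNoneMatch, paired with googleapi.IsNotModified as the Do documentation above describes. A small sketch, assuming the caller cached an ETag from a previous response; identifiers are placeholders:

package prefixes

import (
	"context"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

// getIfChanged returns nil, nil when the cached copy is still current.
func getIfChanged(ctx context.Context, svc *compute.Service, etag string) (*compute.PublicDelegatedPrefix, error) {
	p, err := svc.PublicDelegatedPrefixes.
		Get("my-project", "us-central1", "my-delegated-prefix").
		IfNoneMatch(etag). // server answers 304 Not Modified if unchanged
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		return nil, nil
	}
	return p, err
}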
+
+type PublicDelegatedPrefixesInsertCall struct {
+ s *Service
+ project string
+ region string
+ publicdelegatedprefix *PublicDelegatedPrefix
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+ header_ http.Header
+}
+
+// Insert: Creates a PublicDelegatedPrefix in the specified project in the
+// given region using the parameters that are included in the request.
+//
+// - project: Project ID for this request.
+// - region: Name of the region of this request.
+func (r *PublicDelegatedPrefixesService) Insert(project string, region string, publicdelegatedprefix *PublicDelegatedPrefix) *PublicDelegatedPrefixesInsertCall {
+ c := &PublicDelegatedPrefixesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ c.region = region
+ c.publicdelegatedprefix = publicdelegatedprefix
return c
}
@@ -57374,7 +57029,7 @@ func (r *PublicAdvertisedPrefixesService) Withdraw(project string, publicAdverti
// prevents clients from accidentally creating duplicate commitments. The
// request ID must be a valid UUID with the exception that zero UUID is not
// supported ( 00000000-0000-0000-0000-000000000000).
-func (c *PublicAdvertisedPrefixesWithdrawCall) RequestId(requestId string) *PublicAdvertisedPrefixesWithdrawCall {
+func (c *PublicDelegatedPrefixesInsertCall) RequestId(requestId string) *PublicDelegatedPrefixesInsertCall {
c.urlParams_.Set("requestId", requestId)
return c
}
@@ -57382,32 +57037,36 @@ func (c *PublicAdvertisedPrefixesWithdrawCall) RequestId(requestId string) *Publ
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
// details.
-func (c *PublicAdvertisedPrefixesWithdrawCall) Fields(s ...googleapi.Field) *PublicAdvertisedPrefixesWithdrawCall {
+func (c *PublicDelegatedPrefixesInsertCall) Fields(s ...googleapi.Field) *PublicDelegatedPrefixesInsertCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method.
-func (c *PublicAdvertisedPrefixesWithdrawCall) Context(ctx context.Context) *PublicAdvertisedPrefixesWithdrawCall {
+func (c *PublicDelegatedPrefixesInsertCall) Context(ctx context.Context) *PublicDelegatedPrefixesInsertCall {
c.ctx_ = ctx
return c
}
// Header returns a http.Header that can be modified by the caller to add
// headers to the request.
-func (c *PublicAdvertisedPrefixesWithdrawCall) Header() http.Header {
+func (c *PublicDelegatedPrefixesInsertCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
-func (c *PublicAdvertisedPrefixesWithdrawCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
+func (c *PublicDelegatedPrefixesInsertCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.publicdelegatedprefix)
+ if err != nil {
+ return nil, err
+ }
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}/withdraw")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/publicDelegatedPrefixes")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
@@ -57415,18 +57074,18 @@ func (c *PublicAdvertisedPrefixesWithdrawCall) doRequest(alt string) (*http.Resp
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
- "publicAdvertisedPrefix": c.publicAdvertisedPrefix,
+ "project": c.project,
+ "region": c.region,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
-// Do executes the "compute.publicAdvertisedPrefixes.withdraw" call.
+// Do executes the "compute.publicDelegatedPrefixes.insert" call.
// Any non-2xx status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at all) in
// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
// whether the returned error was because http.StatusNotModified was returned.
-func (c *PublicAdvertisedPrefixesWithdrawCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
+func (c *PublicDelegatedPrefixesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
@@ -57458,23 +57117,24 @@ func (c *PublicAdvertisedPrefixesWithdrawCall) Do(opts ...googleapi.CallOption)
return ret, nil
}
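
For the insert call, the request body is a *PublicDelegatedPrefix. The resource fields used below (IpCidrRange, ParentPrefix) are illustrative assumptions about the schema, not values taken from this diff:

package prefixes

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// createDelegatedPrefix starts creation of a regional delegated prefix.
func createDelegatedPrefix(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
	prefix := &compute.PublicDelegatedPrefix{
		Name:         "my-delegated-prefix",
		IpCidrRange:  "203.0.113.0/24",       // assumed schema field: the delegated range
		ParentPrefix: "my-advertised-prefix", // assumed schema field: parent advertised prefix
	}
	return svc.PublicDelegatedPrefixes.
		Insert("my-project", "us-central1", prefix).
		RequestId("f3b9c2d1-7e4a-4b6c-9d0e-abcdef012345"). // idempotency across retries
		Context(ctx).
		Do()
}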
-type PublicDelegatedPrefixesAggregatedListCall struct {
+type PublicDelegatedPrefixesListCall struct {
s *Service
project string
+ region string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
-// AggregatedList: Lists all PublicDelegatedPrefix resources owned by the
-// specific project across all scopes. To prevent failure, Google recommends
-// that you set the `returnPartialSuccess` parameter to `true`.
+// List: Lists the PublicDelegatedPrefixes for a project in the given region.
//
-// - project: Name of the project scoping this request.
-func (r *PublicDelegatedPrefixesService) AggregatedList(project string) *PublicDelegatedPrefixesAggregatedListCall {
- c := &PublicDelegatedPrefixesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+// - project: Project ID for this request.
+// - region: Name of the region of this request.
+func (r *PublicDelegatedPrefixesService) List(project string, region string) *PublicDelegatedPrefixesListCall {
+ c := &PublicDelegatedPrefixesListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.project = project
+ c.region = region
return c
}
@@ -57510,30 +57170,17 @@ func (r *PublicDelegatedPrefixesService) AggregatedList(project string) *PublicD
// filter for instances that do not end with name "instance", you would use
// `name ne .*instance`. You cannot combine constraints on multiple fields
// using regular expressions.
-func (c *PublicDelegatedPrefixesAggregatedListCall) Filter(filter string) *PublicDelegatedPrefixesAggregatedListCall {
+func (c *PublicDelegatedPrefixesListCall) Filter(filter string) *PublicDelegatedPrefixesListCall {
c.urlParams_.Set("filter", filter)
return c
}
-// IncludeAllScopes sets the optional parameter "includeAllScopes": Indicates
-// whether every visible scope for each scope type (zone, region, global)
-// should be included in the response. For new resource types added after this
-// field, the flag has no effect as new resource types will always include
-// every visible scope for each scope type in response. For resource types
-// which predate this field, if this flag is omitted or false, only scopes of
-// the scope types where the resource type is expected to be found will be
-// included.
-func (c *PublicDelegatedPrefixesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *PublicDelegatedPrefixesAggregatedListCall {
- c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes))
- return c
-}
-
// MaxResults sets the optional parameter "maxResults": The maximum number of
// results per page that should be returned. If the number of available results
// is larger than `maxResults`, Compute Engine returns a `nextPageToken` that
// can be used to get the next page of results in subsequent list requests.
// Acceptable values are `0` to `500`, inclusive. (Default: `500`)
-func (c *PublicDelegatedPrefixesAggregatedListCall) MaxResults(maxResults int64) *PublicDelegatedPrefixesAggregatedListCall {
+func (c *PublicDelegatedPrefixesListCall) MaxResults(maxResults int64) *PublicDelegatedPrefixesListCall {
c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
return c
}
@@ -57546,7 +57193,7 @@ func (c *PublicDelegatedPrefixesAggregatedListCall) MaxResults(maxResults int64)
// chronological order (newest result first). Use this to sort resources like
// operations so that the newest operation is returned first. Currently, only
// sorting by `name` or `creationTimestamp desc` is supported.
-func (c *PublicDelegatedPrefixesAggregatedListCall) OrderBy(orderBy string) *PublicDelegatedPrefixesAggregatedListCall {
+func (c *PublicDelegatedPrefixesListCall) OrderBy(orderBy string) *PublicDelegatedPrefixesListCall {
c.urlParams_.Set("orderBy", orderBy)
return c
}
@@ -57554,7 +57201,7 @@ func (c *PublicDelegatedPrefixesAggregatedListCall) OrderBy(orderBy string) *Pub
// PageToken sets the optional parameter "pageToken": Specifies a page token to
// use. Set `pageToken` to the `nextPageToken` returned by a previous list
// request to get the next page of results.
-func (c *PublicDelegatedPrefixesAggregatedListCall) PageToken(pageToken string) *PublicDelegatedPrefixesAggregatedListCall {
+func (c *PublicDelegatedPrefixesListCall) PageToken(pageToken string) *PublicDelegatedPrefixesListCall {
c.urlParams_.Set("pageToken", pageToken)
return c
}
@@ -57564,23 +57211,15 @@ func (c *PublicDelegatedPrefixesAggregatedListCall) PageToken(pageToken string)
// of failure. The default value is false. For example, when partial success
// behavior is enabled, aggregatedList for a single zone scope either returns
// all resources in the zone or no resources, with an error code.
-func (c *PublicDelegatedPrefixesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *PublicDelegatedPrefixesAggregatedListCall {
+func (c *PublicDelegatedPrefixesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *PublicDelegatedPrefixesListCall {
c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess))
return c
}
-// ServiceProjectNumber sets the optional parameter "serviceProjectNumber": The
-// Shared VPC service project id or service project number for which aggregated
-// list request is invoked for subnetworks list-usable api.
-func (c *PublicDelegatedPrefixesAggregatedListCall) ServiceProjectNumber(serviceProjectNumber int64) *PublicDelegatedPrefixesAggregatedListCall {
- c.urlParams_.Set("serviceProjectNumber", fmt.Sprint(serviceProjectNumber))
- return c
-}
-
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
// details.
-func (c *PublicDelegatedPrefixesAggregatedListCall) Fields(s ...googleapi.Field) *PublicDelegatedPrefixesAggregatedListCall {
+func (c *PublicDelegatedPrefixesListCall) Fields(s ...googleapi.Field) *PublicDelegatedPrefixesListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
@@ -57588,27 +57227,27 @@ func (c *PublicDelegatedPrefixesAggregatedListCall) Fields(s ...googleapi.Field)
// IfNoneMatch sets an optional parameter which makes the operation fail if the
// object's ETag matches the given value. This is useful for getting updates
// only after the object has changed since the last request.
-func (c *PublicDelegatedPrefixesAggregatedListCall) IfNoneMatch(entityTag string) *PublicDelegatedPrefixesAggregatedListCall {
+func (c *PublicDelegatedPrefixesListCall) IfNoneMatch(entityTag string) *PublicDelegatedPrefixesListCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method.
-func (c *PublicDelegatedPrefixesAggregatedListCall) Context(ctx context.Context) *PublicDelegatedPrefixesAggregatedListCall {
+func (c *PublicDelegatedPrefixesListCall) Context(ctx context.Context) *PublicDelegatedPrefixesListCall {
c.ctx_ = ctx
return c
}
// Header returns a http.Header that can be modified by the caller to add
// headers to the request.
-func (c *PublicDelegatedPrefixesAggregatedListCall) Header() http.Header {
+func (c *PublicDelegatedPrefixesListCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
-func (c *PublicDelegatedPrefixesAggregatedListCall) doRequest(alt string) (*http.Response, error) {
+func (c *PublicDelegatedPrefixesListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
@@ -57616,7 +57255,7 @@ func (c *PublicDelegatedPrefixesAggregatedListCall) doRequest(alt string) (*http
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/publicDelegatedPrefixes")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/publicDelegatedPrefixes")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
@@ -57625,17 +57264,18 @@ func (c *PublicDelegatedPrefixesAggregatedListCall) doRequest(alt string) (*http
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"project": c.project,
+ "region": c.region,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
-// Do executes the "compute.publicDelegatedPrefixes.aggregatedList" call.
+// Do executes the "compute.publicDelegatedPrefixes.list" call.
// Any non-2xx status code is an error. Response headers are in either
-// *PublicDelegatedPrefixAggregatedList.ServerResponse.Header or (if a response
-// was returned at all) in error.(*googleapi.Error).Header. Use
+// *PublicDelegatedPrefixList.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was because
// http.StatusNotModified was returned.
-func (c *PublicDelegatedPrefixesAggregatedListCall) Do(opts ...googleapi.CallOption) (*PublicDelegatedPrefixAggregatedList, error) {
+func (c *PublicDelegatedPrefixesListCall) Do(opts ...googleapi.CallOption) (*PublicDelegatedPrefixList, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
@@ -57654,7 +57294,7 @@ func (c *PublicDelegatedPrefixesAggregatedListCall) Do(opts ...googleapi.CallOpt
if err := googleapi.CheckResponse(res); err != nil {
return nil, gensupport.WrapError(err)
}
- ret := &PublicDelegatedPrefixAggregatedList{
+ ret := &PublicDelegatedPrefixList{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
@@ -57670,7 +57310,7 @@ func (c *PublicDelegatedPrefixesAggregatedListCall) Do(opts ...googleapi.CallOpt
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
-func (c *PublicDelegatedPrefixesAggregatedListCall) Pages(ctx context.Context, f func(*PublicDelegatedPrefixAggregatedList) error) error {
+func (c *PublicDelegatedPrefixesListCall) Pages(ctx context.Context, f func(*PublicDelegatedPrefixList) error) error {
c.ctx_ = ctx
defer c.PageToken(c.urlParams_.Get("pageToken"))
for {
@@ -57688,25 +57328,152 @@ func (c *PublicDelegatedPrefixesAggregatedListCall) Pages(ctx context.Context, f
}
}
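
A sketch of the plain regional list with server-side filtering and ordering, using the expression forms documented on the Filter and OrderBy setters above; Items on PublicDelegatedPrefixList is assumed to be the usual result slice:

package prefixes

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// printNewestFirst lists one region's prefixes, newest first, skipping test resources.
func printNewestFirst(ctx context.Context, svc *compute.Service) error {
	return svc.PublicDelegatedPrefixes.
		List("my-project", "us-central1").
		Filter("name ne .*-test").         // regex form, per the Filter docs above
		OrderBy("creationTimestamp desc"). // newest first, per the OrderBy docs above
		MaxResults(100).
		Pages(ctx, func(page *compute.PublicDelegatedPrefixList) error {
			for _, p := range page.Items { // assumed field: slice of results
				fmt.Println(p.Name)
			}
			return nil
		})
}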
-type PublicDelegatedPrefixesAnnounceCall struct {
+type PublicDelegatedPrefixesPatchCall struct {
s *Service
project string
region string
publicDelegatedPrefix string
+ publicdelegatedprefix *PublicDelegatedPrefix
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
-// Announce: Announces the specified PublicDelegatedPrefix in the given region.
+// Patch: Patches the specified PublicDelegatedPrefix resource with the data
+// included in the request. This method supports PATCH semantics and uses JSON
+// merge patch format and processing rules.
+//
+// - project: Project ID for this request.
+// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource to
+// patch.
+// - region: Name of the region for this request.
+func (r *PublicDelegatedPrefixesService) Patch(project string, region string, publicDelegatedPrefix string, publicdelegatedprefix *PublicDelegatedPrefix) *PublicDelegatedPrefixesPatchCall {
+ c := &PublicDelegatedPrefixesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.project = project
+ c.region = region
+ c.publicDelegatedPrefix = publicDelegatedPrefix
+ c.publicdelegatedprefix = publicdelegatedprefix
+ return c
+}
+
+// RequestId sets the optional parameter "requestId": An optional request ID to
+// identify requests. Specify a unique request ID so that if you must retry
+// your request, the server will know to ignore the request if it has already
+// been completed. For example, consider a situation where you make an initial
+// request and the request times out. If you make the request again with the
+// same request ID, the server can check if original operation with the same
+// request ID was received, and if so, will ignore the second request. This
+// prevents clients from accidentally creating duplicate commitments. The
+// request ID must be a valid UUID with the exception that zero UUID is not
+// supported ( 00000000-0000-0000-0000-000000000000).
+func (c *PublicDelegatedPrefixesPatchCall) RequestId(requestId string) *PublicDelegatedPrefixesPatchCall {
+ c.urlParams_.Set("requestId", requestId)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
+// details.
+func (c *PublicDelegatedPrefixesPatchCall) Fields(s ...googleapi.Field) *PublicDelegatedPrefixesPatchCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do method.
+func (c *PublicDelegatedPrefixesPatchCall) Context(ctx context.Context) *PublicDelegatedPrefixesPatchCall {
+ c.ctx_ = ctx
+ return c
+}
+
+// Header returns a http.Header that can be modified by the caller to add
+// headers to the request.
+func (c *PublicDelegatedPrefixesPatchCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
+func (c *PublicDelegatedPrefixesPatchCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.publicdelegatedprefix)
+ if err != nil {
+ return nil, err
+ }
+ c.urlParams_.Set("alt", alt)
+ c.urlParams_.Set("prettyPrint", "false")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}")
+ urls += "?" + c.urlParams_.Encode()
+ req, err := http.NewRequest("PATCH", urls, body)
+ if err != nil {
+ return nil, err
+ }
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "region": c.region,
+ "publicDelegatedPrefix": c.publicDelegatedPrefix,
+ })
+ return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "compute.publicDelegatedPrefixes.patch" call.
+// Any non-2xx status code is an error. Response headers are in either
+// *Operation.ServerResponse.Header or (if a response was returned at all) in
+// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
+// whether the returned error was because http.StatusNotModified was returned.
+func (c *PublicDelegatedPrefixesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, gensupport.WrapError(&googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ })
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, gensupport.WrapError(err)
+ }
+ ret := &Operation{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ target := &ret
+ if err := gensupport.DecodeResponse(target, res); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
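+
Because patch uses JSON merge semantics, only fields serialized into the request body change, so zero-valued Go fields must be forced explicitly via ForceSendFields (the standard mechanism on these generated structs). Description and Fingerprint are assumed resource fields, and the API may require the current fingerprint for concurrency control; this is a sketch under those assumptions:

package prefixes

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// updateDescription patches a single field on an existing prefix.
func updateDescription(ctx context.Context, svc *compute.Service, fingerprint string) (*compute.Operation, error) {
	patch := &compute.PublicDelegatedPrefix{
		Description: "handed off to the networking team", // assumed resource field
		Fingerprint: fingerprint,                         // assumed: taken from a prior Get
		// Zero values are omitted from the body unless listed here.
		ForceSendFields: []string{"Description"},
	}
	return svc.PublicDelegatedPrefixes.
		Patch("my-project", "us-central1", "my-delegated-prefix", patch).
		Context(ctx).
		Do()
}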
+
+type PublicDelegatedPrefixesWithdrawCall struct {
+ s *Service
+ project string
+ region string
+ publicDelegatedPrefix string
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+ header_ http.Header
+}
+
+// Withdraw: Withdraws the specified PublicDelegatedPrefix in the given region.
//
// - project: Project ID for this request.
// - publicDelegatedPrefix: The name of the public delegated prefix. It should
// comply with RFC1035.
// - region: The name of the region where the public delegated prefix is
// located. It should comply with RFC1035.
-func (r *PublicDelegatedPrefixesService) Announce(project string, region string, publicDelegatedPrefix string) *PublicDelegatedPrefixesAnnounceCall {
- c := &PublicDelegatedPrefixesAnnounceCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+func (r *PublicDelegatedPrefixesService) Withdraw(project string, region string, publicDelegatedPrefix string) *PublicDelegatedPrefixesWithdrawCall {
+ c := &PublicDelegatedPrefixesWithdrawCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.project = project
c.region = region
c.publicDelegatedPrefix = publicDelegatedPrefix
@@ -57723,7 +57490,7 @@ func (r *PublicDelegatedPrefixesService) Announce(project string, region string,
// prevents clients from accidentally creating duplicate commitments. The
// request ID must be a valid UUID with the exception that zero UUID is not
// supported ( 00000000-0000-0000-0000-000000000000).
-func (c *PublicDelegatedPrefixesAnnounceCall) RequestId(requestId string) *PublicDelegatedPrefixesAnnounceCall {
+func (c *PublicDelegatedPrefixesWithdrawCall) RequestId(requestId string) *PublicDelegatedPrefixesWithdrawCall {
c.urlParams_.Set("requestId", requestId)
return c
}
@@ -57731,32 +57498,32 @@ func (c *PublicDelegatedPrefixesAnnounceCall) RequestId(requestId string) *Publi
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
// details.
-func (c *PublicDelegatedPrefixesAnnounceCall) Fields(s ...googleapi.Field) *PublicDelegatedPrefixesAnnounceCall {
+func (c *PublicDelegatedPrefixesWithdrawCall) Fields(s ...googleapi.Field) *PublicDelegatedPrefixesWithdrawCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method.
-func (c *PublicDelegatedPrefixesAnnounceCall) Context(ctx context.Context) *PublicDelegatedPrefixesAnnounceCall {
+func (c *PublicDelegatedPrefixesWithdrawCall) Context(ctx context.Context) *PublicDelegatedPrefixesWithdrawCall {
c.ctx_ = ctx
return c
}
// Header returns a http.Header that can be modified by the caller to add
// headers to the request.
-func (c *PublicDelegatedPrefixesAnnounceCall) Header() http.Header {
+func (c *PublicDelegatedPrefixesWithdrawCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
-func (c *PublicDelegatedPrefixesAnnounceCall) doRequest(alt string) (*http.Response, error) {
+func (c *PublicDelegatedPrefixesWithdrawCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}/announce")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}/withdraw")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
@@ -57771,12 +57538,12 @@ func (c *PublicDelegatedPrefixesAnnounceCall) doRequest(alt string) (*http.Respo
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
-// Do executes the "compute.publicDelegatedPrefixes.announce" call.
+// Do executes the "compute.publicDelegatedPrefixes.withdraw" call.
// Any non-2xx status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at all) in
// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
// whether the returned error was because http.StatusNotModified was returned.
-func (c *PublicDelegatedPrefixesAnnounceCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
+func (c *PublicDelegatedPrefixesWithdrawCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
@@ -57808,27 +57575,26 @@ func (c *PublicDelegatedPrefixesAnnounceCall) Do(opts ...googleapi.CallOption) (
return ret, nil
}
-type PublicDelegatedPrefixesDeleteCall struct {
- s *Service
- project string
- region string
- publicDelegatedPrefix string
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
+type RegionAutoscalersDeleteCall struct {
+ s *Service
+ project string
+ region string
+ autoscaler string
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+ header_ http.Header
}
-// Delete: Deletes the specified PublicDelegatedPrefix in the given region.
+// Delete: Deletes the specified autoscaler.
//
-// - project: Project ID for this request.
-// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource to
-// delete.
-// - region: Name of the region of this request.
-func (r *PublicDelegatedPrefixesService) Delete(project string, region string, publicDelegatedPrefix string) *PublicDelegatedPrefixesDeleteCall {
- c := &PublicDelegatedPrefixesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+// - autoscaler: Name of the autoscaler to delete.
+// - project: Project ID for this request.
+// - region: Name of the region scoping this request.
+func (r *RegionAutoscalersService) Delete(project string, region string, autoscaler string) *RegionAutoscalersDeleteCall {
+ c := &RegionAutoscalersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.project = project
c.region = region
- c.publicDelegatedPrefix = publicDelegatedPrefix
+ c.autoscaler = autoscaler
return c
}
@@ -57842,7 +57608,7 @@ func (r *PublicDelegatedPrefixesService) Delete(project string, region string, p
// prevents clients from accidentally creating duplicate commitments. The
// request ID must be a valid UUID with the exception that zero UUID is not
// supported ( 00000000-0000-0000-0000-000000000000).
-func (c *PublicDelegatedPrefixesDeleteCall) RequestId(requestId string) *PublicDelegatedPrefixesDeleteCall {
+func (c *RegionAutoscalersDeleteCall) RequestId(requestId string) *RegionAutoscalersDeleteCall {
c.urlParams_.Set("requestId", requestId)
return c
}
@@ -57850,32 +57616,32 @@ func (c *PublicDelegatedPrefixesDeleteCall) RequestId(requestId string) *PublicD
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
// details.
-func (c *PublicDelegatedPrefixesDeleteCall) Fields(s ...googleapi.Field) *PublicDelegatedPrefixesDeleteCall {
+func (c *RegionAutoscalersDeleteCall) Fields(s ...googleapi.Field) *RegionAutoscalersDeleteCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method.
-func (c *PublicDelegatedPrefixesDeleteCall) Context(ctx context.Context) *PublicDelegatedPrefixesDeleteCall {
+func (c *RegionAutoscalersDeleteCall) Context(ctx context.Context) *RegionAutoscalersDeleteCall {
c.ctx_ = ctx
return c
}
// Header returns a http.Header that can be modified by the caller to add
// headers to the request.
-func (c *PublicDelegatedPrefixesDeleteCall) Header() http.Header {
+func (c *RegionAutoscalersDeleteCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
-func (c *PublicDelegatedPrefixesDeleteCall) doRequest(alt string) (*http.Response, error) {
+func (c *RegionAutoscalersDeleteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/autoscalers/{autoscaler}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("DELETE", urls, body)
if err != nil {
@@ -57883,19 +57649,19 @@ func (c *PublicDelegatedPrefixesDeleteCall) doRequest(alt string) (*http.Respons
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
- "region": c.region,
- "publicDelegatedPrefix": c.publicDelegatedPrefix,
+ "project": c.project,
+ "region": c.region,
+ "autoscaler": c.autoscaler,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
-// Do executes the "compute.publicDelegatedPrefixes.delete" call.
+// Do executes the "compute.regionAutoscalers.delete" call.
// Any non-2xx status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at all) in
// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
// whether the returned error was because http.StatusNotModified was returned.
-func (c *PublicDelegatedPrefixesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
+func (c *RegionAutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
@@ -57927,36 +57693,34 @@ func (c *PublicDelegatedPrefixesDeleteCall) Do(opts ...googleapi.CallOption) (*O
return ret, nil
}
-type PublicDelegatedPrefixesGetCall struct {
- s *Service
- project string
- region string
- publicDelegatedPrefix string
- urlParams_ gensupport.URLParams
- ifNoneMatch_ string
- ctx_ context.Context
- header_ http.Header
+type RegionAutoscalersGetCall struct {
+ s *Service
+ project string
+ region string
+ autoscaler string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+ header_ http.Header
}
-// Get: Returns the specified PublicDelegatedPrefix resource in the given
-// region.
+// Get: Returns the specified autoscaler.
//
-// - project: Project ID for this request.
-// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource to
-// return.
-// - region: Name of the region of this request.
-func (r *PublicDelegatedPrefixesService) Get(project string, region string, publicDelegatedPrefix string) *PublicDelegatedPrefixesGetCall {
- c := &PublicDelegatedPrefixesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+// - autoscaler: Name of the autoscaler to return.
+// - project: Project ID for this request.
+// - region: Name of the region scoping this request.
+func (r *RegionAutoscalersService) Get(project string, region string, autoscaler string) *RegionAutoscalersGetCall {
+ c := &RegionAutoscalersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.project = project
c.region = region
- c.publicDelegatedPrefix = publicDelegatedPrefix
+ c.autoscaler = autoscaler
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
// details.
-func (c *PublicDelegatedPrefixesGetCall) Fields(s ...googleapi.Field) *PublicDelegatedPrefixesGetCall {
+func (c *RegionAutoscalersGetCall) Fields(s ...googleapi.Field) *RegionAutoscalersGetCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
@@ -57964,27 +57728,27 @@ func (c *PublicDelegatedPrefixesGetCall) Fields(s ...googleapi.Field) *PublicDel
// IfNoneMatch sets an optional parameter which makes the operation fail if the
// object's ETag matches the given value. This is useful for getting updates
// only after the object has changed since the last request.
-func (c *PublicDelegatedPrefixesGetCall) IfNoneMatch(entityTag string) *PublicDelegatedPrefixesGetCall {
+func (c *RegionAutoscalersGetCall) IfNoneMatch(entityTag string) *RegionAutoscalersGetCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method.
-func (c *PublicDelegatedPrefixesGetCall) Context(ctx context.Context) *PublicDelegatedPrefixesGetCall {
+func (c *RegionAutoscalersGetCall) Context(ctx context.Context) *RegionAutoscalersGetCall {
c.ctx_ = ctx
return c
}
// Header returns a http.Header that can be modified by the caller to add
// headers to the request.
-func (c *PublicDelegatedPrefixesGetCall) Header() http.Header {
+func (c *RegionAutoscalersGetCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
-func (c *PublicDelegatedPrefixesGetCall) doRequest(alt string) (*http.Response, error) {
+func (c *RegionAutoscalersGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
@@ -57992,7 +57756,7 @@ func (c *PublicDelegatedPrefixesGetCall) doRequest(alt string) (*http.Response,
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/autoscalers/{autoscaler}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
@@ -58000,20 +57764,19 @@ func (c *PublicDelegatedPrefixesGetCall) doRequest(alt string) (*http.Response,
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
- "region": c.region,
- "publicDelegatedPrefix": c.publicDelegatedPrefix,
+ "project": c.project,
+ "region": c.region,
+ "autoscaler": c.autoscaler,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
-// Do executes the "compute.publicDelegatedPrefixes.get" call.
+// Do executes the "compute.regionAutoscalers.get" call.
// Any non-2xx status code is an error. Response headers are in either
-// *PublicDelegatedPrefix.ServerResponse.Header or (if a response was returned
-// at all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
-// check whether the returned error was because http.StatusNotModified was
-// returned.
-func (c *PublicDelegatedPrefixesGetCall) Do(opts ...googleapi.CallOption) (*PublicDelegatedPrefix, error) {
+// *Autoscaler.ServerResponse.Header or (if a response was returned at all) in
+// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
+// whether the returned error was because http.StatusNotModified was returned.
+func (c *RegionAutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
@@ -58032,7 +57795,7 @@ func (c *PublicDelegatedPrefixesGetCall) Do(opts ...googleapi.CallOption) (*Publ
if err := googleapi.CheckResponse(res); err != nil {
return nil, gensupport.WrapError(err)
}
- ret := &PublicDelegatedPrefix{
+ ret := &Autoscaler{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
@@ -58045,26 +57808,26 @@ func (c *PublicDelegatedPrefixesGetCall) Do(opts ...googleapi.CallOption) (*Publ
return ret, nil
}
-type PublicDelegatedPrefixesInsertCall struct {
- s *Service
- project string
- region string
- publicdelegatedprefix *PublicDelegatedPrefix
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
+type RegionAutoscalersInsertCall struct {
+ s *Service
+ project string
+ region string
+ autoscaler *Autoscaler
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+ header_ http.Header
}
-// Insert: Creates a PublicDelegatedPrefix in the specified project in the
-// given region using the parameters that are included in the request.
+// Insert: Creates an autoscaler in the specified project using the data
+// included in the request.
//
// - project: Project ID for this request.
-// - region: Name of the region of this request.
-func (r *PublicDelegatedPrefixesService) Insert(project string, region string, publicdelegatedprefix *PublicDelegatedPrefix) *PublicDelegatedPrefixesInsertCall {
- c := &PublicDelegatedPrefixesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+// - region: Name of the region scoping this request.
+func (r *RegionAutoscalersService) Insert(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersInsertCall {
+ c := &RegionAutoscalersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.project = project
c.region = region
- c.publicdelegatedprefix = publicdelegatedprefix
+ c.autoscaler = autoscaler
return c
}
@@ -58078,7 +57841,7 @@ func (r *PublicDelegatedPrefixesService) Insert(project string, region string, p
// prevents clients from accidentally creating duplicate commitments. The
// request ID must be a valid UUID with the exception that zero UUID is not
// supported ( 00000000-0000-0000-0000-000000000000).
-func (c *PublicDelegatedPrefixesInsertCall) RequestId(requestId string) *PublicDelegatedPrefixesInsertCall {
+func (c *RegionAutoscalersInsertCall) RequestId(requestId string) *RegionAutoscalersInsertCall {
c.urlParams_.Set("requestId", requestId)
return c
}
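
The requestId semantics documented in the hunk above apply to every mutating call in this file. A minimal sketch of an idempotent regional delete, assuming application-default credentials and the github.com/google/uuid package for ID generation; project, region, and autoscaler names are placeholders:

package main

import (
	"context"
	"log"

	"github.com/google/uuid" // assumed dependency; any RFC 4122 UUID source works
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // uses application-default credentials
	if err != nil {
		log.Fatal(err)
	}
	// Reuse the same ID on retries so the server ignores duplicate requests.
	reqID := uuid.NewString()
	op, err := svc.RegionAutoscalers.Delete("my-project", "us-central1", "my-autoscaler").
		RequestId(reqID).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("started operation %s (status %s)", op.Name, op.Status)
}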
@@ -58086,36 +57849,36 @@ func (c *PublicDelegatedPrefixesInsertCall) RequestId(requestId string) *PublicD
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
// details.
-func (c *PublicDelegatedPrefixesInsertCall) Fields(s ...googleapi.Field) *PublicDelegatedPrefixesInsertCall {
+func (c *RegionAutoscalersInsertCall) Fields(s ...googleapi.Field) *RegionAutoscalersInsertCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method.
-func (c *PublicDelegatedPrefixesInsertCall) Context(ctx context.Context) *PublicDelegatedPrefixesInsertCall {
+func (c *RegionAutoscalersInsertCall) Context(ctx context.Context) *RegionAutoscalersInsertCall {
c.ctx_ = ctx
return c
}
// Header returns a http.Header that can be modified by the caller to add
// headers to the request.
-func (c *PublicDelegatedPrefixesInsertCall) Header() http.Header {
+func (c *RegionAutoscalersInsertCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
-func (c *PublicDelegatedPrefixesInsertCall) doRequest(alt string) (*http.Response, error) {
+func (c *RegionAutoscalersInsertCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.publicdelegatedprefix)
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler)
if err != nil {
return nil, err
}
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/publicDelegatedPrefixes")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/autoscalers")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
@@ -58129,12 +57892,12 @@ func (c *PublicDelegatedPrefixesInsertCall) doRequest(alt string) (*http.Respons
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
-// Do executes the "compute.publicDelegatedPrefixes.insert" call.
+// Do executes the "compute.regionAutoscalers.insert" call.
// Any non-2xx status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at all) in
// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
// whether the returned error was because http.StatusNotModified was returned.
-func (c *PublicDelegatedPrefixesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
+func (c *RegionAutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
@@ -58166,7 +57929,7 @@ func (c *PublicDelegatedPrefixesInsertCall) Do(opts ...googleapi.CallOption) (*O
return ret, nil
}
-type PublicDelegatedPrefixesListCall struct {
+type RegionAutoscalersListCall struct {
s *Service
project string
region string
@@ -58176,12 +57939,12 @@ type PublicDelegatedPrefixesListCall struct {
header_ http.Header
}
-// List: Lists the PublicDelegatedPrefixes for a project in the given region.
+// List: Retrieves a list of autoscalers contained within the specified region.
//
// - project: Project ID for this request.
-// - region: Name of the region of this request.
-func (r *PublicDelegatedPrefixesService) List(project string, region string) *PublicDelegatedPrefixesListCall {
- c := &PublicDelegatedPrefixesListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+// - region: Name of the region scoping this request.
+func (r *RegionAutoscalersService) List(project string, region string) *RegionAutoscalersListCall {
+ c := &RegionAutoscalersListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.project = project
c.region = region
return c
@@ -58219,7 +57982,7 @@ func (r *PublicDelegatedPrefixesService) List(project string, region string) *Pu
// filter for instances that do not end with name "instance", you would use
// `name ne .*instance`. You cannot combine constraints on multiple fields
// using regular expressions.
-func (c *PublicDelegatedPrefixesListCall) Filter(filter string) *PublicDelegatedPrefixesListCall {
+func (c *RegionAutoscalersListCall) Filter(filter string) *RegionAutoscalersListCall {
c.urlParams_.Set("filter", filter)
return c
}
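
As a concrete illustration of the filter grammar documented above, a sketch of a filtered list call; svc is the *compute.Service from the earlier example, the usual imports (context, fmt) are assumed, and all names are placeholders:

// listFiltered lists regional autoscalers whose name is not "example-autoscaler".
func listFiltered(ctx context.Context, svc *compute.Service) error {
	list, err := svc.RegionAutoscalers.List("my-project", "us-central1").
		Filter(`name != "example-autoscaler"`). // AIP-160 operator form
		MaxResults(100).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	for _, a := range list.Items {
		fmt.Println(a.Name)
	}
	return nil
}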
@@ -58229,7 +57992,7 @@ func (c *PublicDelegatedPrefixesListCall) Filter(filter string) *PublicDelegated
// is larger than `maxResults`, Compute Engine returns a `nextPageToken` that
// can be used to get the next page of results in subsequent list requests.
// Acceptable values are `0` to `500`, inclusive. (Default: `500`)
-func (c *PublicDelegatedPrefixesListCall) MaxResults(maxResults int64) *PublicDelegatedPrefixesListCall {
+func (c *RegionAutoscalersListCall) MaxResults(maxResults int64) *RegionAutoscalersListCall {
c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
return c
}
@@ -58242,7 +58005,7 @@ func (c *PublicDelegatedPrefixesListCall) MaxResults(maxResults int64) *PublicDe
// chronological order (newest result first). Use this to sort resources like
// operations so that the newest operation is returned first. Currently, only
// sorting by `name` or `creationTimestamp desc` is supported.
-func (c *PublicDelegatedPrefixesListCall) OrderBy(orderBy string) *PublicDelegatedPrefixesListCall {
+func (c *RegionAutoscalersListCall) OrderBy(orderBy string) *RegionAutoscalersListCall {
c.urlParams_.Set("orderBy", orderBy)
return c
}
@@ -58250,7 +58013,7 @@ func (c *PublicDelegatedPrefixesListCall) OrderBy(orderBy string) *PublicDelegat
// PageToken sets the optional parameter "pageToken": Specifies a page token to
// use. Set `pageToken` to the `nextPageToken` returned by a previous list
// request to get the next page of results.
-func (c *PublicDelegatedPrefixesListCall) PageToken(pageToken string) *PublicDelegatedPrefixesListCall {
+func (c *RegionAutoscalersListCall) PageToken(pageToken string) *RegionAutoscalersListCall {
c.urlParams_.Set("pageToken", pageToken)
return c
}
@@ -58260,7 +58023,7 @@ func (c *PublicDelegatedPrefixesListCall) PageToken(pageToken string) *PublicDel
// of failure. The default value is false. For example, when partial success
// behavior is enabled, aggregatedList for a single zone scope either returns
// all resources in the zone or no resources, with an error code.
-func (c *PublicDelegatedPrefixesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *PublicDelegatedPrefixesListCall {
+func (c *RegionAutoscalersListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionAutoscalersListCall {
c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess))
return c
}
@@ -58268,7 +58031,7 @@ func (c *PublicDelegatedPrefixesListCall) ReturnPartialSuccess(returnPartialSucc
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
// details.
-func (c *PublicDelegatedPrefixesListCall) Fields(s ...googleapi.Field) *PublicDelegatedPrefixesListCall {
+func (c *RegionAutoscalersListCall) Fields(s ...googleapi.Field) *RegionAutoscalersListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
@@ -58276,27 +58039,27 @@ func (c *PublicDelegatedPrefixesListCall) Fields(s ...googleapi.Field) *PublicDe
// IfNoneMatch sets an optional parameter which makes the operation fail if the
// object's ETag matches the given value. This is useful for getting updates
// only after the object has changed since the last request.
-func (c *PublicDelegatedPrefixesListCall) IfNoneMatch(entityTag string) *PublicDelegatedPrefixesListCall {
+func (c *RegionAutoscalersListCall) IfNoneMatch(entityTag string) *RegionAutoscalersListCall {
c.ifNoneMatch_ = entityTag
return c
}
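
The IfNoneMatch setter pairs with the googleapi.IsNotModified check that the Do comments in these hunks reference. A sketch of a conditional GET, where etag comes from a prior response and the other values are placeholders:

// getIfChanged returns (nil, nil) when the cached copy is still current.
func getIfChanged(ctx context.Context, svc *compute.Service, etag string) (*compute.Autoscaler, error) {
	a, err := svc.RegionAutoscalers.Get("my-project", "us-central1", "my-autoscaler").
		IfNoneMatch(etag).
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) { // import google.golang.org/api/googleapi
		return nil, nil
	}
	return a, err
}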
// Context sets the context to be used in this call's Do method.
-func (c *PublicDelegatedPrefixesListCall) Context(ctx context.Context) *PublicDelegatedPrefixesListCall {
+func (c *RegionAutoscalersListCall) Context(ctx context.Context) *RegionAutoscalersListCall {
c.ctx_ = ctx
return c
}
// Header returns a http.Header that can be modified by the caller to add
// headers to the request.
-func (c *PublicDelegatedPrefixesListCall) Header() http.Header {
+func (c *RegionAutoscalersListCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
-func (c *PublicDelegatedPrefixesListCall) doRequest(alt string) (*http.Response, error) {
+func (c *RegionAutoscalersListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
@@ -58304,7 +58067,7 @@ func (c *PublicDelegatedPrefixesListCall) doRequest(alt string) (*http.Response,
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/publicDelegatedPrefixes")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/autoscalers")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
@@ -58318,13 +58081,13 @@ func (c *PublicDelegatedPrefixesListCall) doRequest(alt string) (*http.Response,
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
-// Do executes the "compute.publicDelegatedPrefixes.list" call.
+// Do executes the "compute.regionAutoscalers.list" call.
// Any non-2xx status code is an error. Response headers are in either
-// *PublicDelegatedPrefixList.ServerResponse.Header or (if a response was
-// returned at all) in error.(*googleapi.Error).Header. Use
-// googleapi.IsNotModified to check whether the returned error was because
-// http.StatusNotModified was returned.
-func (c *PublicDelegatedPrefixesListCall) Do(opts ...googleapi.CallOption) (*PublicDelegatedPrefixList, error) {
+// *RegionAutoscalerList.ServerResponse.Header or (if a response was returned
+// at all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified was
+// returned.
+func (c *RegionAutoscalersListCall) Do(opts ...googleapi.CallOption) (*RegionAutoscalerList, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
@@ -58343,7 +58106,7 @@ func (c *PublicDelegatedPrefixesListCall) Do(opts ...googleapi.CallOption) (*Pub
if err := googleapi.CheckResponse(res); err != nil {
return nil, gensupport.WrapError(err)
}
- ret := &PublicDelegatedPrefixList{
+ ret := &RegionAutoscalerList{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
@@ -58359,7 +58122,7 @@ func (c *PublicDelegatedPrefixesListCall) Do(opts ...googleapi.CallOption) (*Pub
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
-func (c *PublicDelegatedPrefixesListCall) Pages(ctx context.Context, f func(*PublicDelegatedPrefixList) error) error {
+func (c *RegionAutoscalersListCall) Pages(ctx context.Context, f func(*RegionAutoscalerList) error) error {
c.ctx_ = ctx
defer c.PageToken(c.urlParams_.Get("pageToken"))
for {
@@ -58377,31 +58140,34 @@ func (c *PublicDelegatedPrefixesListCall) Pages(ctx context.Context, f func(*Pub
}
}
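
The Pages helper above drives the nextPageToken loop on the caller's behalf. A sketch reusing the assumed svc client and imports; project and region are placeholders:

// printAll walks every page; returning a non-nil error from the callback
// halts the iteration, exactly as the comment above describes.
func printAll(ctx context.Context, svc *compute.Service) error {
	return svc.RegionAutoscalers.List("my-project", "us-central1").
		Pages(ctx, func(page *compute.RegionAutoscalerList) error {
			for _, a := range page.Items {
				fmt.Println(a.Name, a.Target)
			}
			return nil
		})
}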
-type PublicDelegatedPrefixesPatchCall struct {
- s *Service
- project string
- region string
- publicDelegatedPrefix string
- publicdelegatedprefix *PublicDelegatedPrefix
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
+type RegionAutoscalersPatchCall struct {
+ s *Service
+ project string
+ region string
+ autoscaler *Autoscaler
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+ header_ http.Header
}
-// Patch: Patches the specified PublicDelegatedPrefix resource with the data
-// included in the request. This method supports PATCH semantics and uses JSON
-// merge patch format and processing rules.
+// Patch: Updates an autoscaler in the specified project using the data
+// included in the request. This method supports PATCH semantics and uses the
+// JSON merge patch format and processing rules.
//
-// - project: Project ID for this request.
-// - publicDelegatedPrefix: Name of the PublicDelegatedPrefix resource to
-// patch.
-// - region: Name of the region for this request.
-func (r *PublicDelegatedPrefixesService) Patch(project string, region string, publicDelegatedPrefix string, publicdelegatedprefix *PublicDelegatedPrefix) *PublicDelegatedPrefixesPatchCall {
- c := &PublicDelegatedPrefixesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+// - project: Project ID for this request.
+// - region: Name of the region scoping this request.
+func (r *RegionAutoscalersService) Patch(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersPatchCall {
+ c := &RegionAutoscalersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.project = project
c.region = region
- c.publicDelegatedPrefix = publicDelegatedPrefix
- c.publicdelegatedprefix = publicdelegatedprefix
+ c.autoscaler = autoscaler
+ return c
+}
+
+// Autoscaler sets the optional parameter "autoscaler": Name of the autoscaler
+// to patch.
+func (c *RegionAutoscalersPatchCall) Autoscaler(autoscaler string) *RegionAutoscalersPatchCall {
+ c.urlParams_.Set("autoscaler", autoscaler)
return c
}
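
Note the API shape this hunk introduces: unlike the PublicDelegatedPrefixes patch it replaces, RegionAutoscalers.Patch carries the target name as the optional "autoscaler" query parameter rather than a URL path segment. A sketch under the same assumptions as the earlier examples, with all values placeholders:

// patchCoolDown sends only the fields to change; the Autoscaler query
// parameter names the resource being patched.
func patchCoolDown(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
	partial := &compute.Autoscaler{
		AutoscalingPolicy: &compute.AutoscalingPolicy{CoolDownPeriodSec: 120},
	}
	return svc.RegionAutoscalers.Patch("my-project", "us-central1", partial).
		Autoscaler("my-autoscaler").
		Context(ctx).
		Do()
}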
@@ -58415,7 +58181,7 @@ func (r *PublicDelegatedPrefixesService) Patch(project string, region string, pu
// prevents clients from accidentally creating duplicate commitments. The
// request ID must be a valid UUID with the exception that zero UUID is not
// supported ( 00000000-0000-0000-0000-000000000000).
-func (c *PublicDelegatedPrefixesPatchCall) RequestId(requestId string) *PublicDelegatedPrefixesPatchCall {
+func (c *RegionAutoscalersPatchCall) RequestId(requestId string) *RegionAutoscalersPatchCall {
c.urlParams_.Set("requestId", requestId)
return c
}
@@ -58423,36 +58189,36 @@ func (c *PublicDelegatedPrefixesPatchCall) RequestId(requestId string) *PublicDe
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
// details.
-func (c *PublicDelegatedPrefixesPatchCall) Fields(s ...googleapi.Field) *PublicDelegatedPrefixesPatchCall {
+func (c *RegionAutoscalersPatchCall) Fields(s ...googleapi.Field) *RegionAutoscalersPatchCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method.
-func (c *PublicDelegatedPrefixesPatchCall) Context(ctx context.Context) *PublicDelegatedPrefixesPatchCall {
+func (c *RegionAutoscalersPatchCall) Context(ctx context.Context) *RegionAutoscalersPatchCall {
c.ctx_ = ctx
return c
}
// Header returns a http.Header that can be modified by the caller to add
// headers to the request.
-func (c *PublicDelegatedPrefixesPatchCall) Header() http.Header {
+func (c *RegionAutoscalersPatchCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
-func (c *PublicDelegatedPrefixesPatchCall) doRequest(alt string) (*http.Response, error) {
+func (c *RegionAutoscalersPatchCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.publicdelegatedprefix)
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler)
if err != nil {
return nil, err
}
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/autoscalers")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("PATCH", urls, body)
if err != nil {
@@ -58460,19 +58226,18 @@ func (c *PublicDelegatedPrefixesPatchCall) doRequest(alt string) (*http.Response
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
- "region": c.region,
- "publicDelegatedPrefix": c.publicDelegatedPrefix,
+ "project": c.project,
+ "region": c.region,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
-// Do executes the "compute.publicDelegatedPrefixes.patch" call.
+// Do executes the "compute.regionAutoscalers.patch" call.
// Any non-2xx status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at all) in
// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
// whether the returned error was because http.StatusNotModified was returned.
-func (c *PublicDelegatedPrefixesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
+func (c *RegionAutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
@@ -58504,28 +58269,33 @@ func (c *PublicDelegatedPrefixesPatchCall) Do(opts ...googleapi.CallOption) (*Op
return ret, nil
}
-type PublicDelegatedPrefixesWithdrawCall struct {
- s *Service
- project string
- region string
- publicDelegatedPrefix string
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
+type RegionAutoscalersUpdateCall struct {
+ s *Service
+ project string
+ region string
+ autoscaler *Autoscaler
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+ header_ http.Header
}
-// Withdraw: Withdraws the specified PublicDelegatedPrefix in the given region.
+// Update: Updates an autoscaler in the specified project using the data
+// included in the request.
//
-// - project: Project ID for this request.
-// - publicDelegatedPrefix: The name of the public delegated prefix. It should
-// comply with RFC1035.
-// - region: The name of the region where the public delegated prefix is
-// located. It should comply with RFC1035.
-func (r *PublicDelegatedPrefixesService) Withdraw(project string, region string, publicDelegatedPrefix string) *PublicDelegatedPrefixesWithdrawCall {
- c := &PublicDelegatedPrefixesWithdrawCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+// - project: Project ID for this request.
+// - region: Name of the region scoping this request.
+func (r *RegionAutoscalersService) Update(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersUpdateCall {
+ c := &RegionAutoscalersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.project = project
c.region = region
- c.publicDelegatedPrefix = publicDelegatedPrefix
+ c.autoscaler = autoscaler
+ return c
+}
+
+// Autoscaler sets the optional parameter "autoscaler": Name of the autoscaler
+// to update.
+func (c *RegionAutoscalersUpdateCall) Autoscaler(autoscaler string) *RegionAutoscalersUpdateCall {
+ c.urlParams_.Set("autoscaler", autoscaler)
return c
}
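
Update mirrors Patch but, as the doRequest changes below show, issues a PUT with the full resource body, so it is typically a full replacement rather than a merge. A sketch under the same assumptions; values are placeholders:

// replaceAutoscaler performs a full update of the named autoscaler.
func replaceAutoscaler(ctx context.Context, svc *compute.Service, a *compute.Autoscaler) (*compute.Operation, error) {
	return svc.RegionAutoscalers.Update("my-project", "us-central1", a).
		Autoscaler(a.Name).
		Context(ctx).
		Do()
}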
@@ -58539,7 +58309,7 @@ func (r *PublicDelegatedPrefixesService) Withdraw(project string, region string,
// prevents clients from accidentally creating duplicate commitments. The
// request ID must be a valid UUID with the exception that zero UUID is not
// supported ( 00000000-0000-0000-0000-000000000000).
-func (c *PublicDelegatedPrefixesWithdrawCall) RequestId(requestId string) *PublicDelegatedPrefixesWithdrawCall {
+func (c *RegionAutoscalersUpdateCall) RequestId(requestId string) *RegionAutoscalersUpdateCall {
c.urlParams_.Set("requestId", requestId)
return c
}
@@ -58547,52 +58317,55 @@ func (c *PublicDelegatedPrefixesWithdrawCall) RequestId(requestId string) *Publi
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
// details.
-func (c *PublicDelegatedPrefixesWithdrawCall) Fields(s ...googleapi.Field) *PublicDelegatedPrefixesWithdrawCall {
+func (c *RegionAutoscalersUpdateCall) Fields(s ...googleapi.Field) *RegionAutoscalersUpdateCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method.
-func (c *PublicDelegatedPrefixesWithdrawCall) Context(ctx context.Context) *PublicDelegatedPrefixesWithdrawCall {
+func (c *RegionAutoscalersUpdateCall) Context(ctx context.Context) *RegionAutoscalersUpdateCall {
c.ctx_ = ctx
return c
}
// Header returns a http.Header that can be modified by the caller to add
// headers to the request.
-func (c *PublicDelegatedPrefixesWithdrawCall) Header() http.Header {
+func (c *RegionAutoscalersUpdateCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
-func (c *PublicDelegatedPrefixesWithdrawCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
+func (c *RegionAutoscalersUpdateCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler)
+ if err != nil {
+ return nil, err
+ }
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}/withdraw")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/autoscalers")
urls += "?" + c.urlParams_.Encode()
- req, err := http.NewRequest("POST", urls, body)
+ req, err := http.NewRequest("PUT", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
- "region": c.region,
- "publicDelegatedPrefix": c.publicDelegatedPrefix,
+ "project": c.project,
+ "region": c.region,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
-// Do executes the "compute.publicDelegatedPrefixes.withdraw" call.
+// Do executes the "compute.regionAutoscalers.update" call.
// Any non-2xx status code is an error. Response headers are in either
// *Operation.ServerResponse.Header or (if a response was returned at all) in
// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
// whether the returned error was because http.StatusNotModified was returned.
-func (c *PublicDelegatedPrefixesWithdrawCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
+func (c *RegionAutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
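
Every mutating call in these hunks returns an *Operation rather than the final resource. A common follow-up, sketched here with RegionOperations.Wait, which I believe lives elsewhere in this same generated package; names are placeholders and the helper is illustrative only:

// waitDone blocks until the operation completes or the server-side wait
// deadline elapses, so callers may need to loop until Status is "DONE".
func waitDone(ctx context.Context, svc *compute.Service, op *compute.Operation) error {
	done, err := svc.RegionOperations.Wait("my-project", "us-central1", op.Name).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	if done.Error != nil {
		return fmt.Errorf("operation %s failed: %+v", done.Name, done.Error.Errors)
	}
	return nil
}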
diff --git a/vendor/google.golang.org/api/compute/v1/compute3-gen.go b/vendor/google.golang.org/api/compute/v1/compute3-gen.go
index 71248bab0ccb5..1e08b6980c5aa 100644
--- a/vendor/google.golang.org/api/compute/v1/compute3-gen.go
+++ b/vendor/google.golang.org/api/compute/v1/compute3-gen.go
@@ -16,828 +16,6 @@ import (
gensupport "google.golang.org/api/internal/gensupport"
)
-type RegionAutoscalersDeleteCall struct {
- s *Service
- project string
- region string
- autoscaler string
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
-}
-
-// Delete: Deletes the specified autoscaler.
-//
-// - autoscaler: Name of the autoscaler to delete.
-// - project: Project ID for this request.
-// - region: Name of the region scoping this request.
-func (r *RegionAutoscalersService) Delete(project string, region string, autoscaler string) *RegionAutoscalersDeleteCall {
- c := &RegionAutoscalersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.project = project
- c.region = region
- c.autoscaler = autoscaler
- return c
-}
-
-// RequestId sets the optional parameter "requestId": An optional request ID to
-// identify requests. Specify a unique request ID so that if you must retry
-// your request, the server will know to ignore the request if it has already
-// been completed. For example, consider a situation where you make an initial
-// request and the request times out. If you make the request again with the
-// same request ID, the server can check if original operation with the same
-// request ID was received, and if so, will ignore the second request. This
-// prevents clients from accidentally creating duplicate commitments. The
-// request ID must be a valid UUID with the exception that zero UUID is not
-// supported ( 00000000-0000-0000-0000-000000000000).
-func (c *RegionAutoscalersDeleteCall) RequestId(requestId string) *RegionAutoscalersDeleteCall {
- c.urlParams_.Set("requestId", requestId)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
-// details.
-func (c *RegionAutoscalersDeleteCall) Fields(s ...googleapi.Field) *RegionAutoscalersDeleteCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method.
-func (c *RegionAutoscalersDeleteCall) Context(ctx context.Context) *RegionAutoscalersDeleteCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns a http.Header that can be modified by the caller to add
-// headers to the request.
-func (c *RegionAutoscalersDeleteCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *RegionAutoscalersDeleteCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/autoscalers/{autoscaler}")
- urls += "?" + c.urlParams_.Encode()
- req, err := http.NewRequest("DELETE", urls, body)
- if err != nil {
- return nil, err
- }
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
- "region": c.region,
- "autoscaler": c.autoscaler,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "compute.regionAutoscalers.delete" call.
-// Any non-2xx status code is an error. Response headers are in either
-// *Operation.ServerResponse.Header or (if a response was returned at all) in
-// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
-// whether the returned error was because http.StatusNotModified was returned.
-func (c *RegionAutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, gensupport.WrapError(&googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- })
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, gensupport.WrapError(err)
- }
- ret := &Operation{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-type RegionAutoscalersGetCall struct {
- s *Service
- project string
- region string
- autoscaler string
- urlParams_ gensupport.URLParams
- ifNoneMatch_ string
- ctx_ context.Context
- header_ http.Header
-}
-
-// Get: Returns the specified autoscaler.
-//
-// - autoscaler: Name of the autoscaler to return.
-// - project: Project ID for this request.
-// - region: Name of the region scoping this request.
-func (r *RegionAutoscalersService) Get(project string, region string, autoscaler string) *RegionAutoscalersGetCall {
- c := &RegionAutoscalersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.project = project
- c.region = region
- c.autoscaler = autoscaler
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
-// details.
-func (c *RegionAutoscalersGetCall) Fields(s ...googleapi.Field) *RegionAutoscalersGetCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// IfNoneMatch sets an optional parameter which makes the operation fail if the
-// object's ETag matches the given value. This is useful for getting updates
-// only after the object has changed since the last request.
-func (c *RegionAutoscalersGetCall) IfNoneMatch(entityTag string) *RegionAutoscalersGetCall {
- c.ifNoneMatch_ = entityTag
- return c
-}
-
-// Context sets the context to be used in this call's Do method.
-func (c *RegionAutoscalersGetCall) Context(ctx context.Context) *RegionAutoscalersGetCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns a http.Header that can be modified by the caller to add
-// headers to the request.
-func (c *RegionAutoscalersGetCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *RegionAutoscalersGetCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
- if c.ifNoneMatch_ != "" {
- reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
- }
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/autoscalers/{autoscaler}")
- urls += "?" + c.urlParams_.Encode()
- req, err := http.NewRequest("GET", urls, body)
- if err != nil {
- return nil, err
- }
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
- "region": c.region,
- "autoscaler": c.autoscaler,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "compute.regionAutoscalers.get" call.
-// Any non-2xx status code is an error. Response headers are in either
-// *Autoscaler.ServerResponse.Header or (if a response was returned at all) in
-// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
-// whether the returned error was because http.StatusNotModified was returned.
-func (c *RegionAutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, gensupport.WrapError(&googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- })
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, gensupport.WrapError(err)
- }
- ret := &Autoscaler{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-type RegionAutoscalersInsertCall struct {
- s *Service
- project string
- region string
- autoscaler *Autoscaler
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
-}
-
-// Insert: Creates an autoscaler in the specified project using the data
-// included in the request.
-//
-// - project: Project ID for this request.
-// - region: Name of the region scoping this request.
-func (r *RegionAutoscalersService) Insert(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersInsertCall {
- c := &RegionAutoscalersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.project = project
- c.region = region
- c.autoscaler = autoscaler
- return c
-}
-
-// RequestId sets the optional parameter "requestId": An optional request ID to
-// identify requests. Specify a unique request ID so that if you must retry
-// your request, the server will know to ignore the request if it has already
-// been completed. For example, consider a situation where you make an initial
-// request and the request times out. If you make the request again with the
-// same request ID, the server can check if original operation with the same
-// request ID was received, and if so, will ignore the second request. This
-// prevents clients from accidentally creating duplicate commitments. The
-// request ID must be a valid UUID with the exception that zero UUID is not
-// supported ( 00000000-0000-0000-0000-000000000000).
-func (c *RegionAutoscalersInsertCall) RequestId(requestId string) *RegionAutoscalersInsertCall {
- c.urlParams_.Set("requestId", requestId)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
-// details.
-func (c *RegionAutoscalersInsertCall) Fields(s ...googleapi.Field) *RegionAutoscalersInsertCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method.
-func (c *RegionAutoscalersInsertCall) Context(ctx context.Context) *RegionAutoscalersInsertCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns a http.Header that can be modified by the caller to add
-// headers to the request.
-func (c *RegionAutoscalersInsertCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *RegionAutoscalersInsertCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler)
- if err != nil {
- return nil, err
- }
- c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/autoscalers")
- urls += "?" + c.urlParams_.Encode()
- req, err := http.NewRequest("POST", urls, body)
- if err != nil {
- return nil, err
- }
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
- "region": c.region,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "compute.regionAutoscalers.insert" call.
-// Any non-2xx status code is an error. Response headers are in either
-// *Operation.ServerResponse.Header or (if a response was returned at all) in
-// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
-// whether the returned error was because http.StatusNotModified was returned.
-func (c *RegionAutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, gensupport.WrapError(&googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- })
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, gensupport.WrapError(err)
- }
- ret := &Operation{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-type RegionAutoscalersListCall struct {
- s *Service
- project string
- region string
- urlParams_ gensupport.URLParams
- ifNoneMatch_ string
- ctx_ context.Context
- header_ http.Header
-}
-
-// List: Retrieves a list of autoscalers contained within the specified region.
-//
-// - project: Project ID for this request.
-// - region: Name of the region scoping this request.
-func (r *RegionAutoscalersService) List(project string, region string) *RegionAutoscalersListCall {
- c := &RegionAutoscalersListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.project = project
- c.region = region
- return c
-}
-
-// Filter sets the optional parameter "filter": A filter expression that
-// filters resources listed in the response. Most Compute resources support two
-// types of filter expressions: expressions that support regular expressions
-// and expressions that follow API improvement proposal AIP-160. These two
-// types of filter expressions cannot be mixed in one request. If you want to
-// use AIP-160, your expression must specify the field name, an operator, and
-// the value that you want to use for filtering. The value must be a string, a
-// number, or a boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`,
-// `>=` or `:`. For example, if you are filtering Compute Engine instances, you
-// can exclude instances named `example-instance` by specifying `name !=
-// example-instance`. The `:*` comparison can be used to test whether a key has
-// been defined. For example, to find all objects with `owner` label use: ```
-// labels.owner:* ``` You can also filter nested fields. For example, you could
-// specify `scheduling.automaticRestart = false` to include instances only if
-// they are not scheduled for automatic restarts. You can use filtering on
-// nested fields to filter based on resource labels. To filter on multiple
-// expressions, provide each separate expression within parentheses. For
-// example: ``` (scheduling.automaticRestart = true) (cpuPlatform = "Intel
-// Skylake") ``` By default, each expression is an `AND` expression. However,
-// you can include `AND` and `OR` expressions explicitly. For example: ```
-// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND
-// (scheduling.automaticRestart = true) ``` If you want to use a regular
-// expression, use the `eq` (equal) or `ne` (not equal) operator against a
-// single un-parenthesized expression with or without quotes or against
-// multiple parenthesized expressions. Examples: `fieldname eq unquoted
-// literal` `fieldname eq 'single quoted literal'` `fieldname eq "double quoted
-// literal" `(fieldname1 eq literal) (fieldname2 ne "literal")` The literal
-// value is interpreted as a regular expression using Google RE2 library
-// syntax. The literal value must match the entire field. For example, to
-// filter for instances that do not end with name "instance", you would use
-// `name ne .*instance`. You cannot combine constraints on multiple fields
-// using regular expressions.
-func (c *RegionAutoscalersListCall) Filter(filter string) *RegionAutoscalersListCall {
- c.urlParams_.Set("filter", filter)
- return c
-}
-
-// MaxResults sets the optional parameter "maxResults": The maximum number of
-// results per page that should be returned. If the number of available results
-// is larger than `maxResults`, Compute Engine returns a `nextPageToken` that
-// can be used to get the next page of results in subsequent list requests.
-// Acceptable values are `0` to `500`, inclusive. (Default: `500`)
-func (c *RegionAutoscalersListCall) MaxResults(maxResults int64) *RegionAutoscalersListCall {
- c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
- return c
-}
-
-// OrderBy sets the optional parameter "orderBy": Sorts list results by a
-// certain order. By default, results are returned in alphanumerical order
-// based on the resource name. You can also sort results in descending order
-// based on the creation timestamp using `orderBy="creationTimestamp desc".
-// This sorts results based on the `creationTimestamp` field in reverse
-// chronological order (newest result first). Use this to sort resources like
-// operations so that the newest operation is returned first. Currently, only
-// sorting by `name` or `creationTimestamp desc` is supported.
-func (c *RegionAutoscalersListCall) OrderBy(orderBy string) *RegionAutoscalersListCall {
- c.urlParams_.Set("orderBy", orderBy)
- return c
-}
-
-// PageToken sets the optional parameter "pageToken": Specifies a page token to
-// use. Set `pageToken` to the `nextPageToken` returned by a previous list
-// request to get the next page of results.
-func (c *RegionAutoscalersListCall) PageToken(pageToken string) *RegionAutoscalersListCall {
- c.urlParams_.Set("pageToken", pageToken)
- return c
-}
-
-// ReturnPartialSuccess sets the optional parameter "returnPartialSuccess":
-// Opt-in for partial success behavior which provides partial results in case
-// of failure. The default value is false. For example, when partial success
-// behavior is enabled, aggregatedList for a single zone scope either returns
-// all resources in the zone or no resources, with an error code.
-func (c *RegionAutoscalersListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionAutoscalersListCall {
- c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess))
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
-// details.
-func (c *RegionAutoscalersListCall) Fields(s ...googleapi.Field) *RegionAutoscalersListCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// IfNoneMatch sets an optional parameter which makes the operation fail if the
-// object's ETag matches the given value. This is useful for getting updates
-// only after the object has changed since the last request.
-func (c *RegionAutoscalersListCall) IfNoneMatch(entityTag string) *RegionAutoscalersListCall {
- c.ifNoneMatch_ = entityTag
- return c
-}
-
-// Context sets the context to be used in this call's Do method.
-func (c *RegionAutoscalersListCall) Context(ctx context.Context) *RegionAutoscalersListCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns a http.Header that can be modified by the caller to add
-// headers to the request.
-func (c *RegionAutoscalersListCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *RegionAutoscalersListCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_)
- if c.ifNoneMatch_ != "" {
- reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
- }
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/autoscalers")
- urls += "?" + c.urlParams_.Encode()
- req, err := http.NewRequest("GET", urls, body)
- if err != nil {
- return nil, err
- }
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
- "region": c.region,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "compute.regionAutoscalers.list" call.
-// Any non-2xx status code is an error. Response headers are in either
-// *RegionAutoscalerList.ServerResponse.Header or (if a response was returned
-// at all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
-// check whether the returned error was because http.StatusNotModified was
-// returned.
-func (c *RegionAutoscalersListCall) Do(opts ...googleapi.CallOption) (*RegionAutoscalerList, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, gensupport.WrapError(&googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- })
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, gensupport.WrapError(err)
- }
- ret := &RegionAutoscalerList{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-// Pages invokes f for each page of results.
-// A non-nil error returned from f will halt the iteration.
-// The provided context supersedes any context provided to the Context method.
-func (c *RegionAutoscalersListCall) Pages(ctx context.Context, f func(*RegionAutoscalerList) error) error {
- c.ctx_ = ctx
- defer c.PageToken(c.urlParams_.Get("pageToken"))
- for {
- x, err := c.Do()
- if err != nil {
- return err
- }
- if err := f(x); err != nil {
- return err
- }
- if x.NextPageToken == "" {
- return nil
- }
- c.PageToken(x.NextPageToken)
- }
-}
-
-type RegionAutoscalersPatchCall struct {
- s *Service
- project string
- region string
- autoscaler *Autoscaler
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
-}
-
-// Patch: Updates an autoscaler in the specified project using the data
-// included in the request. This method supports PATCH semantics and uses the
-// JSON merge patch format and processing rules.
-//
-// - project: Project ID for this request.
-// - region: Name of the region scoping this request.
-func (r *RegionAutoscalersService) Patch(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersPatchCall {
- c := &RegionAutoscalersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.project = project
- c.region = region
- c.autoscaler = autoscaler
- return c
-}
-
-// Autoscaler sets the optional parameter "autoscaler": Name of the autoscaler
-// to patch.
-func (c *RegionAutoscalersPatchCall) Autoscaler(autoscaler string) *RegionAutoscalersPatchCall {
- c.urlParams_.Set("autoscaler", autoscaler)
- return c
-}
-
-// RequestId sets the optional parameter "requestId": An optional request ID to
-// identify requests. Specify a unique request ID so that if you must retry
-// your request, the server will know to ignore the request if it has already
-// been completed. For example, consider a situation where you make an initial
-// request and the request times out. If you make the request again with the
-// same request ID, the server can check if original operation with the same
-// request ID was received, and if so, will ignore the second request. This
-// prevents clients from accidentally creating duplicate commitments. The
-// request ID must be a valid UUID with the exception that zero UUID is not
-// supported ( 00000000-0000-0000-0000-000000000000).
-func (c *RegionAutoscalersPatchCall) RequestId(requestId string) *RegionAutoscalersPatchCall {
- c.urlParams_.Set("requestId", requestId)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
-// details.
-func (c *RegionAutoscalersPatchCall) Fields(s ...googleapi.Field) *RegionAutoscalersPatchCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method.
-func (c *RegionAutoscalersPatchCall) Context(ctx context.Context) *RegionAutoscalersPatchCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns a http.Header that can be modified by the caller to add
-// headers to the request.
-func (c *RegionAutoscalersPatchCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *RegionAutoscalersPatchCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler)
- if err != nil {
- return nil, err
- }
- c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/autoscalers")
- urls += "?" + c.urlParams_.Encode()
- req, err := http.NewRequest("PATCH", urls, body)
- if err != nil {
- return nil, err
- }
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
- "region": c.region,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "compute.regionAutoscalers.patch" call.
-// Any non-2xx status code is an error. Response headers are in either
-// *Operation.ServerResponse.Header or (if a response was returned at all) in
-// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
-// whether the returned error was because http.StatusNotModified was returned.
-func (c *RegionAutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, gensupport.WrapError(&googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- })
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, gensupport.WrapError(err)
- }
- ret := &Operation{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
-type RegionAutoscalersUpdateCall struct {
- s *Service
- project string
- region string
- autoscaler *Autoscaler
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
-}
-
-// Update: Updates an autoscaler in the specified project using the data
-// included in the request.
-//
-// - project: Project ID for this request.
-// - region: Name of the region scoping this request.
-func (r *RegionAutoscalersService) Update(project string, region string, autoscaler *Autoscaler) *RegionAutoscalersUpdateCall {
- c := &RegionAutoscalersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.project = project
- c.region = region
- c.autoscaler = autoscaler
- return c
-}
-
-// Autoscaler sets the optional parameter "autoscaler": Name of the autoscaler
-// to update.
-func (c *RegionAutoscalersUpdateCall) Autoscaler(autoscaler string) *RegionAutoscalersUpdateCall {
- c.urlParams_.Set("autoscaler", autoscaler)
- return c
-}
-
-// RequestId sets the optional parameter "requestId": An optional request ID to
-// identify requests. Specify a unique request ID so that if you must retry
-// your request, the server will know to ignore the request if it has already
-// been completed. For example, consider a situation where you make an initial
-// request and the request times out. If you make the request again with the
-// same request ID, the server can check if original operation with the same
-// request ID was received, and if so, will ignore the second request. This
-// prevents clients from accidentally creating duplicate commitments. The
-// request ID must be a valid UUID with the exception that zero UUID is not
-// supported ( 00000000-0000-0000-0000-000000000000).
-func (c *RegionAutoscalersUpdateCall) RequestId(requestId string) *RegionAutoscalersUpdateCall {
- c.urlParams_.Set("requestId", requestId)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
-// details.
-func (c *RegionAutoscalersUpdateCall) Fields(s ...googleapi.Field) *RegionAutoscalersUpdateCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method.
-func (c *RegionAutoscalersUpdateCall) Context(ctx context.Context) *RegionAutoscalersUpdateCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns a http.Header that can be modified by the caller to add
-// headers to the request.
-func (c *RegionAutoscalersUpdateCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *RegionAutoscalersUpdateCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler)
- if err != nil {
- return nil, err
- }
- c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/autoscalers")
- urls += "?" + c.urlParams_.Encode()
- req, err := http.NewRequest("PUT", urls, body)
- if err != nil {
- return nil, err
- }
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
- "region": c.region,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "compute.regionAutoscalers.update" call.
-// Any non-2xx status code is an error. Response headers are in either
-// *Operation.ServerResponse.Header or (if a response was returned at all) in
-// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
-// whether the returned error was because http.StatusNotModified was returned.
-func (c *RegionAutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, gensupport.WrapError(&googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- })
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, gensupport.WrapError(err)
- }
- ret := &Operation{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
-}
-
type RegionBackendServicesDeleteCall struct {
s *Service
project string
@@ -1650,8 +828,8 @@ type RegionBackendServicesListUsableCall struct {
header_ http.Header
}
-// ListUsable: Retrieves an aggregated list of all usable backend services in
-// the specified project in the given region.
+// ListUsable: Retrieves a list of all usable backend services in the specified
+// project in the given region.
//
// - project: Project ID for this request.
// - region: Name of the region scoping this request. It must be a string that
diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go
index 86152a19fcc96..3bfd292b39a7e 100644
--- a/vendor/google.golang.org/api/internal/version.go
+++ b/vendor/google.golang.org/api/internal/version.go
@@ -5,4 +5,4 @@
package internal
// Version is the current tagged release of the library.
-const Version = "0.197.0"
+const Version = "0.201.0"
diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json
index 597daf0a72464..09b7f6487aa07 100644
--- a/vendor/google.golang.org/api/storage/v1/storage-api.json
+++ b/vendor/google.golang.org/api/storage/v1/storage-api.json
@@ -32,6 +32,11 @@
"endpointUrl": "https://storage.europe-west3.rep.googleapis.com/",
"location": "europe-west3"
},
+ {
+ "description": "Regional Endpoint",
+ "endpointUrl": "https://storage.europe-west8.rep.googleapis.com/",
+ "location": "europe-west8"
+ },
{
"description": "Regional Endpoint",
"endpointUrl": "https://storage.europe-west9.rep.googleapis.com/",
@@ -41,9 +46,54 @@
"description": "Regional Endpoint",
"endpointUrl": "https://storage.me-central2.rep.googleapis.com/",
"location": "me-central2"
+ },
+ {
+ "description": "Regional Endpoint",
+ "endpointUrl": "https://storage.us-central1.rep.googleapis.com/",
+ "location": "us-central1"
+ },
+ {
+ "description": "Regional Endpoint",
+ "endpointUrl": "https://storage.us-east1.rep.googleapis.com/",
+ "location": "us-east1"
+ },
+ {
+ "description": "Regional Endpoint",
+ "endpointUrl": "https://storage.us-east4.rep.googleapis.com/",
+ "location": "us-east4"
+ },
+ {
+ "description": "Regional Endpoint",
+ "endpointUrl": "https://storage.us-east5.rep.googleapis.com/",
+ "location": "us-east5"
+ },
+ {
+ "description": "Regional Endpoint",
+ "endpointUrl": "https://storage.us-south1.rep.googleapis.com/",
+ "location": "us-south1"
+ },
+ {
+ "description": "Regional Endpoint",
+ "endpointUrl": "https://storage.us-west1.rep.googleapis.com/",
+ "location": "us-west1"
+ },
+ {
+ "description": "Regional Endpoint",
+ "endpointUrl": "https://storage.us-west2.rep.googleapis.com/",
+ "location": "us-west2"
+ },
+ {
+ "description": "Regional Endpoint",
+ "endpointUrl": "https://storage.us-west3.rep.googleapis.com/",
+ "location": "us-west3"
+ },
+ {
+ "description": "Regional Endpoint",
+ "endpointUrl": "https://storage.us-west4.rep.googleapis.com/",
+ "location": "us-west4"
}
],
- "etag": "\"38363036373236373330353534313035333932\"",
+ "etag": "\"3132333635343336333933383332343134323139\"",
"icons": {
"x16": "https://www.google.com/images/icons/product/cloud_storage-16.png",
"x32": "https://www.google.com/images/icons/product/cloud_storage-32.png"
@@ -1029,6 +1079,34 @@
"https://www.googleapis.com/auth/devstorage.full_control"
]
},
+ "relocate": {
+ "description": "Initiates a long-running Relocate Bucket operation on the specified bucket.",
+ "httpMethod": "POST",
+ "id": "storage.buckets.relocate",
+ "parameterOrder": [
+ "bucket"
+ ],
+ "parameters": {
+ "bucket": {
+ "description": "Name of the bucket to be moved.",
+ "location": "path",
+ "required": true,
+ "type": "string"
+ }
+ },
+ "path": "b/{bucket}/relocate",
+ "request": {
+ "$ref": "RelocateBucketRequest"
+ },
+ "response": {
+ "$ref": "GoogleLongrunningOperation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control",
+ "https://www.googleapis.com/auth/devstorage.read_write"
+ ]
+ },
"restore": {
"description": "Restores a soft-deleted bucket.",
"httpMethod": "POST",
@@ -2829,6 +2907,11 @@
"location": "query",
"type": "string"
},
+ "restoreToken": {
+ "description": "Restore token used to differentiate soft-deleted objects with the same name and generation. Only applicable for hierarchical namespace buckets and if softDeleted is set to true. This parameter is optional, and is only required in the rare case when there are multiple soft-deleted objects with the same name and generation.",
+ "location": "query",
+ "type": "string"
+ },
"softDeleted": {
"description": "If true, only soft-deleted object versions will be listed. The default is false. For more information, see [Soft Delete](https://cloud.google.com/storage/docs/soft-delete).",
"location": "query",
@@ -3304,6 +3387,11 @@
"location": "query",
"type": "string"
},
+ "restoreToken": {
+        "description": "Restore token used to differentiate soft-deleted objects with the same name and generation. Only applicable for hierarchical namespace buckets. This parameter is optional, and is only required in the rare case when there are multiple soft-deleted objects with the same name and generation.",
+ "location": "query",
+ "type": "string"
+ },
"userProject": {
"description": "The project to be billed for this request. Required for Requester Pays buckets.",
"location": "query",
@@ -3781,6 +3869,38 @@
},
"operations": {
"methods": {
+ "advanceRelocateBucket": {
+ "description": "Starts asynchronous advancement of the relocate bucket operation in the case of required write downtime, to allow it to lock the bucket at the source location, and proceed with the bucket location swap. The server makes a best effort to advance the relocate bucket operation, but success is not guaranteed.",
+ "httpMethod": "POST",
+ "id": "storage.buckets.operations.advanceRelocateBucket",
+ "parameterOrder": [
+ "bucket",
+ "operationId"
+ ],
+ "parameters": {
+ "bucket": {
+ "description": "Name of the bucket to advance the relocate for.",
+ "location": "path",
+ "required": true,
+ "type": "string"
+ },
+ "operationId": {
+ "description": "ID of the operation resource.",
+ "location": "path",
+ "required": true,
+ "type": "string"
+ }
+ },
+ "path": "b/{bucket}/operations/{operationId}/advanceRelocateBucket",
+ "request": {
+ "$ref": "AdvanceRelocateBucketOperationRequest"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control",
+ "https://www.googleapis.com/auth/devstorage.read_write"
+ ]
+ },
"cancel": {
"description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed.",
"httpMethod": "POST",
@@ -4136,9 +4256,26 @@
}
}
},
- "revision": "20240819",
+ "revision": "20241008",
"rootUrl": "https://storage.googleapis.com/",
"schemas": {
+ "AdvanceRelocateBucketOperationRequest": {
+ "description": "An AdvanceRelocateBucketOperation request.",
+ "id": "AdvanceRelocateBucketOperationRequest",
+ "properties": {
+ "expireTime": {
+ "description": "Specifies the time when the relocation will revert to the sync stage if the relocation hasn't succeeded.",
+ "format": "date-time",
+ "type": "string"
+ },
+ "ttl": {
+ "description": "Specifies the duration after which the relocation will revert to the sync stage if the relocation hasn't succeeded. Optional, if not supplied, a default value of 12h will be used.",
+ "format": "google-duration",
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
"AnywhereCache": {
"description": "An Anywhere Cache instance.",
"id": "AnywhereCache",
@@ -5597,6 +5734,10 @@
},
"type": "object"
},
+ "restoreToken": {
+ "description": "Restore token used to differentiate deleted objects with the same name and generation. This field is only returned for deleted objects in hierarchical namespace buckets.",
+ "type": "string"
+ },
"retention": {
"description": "A collection of object level retention parameters.",
"properties": {
@@ -5865,6 +6006,34 @@
},
"type": "object"
},
+ "RelocateBucketRequest": {
+ "description": "A Relocate Bucket request.",
+ "id": "RelocateBucketRequest",
+ "properties": {
+ "destinationCustomPlacementConfig": {
+ "description": "The bucket's new custom placement configuration if relocating to a Custom Dual Region.",
+ "properties": {
+ "dataLocations": {
+ "description": "The list of regional locations in which data is placed.",
+ "items": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ },
+ "type": "object"
+ },
+ "destinationLocation": {
+ "description": "The new location the bucket will be relocated to.",
+ "type": "string"
+ },
+ "validateOnly": {
+ "description": "If true, validate the operation, but do not actually relocate the bucket.",
+ "type": "boolean"
+ }
+ },
+ "type": "object"
+ },
"RewriteResponse": {
"description": "A rewrite response.",
"id": "RewriteResponse",
diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go
index b16e3f227b213..2c11b2d8d61f7 100644
--- a/vendor/google.golang.org/api/storage/v1/storage-gen.go
+++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go
@@ -343,6 +343,34 @@ type ProjectsServiceAccountService struct {
s *Service
}
+// AdvanceRelocateBucketOperationRequest: An AdvanceRelocateBucketOperation
+// request.
+type AdvanceRelocateBucketOperationRequest struct {
+ // ExpireTime: Specifies the time when the relocation will revert to the sync
+ // stage if the relocation hasn't succeeded.
+ ExpireTime string `json:"expireTime,omitempty"`
+ // Ttl: Specifies the duration after which the relocation will revert to the
+ // sync stage if the relocation hasn't succeeded. Optional, if not supplied, a
+ // default value of 12h will be used.
+ Ttl string `json:"ttl,omitempty"`
+ // ForceSendFields is a list of field names (e.g. "ExpireTime") to
+ // unconditionally include in API requests. By default, fields with empty or
+ // default values are omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
+ // details.
+ ForceSendFields []string `json:"-"`
+ // NullFields is a list of field names (e.g. "ExpireTime") to include in API
+ // requests with the JSON null value. By default, fields with empty values are
+ // omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
+ NullFields []string `json:"-"`
+}
+
+func (s AdvanceRelocateBucketOperationRequest) MarshalJSON() ([]byte, error) {
+ type NoMethod AdvanceRelocateBucketOperationRequest
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
+}
+
// AnywhereCache: An Anywhere Cache instance.
type AnywhereCache struct {
// AdmissionPolicy: The cache-level entry admission policy.
@@ -2236,6 +2264,10 @@ type Object struct {
// Owner: The owner of the object. This will always be the uploader of the
// object.
Owner *ObjectOwner `json:"owner,omitempty"`
+ // RestoreToken: Restore token used to differentiate deleted objects with the
+ // same name and generation. This field is only returned for deleted objects in
+ // hierarchical namespace buckets.
+ RestoreToken string `json:"restoreToken,omitempty"`
// Retention: A collection of object level retention parameters.
Retention *ObjectRetention `json:"retention,omitempty"`
// RetentionExpirationTime: A server-determined value that specifies the
@@ -2639,6 +2671,59 @@ func (s PolicyBindings) MarshalJSON() ([]byte, error) {
return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
}
+// RelocateBucketRequest: A Relocate Bucket request.
+type RelocateBucketRequest struct {
+ // DestinationCustomPlacementConfig: The bucket's new custom placement
+ // configuration if relocating to a Custom Dual Region.
+ DestinationCustomPlacementConfig *RelocateBucketRequestDestinationCustomPlacementConfig `json:"destinationCustomPlacementConfig,omitempty"`
+ // DestinationLocation: The new location the bucket will be relocated to.
+ DestinationLocation string `json:"destinationLocation,omitempty"`
+ // ValidateOnly: If true, validate the operation, but do not actually relocate
+ // the bucket.
+ ValidateOnly bool `json:"validateOnly,omitempty"`
+ // ForceSendFields is a list of field names (e.g.
+ // "DestinationCustomPlacementConfig") to unconditionally include in API
+ // requests. By default, fields with empty or default values are omitted from
+ // API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
+ // details.
+ ForceSendFields []string `json:"-"`
+ // NullFields is a list of field names (e.g.
+ // "DestinationCustomPlacementConfig") to include in API requests with the JSON
+ // null value. By default, fields with empty values are omitted from API
+ // requests. See https://pkg.go.dev/google.golang.org/api#hdr-NullFields for
+ // more details.
+ NullFields []string `json:"-"`
+}
+
+func (s RelocateBucketRequest) MarshalJSON() ([]byte, error) {
+ type NoMethod RelocateBucketRequest
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
+}
+
+// RelocateBucketRequestDestinationCustomPlacementConfig: The bucket's new
+// custom placement configuration if relocating to a Custom Dual Region.
+type RelocateBucketRequestDestinationCustomPlacementConfig struct {
+ // DataLocations: The list of regional locations in which data is placed.
+ DataLocations []string `json:"dataLocations,omitempty"`
+ // ForceSendFields is a list of field names (e.g. "DataLocations") to
+ // unconditionally include in API requests. By default, fields with empty or
+ // default values are omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more
+ // details.
+ ForceSendFields []string `json:"-"`
+ // NullFields is a list of field names (e.g. "DataLocations") to include in API
+ // requests with the JSON null value. By default, fields with empty values are
+ // omitted from API requests. See
+ // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details.
+ NullFields []string `json:"-"`
+}
+
+func (s RelocateBucketRequestDestinationCustomPlacementConfig) MarshalJSON() ([]byte, error) {
+ type NoMethod RelocateBucketRequestDestinationCustomPlacementConfig
+ return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields)
+}
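
For orientation, a minimal sketch of how a caller could populate these generated request types; the destination values below are placeholders, not recommendations:

package example

import storage "google.golang.org/api/storage/v1"

// buildRelocateRequest assembles a dry-run relocation into a hypothetical
// custom dual-region placement.
func buildRelocateRequest() *storage.RelocateBucketRequest {
	return &storage.RelocateBucketRequest{
		DestinationLocation: "US", // placeholder dual-region code
		DestinationCustomPlacementConfig: &storage.RelocateBucketRequestDestinationCustomPlacementConfig{
			DataLocations: []string{"us-east1", "us-west1"},
		},
		// ValidateOnly checks the request server-side without moving data.
		ValidateOnly: true,
	}
}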
+
// RewriteResponse: A rewrite response.
type RewriteResponse struct {
// Done: true if the copy is finished; otherwise, false if the copy is in
@@ -5330,6 +5415,109 @@ func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) {
return ret, nil
}
+type BucketsRelocateCall struct {
+ s *Service
+ bucket string
+ relocatebucketrequest *RelocateBucketRequest
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+ header_ http.Header
+}
+
+// Relocate: Initiates a long-running Relocate Bucket operation on the
+// specified bucket.
+//
+// - bucket: Name of the bucket to be moved.
+func (r *BucketsService) Relocate(bucket string, relocatebucketrequest *RelocateBucketRequest) *BucketsRelocateCall {
+ c := &BucketsRelocateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.bucket = bucket
+ c.relocatebucketrequest = relocatebucketrequest
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
+// details.
+func (c *BucketsRelocateCall) Fields(s ...googleapi.Field) *BucketsRelocateCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do method.
+func (c *BucketsRelocateCall) Context(ctx context.Context) *BucketsRelocateCall {
+ c.ctx_ = ctx
+ return c
+}
+
+// Header returns a http.Header that can be modified by the caller to add
+// headers to the request.
+func (c *BucketsRelocateCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
+func (c *BucketsRelocateCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.relocatebucketrequest)
+ if err != nil {
+ return nil, err
+ }
+ c.urlParams_.Set("alt", alt)
+ c.urlParams_.Set("prettyPrint", "false")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/relocate")
+ urls += "?" + c.urlParams_.Encode()
+ req, err := http.NewRequest("POST", urls, body)
+ if err != nil {
+ return nil, err
+ }
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "bucket": c.bucket,
+ })
+ return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "storage.buckets.relocate" call.
+// Any non-2xx status code is an error. Response headers are in either
+// *GoogleLongrunningOperation.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was because
+// http.StatusNotModified was returned.
+func (c *BucketsRelocateCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningOperation, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, gensupport.WrapError(&googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ })
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, gensupport.WrapError(err)
+ }
+ ret := &GoogleLongrunningOperation{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ target := &ret
+ if err := gensupport.DecodeResponse(target, res); err != nil {
+ return nil, err
+ }
+ return ret, nil
+}
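
A hedged usage sketch for the new call; svc, ctx, and the bucket name are assumptions supplied by the caller:

package example

import (
	"context"
	"log"

	storage "google.golang.org/api/storage/v1"
)

// relocateBucket starts the long-running relocation and returns the operation
// name so the caller can poll it later through the operations API.
func relocateBucket(ctx context.Context, svc *storage.Service) (string, error) {
	req := &storage.RelocateBucketRequest{DestinationLocation: "US-EAST1"} // placeholder
	op, err := svc.Buckets.Relocate("example-bucket", req).Context(ctx).Do()
	if err != nil {
		return "", err
	}
	log.Printf("relocation started: name=%s done=%v", op.Name, op.Done)
	return op.Name, nil
}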
+
type BucketsRestoreCall struct {
s *Service
bucket string
@@ -9961,6 +10149,17 @@ func (c *ObjectsGetCall) Projection(projection string) *ObjectsGetCall {
return c
}
+// RestoreToken sets the optional parameter "restoreToken": Restore token used
+// to differentiate soft-deleted objects with the same name and generation.
+// Only applicable for hierarchical namespace buckets and if softDeleted is set
+// to true. This parameter is optional, and is only required in the rare case
+// when there are multiple soft-deleted objects with the same name and
+// generation.
+func (c *ObjectsGetCall) RestoreToken(restoreToken string) *ObjectsGetCall {
+ c.urlParams_.Set("restoreToken", restoreToken)
+ return c
+}
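
A sketch of combining the new setter with SoftDeleted when reading back one soft-deleted version; all literal values are placeholders, and Generation is this generated package's standard generation setter (not shown in this hunk):

package example

import (
	"context"

	storage "google.golang.org/api/storage/v1"
)

// getSoftDeleted fetches metadata for one specific soft-deleted version.
// Generation narrows the lookup; RestoreToken disambiguates the rare case of
// several soft-deleted versions sharing both name and generation.
func getSoftDeleted(ctx context.Context, svc *storage.Service) (*storage.Object, error) {
	return svc.Objects.Get("example-bucket", "example-object").
		Generation(1234567890). // assumed setter; standard in this package
		SoftDeleted(true).
		RestoreToken("example-restore-token").
		Context(ctx).
		Do()
}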
+
// SoftDeleted sets the optional parameter "softDeleted": If true, only
// soft-deleted object versions will be listed. The default is false. For more
// information, see Soft Delete
@@ -11056,6 +11255,16 @@ func (c *ObjectsRestoreCall) Projection(projection string) *ObjectsRestoreCall {
return c
}
+// RestoreToken sets the optional parameter "restoreToken": Restore token used
+// to differentiate soft-deleted objects with the same name and generation. Only
+// applicable for hierarchical namespace buckets. This parameter is optional,
+// and is only required in the rare case when there are multiple soft-deleted
+// objects with the same name and generation.
+func (c *ObjectsRestoreCall) RestoreToken(restoreToken string) *ObjectsRestoreCall {
+ c.urlParams_.Set("restoreToken", restoreToken)
+ return c
+}
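
And the matching restore-side sketch; rc is assumed to be an *ObjectsRestoreCall already built from svc.Objects.Restore with the required bucket, object, and generation values (this hunk does not show that constructor's signature):

package example

import (
	"context"

	storage "google.golang.org/api/storage/v1"
)

// restoreWithToken attaches the token recorded from the soft-deleted Get
// above, then executes the restore.
func restoreWithToken(ctx context.Context, rc *storage.ObjectsRestoreCall) (*storage.Object, error) {
	return rc.RestoreToken("example-restore-token").Context(ctx).Do()
}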
+
// UserProject sets the optional parameter "userProject": The project to be
// billed for this request. Required for Requester Pays buckets.
func (c *ObjectsRestoreCall) UserProject(userProject string) *ObjectsRestoreCall {
@@ -12074,6 +12283,92 @@ func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error)
return ret, nil
}
+type OperationsAdvanceRelocateBucketCall struct {
+ s *Service
+ bucket string
+ operationId string
+ advancerelocatebucketoperationrequest *AdvanceRelocateBucketOperationRequest
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+ header_ http.Header
+}
+
+// AdvanceRelocateBucket: Starts asynchronous advancement of the relocate
+// bucket operation in the case of required write downtime, to allow it to lock
+// the bucket at the source location, and proceed with the bucket location
+// swap. The server makes a best effort to advance the relocate bucket
+// operation, but success is not guaranteed.
+//
+// - bucket: Name of the bucket to advance the relocate for.
+// - operationId: ID of the operation resource.
+func (r *OperationsService) AdvanceRelocateBucket(bucket string, operationId string, advancerelocatebucketoperationrequest *AdvanceRelocateBucketOperationRequest) *OperationsAdvanceRelocateBucketCall {
+ c := &OperationsAdvanceRelocateBucketCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.bucket = bucket
+ c.operationId = operationId
+ c.advancerelocatebucketoperationrequest = advancerelocatebucketoperationrequest
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more
+// details.
+func (c *OperationsAdvanceRelocateBucketCall) Fields(s ...googleapi.Field) *OperationsAdvanceRelocateBucketCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do method.
+func (c *OperationsAdvanceRelocateBucketCall) Context(ctx context.Context) *OperationsAdvanceRelocateBucketCall {
+ c.ctx_ = ctx
+ return c
+}
+
+// Header returns a http.Header that can be modified by the caller to add
+// headers to the request.
+func (c *OperationsAdvanceRelocateBucketCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
+func (c *OperationsAdvanceRelocateBucketCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_)
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.advancerelocatebucketoperationrequest)
+ if err != nil {
+ return nil, err
+ }
+ c.urlParams_.Set("alt", alt)
+ c.urlParams_.Set("prettyPrint", "false")
+ urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/operations/{operationId}/advanceRelocateBucket")
+ urls += "?" + c.urlParams_.Encode()
+ req, err := http.NewRequest("POST", urls, body)
+ if err != nil {
+ return nil, err
+ }
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "bucket": c.bucket,
+ "operationId": c.operationId,
+ })
+ return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "storage.buckets.operations.advanceRelocateBucket" call.
+func (c *OperationsAdvanceRelocateBucketCall) Do(opts ...googleapi.CallOption) error {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if err != nil {
+ return err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return gensupport.WrapError(err)
+ }
+ return nil
+}
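
Every call in the following sketch appears in the generated code above; only the bucket name, operation ID, and TTL are placeholders:

package example

import (
	"context"

	storage "google.golang.org/api/storage/v1"
)

// advanceRelocate asks the server to proceed with the write-downtime phase of
// an in-flight relocation. opID comes from the earlier relocate operation.
func advanceRelocate(ctx context.Context, svc *storage.Service, opID string) error {
	// Bound the locked phase to 1h instead of the 12h server default.
	req := &storage.AdvanceRelocateBucketOperationRequest{Ttl: "3600s"}
	return svc.Operations.AdvanceRelocateBucket("example-bucket", opID, req).Context(ctx).Do()
}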
+
type OperationsCancelCall struct {
s *Service
bucket string
diff --git a/vendor/google.golang.org/grpc/stats/opentelemetry/LICENSE b/vendor/google.golang.org/grpc/stats/opentelemetry/LICENSE
new file mode 100644
index 0000000000000..d645695673349
--- /dev/null
+++ b/vendor/google.golang.org/grpc/stats/opentelemetry/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/google.golang.org/grpc/stats/opentelemetry/client_metrics.go b/vendor/google.golang.org/grpc/stats/opentelemetry/client_metrics.go
new file mode 100644
index 0000000000000..4af7f933c8bae
--- /dev/null
+++ b/vendor/google.golang.org/grpc/stats/opentelemetry/client_metrics.go
@@ -0,0 +1,277 @@
+/*
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package opentelemetry
+
+import (
+ "context"
+ "sync/atomic"
+ "time"
+
+ "google.golang.org/grpc"
+ estats "google.golang.org/grpc/experimental/stats"
+ istats "google.golang.org/grpc/internal/stats"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/stats"
+ "google.golang.org/grpc/status"
+
+ otelattribute "go.opentelemetry.io/otel/attribute"
+ otelmetric "go.opentelemetry.io/otel/metric"
+)
+
+type clientStatsHandler struct {
+ estats.MetricsRecorder
+ options Options
+ clientMetrics clientMetrics
+}
+
+func (h *clientStatsHandler) initializeMetrics() {
+	// Without a MeterProvider there are no metrics to record, logically
+	// making this stats handler a no-op.
+ if h.options.MetricsOptions.MeterProvider == nil {
+ return
+ }
+
+ meter := h.options.MetricsOptions.MeterProvider.Meter("grpc-go", otelmetric.WithInstrumentationVersion(grpc.Version))
+ if meter == nil {
+ return
+ }
+
+ metrics := h.options.MetricsOptions.Metrics
+ if metrics == nil {
+ metrics = DefaultMetrics()
+ }
+
+ h.clientMetrics.attemptStarted = createInt64Counter(metrics.Metrics(), "grpc.client.attempt.started", meter, otelmetric.WithUnit("attempt"), otelmetric.WithDescription("Number of client call attempts started."))
+ h.clientMetrics.attemptDuration = createFloat64Histogram(metrics.Metrics(), "grpc.client.attempt.duration", meter, otelmetric.WithUnit("s"), otelmetric.WithDescription("End-to-end time taken to complete a client call attempt."), otelmetric.WithExplicitBucketBoundaries(DefaultLatencyBounds...))
+ h.clientMetrics.attemptSentTotalCompressedMessageSize = createInt64Histogram(metrics.Metrics(), "grpc.client.attempt.sent_total_compressed_message_size", meter, otelmetric.WithUnit("By"), otelmetric.WithDescription("Compressed message bytes sent per client call attempt."), otelmetric.WithExplicitBucketBoundaries(DefaultSizeBounds...))
+ h.clientMetrics.attemptRcvdTotalCompressedMessageSize = createInt64Histogram(metrics.Metrics(), "grpc.client.attempt.rcvd_total_compressed_message_size", meter, otelmetric.WithUnit("By"), otelmetric.WithDescription("Compressed message bytes received per call attempt."), otelmetric.WithExplicitBucketBoundaries(DefaultSizeBounds...))
+ h.clientMetrics.callDuration = createFloat64Histogram(metrics.Metrics(), "grpc.client.call.duration", meter, otelmetric.WithUnit("s"), otelmetric.WithDescription("Time taken by gRPC to complete an RPC from application's perspective."), otelmetric.WithExplicitBucketBoundaries(DefaultLatencyBounds...))
+
+ rm := ®istryMetrics{
+ optionalLabels: h.options.MetricsOptions.OptionalLabels,
+ }
+ h.MetricsRecorder = rm
+ rm.registerMetrics(metrics, meter)
+}
+
+func (h *clientStatsHandler) unaryInterceptor(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+ ci := &callInfo{
+ target: cc.CanonicalTarget(),
+ method: h.determineMethod(method, opts...),
+ }
+ ctx = setCallInfo(ctx, ci)
+
+ if h.options.MetricsOptions.pluginOption != nil {
+ md := h.options.MetricsOptions.pluginOption.GetMetadata()
+ for k, vs := range md {
+ for _, v := range vs {
+ ctx = metadata.AppendToOutgoingContext(ctx, k, v)
+ }
+ }
+ }
+
+ startTime := time.Now()
+ err := invoker(ctx, method, req, reply, cc, opts...)
+ h.perCallMetrics(ctx, err, startTime, ci)
+ return err
+}
+
+// determineMethod determines the method name to record attributes with. It
+// returns the method name (with the leading slash removed) when a
+// grpc.StaticMethodCallOption is present in opts, and "other" otherwise.
+func (h *clientStatsHandler) determineMethod(method string, opts ...grpc.CallOption) string {
+ for _, opt := range opts {
+ if _, ok := opt.(grpc.StaticMethodCallOption); ok {
+ return removeLeadingSlash(method)
+ }
+ }
+ return "other"
+}
+
+func (h *clientStatsHandler) streamInterceptor(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+ ci := &callInfo{
+ target: cc.CanonicalTarget(),
+ method: h.determineMethod(method, opts...),
+ }
+ ctx = setCallInfo(ctx, ci)
+
+ if h.options.MetricsOptions.pluginOption != nil {
+ md := h.options.MetricsOptions.pluginOption.GetMetadata()
+ for k, vs := range md {
+ for _, v := range vs {
+ ctx = metadata.AppendToOutgoingContext(ctx, k, v)
+ }
+ }
+ }
+
+ startTime := time.Now()
+
+ callback := func(err error) {
+ h.perCallMetrics(ctx, err, startTime, ci)
+ }
+ opts = append([]grpc.CallOption{grpc.OnFinish(callback)}, opts...)
+ return streamer(ctx, desc, cc, method, opts...)
+}
+
+func (h *clientStatsHandler) perCallMetrics(ctx context.Context, err error, startTime time.Time, ci *callInfo) {
+ callLatency := float64(time.Since(startTime)) / float64(time.Second) // calculate ASAP
+ attrs := otelmetric.WithAttributeSet(otelattribute.NewSet(
+ otelattribute.String("grpc.method", ci.method),
+ otelattribute.String("grpc.target", ci.target),
+ otelattribute.String("grpc.status", canonicalString(status.Code(err))),
+ ))
+ h.clientMetrics.callDuration.Record(ctx, callLatency, attrs)
+}
+
+// TagConn exists to satisfy stats.Handler.
+func (h *clientStatsHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context {
+ return ctx
+}
+
+// HandleConn exists to satisfy stats.Handler.
+func (h *clientStatsHandler) HandleConn(context.Context, stats.ConnStats) {}
+
+// TagRPC implements per RPC attempt context management.
+func (h *clientStatsHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
+	// Numerous stats handlers can be used for the same channel. The cluster
+	// impl balancer that writes to this map only writes once, so if a labels
+	// map is already set on the context, have this stats handler's
+	// per-attempt scoped context point at that same map.
+ var labels *istats.Labels
+ if labels = istats.GetLabels(ctx); labels == nil {
+ labels = &istats.Labels{
+ // The defaults for all the per call labels from a plugin that
+ // executes on the callpath that this OpenTelemetry component
+ // currently supports.
+ TelemetryLabels: map[string]string{
+ "grpc.lb.locality": "",
+ },
+ }
+ ctx = istats.SetLabels(ctx, labels)
+ }
+ ai := &attemptInfo{ // populates information about RPC start.
+ startTime: time.Now(),
+ xdsLabels: labels.TelemetryLabels,
+ method: info.FullMethodName,
+ }
+ ri := &rpcInfo{
+ ai: ai,
+ }
+ return setRPCInfo(ctx, ri)
+}
+
+func (h *clientStatsHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) {
+ ri := getRPCInfo(ctx)
+ if ri == nil {
+ logger.Error("ctx passed into client side stats handler metrics event handling has no client attempt data present")
+ return
+ }
+ h.processRPCEvent(ctx, rs, ri.ai)
+}
+
+func (h *clientStatsHandler) processRPCEvent(ctx context.Context, s stats.RPCStats, ai *attemptInfo) {
+ switch st := s.(type) {
+ case *stats.Begin:
+ ci := getCallInfo(ctx)
+ if ci == nil {
+ logger.Error("ctx passed into client side stats handler metrics event handling has no metrics data present")
+ return
+ }
+
+ attrs := otelmetric.WithAttributeSet(otelattribute.NewSet(
+ otelattribute.String("grpc.method", ci.method),
+ otelattribute.String("grpc.target", ci.target),
+ ))
+ h.clientMetrics.attemptStarted.Add(ctx, 1, attrs)
+ case *stats.OutPayload:
+ atomic.AddInt64(&ai.sentCompressedBytes, int64(st.CompressedLength))
+ case *stats.InPayload:
+ atomic.AddInt64(&ai.recvCompressedBytes, int64(st.CompressedLength))
+ case *stats.InHeader:
+ h.setLabelsFromPluginOption(ai, st.Header)
+ case *stats.InTrailer:
+ h.setLabelsFromPluginOption(ai, st.Trailer)
+ case *stats.End:
+ h.processRPCEnd(ctx, ai, st)
+ default:
+ }
+}
+
+func (h *clientStatsHandler) setLabelsFromPluginOption(ai *attemptInfo, incomingMetadata metadata.MD) {
+ if ai.pluginOptionLabels == nil && h.options.MetricsOptions.pluginOption != nil {
+ labels := h.options.MetricsOptions.pluginOption.GetLabels(incomingMetadata)
+ if labels == nil {
+ labels = map[string]string{} // Shouldn't return a nil map. Make it empty if so to ignore future Get Calls for this Attempt.
+ }
+ ai.pluginOptionLabels = labels
+ }
+}
+
+func (h *clientStatsHandler) processRPCEnd(ctx context.Context, ai *attemptInfo, e *stats.End) {
+ ci := getCallInfo(ctx)
+ if ci == nil {
+ logger.Error("ctx passed into client side stats handler metrics event handling has no metrics data present")
+ return
+ }
+ latency := float64(time.Since(ai.startTime)) / float64(time.Second)
+ st := "OK"
+ if e.Error != nil {
+ s, _ := status.FromError(e.Error)
+ st = canonicalString(s.Code())
+ }
+
+ attributes := []otelattribute.KeyValue{
+ otelattribute.String("grpc.method", ci.method),
+ otelattribute.String("grpc.target", ci.target),
+ otelattribute.String("grpc.status", st),
+ }
+
+ for k, v := range ai.pluginOptionLabels {
+ attributes = append(attributes, otelattribute.String(k, v))
+ }
+
+ for _, o := range h.options.MetricsOptions.OptionalLabels {
+ // TODO: Add a filter for converting to unknown if not present in the
+ // CSM Plugin Option layer by adding an optional labels API.
+ if val, ok := ai.xdsLabels[o]; ok {
+ attributes = append(attributes, otelattribute.String(o, val))
+ }
+ }
+
+ // Allocate vararg slice once.
+ opts := []otelmetric.RecordOption{otelmetric.WithAttributeSet(otelattribute.NewSet(attributes...))}
+ h.clientMetrics.attemptDuration.Record(ctx, latency, opts...)
+ h.clientMetrics.attemptSentTotalCompressedMessageSize.Record(ctx, atomic.LoadInt64(&ai.sentCompressedBytes), opts...)
+ h.clientMetrics.attemptRcvdTotalCompressedMessageSize.Record(ctx, atomic.LoadInt64(&ai.recvCompressedBytes), opts...)
+}
+
+const (
+ // ClientAttemptStarted is the number of client call attempts started.
+ ClientAttemptStarted estats.Metric = "grpc.client.attempt.started"
+ // ClientAttemptDuration is the end-to-end time taken to complete a client
+ // call attempt.
+ ClientAttemptDuration estats.Metric = "grpc.client.attempt.duration"
+ // ClientAttemptSentCompressedTotalMessageSize is the compressed message
+ // bytes sent per client call attempt.
+ ClientAttemptSentCompressedTotalMessageSize estats.Metric = "grpc.client.attempt.sent_total_compressed_message_size"
+ // ClientAttemptRcvdCompressedTotalMessageSize is the compressed message
+ // bytes received per call attempt.
+ ClientAttemptRcvdCompressedTotalMessageSize estats.Metric = "grpc.client.attempt.rcvd_total_compressed_message_size"
+ // ClientCallDuration is the time taken by gRPC to complete an RPC from
+ // application's perspective.
+ ClientCallDuration estats.Metric = "grpc.client.call.duration"
+)
diff --git a/vendor/google.golang.org/grpc/stats/opentelemetry/internal/pluginoption.go b/vendor/google.golang.org/grpc/stats/opentelemetry/internal/pluginoption.go
new file mode 100644
index 0000000000000..b595aa85ffbec
--- /dev/null
+++ b/vendor/google.golang.org/grpc/stats/opentelemetry/internal/pluginoption.go
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Package internal defines the PluginOption interface.
+package internal
+
+import (
+ "google.golang.org/grpc/metadata"
+)
+
+// SetPluginOption sets the plugin option on Options.
+var SetPluginOption any // func(*Options, PluginOption)
+
+// PluginOption is the interface which represents a plugin option for the
+// OpenTelemetry instrumentation component. This plugin option emits labels from
+// metadata and also creates metadata containing labels. These labels are
+// intended to be added to applicable OpenTelemetry metrics recorded in the
+// OpenTelemetry instrumentation component.
+//
+// In the future, we hope to stabilize and expose this API to allow plugins to
+// inject labels of their choosing into metrics recorded.
+type PluginOption interface {
+ // GetMetadata creates a MD with metadata exchange labels.
+ GetMetadata() metadata.MD
+ // GetLabels emits labels to be attached to metrics for the RPC that
+ // contains the provided incomingMetadata.
+ GetLabels(incomingMetadata metadata.MD) map[string]string
+}
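
Since the interface is small, a toy sketch may help; the type name, header key, and label key below are all invented for illustration:

package internal

import "google.golang.org/grpc/metadata"

// staticEnvOption is a hypothetical PluginOption that exchanges a single
// deployment-environment label through metadata.
type staticEnvOption struct{ env string }

// GetMetadata emits the label so the peer can read it back.
func (o staticEnvOption) GetMetadata() metadata.MD {
	return metadata.Pairs("x-example-env", o.env)
}

// GetLabels turns the exchanged header back into a metric label, or returns
// nil when the peer sent nothing.
func (o staticEnvOption) GetLabels(incomingMetadata metadata.MD) map[string]string {
	if vs := incomingMetadata.Get("x-example-env"); len(vs) > 0 {
		return map[string]string{"example.env": vs[0]}
	}
	return nil
}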
diff --git a/vendor/google.golang.org/grpc/stats/opentelemetry/opentelemetry.go b/vendor/google.golang.org/grpc/stats/opentelemetry/opentelemetry.go
new file mode 100644
index 0000000000000..cc5ad387fb4c4
--- /dev/null
+++ b/vendor/google.golang.org/grpc/stats/opentelemetry/opentelemetry.go
@@ -0,0 +1,390 @@
+/*
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Package opentelemetry implements opentelemetry instrumentation code for
+// gRPC-Go clients and servers.
+package opentelemetry
+
+import (
+ "context"
+ "strings"
+ "time"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ estats "google.golang.org/grpc/experimental/stats"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/internal"
+ otelinternal "google.golang.org/grpc/stats/opentelemetry/internal"
+
+ otelattribute "go.opentelemetry.io/otel/attribute"
+ otelmetric "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/metric/noop"
+)
+
+func init() {
+ otelinternal.SetPluginOption = func(o *Options, po otelinternal.PluginOption) {
+ o.MetricsOptions.pluginOption = po
+ }
+}
+
+var logger = grpclog.Component("otel-plugin")
+
+var canonicalString = internal.CanonicalString.(func(codes.Code) string)
+
+var joinDialOptions = internal.JoinDialOptions.(func(...grpc.DialOption) grpc.DialOption)
+
+// Options are the options for OpenTelemetry instrumentation.
+type Options struct {
+ // MetricsOptions are the metrics options for OpenTelemetry instrumentation.
+ MetricsOptions MetricsOptions
+}
+
+// MetricsOptions are the metrics options for OpenTelemetry instrumentation.
+type MetricsOptions struct {
+ // MeterProvider is the MeterProvider instance that will be used to create
+ // instruments. To enable metrics collection, set a meter provider. If
+	// unset, no metrics will be recorded. Any implementation knobs (e.g. views,
+	// bounds) set in the MeterProvider take precedence over the API calls from
+	// this interface (which only creates default views where none are set).
+ MeterProvider otelmetric.MeterProvider
+
+	// Metrics are the metrics to instrument. Instruments will be created, and
+	// telemetry recorded, for each corresponding metric supported by the
+	// client and server instrumentation components, if applicable. If not
+	// set, the default metrics will be recorded.
+ Metrics *estats.Metrics
+
+	// MethodAttributeFilter decides whether to record the method name of RPCs
+	// handled by grpc.UnknownServiceHandler. Take care to limit the values
+	// allowed, as allowing too many will increase cardinality and could cause
+	// severe memory or performance problems. On the client side, pass a
+	// grpc.StaticMethodCallOption as a call option into Invoke or NewStream
+	// instead. This only applies to server-side metrics.
+ MethodAttributeFilter func(string) bool
+
+ // OptionalLabels are labels received from LB Policies that this component
+ // should add to metrics that record after receiving incoming metadata.
+ OptionalLabels []string
+
+ // pluginOption is used to get labels to attach to certain metrics, if set.
+ pluginOption otelinternal.PluginOption
+}
+
+// DialOption returns a dial option which enables OpenTelemetry instrumentation
+// code for a grpc.ClientConn.
+//
+// Client applications interested in instrumenting their grpc.ClientConn should
+// pass the dial option returned from this function as a dial option to
+// grpc.NewClient().
+//
+// For the metrics supported by this instrumentation code, specify the client
+// metrics to record in metrics options. Also provide an implementation of a
+// MeterProvider. If the passed-in MeterProvider does not have a view
+// configured for an individual metric, the API call in this component
+// will create a default view for that metric.
+func DialOption(o Options) grpc.DialOption {
+ csh := &clientStatsHandler{options: o}
+ csh.initializeMetrics()
+ return joinDialOptions(grpc.WithChainUnaryInterceptor(csh.unaryInterceptor), grpc.WithChainStreamInterceptor(csh.streamInterceptor), grpc.WithStatsHandler(csh))
+}
+
+var joinServerOptions = internal.JoinServerOptions.(func(...grpc.ServerOption) grpc.ServerOption)
+
+// ServerOption returns a server option which enables OpenTelemetry
+// instrumentation code for a grpc.Server.
+//
+// Server applications interested in instrumenting their grpc.Server should pass
+// the server option returned from this function as an argument to
+// grpc.NewServer().
+//
+// For the metrics supported by this instrumentation code, specify the server
+// metrics to record in metrics options. Also provide an implementation of a
+// MeterProvider. If the passed-in MeterProvider does not have a view
+// configured for an individual metric, the API call in this component
+// will create a default view for that metric.
+func ServerOption(o Options) grpc.ServerOption {
+ ssh := &serverStatsHandler{options: o}
+ ssh.initializeMetrics()
+ return joinServerOptions(grpc.ChainUnaryInterceptor(ssh.unaryInterceptor), grpc.ChainStreamInterceptor(ssh.streamInterceptor), grpc.StatsHandler(ssh))
+}
+
+// callInfo is information pertaining to the lifespan of the RPC client side.
+type callInfo struct {
+ target string
+
+ method string
+}
+
+type callInfoKey struct{}
+
+func setCallInfo(ctx context.Context, ci *callInfo) context.Context {
+ return context.WithValue(ctx, callInfoKey{}, ci)
+}
+
+// getCallInfo returns the callInfo stored in the context, or nil
+// if there isn't one.
+func getCallInfo(ctx context.Context) *callInfo {
+ ci, _ := ctx.Value(callInfoKey{}).(*callInfo)
+ return ci
+}
+
+// rpcInfo is RPC information scoped to the RPC attempt life span client side,
+// and the RPC life span server side.
+type rpcInfo struct {
+ ai *attemptInfo
+}
+
+type rpcInfoKey struct{}
+
+func setRPCInfo(ctx context.Context, ri *rpcInfo) context.Context {
+ return context.WithValue(ctx, rpcInfoKey{}, ri)
+}
+
+// getRPCInfo returns the rpcInfo stored in the context, or nil
+// if there isn't one.
+func getRPCInfo(ctx context.Context) *rpcInfo {
+ ri, _ := ctx.Value(rpcInfoKey{}).(*rpcInfo)
+ return ri
+}
+
+func removeLeadingSlash(mn string) string {
+ return strings.TrimLeft(mn, "/")
+}
+
+// attemptInfo is RPC information scoped to the RPC attempt life span client
+// side, and the RPC life span server side.
+type attemptInfo struct {
+	// Access these counts atomically for hedging in the future.
+	// Number of bytes sent after compression (within each message) from this
+	// side (client || server).
+	sentCompressedBytes int64
+	// Number of compressed bytes received (within each message) on this
+	// side (client || server).
+ recvCompressedBytes int64
+
+ startTime time.Time
+ method string
+
+ pluginOptionLabels map[string]string // pluginOptionLabels to attach to metrics emitted
+ xdsLabels map[string]string
+}
+
+type clientMetrics struct {
+ // "grpc.client.attempt.started"
+ attemptStarted otelmetric.Int64Counter
+ // "grpc.client.attempt.duration"
+ attemptDuration otelmetric.Float64Histogram
+ // "grpc.client.attempt.sent_total_compressed_message_size"
+ attemptSentTotalCompressedMessageSize otelmetric.Int64Histogram
+ // "grpc.client.attempt.rcvd_total_compressed_message_size"
+ attemptRcvdTotalCompressedMessageSize otelmetric.Int64Histogram
+ // "grpc.client.call.duration"
+ callDuration otelmetric.Float64Histogram
+}
+
+type serverMetrics struct {
+ // "grpc.server.call.started"
+ callStarted otelmetric.Int64Counter
+ // "grpc.server.call.sent_total_compressed_message_size"
+ callSentTotalCompressedMessageSize otelmetric.Int64Histogram
+ // "grpc.server.call.rcvd_total_compressed_message_size"
+ callRcvdTotalCompressedMessageSize otelmetric.Int64Histogram
+ // "grpc.server.call.duration"
+ callDuration otelmetric.Float64Histogram
+}
+
+func createInt64Counter(setOfMetrics map[estats.Metric]bool, metricName estats.Metric, meter otelmetric.Meter, options ...otelmetric.Int64CounterOption) otelmetric.Int64Counter {
+ if _, ok := setOfMetrics[metricName]; !ok {
+ return noop.Int64Counter{}
+ }
+ ret, err := meter.Int64Counter(string(metricName), options...)
+ if err != nil {
+ logger.Errorf("failed to register metric \"%v\", will not record: %v", metricName, err)
+ return noop.Int64Counter{}
+ }
+ return ret
+}
+
+func createFloat64Counter(setOfMetrics map[estats.Metric]bool, metricName estats.Metric, meter otelmetric.Meter, options ...otelmetric.Float64CounterOption) otelmetric.Float64Counter {
+ if _, ok := setOfMetrics[metricName]; !ok {
+ return noop.Float64Counter{}
+ }
+ ret, err := meter.Float64Counter(string(metricName), options...)
+ if err != nil {
+ logger.Errorf("failed to register metric \"%v\", will not record: %v", metricName, err)
+ return noop.Float64Counter{}
+ }
+ return ret
+}
+
+func createInt64Histogram(setOfMetrics map[estats.Metric]bool, metricName estats.Metric, meter otelmetric.Meter, options ...otelmetric.Int64HistogramOption) otelmetric.Int64Histogram {
+ if _, ok := setOfMetrics[metricName]; !ok {
+ return noop.Int64Histogram{}
+ }
+ ret, err := meter.Int64Histogram(string(metricName), options...)
+ if err != nil {
+ logger.Errorf("failed to register metric \"%v\", will not record: %v", metricName, err)
+ return noop.Int64Histogram{}
+ }
+ return ret
+}
+
+func createFloat64Histogram(setOfMetrics map[estats.Metric]bool, metricName estats.Metric, meter otelmetric.Meter, options ...otelmetric.Float64HistogramOption) otelmetric.Float64Histogram {
+ if _, ok := setOfMetrics[metricName]; !ok {
+ return noop.Float64Histogram{}
+ }
+ ret, err := meter.Float64Histogram(string(metricName), options...)
+ if err != nil {
+ logger.Errorf("failed to register metric \"%v\", will not record: %v", metricName, err)
+ return noop.Float64Histogram{}
+ }
+ return ret
+}
+
+func createInt64Gauge(setOfMetrics map[estats.Metric]bool, metricName estats.Metric, meter otelmetric.Meter, options ...otelmetric.Int64GaugeOption) otelmetric.Int64Gauge {
+ if _, ok := setOfMetrics[metricName]; !ok {
+ return noop.Int64Gauge{}
+ }
+ ret, err := meter.Int64Gauge(string(metricName), options...)
+ if err != nil {
+ logger.Errorf("failed to register metric \"%v\", will not record: %v", metricName, err)
+ return noop.Int64Gauge{}
+ }
+ return ret
+}
+
+func optionFromLabels(labelKeys []string, optionalLabelKeys []string, optionalLabels []string, labelVals ...string) otelmetric.MeasurementOption {
+ var attributes []otelattribute.KeyValue
+
+	// By this point, the lower layers have guaranteed that the length of
+	// labelVals matches the combined length of labelKeys and optionalLabelKeys.
+ for i, label := range labelKeys {
+ attributes = append(attributes, otelattribute.String(label, labelVals[i]))
+ }
+
+ for i, label := range optionalLabelKeys {
+		for _, optLabel := range optionalLabels { // O(n); a set would avoid the scan, but n is currently capped below 5
+ if label == optLabel {
+ attributes = append(attributes, otelattribute.String(label, labelVals[i+len(labelKeys)]))
+ }
+ }
+ }
+ return otelmetric.WithAttributeSet(otelattribute.NewSet(attributes...))
+}
+
+// registryMetrics implements MetricsRecorder for the client and server stats
+// handlers.
+type registryMetrics struct {
+ intCounts map[*estats.MetricDescriptor]otelmetric.Int64Counter
+ floatCounts map[*estats.MetricDescriptor]otelmetric.Float64Counter
+ intHistos map[*estats.MetricDescriptor]otelmetric.Int64Histogram
+ floatHistos map[*estats.MetricDescriptor]otelmetric.Float64Histogram
+ intGauges map[*estats.MetricDescriptor]otelmetric.Int64Gauge
+
+ optionalLabels []string
+}
+
+func (rm *registryMetrics) registerMetrics(metrics *estats.Metrics, meter otelmetric.Meter) {
+ rm.intCounts = make(map[*estats.MetricDescriptor]otelmetric.Int64Counter)
+ rm.floatCounts = make(map[*estats.MetricDescriptor]otelmetric.Float64Counter)
+ rm.intHistos = make(map[*estats.MetricDescriptor]otelmetric.Int64Histogram)
+ rm.floatHistos = make(map[*estats.MetricDescriptor]otelmetric.Float64Histogram)
+ rm.intGauges = make(map[*estats.MetricDescriptor]otelmetric.Int64Gauge)
+
+ for metric := range metrics.Metrics() {
+ desc := estats.DescriptorForMetric(metric)
+ if desc == nil {
+ // Either the metric was per call or the metric is not registered.
+			// Thus, if this component ever receives this desc as a handle in a
+			// record call, it will be a no-op.
+ continue
+ }
+ switch desc.Type {
+ case estats.MetricTypeIntCount:
+ rm.intCounts[desc] = createInt64Counter(metrics.Metrics(), desc.Name, meter, otelmetric.WithUnit(desc.Unit), otelmetric.WithDescription(desc.Description))
+ case estats.MetricTypeFloatCount:
+ rm.floatCounts[desc] = createFloat64Counter(metrics.Metrics(), desc.Name, meter, otelmetric.WithUnit(desc.Unit), otelmetric.WithDescription(desc.Description))
+ case estats.MetricTypeIntHisto:
+ rm.intHistos[desc] = createInt64Histogram(metrics.Metrics(), desc.Name, meter, otelmetric.WithUnit(desc.Unit), otelmetric.WithDescription(desc.Description), otelmetric.WithExplicitBucketBoundaries(desc.Bounds...))
+ case estats.MetricTypeFloatHisto:
+ rm.floatHistos[desc] = createFloat64Histogram(metrics.Metrics(), desc.Name, meter, otelmetric.WithUnit(desc.Unit), otelmetric.WithDescription(desc.Description), otelmetric.WithExplicitBucketBoundaries(desc.Bounds...))
+ case estats.MetricTypeIntGauge:
+ rm.intGauges[desc] = createInt64Gauge(metrics.Metrics(), desc.Name, meter, otelmetric.WithUnit(desc.Unit), otelmetric.WithDescription(desc.Description))
+ }
+ }
+}
+
+func (rm *registryMetrics) RecordInt64Count(handle *estats.Int64CountHandle, incr int64, labels ...string) {
+ desc := handle.Descriptor()
+ if ic, ok := rm.intCounts[desc]; ok {
+ ao := optionFromLabels(desc.Labels, desc.OptionalLabels, rm.optionalLabels, labels...)
+ ic.Add(context.TODO(), incr, ao)
+ }
+}
+
+func (rm *registryMetrics) RecordFloat64Count(handle *estats.Float64CountHandle, incr float64, labels ...string) {
+ desc := handle.Descriptor()
+ if fc, ok := rm.floatCounts[desc]; ok {
+ ao := optionFromLabels(desc.Labels, desc.OptionalLabels, rm.optionalLabels, labels...)
+ fc.Add(context.TODO(), incr, ao)
+ }
+}
+
+func (rm *registryMetrics) RecordInt64Histo(handle *estats.Int64HistoHandle, incr int64, labels ...string) {
+ desc := handle.Descriptor()
+ if ih, ok := rm.intHistos[desc]; ok {
+ ao := optionFromLabels(desc.Labels, desc.OptionalLabels, rm.optionalLabels, labels...)
+ ih.Record(context.TODO(), incr, ao)
+ }
+}
+
+func (rm *registryMetrics) RecordFloat64Histo(handle *estats.Float64HistoHandle, incr float64, labels ...string) {
+ desc := handle.Descriptor()
+ if fh, ok := rm.floatHistos[desc]; ok {
+ ao := optionFromLabels(desc.Labels, desc.OptionalLabels, rm.optionalLabels, labels...)
+ fh.Record(context.TODO(), incr, ao)
+ }
+}
+
+func (rm *registryMetrics) RecordInt64Gauge(handle *estats.Int64GaugeHandle, incr int64, labels ...string) {
+ desc := handle.Descriptor()
+ if ig, ok := rm.intGauges[desc]; ok {
+ ao := optionFromLabels(desc.Labels, desc.OptionalLabels, rm.optionalLabels, labels...)
+ ig.Record(context.TODO(), incr, ao)
+ }
+}
+
+// Users of this component should configure these bucket boundaries in the SDK
+// MeterProvider they pass in. This component sends them as "advice" through
+// the API, which works, but that behavior's stability is not guaranteed; to
+// be safe, the SDK MeterProvider provided should set these bounds for the
+// corresponding metrics.
+var (
+ // DefaultLatencyBounds are the default bounds for latency metrics.
+ DefaultLatencyBounds = []float64{0, 0.00001, 0.00005, 0.0001, 0.0003, 0.0006, 0.0008, 0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.008, 0.01, 0.013, 0.016, 0.02, 0.025, 0.03, 0.04, 0.05, 0.065, 0.08, 0.1, 0.13, 0.16, 0.2, 0.25, 0.3, 0.4, 0.5, 0.65, 0.8, 1, 2, 5, 10, 20, 50, 100} // provide "advice" through API, SDK should set this too
+ // DefaultSizeBounds are the default bounds for metrics which record size.
+ DefaultSizeBounds = []float64{0, 1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296}
+ // defaultPerCallMetrics are the default metrics provided by this module.
+ defaultPerCallMetrics = estats.NewMetrics(ClientAttemptStarted, ClientAttemptDuration, ClientAttemptSentCompressedTotalMessageSize, ClientAttemptRcvdCompressedTotalMessageSize, ClientCallDuration, ServerCallStarted, ServerCallSentCompressedTotalMessageSize, ServerCallRcvdCompressedTotalMessageSize, ServerCallDuration)
+)
+
+// DefaultMetrics returns a set of default OpenTelemetry metrics.
+//
+// This should only be invoked after init time.
+func DefaultMetrics() *estats.Metrics {
+ return defaultPerCallMetrics.Join(estats.DefaultMetrics)
+}
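For orientation, a minimal sketch of consuming DialOption follows, assuming an SDK MeterProvider from go.opentelemetry.io/otel/sdk/metric with reader/exporter wiring omitted; the function name and insecure credentials are illustrative only.

package main

import (
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/stats/opentelemetry"
)

// newInstrumentedClient dials target with the default client metrics enabled.
func newInstrumentedClient(target string) (*grpc.ClientConn, error) {
	mp := sdkmetric.NewMeterProvider() // attach a reader/exporter in real use
	return grpc.NewClient(target,
		opentelemetry.DialOption(opentelemetry.Options{
			MetricsOptions: opentelemetry.MetricsOptions{MeterProvider: mp},
		}),
		grpc.WithTransportCredentials(insecure.NewCredentials()),
	)
}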
diff --git a/vendor/google.golang.org/grpc/stats/opentelemetry/server_metrics.go b/vendor/google.golang.org/grpc/stats/opentelemetry/server_metrics.go
new file mode 100644
index 0000000000000..eaea559b2c103
--- /dev/null
+++ b/vendor/google.golang.org/grpc/stats/opentelemetry/server_metrics.go
@@ -0,0 +1,278 @@
+/*
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package opentelemetry
+
+import (
+ "context"
+ "sync/atomic"
+ "time"
+
+ "google.golang.org/grpc"
+ estats "google.golang.org/grpc/experimental/stats"
+ "google.golang.org/grpc/internal"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/stats"
+ "google.golang.org/grpc/status"
+
+ otelattribute "go.opentelemetry.io/otel/attribute"
+ otelmetric "go.opentelemetry.io/otel/metric"
+)
+
+type serverStatsHandler struct {
+ estats.MetricsRecorder
+ options Options
+ serverMetrics serverMetrics
+}
+
+func (h *serverStatsHandler) initializeMetrics() {
+	// Without a MeterProvider there are no metrics to record, logically
+	// making this stats handler a no-op.
+ if h.options.MetricsOptions.MeterProvider == nil {
+ return
+ }
+
+ meter := h.options.MetricsOptions.MeterProvider.Meter("grpc-go", otelmetric.WithInstrumentationVersion(grpc.Version))
+ if meter == nil {
+ return
+ }
+ metrics := h.options.MetricsOptions.Metrics
+ if metrics == nil {
+ metrics = DefaultMetrics()
+ }
+
+ h.serverMetrics.callStarted = createInt64Counter(metrics.Metrics(), "grpc.server.call.started", meter, otelmetric.WithUnit("call"), otelmetric.WithDescription("Number of server calls started."))
+ h.serverMetrics.callSentTotalCompressedMessageSize = createInt64Histogram(metrics.Metrics(), "grpc.server.call.sent_total_compressed_message_size", meter, otelmetric.WithUnit("By"), otelmetric.WithDescription("Compressed message bytes sent per server call."), otelmetric.WithExplicitBucketBoundaries(DefaultSizeBounds...))
+ h.serverMetrics.callRcvdTotalCompressedMessageSize = createInt64Histogram(metrics.Metrics(), "grpc.server.call.rcvd_total_compressed_message_size", meter, otelmetric.WithUnit("By"), otelmetric.WithDescription("Compressed message bytes received per server call."), otelmetric.WithExplicitBucketBoundaries(DefaultSizeBounds...))
+ h.serverMetrics.callDuration = createFloat64Histogram(metrics.Metrics(), "grpc.server.call.duration", meter, otelmetric.WithUnit("s"), otelmetric.WithDescription("End-to-end time taken to complete a call from server transport's perspective."), otelmetric.WithExplicitBucketBoundaries(DefaultLatencyBounds...))
+
+ rm := ®istryMetrics{
+ optionalLabels: h.options.MetricsOptions.OptionalLabels,
+ }
+ h.MetricsRecorder = rm
+ rm.registerMetrics(metrics, meter)
+}
+
+// attachLabelsTransportStream intercepts SetHeader and SendHeader calls of the
+// underlying ServerTransportStream to attach metadataExchangeLabels.
+type attachLabelsTransportStream struct {
+ grpc.ServerTransportStream
+
+ attachedLabels atomic.Bool
+ metadataExchangeLabels metadata.MD
+}
+
+func (s *attachLabelsTransportStream) SetHeader(md metadata.MD) error {
+ if !s.attachedLabels.Swap(true) {
+ s.ServerTransportStream.SetHeader(s.metadataExchangeLabels)
+ }
+ return s.ServerTransportStream.SetHeader(md)
+}
+
+func (s *attachLabelsTransportStream) SendHeader(md metadata.MD) error {
+ if !s.attachedLabels.Swap(true) {
+ s.ServerTransportStream.SetHeader(s.metadataExchangeLabels)
+ }
+
+ return s.ServerTransportStream.SendHeader(md)
+}
+
+func (h *serverStatsHandler) unaryInterceptor(ctx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
+ var metadataExchangeLabels metadata.MD
+ if h.options.MetricsOptions.pluginOption != nil {
+ metadataExchangeLabels = h.options.MetricsOptions.pluginOption.GetMetadata()
+ }
+
+ sts := grpc.ServerTransportStreamFromContext(ctx)
+
+ alts := &attachLabelsTransportStream{
+ ServerTransportStream: sts,
+ metadataExchangeLabels: metadataExchangeLabels,
+ }
+ ctx = grpc.NewContextWithServerTransportStream(ctx, alts)
+
+ res, err := handler(ctx, req)
+ if err != nil { // maybe trailers-only if headers haven't already been sent
+ if !alts.attachedLabels.Swap(true) {
+ alts.SetTrailer(alts.metadataExchangeLabels)
+ }
+ } else { // headers will be written; a message was sent
+ if !alts.attachedLabels.Swap(true) {
+ alts.SetHeader(alts.metadataExchangeLabels)
+ }
+ }
+
+ return res, err
+}
+
+// attachLabelsStream embeds a grpc.ServerStream and intercepts the
+// SetHeader/SendHeader/SendMsg calls to attach metadata exchange
+// labels.
+type attachLabelsStream struct {
+ grpc.ServerStream
+
+ attachedLabels atomic.Bool
+ metadataExchangeLabels metadata.MD
+}
+
+func (s *attachLabelsStream) SetHeader(md metadata.MD) error {
+ if !s.attachedLabels.Swap(true) {
+ s.ServerStream.SetHeader(s.metadataExchangeLabels)
+ }
+
+ return s.ServerStream.SetHeader(md)
+}
+
+func (s *attachLabelsStream) SendHeader(md metadata.MD) error {
+ if !s.attachedLabels.Swap(true) {
+ s.ServerStream.SetHeader(s.metadataExchangeLabels)
+ }
+
+ return s.ServerStream.SendHeader(md)
+}
+
+func (s *attachLabelsStream) SendMsg(m any) error {
+ if !s.attachedLabels.Swap(true) {
+ s.ServerStream.SetHeader(s.metadataExchangeLabels)
+ }
+ return s.ServerStream.SendMsg(m)
+}
+
+func (h *serverStatsHandler) streamInterceptor(srv any, ss grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+ var metadataExchangeLabels metadata.MD
+ if h.options.MetricsOptions.pluginOption != nil {
+ metadataExchangeLabels = h.options.MetricsOptions.pluginOption.GetMetadata()
+ }
+ als := &attachLabelsStream{
+ ServerStream: ss,
+ metadataExchangeLabels: metadataExchangeLabels,
+ }
+ err := handler(srv, als)
+
+	// Add metadata exchange labels to the trailers if they were never sent
+	// in headers, irrespective of whether the RPC failed.
+ if !als.attachedLabels.Load() {
+ als.SetTrailer(als.metadataExchangeLabels)
+ }
+ return err
+}
+
+// TagConn exists to satisfy stats.Handler.
+func (h *serverStatsHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context {
+ return ctx
+}
+
+// HandleConn exists to satisfy stats.Handler.
+func (h *serverStatsHandler) HandleConn(context.Context, stats.ConnStats) {}
+
+// TagRPC implements per RPC context management.
+func (h *serverStatsHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
+ method := info.FullMethodName
+ if h.options.MetricsOptions.MethodAttributeFilter != nil {
+ if !h.options.MetricsOptions.MethodAttributeFilter(method) {
+ method = "other"
+ }
+ }
+ server := internal.ServerFromContext.(func(context.Context) *grpc.Server)(ctx)
+ if server == nil { // Shouldn't happen, defensive programming.
+ logger.Error("ctx passed into server side stats handler has no grpc server ref")
+ method = "other"
+ } else {
+ isRegisteredMethod := internal.IsRegisteredMethod.(func(*grpc.Server, string) bool)
+ if !isRegisteredMethod(server, method) {
+ method = "other"
+ }
+ }
+
+ ai := &attemptInfo{
+ startTime: time.Now(),
+ method: removeLeadingSlash(method),
+ }
+ ri := &rpcInfo{
+ ai: ai,
+ }
+ return setRPCInfo(ctx, ri)
+}
+
+// HandleRPC implements per RPC tracing and stats implementation.
+func (h *serverStatsHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) {
+ ri := getRPCInfo(ctx)
+ if ri == nil {
+ logger.Error("ctx passed into server side stats handler metrics event handling has no server call data present")
+ return
+ }
+ h.processRPCData(ctx, rs, ri.ai)
+}
+
+func (h *serverStatsHandler) processRPCData(ctx context.Context, s stats.RPCStats, ai *attemptInfo) {
+ switch st := s.(type) {
+ case *stats.InHeader:
+ if ai.pluginOptionLabels == nil && h.options.MetricsOptions.pluginOption != nil {
+ labels := h.options.MetricsOptions.pluginOption.GetLabels(st.Header)
+ if labels == nil {
+				labels = map[string]string{} // Shouldn't be nil; make it an empty map so this attempt does not fetch labels again.
+ }
+ ai.pluginOptionLabels = labels
+ }
+ attrs := otelmetric.WithAttributeSet(otelattribute.NewSet(
+ otelattribute.String("grpc.method", ai.method),
+ ))
+ h.serverMetrics.callStarted.Add(ctx, 1, attrs)
+ case *stats.OutPayload:
+ atomic.AddInt64(&ai.sentCompressedBytes, int64(st.CompressedLength))
+ case *stats.InPayload:
+ atomic.AddInt64(&ai.recvCompressedBytes, int64(st.CompressedLength))
+ case *stats.End:
+ h.processRPCEnd(ctx, ai, st)
+ default:
+ }
+}
+
+func (h *serverStatsHandler) processRPCEnd(ctx context.Context, ai *attemptInfo, e *stats.End) {
+ latency := float64(time.Since(ai.startTime)) / float64(time.Second)
+ st := "OK"
+ if e.Error != nil {
+ s, _ := status.FromError(e.Error)
+ st = canonicalString(s.Code())
+ }
+ attributes := []otelattribute.KeyValue{
+ otelattribute.String("grpc.method", ai.method),
+ otelattribute.String("grpc.status", st),
+ }
+ for k, v := range ai.pluginOptionLabels {
+ attributes = append(attributes, otelattribute.String(k, v))
+ }
+
+ // Allocate vararg slice once.
+ opts := []otelmetric.RecordOption{otelmetric.WithAttributeSet(otelattribute.NewSet(attributes...))}
+ h.serverMetrics.callDuration.Record(ctx, latency, opts...)
+ h.serverMetrics.callSentTotalCompressedMessageSize.Record(ctx, atomic.LoadInt64(&ai.sentCompressedBytes), opts...)
+ h.serverMetrics.callRcvdTotalCompressedMessageSize.Record(ctx, atomic.LoadInt64(&ai.recvCompressedBytes), opts...)
+}
+
+const (
+ // ServerCallStarted is the number of server calls started.
+ ServerCallStarted estats.Metric = "grpc.server.call.started"
+ // ServerCallSentCompressedTotalMessageSize is the compressed message bytes
+ // sent per server call.
+ ServerCallSentCompressedTotalMessageSize estats.Metric = "grpc.server.call.sent_total_compressed_message_size"
+ // ServerCallRcvdCompressedTotalMessageSize is the compressed message bytes
+ // received per server call.
+ ServerCallRcvdCompressedTotalMessageSize estats.Metric = "grpc.server.call.rcvd_total_compressed_message_size"
+ // ServerCallDuration is the end-to-end time taken to complete a call from
+ // server transport's perspective.
+ ServerCallDuration estats.Metric = "grpc.server.call.duration"
+)
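The server side mirrors the client wiring. A minimal sketch follows (the allow-list parameter is an assumption for illustration), using MethodAttributeFilter to bound method-name cardinality as the options documentation advises.

package main

import (
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
	"google.golang.org/grpc"
	"google.golang.org/grpc/stats/opentelemetry"
)

// newInstrumentedServer records the default server metrics; the filter keeps
// arbitrary unknown-handler method names from exploding label cardinality.
func newInstrumentedServer(mp *sdkmetric.MeterProvider, allowed map[string]bool) *grpc.Server {
	return grpc.NewServer(opentelemetry.ServerOption(opentelemetry.Options{
		MetricsOptions: opentelemetry.MetricsOptions{
			MeterProvider:         mp,
			MethodAttributeFilter: func(m string) bool { return allowed[m] },
		},
	}))
}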
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
index bb2966e3b4c69..8f9e592f87012 100644
--- a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
@@ -351,7 +351,7 @@ func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect.
panic(fmt.Sprintf("unmarshalScalar: invalid scalar kind %v", kind))
}
- return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString())
+ return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v field %v: %v", kind, fd.JSONName(), tok.RawString())
}
func unmarshalInt(tok json.Token, bitSize int) (protoreflect.Value, bool) {
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
index 29846df222c38..0e72d85378b31 100644
--- a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
@@ -216,9 +216,7 @@ func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, proto
}
v := m.Get(fd)
- isProto2Scalar := fd.Syntax() == protoreflect.Proto2 && fd.Default().IsValid()
- isSingularMessage := fd.Cardinality() != protoreflect.Repeated && fd.Message() != nil
- if isProto2Scalar || isSingularMessage {
+ if fd.HasPresence() {
if m.skipNull {
continue
}
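The visible effect of this hunk is on protojson's handling of unpopulated fields: whether an unset field is emitted (or skipped as null) now follows the field's HasPresence() rather than the old proto2-scalar and singular-message checks. A small sketch of the affected call path, with a hypothetical helper name:

package main

import (
	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/proto"
)

// marshalWithDefaults also emits zero-valued fields; fields with explicit
// presence (optional scalars, singular messages) appear as null when unset.
func marshalWithDefaults(m proto.Message) ([]byte, error) {
	return protojson.MarshalOptions{EmitUnpopulated: true}.Marshal(m)
}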
diff --git a/vendor/google.golang.org/protobuf/internal/descopts/options.go b/vendor/google.golang.org/protobuf/internal/descopts/options.go
index 8401be8c84fa1..024ffebd3ddef 100644
--- a/vendor/google.golang.org/protobuf/internal/descopts/options.go
+++ b/vendor/google.golang.org/protobuf/internal/descopts/options.go
@@ -9,7 +9,7 @@
// dependency on the descriptor proto package).
package descopts
-import pref "google.golang.org/protobuf/reflect/protoreflect"
+import "google.golang.org/protobuf/reflect/protoreflect"
// These variables are set by the init function in descriptor.pb.go via logic
// in internal/filetype. In other words, so long as the descriptor proto package
@@ -17,13 +17,13 @@ import pref "google.golang.org/protobuf/reflect/protoreflect"
//
// Each variable is populated with a nil pointer to the options struct.
var (
- File pref.ProtoMessage
- Enum pref.ProtoMessage
- EnumValue pref.ProtoMessage
- Message pref.ProtoMessage
- Field pref.ProtoMessage
- Oneof pref.ProtoMessage
- ExtensionRange pref.ProtoMessage
- Service pref.ProtoMessage
- Method pref.ProtoMessage
+ File protoreflect.ProtoMessage
+ Enum protoreflect.ProtoMessage
+ EnumValue protoreflect.ProtoMessage
+ Message protoreflect.ProtoMessage
+ Field protoreflect.ProtoMessage
+ Oneof protoreflect.ProtoMessage
+ ExtensionRange protoreflect.ProtoMessage
+ Service protoreflect.ProtoMessage
+ Method protoreflect.ProtoMessage
)
diff --git a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go
index 029a6a12d7423..08dad7692c64b 100644
--- a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go
+++ b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go
@@ -5,7 +5,7 @@
// Package editionssupport defines constants for editions that are supported.
package editionssupport
-import descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+import "google.golang.org/protobuf/types/descriptorpb"
const (
Minimum = descriptorpb.Edition_EDITION_PROTO2
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
index df53ff40b25ae..fa790e0ff1968 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
@@ -258,6 +258,7 @@ type (
StringName stringName
IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto
IsWeak bool // promoted from google.protobuf.FieldOptions
+ IsLazy bool // promoted from google.protobuf.FieldOptions
Default defaultValue
ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields
Enum protoreflect.EnumDescriptor
@@ -351,6 +352,7 @@ func (fd *Field) IsPacked() bool {
}
func (fd *Field) IsExtension() bool { return false }
func (fd *Field) IsWeak() bool { return fd.L1.IsWeak }
+func (fd *Field) IsLazy() bool { return fd.L1.IsLazy }
func (fd *Field) IsList() bool { return fd.Cardinality() == protoreflect.Repeated && !fd.IsMap() }
func (fd *Field) IsMap() bool { return fd.Message() != nil && fd.Message().IsMapEntry() }
func (fd *Field) MapKey() protoreflect.FieldDescriptor {
@@ -425,6 +427,7 @@ type (
Extendee protoreflect.MessageDescriptor
Cardinality protoreflect.Cardinality
Kind protoreflect.Kind
+ IsLazy bool
EditionFeatures EditionFeatures
}
ExtensionL2 struct {
@@ -465,6 +468,7 @@ func (xd *Extension) IsPacked() bool {
}
func (xd *Extension) IsExtension() bool { return true }
func (xd *Extension) IsWeak() bool { return false }
+func (xd *Extension) IsLazy() bool { return xd.L1.IsLazy }
func (xd *Extension) IsList() bool { return xd.Cardinality() == protoreflect.Repeated }
func (xd *Extension) IsMap() bool { return false }
func (xd *Extension) MapKey() protoreflect.FieldDescriptor { return nil }
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
index 8a57d60b08c13..d2f549497eb7d 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
@@ -495,6 +495,8 @@ func (xd *Extension) unmarshalOptions(b []byte) {
switch num {
case genid.FieldOptions_Packed_field_number:
xd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v)
+ case genid.FieldOptions_Lazy_field_number:
+ xd.L1.IsLazy = protowire.DecodeBool(v)
}
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
index e56c91a8dbe0d..67a51b327c5c2 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
@@ -504,6 +504,8 @@ func (fd *Field) unmarshalOptions(b []byte) {
fd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v)
case genid.FieldOptions_Weak_field_number:
fd.L1.IsWeak = protowire.DecodeBool(v)
+ case genid.FieldOptions_Lazy_field_number:
+ fd.L1.IsLazy = protowire.DecodeBool(v)
case FieldOptions_EnforceUTF8:
fd.L1.EditionFeatures.IsUTF8Validated = protowire.DecodeBool(v)
}
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
index 11f5f356b660b..fd4d0c83d2575 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
@@ -68,7 +68,7 @@ func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures {
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
- case genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number:
+ case genid.FeatureSet_Go_ext_number:
parent = unmarshalGoFeature(v, parent)
}
}
diff --git a/vendor/google.golang.org/protobuf/internal/genid/doc.go b/vendor/google.golang.org/protobuf/internal/genid/doc.go
index 45ccd01211ce4..d9b9d916a20e2 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/doc.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/doc.go
@@ -6,6 +6,6 @@
// and the well-known types.
package genid
-import protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+import "google.golang.org/protobuf/reflect/protoreflect"
const GoogleProtobuf_package protoreflect.FullName = "google.protobuf"
diff --git a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go
index 9a652a2b42421..7f67cbb6e97e5 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go
@@ -12,20 +12,25 @@ import (
const File_google_protobuf_go_features_proto = "google/protobuf/go_features.proto"
-// Names for google.protobuf.GoFeatures.
+// Names for pb.GoFeatures.
const (
GoFeatures_message_name protoreflect.Name = "GoFeatures"
- GoFeatures_message_fullname protoreflect.FullName = "google.protobuf.GoFeatures"
+ GoFeatures_message_fullname protoreflect.FullName = "pb.GoFeatures"
)
-// Field names for google.protobuf.GoFeatures.
+// Field names for pb.GoFeatures.
const (
GoFeatures_LegacyUnmarshalJsonEnum_field_name protoreflect.Name = "legacy_unmarshal_json_enum"
- GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "google.protobuf.GoFeatures.legacy_unmarshal_json_enum"
+ GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "pb.GoFeatures.legacy_unmarshal_json_enum"
)
-// Field numbers for google.protobuf.GoFeatures.
+// Field numbers for pb.GoFeatures.
const (
GoFeatures_LegacyUnmarshalJsonEnum_field_number protoreflect.FieldNumber = 1
)
+
+// Extension numbers
+const (
+ FeatureSet_Go_ext_number protoreflect.FieldNumber = 1002
+)
diff --git a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go
index 8f9ea02ff2a4c..bef5a25fbbf02 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go
@@ -4,7 +4,7 @@
package genid
-import protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+import "google.golang.org/protobuf/reflect/protoreflect"
// Generic field names and numbers for synthetic map entry messages.
const (
diff --git a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go
index 429384b85b02d..9404270de0ba3 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go
@@ -4,7 +4,7 @@
package genid
-import protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+import "google.golang.org/protobuf/reflect/protoreflect"
// Generic field name and number for messages in wrappers.proto.
const (
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go
index 4bb0a7a20ce22..0d5b546e0eed4 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go
@@ -67,7 +67,6 @@ type lazyExtensionValue struct {
xi *extensionFieldInfo
value protoreflect.Value
b []byte
- fn func() protoreflect.Value
}
type ExtensionField struct {
@@ -158,10 +157,9 @@ func (f *ExtensionField) lazyInit() {
}
f.lazy.value = val
} else {
- f.lazy.value = f.lazy.fn()
+ panic("No support for lazy fns for ExtensionField")
}
f.lazy.xi = nil
- f.lazy.fn = nil
f.lazy.b = nil
atomic.StoreUint32(&f.lazy.atomicOnce, 1)
}
@@ -174,13 +172,6 @@ func (f *ExtensionField) Set(t protoreflect.ExtensionType, v protoreflect.Value)
f.lazy = nil
}
-// SetLazy sets the type and a value that is to be lazily evaluated upon first use.
-// This must not be called concurrently.
-func (f *ExtensionField) SetLazy(t protoreflect.ExtensionType, fn func() protoreflect.Value) {
- f.typ = t
- f.lazy = &lazyExtensionValue{fn: fn}
-}
-
// Value returns the value of the extension field.
// This may be called concurrently.
func (f *ExtensionField) Value() protoreflect.Value {
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
index 78ee47e44b92b..7c1f66c8c1956 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
@@ -65,6 +65,9 @@ func (mi *MessageInfo) initOneofFieldCoders(od protoreflect.OneofDescriptor, si
if err != nil {
return out, err
}
+ if cf.funcs.isInit == nil {
+ out.initialized = true
+ }
vi.Set(vw)
return out, nil
}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
index 6b2fdbb739a23..78be9df3420de 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
@@ -189,6 +189,9 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) {
if mi.methods.Merge == nil {
mi.methods.Merge = mi.merge
}
+ if mi.methods.Equal == nil {
+ mi.methods.Equal = equal
+ }
}
// getUnknownBytes returns a *[]byte for the unknown fields.
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go
deleted file mode 100644
index 145c577bd6b24..0000000000000
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go
+++ /dev/null
@@ -1,210 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build purego || appengine
-// +build purego appengine
-
-package impl
-
-import (
- "reflect"
-
- "google.golang.org/protobuf/encoding/protowire"
-)
-
-func sizeEnum(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
- v := p.v.Elem().Int()
- return f.tagsize + protowire.SizeVarint(uint64(v))
-}
-
-func appendEnum(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
- v := p.v.Elem().Int()
- b = protowire.AppendVarint(b, f.wiretag)
- b = protowire.AppendVarint(b, uint64(v))
- return b, nil
-}
-
-func consumeEnum(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
- if wtyp != protowire.VarintType {
- return out, errUnknown
- }
- v, n := protowire.ConsumeVarint(b)
- if n < 0 {
- return out, errDecode
- }
- p.v.Elem().SetInt(int64(v))
- out.n = n
- return out, nil
-}
-
-func mergeEnum(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
- dst.v.Elem().Set(src.v.Elem())
-}
-
-var coderEnum = pointerCoderFuncs{
- size: sizeEnum,
- marshal: appendEnum,
- unmarshal: consumeEnum,
- merge: mergeEnum,
-}
-
-func sizeEnumNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
- if p.v.Elem().Int() == 0 {
- return 0
- }
- return sizeEnum(p, f, opts)
-}
-
-func appendEnumNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
- if p.v.Elem().Int() == 0 {
- return b, nil
- }
- return appendEnum(b, p, f, opts)
-}
-
-func mergeEnumNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
- if src.v.Elem().Int() != 0 {
- dst.v.Elem().Set(src.v.Elem())
- }
-}
-
-var coderEnumNoZero = pointerCoderFuncs{
- size: sizeEnumNoZero,
- marshal: appendEnumNoZero,
- unmarshal: consumeEnum,
- merge: mergeEnumNoZero,
-}
-
-func sizeEnumPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
- return sizeEnum(pointer{p.v.Elem()}, f, opts)
-}
-
-func appendEnumPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
- return appendEnum(b, pointer{p.v.Elem()}, f, opts)
-}
-
-func consumeEnumPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
- if wtyp != protowire.VarintType {
- return out, errUnknown
- }
- if p.v.Elem().IsNil() {
- p.v.Elem().Set(reflect.New(p.v.Elem().Type().Elem()))
- }
- return consumeEnum(b, pointer{p.v.Elem()}, wtyp, f, opts)
-}
-
-func mergeEnumPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
- if !src.v.Elem().IsNil() {
- v := reflect.New(dst.v.Type().Elem().Elem())
- v.Elem().Set(src.v.Elem().Elem())
- dst.v.Elem().Set(v)
- }
-}
-
-var coderEnumPtr = pointerCoderFuncs{
- size: sizeEnumPtr,
- marshal: appendEnumPtr,
- unmarshal: consumeEnumPtr,
- merge: mergeEnumPtr,
-}
-
-func sizeEnumSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
- s := p.v.Elem()
- for i, llen := 0, s.Len(); i < llen; i++ {
- size += protowire.SizeVarint(uint64(s.Index(i).Int())) + f.tagsize
- }
- return size
-}
-
-func appendEnumSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
- s := p.v.Elem()
- for i, llen := 0, s.Len(); i < llen; i++ {
- b = protowire.AppendVarint(b, f.wiretag)
- b = protowire.AppendVarint(b, uint64(s.Index(i).Int()))
- }
- return b, nil
-}
-
-func consumeEnumSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
- s := p.v.Elem()
- if wtyp == protowire.BytesType {
- b, n := protowire.ConsumeBytes(b)
- if n < 0 {
- return out, errDecode
- }
- for len(b) > 0 {
- v, n := protowire.ConsumeVarint(b)
- if n < 0 {
- return out, errDecode
- }
- rv := reflect.New(s.Type().Elem()).Elem()
- rv.SetInt(int64(v))
- s.Set(reflect.Append(s, rv))
- b = b[n:]
- }
- out.n = n
- return out, nil
- }
- if wtyp != protowire.VarintType {
- return out, errUnknown
- }
- v, n := protowire.ConsumeVarint(b)
- if n < 0 {
- return out, errDecode
- }
- rv := reflect.New(s.Type().Elem()).Elem()
- rv.SetInt(int64(v))
- s.Set(reflect.Append(s, rv))
- out.n = n
- return out, nil
-}
-
-func mergeEnumSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
- dst.v.Elem().Set(reflect.AppendSlice(dst.v.Elem(), src.v.Elem()))
-}
-
-var coderEnumSlice = pointerCoderFuncs{
- size: sizeEnumSlice,
- marshal: appendEnumSlice,
- unmarshal: consumeEnumSlice,
- merge: mergeEnumSlice,
-}
-
-func sizeEnumPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
- s := p.v.Elem()
- llen := s.Len()
- if llen == 0 {
- return 0
- }
- n := 0
- for i := 0; i < llen; i++ {
- n += protowire.SizeVarint(uint64(s.Index(i).Int()))
- }
- return f.tagsize + protowire.SizeBytes(n)
-}
-
-func appendEnumPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
- s := p.v.Elem()
- llen := s.Len()
- if llen == 0 {
- return b, nil
- }
- b = protowire.AppendVarint(b, f.wiretag)
- n := 0
- for i := 0; i < llen; i++ {
- n += protowire.SizeVarint(uint64(s.Index(i).Int()))
- }
- b = protowire.AppendVarint(b, uint64(n))
- for i := 0; i < llen; i++ {
- b = protowire.AppendVarint(b, uint64(s.Index(i).Int()))
- }
- return b, nil
-}
-
-var coderEnumPackedSlice = pointerCoderFuncs{
- size: sizeEnumPackedSlice,
- marshal: appendEnumPackedSlice,
- unmarshal: consumeEnumSlice,
- merge: mergeEnumSlice,
-}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go
index 757642e23c9ed..077712c2c5a3a 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build !purego && !appengine
-// +build !purego,!appengine
-
package impl
// When using unsafe pointers, we can just treat enum values as int32s.
diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go
index e06ece55a26c1..f72ddd882f324 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/convert.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go
@@ -322,7 +322,7 @@ func (c *stringConverter) PBValueOf(v reflect.Value) protoreflect.Value {
return protoreflect.ValueOfString(v.Convert(stringType).String())
}
func (c *stringConverter) GoValueOf(v protoreflect.Value) reflect.Value {
- // pref.Value.String never panics, so we go through an interface
+ // protoreflect.Value.String never panics, so we go through an interface
// conversion here to check the type.
s := v.Interface().(string)
if c.goType.Kind() == reflect.Slice && s == "" {
diff --git a/vendor/google.golang.org/protobuf/internal/impl/encode.go b/vendor/google.golang.org/protobuf/internal/impl/encode.go
index febd212247248..6254f5de41f5d 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/encode.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/encode.go
@@ -10,7 +10,7 @@ import (
"sync/atomic"
"google.golang.org/protobuf/internal/flags"
- proto "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/proto"
piface "google.golang.org/protobuf/runtime/protoiface"
)
diff --git a/vendor/google.golang.org/protobuf/internal/impl/equal.go b/vendor/google.golang.org/protobuf/internal/impl/equal.go
new file mode 100644
index 0000000000000..9f6c32a7d8cdf
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/equal.go
@@ -0,0 +1,224 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "bytes"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoiface"
+)
+
+func equal(in protoiface.EqualInput) protoiface.EqualOutput {
+ return protoiface.EqualOutput{Equal: equalMessage(in.MessageA, in.MessageB)}
+}
+
+// equalMessage is a fast-path variant of protoreflect.equalMessage.
+// It takes advantage of the internal messageState type to avoid
+// unnecessary allocations and type assertions.
+func equalMessage(mx, my protoreflect.Message) bool {
+ if mx == nil || my == nil {
+ return mx == my
+ }
+ if mx.Descriptor() != my.Descriptor() {
+ return false
+ }
+
+ msx, ok := mx.(*messageState)
+ if !ok {
+ return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my))
+ }
+ msy, ok := my.(*messageState)
+ if !ok {
+ return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my))
+ }
+
+ mi := msx.messageInfo()
+ miy := msy.messageInfo()
+ if mi != miy {
+ return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my))
+ }
+ mi.init()
+	// Compare regular fields.
+	// This is modified Message.Range code that compares two messages of the
+	// same type while iterating over the fields.
+ for _, ri := range mi.rangeInfos {
+ var fd protoreflect.FieldDescriptor
+ var vx, vy protoreflect.Value
+
+ switch ri := ri.(type) {
+ case *fieldInfo:
+ hx := ri.has(msx.pointer())
+ hy := ri.has(msy.pointer())
+ if hx != hy {
+ return false
+ }
+ if !hx {
+ continue
+ }
+ fd = ri.fieldDesc
+ vx = ri.get(msx.pointer())
+ vy = ri.get(msy.pointer())
+ case *oneofInfo:
+ fnx := ri.which(msx.pointer())
+ fny := ri.which(msy.pointer())
+ if fnx != fny {
+ return false
+ }
+ if fnx <= 0 {
+ continue
+ }
+ fi := mi.fields[fnx]
+ fd = fi.fieldDesc
+ vx = fi.get(msx.pointer())
+ vy = fi.get(msy.pointer())
+ }
+
+ if !equalValue(fd, vx, vy) {
+ return false
+ }
+ }
+
+ // Compare extensions.
+	// This is more complicated because mx or my could have empty/nil extension maps,
+	// yet a populated extension map can still be equal to a nil one (e.g. all-empty lists).
+ emx := mi.extensionMap(msx.pointer())
+ emy := mi.extensionMap(msy.pointer())
+ if emx != nil {
+ for k, x := range *emx {
+ xd := x.Type().TypeDescriptor()
+ xv := x.Value()
+ var y ExtensionField
+ ok := false
+ if emy != nil {
+ y, ok = (*emy)[k]
+ }
+ // We need to treat empty lists as equal to nil values
+ if emy == nil || !ok {
+ if xd.IsList() && xv.List().Len() == 0 {
+ continue
+ }
+ return false
+ }
+
+ if !equalValue(xd, xv, y.Value()) {
+ return false
+ }
+ }
+ }
+ if emy != nil {
+		// emy may have extensions that emx does not have; check them as well
+ for k, y := range *emy {
+ if emx != nil {
+ // emx has the field, so we already checked it
+ if _, ok := (*emx)[k]; ok {
+ continue
+ }
+ }
+ // Empty lists are equal to nil
+ if y.Type().TypeDescriptor().IsList() && y.Value().List().Len() == 0 {
+ continue
+ }
+
+			// Can't be equal if the extension is populated
+ return false
+ }
+ }
+
+ return equalUnknown(mx.GetUnknown(), my.GetUnknown())
+}
+
+func equalValue(fd protoreflect.FieldDescriptor, vx, vy protoreflect.Value) bool {
+ // slow path
+ if fd.Kind() != protoreflect.MessageKind {
+ return vx.Equal(vy)
+ }
+
+ // fast path special cases
+ if fd.IsMap() {
+ if fd.MapValue().Kind() == protoreflect.MessageKind {
+ return equalMessageMap(vx.Map(), vy.Map())
+ }
+ return vx.Equal(vy)
+ }
+
+ if fd.IsList() {
+ return equalMessageList(vx.List(), vy.List())
+ }
+
+ return equalMessage(vx.Message(), vy.Message())
+}
+
+// Mostly copied from protoreflect.equalMap.
+// This variant only works for maps whose values are messages.
+// All other map types should be handled via Value.Equal.
+func equalMessageMap(mx, my protoreflect.Map) bool {
+ if mx.Len() != my.Len() {
+ return false
+ }
+ equal := true
+ mx.Range(func(k protoreflect.MapKey, vx protoreflect.Value) bool {
+ if !my.Has(k) {
+ equal = false
+ return false
+ }
+ vy := my.Get(k)
+ equal = equalMessage(vx.Message(), vy.Message())
+ return equal
+ })
+ return equal
+}
+
+// Mostly copied from protoreflect.equalList.
+// The only change is the use of equalMessage instead of protoreflect.equalValue.
+func equalMessageList(lx, ly protoreflect.List) bool {
+ if lx.Len() != ly.Len() {
+ return false
+ }
+ for i := 0; i < lx.Len(); i++ {
+		// We only operate on messages here since equalValue will not call us in any other case.
+ if !equalMessage(lx.Get(i).Message(), ly.Get(i).Message()) {
+ return false
+ }
+ }
+ return true
+}
+
+// equalUnknown compares unknown fields by direct comparison on the raw bytes
+// of each individual field number.
+// Copied from protoreflect.equalUnknown.
+func equalUnknown(x, y protoreflect.RawFields) bool {
+ if len(x) != len(y) {
+ return false
+ }
+ if bytes.Equal([]byte(x), []byte(y)) {
+ return true
+ }
+
+ mx := make(map[protoreflect.FieldNumber]protoreflect.RawFields)
+ my := make(map[protoreflect.FieldNumber]protoreflect.RawFields)
+ for len(x) > 0 {
+ fnum, _, n := protowire.ConsumeField(x)
+ mx[fnum] = append(mx[fnum], x[:n]...)
+ x = x[n:]
+ }
+ for len(y) > 0 {
+ fnum, _, n := protowire.ConsumeField(y)
+ my[fnum] = append(my[fnum], y[:n]...)
+ y = y[n:]
+ }
+ if len(mx) != len(my) {
+ return false
+ }
+
+ for k, v1 := range mx {
+ if v2, ok := my[k]; !ok || !bytes.Equal([]byte(v1), []byte(v2)) {
+ return false
+ }
+ }
+
+ return true
+}
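Callers reach this fast path through the ordinary proto.Equal entry point, so nothing changes at call sites. A small check, using a well-known wrapper type for brevity:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	a, b := wrapperspb.String("loki"), wrapperspb.String("loki")
	// Generated messages share the internal messageState, so this comparison
	// can take the allocation-free equalMessage path.
	fmt.Println(proto.Equal(a, b)) // true
}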
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go
index 6e8677ee633f9..b6849d66927d2 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go
@@ -160,6 +160,7 @@ func (x placeholderExtension) HasPresence() bool
func (x placeholderExtension) HasOptionalKeyword() bool { return false }
func (x placeholderExtension) IsExtension() bool { return true }
func (x placeholderExtension) IsWeak() bool { return false }
+func (x placeholderExtension) IsLazy() bool { return false }
func (x placeholderExtension) IsPacked() bool { return false }
func (x placeholderExtension) IsList() bool { return false }
func (x placeholderExtension) IsMap() bool { return false }
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go
index 019399d454d32..741b5ed29cf84 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message.go
@@ -30,8 +30,8 @@ type MessageInfo struct {
// Desc is the underlying message descriptor type and must be populated.
Desc protoreflect.MessageDescriptor
- // Exporter must be provided in a purego environment in order to provide
- // access to unexported fields.
+ // Deprecated: Exporter will be removed the next time we bump
+ // protoimpl.GenVersion. See https://github.com/golang/protobuf/issues/1640
Exporter exporter
// OneofWrappers is list of pointers to oneof wrapper struct types.
diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go
deleted file mode 100644
index da685e8a29d96..0000000000000
--- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go
+++ /dev/null
@@ -1,215 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build purego || appengine
-// +build purego appengine
-
-package impl
-
-import (
- "fmt"
- "reflect"
- "sync"
-)
-
-const UnsafeEnabled = false
-
-// Pointer is an opaque pointer type.
-type Pointer any
-
-// offset represents the offset to a struct field, accessible from a pointer.
-// The offset is the field index into a struct.
-type offset struct {
- index int
- export exporter
-}
-
-// offsetOf returns a field offset for the struct field.
-func offsetOf(f reflect.StructField, x exporter) offset {
- if len(f.Index) != 1 {
- panic("embedded structs are not supported")
- }
- if f.PkgPath == "" {
- return offset{index: f.Index[0]} // field is already exported
- }
- if x == nil {
- panic("exporter must be provided for unexported field")
- }
- return offset{index: f.Index[0], export: x}
-}
-
-// IsValid reports whether the offset is valid.
-func (f offset) IsValid() bool { return f.index >= 0 }
-
-// invalidOffset is an invalid field offset.
-var invalidOffset = offset{index: -1}
-
-// zeroOffset is a noop when calling pointer.Apply.
-var zeroOffset = offset{index: 0}
-
-// pointer is an abstract representation of a pointer to a struct or field.
-type pointer struct{ v reflect.Value }
-
-// pointerOf returns p as a pointer.
-func pointerOf(p Pointer) pointer {
- return pointerOfIface(p)
-}
-
-// pointerOfValue returns v as a pointer.
-func pointerOfValue(v reflect.Value) pointer {
- return pointer{v: v}
-}
-
-// pointerOfIface returns the pointer portion of an interface.
-func pointerOfIface(v any) pointer {
- return pointer{v: reflect.ValueOf(v)}
-}
-
-// IsNil reports whether the pointer is nil.
-func (p pointer) IsNil() bool {
- return p.v.IsNil()
-}
-
-// Apply adds an offset to the pointer to derive a new pointer
-// to a specified field. The current pointer must be pointing at a struct.
-func (p pointer) Apply(f offset) pointer {
- if f.export != nil {
- if v := reflect.ValueOf(f.export(p.v.Interface(), f.index)); v.IsValid() {
- return pointer{v: v}
- }
- }
- return pointer{v: p.v.Elem().Field(f.index).Addr()}
-}
-
-// AsValueOf treats p as a pointer to an object of type t and returns the value.
-// It is equivalent to reflect.ValueOf(p.AsIfaceOf(t))
-func (p pointer) AsValueOf(t reflect.Type) reflect.Value {
- if got := p.v.Type().Elem(); got != t {
- panic(fmt.Sprintf("invalid type: got %v, want %v", got, t))
- }
- return p.v
-}
-
-// AsIfaceOf treats p as a pointer to an object of type t and returns the value.
-// It is equivalent to p.AsValueOf(t).Interface()
-func (p pointer) AsIfaceOf(t reflect.Type) any {
- return p.AsValueOf(t).Interface()
-}
-
-func (p pointer) Bool() *bool { return p.v.Interface().(*bool) }
-func (p pointer) BoolPtr() **bool { return p.v.Interface().(**bool) }
-func (p pointer) BoolSlice() *[]bool { return p.v.Interface().(*[]bool) }
-func (p pointer) Int32() *int32 { return p.v.Interface().(*int32) }
-func (p pointer) Int32Ptr() **int32 { return p.v.Interface().(**int32) }
-func (p pointer) Int32Slice() *[]int32 { return p.v.Interface().(*[]int32) }
-func (p pointer) Int64() *int64 { return p.v.Interface().(*int64) }
-func (p pointer) Int64Ptr() **int64 { return p.v.Interface().(**int64) }
-func (p pointer) Int64Slice() *[]int64 { return p.v.Interface().(*[]int64) }
-func (p pointer) Uint32() *uint32 { return p.v.Interface().(*uint32) }
-func (p pointer) Uint32Ptr() **uint32 { return p.v.Interface().(**uint32) }
-func (p pointer) Uint32Slice() *[]uint32 { return p.v.Interface().(*[]uint32) }
-func (p pointer) Uint64() *uint64 { return p.v.Interface().(*uint64) }
-func (p pointer) Uint64Ptr() **uint64 { return p.v.Interface().(**uint64) }
-func (p pointer) Uint64Slice() *[]uint64 { return p.v.Interface().(*[]uint64) }
-func (p pointer) Float32() *float32 { return p.v.Interface().(*float32) }
-func (p pointer) Float32Ptr() **float32 { return p.v.Interface().(**float32) }
-func (p pointer) Float32Slice() *[]float32 { return p.v.Interface().(*[]float32) }
-func (p pointer) Float64() *float64 { return p.v.Interface().(*float64) }
-func (p pointer) Float64Ptr() **float64 { return p.v.Interface().(**float64) }
-func (p pointer) Float64Slice() *[]float64 { return p.v.Interface().(*[]float64) }
-func (p pointer) String() *string { return p.v.Interface().(*string) }
-func (p pointer) StringPtr() **string { return p.v.Interface().(**string) }
-func (p pointer) StringSlice() *[]string { return p.v.Interface().(*[]string) }
-func (p pointer) Bytes() *[]byte { return p.v.Interface().(*[]byte) }
-func (p pointer) BytesPtr() **[]byte { return p.v.Interface().(**[]byte) }
-func (p pointer) BytesSlice() *[][]byte { return p.v.Interface().(*[][]byte) }
-func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.v.Interface().(*WeakFields)) }
-func (p pointer) Extensions() *map[int32]ExtensionField {
- return p.v.Interface().(*map[int32]ExtensionField)
-}
-
-func (p pointer) Elem() pointer {
- return pointer{v: p.v.Elem()}
-}
-
-// PointerSlice copies []*T from p as a new []pointer.
-// This behavior differs from the implementation in pointer_unsafe.go.
-func (p pointer) PointerSlice() []pointer {
- // TODO: reconsider this
- if p.v.IsNil() {
- return nil
- }
- n := p.v.Elem().Len()
- s := make([]pointer, n)
- for i := 0; i < n; i++ {
- s[i] = pointer{v: p.v.Elem().Index(i)}
- }
- return s
-}
-
-// AppendPointerSlice appends v to p, which must be a []*T.
-func (p pointer) AppendPointerSlice(v pointer) {
- sp := p.v.Elem()
- sp.Set(reflect.Append(sp, v.v))
-}
-
-// SetPointer sets *p to v.
-func (p pointer) SetPointer(v pointer) {
- p.v.Elem().Set(v.v)
-}
-
-func growSlice(p pointer, addCap int) {
- // TODO: Once we only support Go 1.20 and newer, use reflect.Grow.
- in := p.v.Elem()
- out := reflect.MakeSlice(in.Type(), in.Len(), in.Len()+addCap)
- reflect.Copy(out, in)
- p.v.Elem().Set(out)
-}
-
-func (p pointer) growBoolSlice(addCap int) {
- growSlice(p, addCap)
-}
-
-func (p pointer) growInt32Slice(addCap int) {
- growSlice(p, addCap)
-}
-
-func (p pointer) growUint32Slice(addCap int) {
- growSlice(p, addCap)
-}
-
-func (p pointer) growInt64Slice(addCap int) {
- growSlice(p, addCap)
-}
-
-func (p pointer) growUint64Slice(addCap int) {
- growSlice(p, addCap)
-}
-
-func (p pointer) growFloat64Slice(addCap int) {
- growSlice(p, addCap)
-}
-
-func (p pointer) growFloat32Slice(addCap int) {
- growSlice(p, addCap)
-}
-
-func (Export) MessageStateOf(p Pointer) *messageState { panic("not supported") }
-func (ms *messageState) pointer() pointer { panic("not supported") }
-func (ms *messageState) messageInfo() *MessageInfo { panic("not supported") }
-func (ms *messageState) LoadMessageInfo() *MessageInfo { panic("not supported") }
-func (ms *messageState) StoreMessageInfo(mi *MessageInfo) { panic("not supported") }
-
-type atomicNilMessage struct {
- once sync.Once
- m messageReflectWrapper
-}
-
-func (m *atomicNilMessage) Init(mi *MessageInfo) *messageReflectWrapper {
- m.once.Do(func() {
- m.m.p = pointerOfIface(reflect.Zero(mi.GoReflectType).Interface())
- m.m.mi = mi
- })
- return &m.m
-}
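For context, the deleted purego path resolved struct fields through package reflect instead of pointer arithmetic, which is why it needed the Exporter hook for unexported fields. A minimal standalone sketch of that style of field addressing (illustrative names, not the library's code):

package main

import (
	"fmt"
	"reflect"
)

type msg struct {
	Name string
}

func main() {
	m := &msg{}
	// Address field 0 via reflection, as the removed offset/Apply path did
	// for exported fields; unexported fields needed an exporter callback.
	p := reflect.ValueOf(m).Elem().Field(0).Addr()
	*p.Interface().(*string) = "hello"
	fmt.Println(m.Name) // hello
}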
diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
index 5f20ca5d8ab5e..79e186667b70f 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build !purego && !appengine
-// +build !purego,!appengine
-
package impl
import (
diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go
deleted file mode 100644
index a1f6f333860e8..0000000000000
--- a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build purego || appengine
-// +build purego appengine
-
-package strs
-
-import pref "google.golang.org/protobuf/reflect/protoreflect"
-
-func UnsafeString(b []byte) string {
- return string(b)
-}
-
-func UnsafeBytes(s string) []byte {
- return []byte(s)
-}
-
-type Builder struct{}
-
-func (*Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.FullName {
- return prefix.Append(name)
-}
-
-func (*Builder) MakeString(b []byte) string {
- return string(b)
-}
diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go
index a008acd09082a..832a7988f145f 100644
--- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go
+++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go
@@ -2,8 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build !purego && !appengine && !go1.21
-// +build !purego,!appengine,!go1.21
+//go:build !go1.21
package strs
diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go
index 60166f2ba3cff..1ffddf6877a9a 100644
--- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go
+++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go
@@ -2,8 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build !purego && !appengine && go1.21
-// +build !purego,!appengine,go1.21
+//go:build go1.21
package strs
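With the purego fallback deleted, only the unsafe variants of these helpers remain; on modern Go they reduce to the standard unsafe.String/unsafe.Slice conversions. A hedged sketch of the zero-copy pattern (the general technique, not a verbatim copy of the vendored file):

package main

import (
	"fmt"
	"unsafe"
)

// unsafeString returns a string sharing b's backing array;
// b must not be mutated while the string is live.
func unsafeString(b []byte) string {
	if len(b) == 0 {
		return ""
	}
	return unsafe.String(unsafe.SliceData(b), len(b))
}

// unsafeBytes returns a byte slice sharing s's backing array;
// the result must be treated as read-only.
func unsafeBytes(s string) []byte {
	if len(s) == 0 {
		return nil
	}
	return unsafe.Slice(unsafe.StringData(s), len(s))
}

func main() {
	fmt.Println(unsafeString([]byte("abc")), string(unsafeBytes("xyz")))
}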
diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go
index dbbf1f6862c46..fb8e15e8dad50 100644
--- a/vendor/google.golang.org/protobuf/internal/version/version.go
+++ b/vendor/google.golang.org/protobuf/internal/version/version.go
@@ -51,8 +51,8 @@ import (
// 10. Send out the CL for review and submit it.
const (
Major = 1
- Minor = 34
- Patch = 2
+ Minor = 35
+ Patch = 1
PreRelease = ""
)
diff --git a/vendor/google.golang.org/protobuf/proto/equal.go b/vendor/google.golang.org/protobuf/proto/equal.go
index 1a0be1b03c73d..c36d4a9cd75b7 100644
--- a/vendor/google.golang.org/protobuf/proto/equal.go
+++ b/vendor/google.golang.org/protobuf/proto/equal.go
@@ -8,6 +8,7 @@ import (
"reflect"
"google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoiface"
)
// Equal reports whether two messages are equal,
@@ -51,6 +52,14 @@ func Equal(x, y Message) bool {
if mx.IsValid() != my.IsValid() {
return false
}
+
+ // Only one of the messages needs to implement the fast-path for it to work.
+ pmx := protoMethods(mx)
+ pmy := protoMethods(my)
+ if pmx != nil && pmy != nil && pmx.Equal != nil && pmy.Equal != nil {
+ return pmx.Equal(protoiface.EqualInput{MessageA: mx, MessageB: my}).Equal
+ }
+
vx := protoreflect.ValueOfMessage(mx)
vy := protoreflect.ValueOfMessage(my)
return vx.Equal(vy)
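The new fast path is transparent to callers: proto.Equal keeps its signature and only changes how the comparison is dispatched when both operands advertise an Equal method. A small runnable sketch using a well-known type (whether it hits the fast path or the reflection fallback depends on how that type's code was generated):

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	a := durationpb.New(3 * time.Second)
	b := durationpb.New(3 * time.Second)
	// With v1.35+ generated code both operands expose Methods.Equal,
	// so this should dispatch to the fast path; older generated code
	// falls back to the protoreflect.Value comparison shown above.
	fmt.Println(proto.Equal(a, b)) // true
}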
diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go
index d248f29284684..78445d116f7bd 100644
--- a/vendor/google.golang.org/protobuf/proto/extension.go
+++ b/vendor/google.golang.org/protobuf/proto/extension.go
@@ -39,6 +39,48 @@ func ClearExtension(m Message, xt protoreflect.ExtensionType) {
// If the field is unpopulated, it returns the default value for
// scalars and an immutable, empty value for lists or messages.
// It panics if xt does not extend m.
+//
+// The type of the value is dependent on the field type of the extension.
+// For extensions generated by protoc-gen-go, the Go type is as follows:
+//
+// ╔═══════════════════╤═════════════════════════╗
+// ║ Go type │ Protobuf kind ║
+// ╠═══════════════════╪═════════════════════════╣
+// ║ bool │ bool ║
+// ║ int32 │ int32, sint32, sfixed32 ║
+// ║ int64 │ int64, sint64, sfixed64 ║
+// ║ uint32 │ uint32, fixed32 ║
+// ║ uint64 │ uint64, fixed64 ║
+// ║ float32 │ float ║
+// ║ float64 │ double ║
+// ║ string │ string ║
+// ║ []byte │ bytes ║
+// ║ protoreflect.Enum │ enum ║
+// ║ proto.Message │ message, group ║
+// ╚═══════════════════╧═════════════════════════╝
+//
+// The protoreflect.Enum and proto.Message types are the concrete Go type
+// associated with the named enum or message. Repeated fields are represented
+// using a Go slice of the base element type.
+//
+// If a generated extension descriptor variable is directly passed to
+// GetExtension, then the call should be followed immediately by a
+// type assertion to the expected output value. For example:
+//
+// mm := proto.GetExtension(m, foopb.E_MyExtension).(*foopb.MyMessage)
+//
+// This pattern enables static analysis tools to verify that the asserted type
+// matches the Go type associated with the extension field and
+// also enables a possible future migration to a type-safe extension API.
+//
+// Since singular messages are the most common extension type, the pattern of
+// calling HasExtension followed by GetExtension may be simplified to:
+//
+// if mm := proto.GetExtension(m, foopb.E_MyExtension).(*foopb.MyMessage); mm != nil {
+// ... // make use of mm
+// }
+//
+// The mm variable is non-nil if and only if HasExtension reports true.
func GetExtension(m Message, xt protoreflect.ExtensionType) any {
// Treat nil message interface as an empty message; return the default.
if m == nil {
@@ -51,6 +93,35 @@ func GetExtension(m Message, xt protoreflect.ExtensionType) any {
// SetExtension stores the value of an extension field.
// It panics if m is invalid, xt does not extend m, or if type of v
// is invalid for the specified extension field.
+//
+// The type of the value is dependent on the field type of the extension.
+// For extensions generated by protoc-gen-go, the Go type is as follows:
+//
+// ╔═══════════════════╤═════════════════════════╗
+// ║ Go type │ Protobuf kind ║
+// ╠═══════════════════╪═════════════════════════╣
+// ║ bool │ bool ║
+// ║ int32 │ int32, sint32, sfixed32 ║
+// ║ int64 │ int64, sint64, sfixed64 ║
+// ║ uint32 │ uint32, fixed32 ║
+// ║ uint64 │ uint64, fixed64 ║
+// ║ float32 │ float ║
+// ║ float64 │ double ║
+// ║ string │ string ║
+// ║ []byte │ bytes ║
+// ║ protoreflect.Enum │ enum ║
+// ║ proto.Message │ message, group ║
+// ╚═══════════════════╧═════════════════════════╝
+//
+// The protoreflect.Enum and proto.Message types are the concrete Go type
+// associated with the named enum or message. Repeated fields are represented
+// using a Go slice of the base element type.
+//
+// If a generated extension descriptor variable is directly passed to
+// SetExtension (e.g., foopb.E_MyExtension), then the value should be a
+// concrete type that matches the expected Go type for the extension descriptor
+// so that static analysis tools can verify type correctness.
+// This also enables a possible future migration to a type-safe extension API.
func SetExtension(m Message, xt protoreflect.ExtensionType, v any) {
xd := xt.TypeDescriptor()
pv := xt.ValueOf(v)
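The added documentation encourages asserting GetExtension's any return value immediately. A sketch of the round trip, reusing the doc comment's hypothetical foopb.E_MyExtension (substitute a real generated extension descriptor and message type):

// Store a concrete value matching the extension's Go type.
proto.SetExtension(m, foopb.E_MyExtension, &foopb.MyMessage{})

// Assert the result right away so static analysis can check the type.
if mm := proto.GetExtension(m, foopb.E_MyExtension).(*foopb.MyMessage); mm != nil {
	_ = mm // non-nil if and only if proto.HasExtension(m, foopb.E_MyExtension)
}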
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
index 85617554272cb..ebcb4a8ab138e 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
@@ -150,6 +150,7 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc
opts = proto.Clone(opts).(*descriptorpb.FieldOptions)
f.L1.Options = func() protoreflect.ProtoMessage { return opts }
f.L1.IsWeak = opts.GetWeak()
+ f.L1.IsLazy = opts.GetLazy()
if opts.Packed != nil {
f.L1.EditionFeatures.IsPacked = opts.GetPacked()
}
@@ -214,6 +215,9 @@ func (r descsByName) initExtensionDeclarations(xds []*descriptorpb.FieldDescript
if xd.JsonName != nil {
x.L2.StringName.InitJSON(xd.GetJsonName())
}
+ if x.L1.Kind == protoreflect.MessageKind && x.L1.EditionFeatures.IsDelimitedEncoded {
+ x.L1.Kind = protoreflect.GroupKind
+ }
}
return xs, nil
}
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
index 804830eda36f3..002e0047aeabb 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
@@ -14,7 +14,7 @@ import (
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/types/descriptorpb"
- gofeaturespb "google.golang.org/protobuf/types/gofeaturespb"
+ "google.golang.org/protobuf/types/gofeaturespb"
)
var defaults = &descriptorpb.FeatureSetDefaults{}
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go
index d5d5af6ebedb8..742cb518c40b1 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go
@@ -23,6 +23,7 @@ type (
Unmarshal func(unmarshalInput) (unmarshalOutput, error)
Merge func(mergeInput) mergeOutput
CheckInitialized func(checkInitializedInput) (checkInitializedOutput, error)
+ Equal func(equalInput) equalOutput
}
supportFlags = uint64
sizeInput = struct {
@@ -75,4 +76,13 @@ type (
checkInitializedOutput = struct {
pragma.NoUnkeyedLiterals
}
+ equalInput = struct {
+ pragma.NoUnkeyedLiterals
+ MessageA Message
+ MessageB Message
+ }
+ equalOutput = struct {
+ pragma.NoUnkeyedLiterals
+ Equal bool
+ }
)
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go
deleted file mode 100644
index 75f83a2af030c..0000000000000
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build purego || appengine
-// +build purego appengine
-
-package protoreflect
-
-import "google.golang.org/protobuf/internal/pragma"
-
-type valueType int
-
-const (
- nilType valueType = iota
- boolType
- int32Type
- int64Type
- uint32Type
- uint64Type
- float32Type
- float64Type
- stringType
- bytesType
- enumType
- ifaceType
-)
-
-// value is a union where only one type can be represented at a time.
-// This uses a distinct field for each type. This is type safe in Go, but
-// occupies more memory than necessary (72B).
-type value struct {
- pragma.DoNotCompare // 0B
-
- typ valueType // 8B
- num uint64 // 8B
- str string // 16B
- bin []byte // 24B
- iface any // 16B
-}
-
-func valueOfString(v string) Value {
- return Value{typ: stringType, str: v}
-}
-func valueOfBytes(v []byte) Value {
- return Value{typ: bytesType, bin: v}
-}
-func valueOfIface(v any) Value {
- return Value{typ: ifaceType, iface: v}
-}
-
-func (v Value) getString() string {
- return v.str
-}
-func (v Value) getBytes() []byte {
- return v.bin
-}
-func (v Value) getIface() any {
- return v.iface
-}
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
index 7f3583ead81a0..0015fcb35d832 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
@@ -2,8 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build !purego && !appengine && !go1.21
-// +build !purego,!appengine,!go1.21
+//go:build !go1.21
package protoreflect
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
index f7d386990a0f3..479527b58dd37 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
@@ -2,8 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build !purego && !appengine && go1.21
-// +build !purego,!appengine,go1.21
+//go:build go1.21
package protoreflect
diff --git a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go
index 44cf467d8845d..246156561ce46 100644
--- a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go
+++ b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go
@@ -39,6 +39,9 @@ type Methods = struct {
// CheckInitialized returns an error if any required fields in the message are not set.
CheckInitialized func(CheckInitializedInput) (CheckInitializedOutput, error)
+
+ // Equal compares two messages and returns EqualOutput.Equal == true if they are equal.
+ Equal func(EqualInput) EqualOutput
}
// SupportFlags indicate support for optional features.
@@ -166,3 +169,18 @@ type CheckInitializedInput = struct {
type CheckInitializedOutput = struct {
pragma.NoUnkeyedLiterals
}
+
+// EqualInput is input to the Equal method.
+type EqualInput = struct {
+ pragma.NoUnkeyedLiterals
+
+ MessageA protoreflect.Message
+ MessageB protoreflect.Message
+}
+
+// EqualOutput is output from the Equal method.
+type EqualOutput = struct {
+ pragma.NoUnkeyedLiterals
+
+ Equal bool
+}
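EqualInput and EqualOutput follow the same struct-in/struct-out shape as the other Methods entries. A hedged sketch of how a caller such as proto.Equal drives the hook (assuming methods is a non-nil *protoiface.Methods whose Equal field is set, and mx, my are the protoreflect.Message views of the two operands):

out := methods.Equal(protoiface.EqualInput{MessageA: mx, MessageB: my})
if out.Equal {
	// the generated fast path judged the two messages equal
}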
diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
index 9403eb075077f..6dea75cd5b15f 100644
--- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
+++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
@@ -1217,11 +1217,9 @@ type FileDescriptorSet struct {
func (x *FileDescriptorSet) Reset() {
*x = FileDescriptorSet{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *FileDescriptorSet) String() string {
@@ -1232,7 +1230,7 @@ func (*FileDescriptorSet) ProtoMessage() {}
func (x *FileDescriptorSet) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1291,11 +1289,9 @@ type FileDescriptorProto struct {
func (x *FileDescriptorProto) Reset() {
*x = FileDescriptorProto{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *FileDescriptorProto) String() string {
@@ -1306,7 +1302,7 @@ func (*FileDescriptorProto) ProtoMessage() {}
func (x *FileDescriptorProto) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1434,11 +1430,9 @@ type DescriptorProto struct {
func (x *DescriptorProto) Reset() {
*x = DescriptorProto{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *DescriptorProto) String() string {
@@ -1449,7 +1443,7 @@ func (*DescriptorProto) ProtoMessage() {}
func (x *DescriptorProto) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1561,11 +1555,9 @@ const (
func (x *ExtensionRangeOptions) Reset() {
*x = ExtensionRangeOptions{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ExtensionRangeOptions) String() string {
@@ -1576,7 +1568,7 @@ func (*ExtensionRangeOptions) ProtoMessage() {}
func (x *ExtensionRangeOptions) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1680,11 +1672,9 @@ type FieldDescriptorProto struct {
func (x *FieldDescriptorProto) Reset() {
*x = FieldDescriptorProto{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *FieldDescriptorProto) String() string {
@@ -1695,7 +1685,7 @@ func (*FieldDescriptorProto) ProtoMessage() {}
func (x *FieldDescriptorProto) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1799,11 +1789,9 @@ type OneofDescriptorProto struct {
func (x *OneofDescriptorProto) Reset() {
*x = OneofDescriptorProto{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *OneofDescriptorProto) String() string {
@@ -1814,7 +1802,7 @@ func (*OneofDescriptorProto) ProtoMessage() {}
func (x *OneofDescriptorProto) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1863,11 +1851,9 @@ type EnumDescriptorProto struct {
func (x *EnumDescriptorProto) Reset() {
*x = EnumDescriptorProto{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[6]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *EnumDescriptorProto) String() string {
@@ -1878,7 +1864,7 @@ func (*EnumDescriptorProto) ProtoMessage() {}
func (x *EnumDescriptorProto) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[6]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1941,11 +1927,9 @@ type EnumValueDescriptorProto struct {
func (x *EnumValueDescriptorProto) Reset() {
*x = EnumValueDescriptorProto{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[7]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *EnumValueDescriptorProto) String() string {
@@ -1956,7 +1940,7 @@ func (*EnumValueDescriptorProto) ProtoMessage() {}
func (x *EnumValueDescriptorProto) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[7]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2005,11 +1989,9 @@ type ServiceDescriptorProto struct {
func (x *ServiceDescriptorProto) Reset() {
*x = ServiceDescriptorProto{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[8]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ServiceDescriptorProto) String() string {
@@ -2020,7 +2002,7 @@ func (*ServiceDescriptorProto) ProtoMessage() {}
func (x *ServiceDescriptorProto) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[8]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2082,11 +2064,9 @@ const (
func (x *MethodDescriptorProto) Reset() {
*x = MethodDescriptorProto{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[9]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *MethodDescriptorProto) String() string {
@@ -2097,7 +2077,7 @@ func (*MethodDescriptorProto) ProtoMessage() {}
func (x *MethodDescriptorProto) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[9]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2267,11 +2247,9 @@ const (
func (x *FileOptions) Reset() {
*x = FileOptions{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[10]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *FileOptions) String() string {
@@ -2282,7 +2260,7 @@ func (*FileOptions) ProtoMessage() {}
func (x *FileOptions) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[10]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2534,11 +2512,9 @@ const (
func (x *MessageOptions) Reset() {
*x = MessageOptions{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[11]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *MessageOptions) String() string {
@@ -2549,7 +2525,7 @@ func (*MessageOptions) ProtoMessage() {}
func (x *MessageOptions) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[11]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2707,11 +2683,9 @@ const (
func (x *FieldOptions) Reset() {
*x = FieldOptions{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[12]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *FieldOptions) String() string {
@@ -2722,7 +2696,7 @@ func (*FieldOptions) ProtoMessage() {}
func (x *FieldOptions) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[12]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2849,11 +2823,9 @@ type OneofOptions struct {
func (x *OneofOptions) Reset() {
*x = OneofOptions{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[13]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *OneofOptions) String() string {
@@ -2864,7 +2836,7 @@ func (*OneofOptions) ProtoMessage() {}
func (x *OneofOptions) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[13]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2929,11 +2901,9 @@ const (
func (x *EnumOptions) Reset() {
*x = EnumOptions{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[14]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *EnumOptions) String() string {
@@ -2944,7 +2914,7 @@ func (*EnumOptions) ProtoMessage() {}
func (x *EnumOptions) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[14]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3026,11 +2996,9 @@ const (
func (x *EnumValueOptions) Reset() {
*x = EnumValueOptions{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[15]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *EnumValueOptions) String() string {
@@ -3041,7 +3009,7 @@ func (*EnumValueOptions) ProtoMessage() {}
func (x *EnumValueOptions) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[15]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3115,11 +3083,9 @@ const (
func (x *ServiceOptions) Reset() {
*x = ServiceOptions{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[16]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[16]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ServiceOptions) String() string {
@@ -3130,7 +3096,7 @@ func (*ServiceOptions) ProtoMessage() {}
func (x *ServiceOptions) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[16]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3192,11 +3158,9 @@ const (
func (x *MethodOptions) Reset() {
*x = MethodOptions{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[17]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *MethodOptions) String() string {
@@ -3207,7 +3171,7 @@ func (*MethodOptions) ProtoMessage() {}
func (x *MethodOptions) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[17]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3274,11 +3238,9 @@ type UninterpretedOption struct {
func (x *UninterpretedOption) Reset() {
*x = UninterpretedOption{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[18]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[18]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *UninterpretedOption) String() string {
@@ -3289,7 +3251,7 @@ func (*UninterpretedOption) ProtoMessage() {}
func (x *UninterpretedOption) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[18]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3375,11 +3337,9 @@ type FeatureSet struct {
func (x *FeatureSet) Reset() {
*x = FeatureSet{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[19]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[19]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *FeatureSet) String() string {
@@ -3390,7 +3350,7 @@ func (*FeatureSet) ProtoMessage() {}
func (x *FeatureSet) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[19]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3467,11 +3427,9 @@ type FeatureSetDefaults struct {
func (x *FeatureSetDefaults) Reset() {
*x = FeatureSetDefaults{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[20]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[20]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *FeatureSetDefaults) String() string {
@@ -3482,7 +3440,7 @@ func (*FeatureSetDefaults) ProtoMessage() {}
func (x *FeatureSetDefaults) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[20]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3578,11 +3536,9 @@ type SourceCodeInfo struct {
func (x *SourceCodeInfo) Reset() {
*x = SourceCodeInfo{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[21]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[21]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *SourceCodeInfo) String() string {
@@ -3593,7 +3549,7 @@ func (*SourceCodeInfo) ProtoMessage() {}
func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[21]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3630,11 +3586,9 @@ type GeneratedCodeInfo struct {
func (x *GeneratedCodeInfo) Reset() {
*x = GeneratedCodeInfo{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[22]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[22]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *GeneratedCodeInfo) String() string {
@@ -3645,7 +3599,7 @@ func (*GeneratedCodeInfo) ProtoMessage() {}
func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[22]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3679,11 +3633,9 @@ type DescriptorProto_ExtensionRange struct {
func (x *DescriptorProto_ExtensionRange) Reset() {
*x = DescriptorProto_ExtensionRange{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[23]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[23]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *DescriptorProto_ExtensionRange) String() string {
@@ -3694,7 +3646,7 @@ func (*DescriptorProto_ExtensionRange) ProtoMessage() {}
func (x *DescriptorProto_ExtensionRange) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[23]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3744,11 +3696,9 @@ type DescriptorProto_ReservedRange struct {
func (x *DescriptorProto_ReservedRange) Reset() {
*x = DescriptorProto_ReservedRange{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[24]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[24]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *DescriptorProto_ReservedRange) String() string {
@@ -3759,7 +3709,7 @@ func (*DescriptorProto_ReservedRange) ProtoMessage() {}
func (x *DescriptorProto_ReservedRange) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[24]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3813,11 +3763,9 @@ type ExtensionRangeOptions_Declaration struct {
func (x *ExtensionRangeOptions_Declaration) Reset() {
*x = ExtensionRangeOptions_Declaration{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[25]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[25]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ExtensionRangeOptions_Declaration) String() string {
@@ -3828,7 +3776,7 @@ func (*ExtensionRangeOptions_Declaration) ProtoMessage() {}
func (x *ExtensionRangeOptions_Declaration) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[25]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3895,11 +3843,9 @@ type EnumDescriptorProto_EnumReservedRange struct {
func (x *EnumDescriptorProto_EnumReservedRange) Reset() {
*x = EnumDescriptorProto_EnumReservedRange{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[26]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[26]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *EnumDescriptorProto_EnumReservedRange) String() string {
@@ -3910,7 +3856,7 @@ func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {}
func (x *EnumDescriptorProto_EnumReservedRange) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[26]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3950,11 +3896,9 @@ type FieldOptions_EditionDefault struct {
func (x *FieldOptions_EditionDefault) Reset() {
*x = FieldOptions_EditionDefault{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[27]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[27]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *FieldOptions_EditionDefault) String() string {
@@ -3965,7 +3909,7 @@ func (*FieldOptions_EditionDefault) ProtoMessage() {}
func (x *FieldOptions_EditionDefault) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[27]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4018,11 +3962,9 @@ type FieldOptions_FeatureSupport struct {
func (x *FieldOptions_FeatureSupport) Reset() {
*x = FieldOptions_FeatureSupport{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[28]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[28]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *FieldOptions_FeatureSupport) String() string {
@@ -4033,7 +3975,7 @@ func (*FieldOptions_FeatureSupport) ProtoMessage() {}
func (x *FieldOptions_FeatureSupport) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[28]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4092,11 +4034,9 @@ type UninterpretedOption_NamePart struct {
func (x *UninterpretedOption_NamePart) Reset() {
*x = UninterpretedOption_NamePart{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[29]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[29]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *UninterpretedOption_NamePart) String() string {
@@ -4107,7 +4047,7 @@ func (*UninterpretedOption_NamePart) ProtoMessage() {}
func (x *UninterpretedOption_NamePart) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[29]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4154,11 +4094,9 @@ type FeatureSetDefaults_FeatureSetEditionDefault struct {
func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() {
*x = FeatureSetDefaults_FeatureSetEditionDefault{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string {
@@ -4169,7 +4107,7 @@ func (*FeatureSetDefaults_FeatureSetEditionDefault) ProtoMessage() {}
func (x *FeatureSetDefaults_FeatureSetEditionDefault) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4305,11 +4243,9 @@ type SourceCodeInfo_Location struct {
func (x *SourceCodeInfo_Location) Reset() {
*x = SourceCodeInfo_Location{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *SourceCodeInfo_Location) String() string {
@@ -4320,7 +4256,7 @@ func (*SourceCodeInfo_Location) ProtoMessage() {}
func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4392,11 +4328,9 @@ type GeneratedCodeInfo_Annotation struct {
func (x *GeneratedCodeInfo_Annotation) Reset() {
*x = GeneratedCodeInfo_Annotation{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *GeneratedCodeInfo_Annotation) String() string {
@@ -4407,7 +4341,7 @@ func (*GeneratedCodeInfo_Annotation) ProtoMessage() {}
func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -5385,424 +5319,6 @@ func file_google_protobuf_descriptor_proto_init() {
if File_google_protobuf_descriptor_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_protobuf_descriptor_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*FileDescriptorSet); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[1].Exporter = func(v any, i int) any {
- switch v := v.(*FileDescriptorProto); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[2].Exporter = func(v any, i int) any {
- switch v := v.(*DescriptorProto); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[3].Exporter = func(v any, i int) any {
- switch v := v.(*ExtensionRangeOptions); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- case 3:
- return &v.extensionFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[4].Exporter = func(v any, i int) any {
- switch v := v.(*FieldDescriptorProto); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[5].Exporter = func(v any, i int) any {
- switch v := v.(*OneofDescriptorProto); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[6].Exporter = func(v any, i int) any {
- switch v := v.(*EnumDescriptorProto); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[7].Exporter = func(v any, i int) any {
- switch v := v.(*EnumValueDescriptorProto); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[8].Exporter = func(v any, i int) any {
- switch v := v.(*ServiceDescriptorProto); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[9].Exporter = func(v any, i int) any {
- switch v := v.(*MethodDescriptorProto); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[10].Exporter = func(v any, i int) any {
- switch v := v.(*FileOptions); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- case 3:
- return &v.extensionFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[11].Exporter = func(v any, i int) any {
- switch v := v.(*MessageOptions); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- case 3:
- return &v.extensionFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[12].Exporter = func(v any, i int) any {
- switch v := v.(*FieldOptions); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- case 3:
- return &v.extensionFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[13].Exporter = func(v any, i int) any {
- switch v := v.(*OneofOptions); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- case 3:
- return &v.extensionFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[14].Exporter = func(v any, i int) any {
- switch v := v.(*EnumOptions); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- case 3:
- return &v.extensionFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[15].Exporter = func(v any, i int) any {
- switch v := v.(*EnumValueOptions); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- case 3:
- return &v.extensionFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[16].Exporter = func(v any, i int) any {
- switch v := v.(*ServiceOptions); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- case 3:
- return &v.extensionFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[17].Exporter = func(v any, i int) any {
- switch v := v.(*MethodOptions); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- case 3:
- return &v.extensionFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[18].Exporter = func(v any, i int) any {
- switch v := v.(*UninterpretedOption); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v any, i int) any {
- switch v := v.(*FeatureSet); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- case 3:
- return &v.extensionFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v any, i int) any {
- switch v := v.(*FeatureSetDefaults); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v any, i int) any {
- switch v := v.(*SourceCodeInfo); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v any, i int) any {
- switch v := v.(*GeneratedCodeInfo); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v any, i int) any {
- switch v := v.(*DescriptorProto_ExtensionRange); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v any, i int) any {
- switch v := v.(*DescriptorProto_ReservedRange); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v any, i int) any {
- switch v := v.(*ExtensionRangeOptions_Declaration); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v any, i int) any {
- switch v := v.(*EnumDescriptorProto_EnumReservedRange); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v any, i int) any {
- switch v := v.(*FieldOptions_EditionDefault); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[28].Exporter = func(v any, i int) any {
- switch v := v.(*FieldOptions_FeatureSupport); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[29].Exporter = func(v any, i int) any {
- switch v := v.(*UninterpretedOption_NamePart); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[30].Exporter = func(v any, i int) any {
- switch v := v.(*FeatureSetDefaults_FeatureSetEditionDefault); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[31].Exporter = func(v any, i int) any {
- switch v := v.(*SourceCodeInfo_Location); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[32].Exporter = func(v any, i int) any {
- switch v := v.(*GeneratedCodeInfo_Annotation); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go
index a2ca940c50fbd..c7e860fcd6d87 100644
--- a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go
+++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go
@@ -29,11 +29,9 @@ type GoFeatures struct {
func (x *GoFeatures) Reset() {
*x = GoFeatures{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_go_features_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_go_features_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *GoFeatures) String() string {
@@ -44,7 +42,7 @@ func (*GoFeatures) ProtoMessage() {}
func (x *GoFeatures) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_go_features_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -145,20 +143,6 @@ func file_google_protobuf_go_features_proto_init() {
if File_google_protobuf_go_features_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_protobuf_go_features_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*GoFeatures); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
index 7172b43d383f8..87da199a386e5 100644
--- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
@@ -368,11 +368,9 @@ func (x *Any) UnmarshalNew() (proto.Message, error) {
func (x *Any) Reset() {
*x = Any{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_any_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_any_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Any) String() string {
@@ -383,7 +381,7 @@ func (*Any) ProtoMessage() {}
func (x *Any) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_any_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -461,20 +459,6 @@ func file_google_protobuf_any_proto_init() {
if File_google_protobuf_any_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*Any); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
diff --git a/vendor/google.golang.org/protobuf/types/known/apipb/api.pb.go b/vendor/google.golang.org/protobuf/types/known/apipb/api.pb.go
index 4f2fe89ef11cc..fdc3aef2c65ff 100644
--- a/vendor/google.golang.org/protobuf/types/known/apipb/api.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/apipb/api.pb.go
@@ -94,11 +94,9 @@ type Api struct {
func (x *Api) Reset() {
*x = Api{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_api_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_api_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Api) String() string {
@@ -109,7 +107,7 @@ func (*Api) ProtoMessage() {}
func (x *Api) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_api_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -197,11 +195,9 @@ type Method struct {
func (x *Method) Reset() {
*x = Method{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_api_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_api_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Method) String() string {
@@ -212,7 +208,7 @@ func (*Method) ProtoMessage() {}
func (x *Method) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_api_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -368,11 +364,9 @@ type Mixin struct {
func (x *Mixin) Reset() {
*x = Mixin{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_api_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_api_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Mixin) String() string {
@@ -383,7 +377,7 @@ func (*Mixin) ProtoMessage() {}
func (x *Mixin) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_api_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -516,44 +510,6 @@ func file_google_protobuf_api_proto_init() {
if File_google_protobuf_api_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_protobuf_api_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*Api); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_api_proto_msgTypes[1].Exporter = func(v any, i int) any {
- switch v := v.(*Method); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_api_proto_msgTypes[2].Exporter = func(v any, i int) any {
- switch v := v.(*Mixin); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
index 1b71bcd910af7..b99d4d2410927 100644
--- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
@@ -245,11 +245,9 @@ func (x *Duration) check() uint {
func (x *Duration) Reset() {
*x = Duration{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_duration_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_duration_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Duration) String() string {
@@ -260,7 +258,7 @@ func (*Duration) ProtoMessage() {}
func (x *Duration) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_duration_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -339,20 +337,6 @@ func file_google_protobuf_duration_proto_init() {
if File_google_protobuf_duration_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_protobuf_duration_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*Duration); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
diff --git a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go
index d87b4fb8281d0..1761bc9c69a56 100644
--- a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go
@@ -55,11 +55,9 @@ type Empty struct {
func (x *Empty) Reset() {
*x = Empty{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_empty_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_empty_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Empty) String() string {
@@ -70,7 +68,7 @@ func (*Empty) ProtoMessage() {}
func (x *Empty) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_empty_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -131,20 +129,6 @@ func file_google_protobuf_empty_proto_init() {
if File_google_protobuf_empty_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_protobuf_empty_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*Empty); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
diff --git a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
index ac1e91bb6ddb2..19de8d371fd90 100644
--- a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
@@ -467,11 +467,9 @@ func rangeFields(path string, f func(field string) bool) bool {
func (x *FieldMask) Reset() {
*x = FieldMask{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_field_mask_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_field_mask_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *FieldMask) String() string {
@@ -482,7 +480,7 @@ func (*FieldMask) ProtoMessage() {}
func (x *FieldMask) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_field_mask_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -553,20 +551,6 @@ func file_google_protobuf_field_mask_proto_init() {
if File_google_protobuf_field_mask_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_protobuf_field_mask_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*FieldMask); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
diff --git a/vendor/google.golang.org/protobuf/types/known/sourcecontextpb/source_context.pb.go b/vendor/google.golang.org/protobuf/types/known/sourcecontextpb/source_context.pb.go
index fa185780056de..4d15e9748c927 100644
--- a/vendor/google.golang.org/protobuf/types/known/sourcecontextpb/source_context.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/sourcecontextpb/source_context.pb.go
@@ -54,11 +54,9 @@ type SourceContext struct {
func (x *SourceContext) Reset() {
*x = SourceContext{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_source_context_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_source_context_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *SourceContext) String() string {
@@ -69,7 +67,7 @@ func (*SourceContext) ProtoMessage() {}
func (x *SourceContext) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_source_context_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -141,20 +139,6 @@ func file_google_protobuf_source_context_proto_init() {
if File_google_protobuf_source_context_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_protobuf_source_context_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*SourceContext); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
diff --git a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go
index d45361cbc7295..8f206a661172c 100644
--- a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go
@@ -120,6 +120,7 @@ package structpb
import (
base64 "encoding/base64"
+ json "encoding/json"
protojson "google.golang.org/protobuf/encoding/protojson"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
@@ -233,11 +234,9 @@ func (x *Struct) UnmarshalJSON(b []byte) error {
func (x *Struct) Reset() {
*x = Struct{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_struct_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_struct_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Struct) String() string {
@@ -248,7 +247,7 @@ func (*Struct) ProtoMessage() {}
func (x *Struct) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_struct_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -296,19 +295,20 @@ type Value struct {
// NewValue constructs a Value from a general-purpose Go interface.
//
-// ╔════════════════════════╤════════════════════════════════════════════╗
-// ║ Go type │ Conversion ║
-// ╠════════════════════════╪════════════════════════════════════════════╣
-// ║ nil │ stored as NullValue ║
-// ║ bool │ stored as BoolValue ║
-// ║ int, int32, int64 │ stored as NumberValue ║
-// ║ uint, uint32, uint64 │ stored as NumberValue ║
-// ║ float32, float64 │ stored as NumberValue ║
-// ║ string │ stored as StringValue; must be valid UTF-8 ║
-// ║ []byte │ stored as StringValue; base64-encoded ║
-// ║ map[string]any │ stored as StructValue ║
-// ║ []any │ stored as ListValue ║
-// ╚════════════════════════╧════════════════════════════════════════════╝
+// ╔═══════════════════════════════════════╤════════════════════════════════════════════╗
+// ║ Go type │ Conversion ║
+// ╠═══════════════════════════════════════╪════════════════════════════════════════════╣
+// ║ nil │ stored as NullValue ║
+// ║ bool │ stored as BoolValue ║
+// ║ int, int8, int16, int32, int64 │ stored as NumberValue ║
+// ║ uint, uint8, uint16, uint32, uint64 │ stored as NumberValue ║
+// ║ float32, float64 │ stored as NumberValue ║
+// ║ json.Number │ stored as NumberValue ║
+// ║ string │ stored as StringValue; must be valid UTF-8 ║
+// ║ []byte │ stored as StringValue; base64-encoded ║
+// ║ map[string]any │ stored as StructValue ║
+// ║ []any │ stored as ListValue ║
+// ╚═══════════════════════════════════════╧════════════════════════════════════════════╝
//
// When converting an int64 or uint64 to a NumberValue, numeric precision loss
// is possible since they are stored as a float64.
@@ -320,12 +320,20 @@ func NewValue(v any) (*Value, error) {
return NewBoolValue(v), nil
case int:
return NewNumberValue(float64(v)), nil
+ case int8:
+ return NewNumberValue(float64(v)), nil
+ case int16:
+ return NewNumberValue(float64(v)), nil
case int32:
return NewNumberValue(float64(v)), nil
case int64:
return NewNumberValue(float64(v)), nil
case uint:
return NewNumberValue(float64(v)), nil
+ case uint8:
+ return NewNumberValue(float64(v)), nil
+ case uint16:
+ return NewNumberValue(float64(v)), nil
case uint32:
return NewNumberValue(float64(v)), nil
case uint64:
@@ -334,6 +342,12 @@ func NewValue(v any) (*Value, error) {
return NewNumberValue(float64(v)), nil
case float64:
return NewNumberValue(float64(v)), nil
+ case json.Number:
+ n, err := v.Float64()
+ if err != nil {
+ return nil, protoimpl.X.NewError("invalid number format %q, expected a float64: %v", v, err)
+ }
+ return NewNumberValue(n), nil
case string:
if !utf8.ValidString(v) {
return nil, protoimpl.X.NewError("invalid UTF-8 in string: %q", v)
@@ -441,11 +455,9 @@ func (x *Value) UnmarshalJSON(b []byte) error {
func (x *Value) Reset() {
*x = Value{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_struct_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_struct_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Value) String() string {
@@ -456,7 +468,7 @@ func (*Value) ProtoMessage() {}
func (x *Value) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_struct_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -613,11 +625,9 @@ func (x *ListValue) UnmarshalJSON(b []byte) error {
func (x *ListValue) Reset() {
*x = ListValue{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_struct_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_struct_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ListValue) String() string {
@@ -628,7 +638,7 @@ func (*ListValue) ProtoMessage() {}
func (x *ListValue) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_struct_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -742,44 +752,6 @@ func file_google_protobuf_struct_proto_init() {
if File_google_protobuf_struct_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_protobuf_struct_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*Struct); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_struct_proto_msgTypes[1].Exporter = func(v any, i int) any {
- switch v := v.(*Value); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_struct_proto_msgTypes[2].Exporter = func(v any, i int) any {
- switch v := v.(*ListValue); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
file_google_protobuf_struct_proto_msgTypes[1].OneofWrappers = []any{
(*Value_NullValue)(nil),
(*Value_NumberValue)(nil),
diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
index 83a5a645b0835..0d20722d70b77 100644
--- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
@@ -254,11 +254,9 @@ func (x *Timestamp) check() uint {
func (x *Timestamp) Reset() {
*x = Timestamp{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_timestamp_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_timestamp_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Timestamp) String() string {
@@ -269,7 +267,7 @@ func (*Timestamp) ProtoMessage() {}
func (x *Timestamp) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_timestamp_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -348,20 +346,6 @@ func file_google_protobuf_timestamp_proto_init() {
if File_google_protobuf_timestamp_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*Timestamp); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
diff --git a/vendor/google.golang.org/protobuf/types/known/typepb/type.pb.go b/vendor/google.golang.org/protobuf/types/known/typepb/type.pb.go
index 52887fd5db66e..f0ca52a01b354 100644
--- a/vendor/google.golang.org/protobuf/types/known/typepb/type.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/typepb/type.pb.go
@@ -293,11 +293,9 @@ type Type struct {
func (x *Type) Reset() {
*x = Type{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_type_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_type_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Type) String() string {
@@ -308,7 +306,7 @@ func (*Type) ProtoMessage() {}
func (x *Type) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_type_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -404,11 +402,9 @@ type Field struct {
func (x *Field) Reset() {
*x = Field{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_type_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_type_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Field) String() string {
@@ -419,7 +415,7 @@ func (*Field) ProtoMessage() {}
func (x *Field) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_type_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -526,11 +522,9 @@ type Enum struct {
func (x *Enum) Reset() {
*x = Enum{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_type_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_type_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Enum) String() string {
@@ -541,7 +535,7 @@ func (*Enum) ProtoMessage() {}
func (x *Enum) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_type_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -614,11 +608,9 @@ type EnumValue struct {
func (x *EnumValue) Reset() {
*x = EnumValue{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_type_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_type_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *EnumValue) String() string {
@@ -629,7 +621,7 @@ func (*EnumValue) ProtoMessage() {}
func (x *EnumValue) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_type_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -686,11 +678,9 @@ type Option struct {
func (x *Option) Reset() {
*x = Option{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_type_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_type_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Option) String() string {
@@ -701,7 +691,7 @@ func (*Option) ProtoMessage() {}
func (x *Option) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_type_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -906,68 +896,6 @@ func file_google_protobuf_type_proto_init() {
if File_google_protobuf_type_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_protobuf_type_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*Type); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_type_proto_msgTypes[1].Exporter = func(v any, i int) any {
- switch v := v.(*Field); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_type_proto_msgTypes[2].Exporter = func(v any, i int) any {
- switch v := v.(*Enum); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_type_proto_msgTypes[3].Exporter = func(v any, i int) any {
- switch v := v.(*EnumValue); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_type_proto_msgTypes[4].Exporter = func(v any, i int) any {
- switch v := v.(*Option); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
diff --git a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go
index e473f826aa31b..006060e5695fc 100644
--- a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go
@@ -69,11 +69,9 @@ func Double(v float64) *DoubleValue {
func (x *DoubleValue) Reset() {
*x = DoubleValue{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_wrappers_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_wrappers_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *DoubleValue) String() string {
@@ -84,7 +82,7 @@ func (*DoubleValue) ProtoMessage() {}
func (x *DoubleValue) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_wrappers_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -125,11 +123,9 @@ func Float(v float32) *FloatValue {
func (x *FloatValue) Reset() {
*x = FloatValue{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_wrappers_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_wrappers_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *FloatValue) String() string {
@@ -140,7 +136,7 @@ func (*FloatValue) ProtoMessage() {}
func (x *FloatValue) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_wrappers_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -181,11 +177,9 @@ func Int64(v int64) *Int64Value {
func (x *Int64Value) Reset() {
*x = Int64Value{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_wrappers_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_wrappers_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Int64Value) String() string {
@@ -196,7 +190,7 @@ func (*Int64Value) ProtoMessage() {}
func (x *Int64Value) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_wrappers_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -237,11 +231,9 @@ func UInt64(v uint64) *UInt64Value {
func (x *UInt64Value) Reset() {
*x = UInt64Value{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_wrappers_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_wrappers_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *UInt64Value) String() string {
@@ -252,7 +244,7 @@ func (*UInt64Value) ProtoMessage() {}
func (x *UInt64Value) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_wrappers_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -293,11 +285,9 @@ func Int32(v int32) *Int32Value {
func (x *Int32Value) Reset() {
*x = Int32Value{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_wrappers_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_wrappers_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Int32Value) String() string {
@@ -308,7 +298,7 @@ func (*Int32Value) ProtoMessage() {}
func (x *Int32Value) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_wrappers_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -349,11 +339,9 @@ func UInt32(v uint32) *UInt32Value {
func (x *UInt32Value) Reset() {
*x = UInt32Value{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_wrappers_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_wrappers_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *UInt32Value) String() string {
@@ -364,7 +352,7 @@ func (*UInt32Value) ProtoMessage() {}
func (x *UInt32Value) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_wrappers_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -405,11 +393,9 @@ func Bool(v bool) *BoolValue {
func (x *BoolValue) Reset() {
*x = BoolValue{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_wrappers_proto_msgTypes[6]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_wrappers_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *BoolValue) String() string {
@@ -420,7 +406,7 @@ func (*BoolValue) ProtoMessage() {}
func (x *BoolValue) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_wrappers_proto_msgTypes[6]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -461,11 +447,9 @@ func String(v string) *StringValue {
func (x *StringValue) Reset() {
*x = StringValue{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_wrappers_proto_msgTypes[7]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_wrappers_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *StringValue) String() string {
@@ -476,7 +460,7 @@ func (*StringValue) ProtoMessage() {}
func (x *StringValue) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_wrappers_proto_msgTypes[7]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -517,11 +501,9 @@ func Bytes(v []byte) *BytesValue {
func (x *BytesValue) Reset() {
*x = BytesValue{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_wrappers_proto_msgTypes[8]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_wrappers_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *BytesValue) String() string {
@@ -532,7 +514,7 @@ func (*BytesValue) ProtoMessage() {}
func (x *BytesValue) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_wrappers_proto_msgTypes[8]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -629,116 +611,6 @@ func file_google_protobuf_wrappers_proto_init() {
if File_google_protobuf_wrappers_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_protobuf_wrappers_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*DoubleValue); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_wrappers_proto_msgTypes[1].Exporter = func(v any, i int) any {
- switch v := v.(*FloatValue); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_wrappers_proto_msgTypes[2].Exporter = func(v any, i int) any {
- switch v := v.(*Int64Value); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_wrappers_proto_msgTypes[3].Exporter = func(v any, i int) any {
- switch v := v.(*UInt64Value); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_wrappers_proto_msgTypes[4].Exporter = func(v any, i int) any {
- switch v := v.(*Int32Value); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_wrappers_proto_msgTypes[5].Exporter = func(v any, i int) any {
- switch v := v.(*UInt32Value); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_wrappers_proto_msgTypes[6].Exporter = func(v any, i int) any {
- switch v := v.(*BoolValue); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_wrappers_proto_msgTypes[7].Exporter = func(v any, i int) any {
- switch v := v.(*StringValue); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_wrappers_proto_msgTypes[8].Exporter = func(v any, i int) any {
- switch v := v.(*BytesValue); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
diff --git a/vendor/modules.txt b/vendor/modules.txt
index c52baaa43345d..d30a86aa29e7b 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1,8 +1,8 @@
-# cel.dev/expr v0.16.0
+# cel.dev/expr v0.16.1
## explicit; go 1.18
cel.dev/expr
-# cloud.google.com/go v0.115.1
-## explicit; go 1.20
+# cloud.google.com/go v0.116.0
+## explicit; go 1.21
cloud.google.com/go
cloud.google.com/go/internal
cloud.google.com/go/internal/detect
@@ -10,7 +10,7 @@ cloud.google.com/go/internal/optional
cloud.google.com/go/internal/pubsub
cloud.google.com/go/internal/trace
cloud.google.com/go/internal/version
-# cloud.google.com/go/auth v0.9.3
+# cloud.google.com/go/auth v0.9.8
## explicit; go 1.21
cloud.google.com/go/auth
cloud.google.com/go/auth/credentials
@@ -22,6 +22,7 @@ cloud.google.com/go/auth/credentials/internal/stsexchange
cloud.google.com/go/auth/grpctransport
cloud.google.com/go/auth/httptransport
cloud.google.com/go/auth/internal
+cloud.google.com/go/auth/internal/compute
cloud.google.com/go/auth/internal/credsfile
cloud.google.com/go/auth/internal/jwt
cloud.google.com/go/auth/internal/transport
@@ -37,8 +38,8 @@ cloud.google.com/go/bigtable/apiv2/bigtablepb
cloud.google.com/go/bigtable/bttest
cloud.google.com/go/bigtable/internal
cloud.google.com/go/bigtable/internal/option
-# cloud.google.com/go/compute/metadata v0.5.0
-## explicit; go 1.20
+# cloud.google.com/go/compute/metadata v0.5.2
+## explicit; go 1.21
cloud.google.com/go/compute/metadata
# cloud.google.com/go/iam v1.2.1
## explicit; go 1.21
@@ -62,8 +63,8 @@ cloud.google.com/go/pubsub/apiv1/pubsubpb
cloud.google.com/go/pubsub/internal
cloud.google.com/go/pubsub/internal/distribution
cloud.google.com/go/pubsub/internal/scheduler
-# cloud.google.com/go/storage v1.43.0
-## explicit; go 1.20
+# cloud.google.com/go/storage v1.44.0
+## explicit; go 1.21
cloud.google.com/go/storage
cloud.google.com/go/storage/internal
cloud.google.com/go/storage/internal/apiv2
@@ -206,6 +207,15 @@ github.com/DataDog/sketches-go/ddsketch/store
# github.com/DmitriyVTitov/size v1.5.0
## explicit; go 1.14
github.com/DmitriyVTitov/size
+# github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1
+## explicit; go 1.21
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp
+# github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1
+## explicit; go 1.21
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric
+# github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1
+## explicit; go 1.21
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping
# github.com/IBM/go-sdk-core/v5 v5.17.5
## explicit; go 1.20
github.com/IBM/go-sdk-core/v5/core
@@ -515,7 +525,7 @@ github.com/cespare/xxhash
# github.com/cespare/xxhash/v2 v2.3.0
## explicit; go 1.11
github.com/cespare/xxhash/v2
-# github.com/cncf/xds/go v0.0.0-20240822171458-6449f94b4d59
+# github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78
## explicit; go 1.19
github.com/cncf/xds/go/udpa/annotations
github.com/cncf/xds/go/udpa/type/v1
@@ -748,10 +758,12 @@ github.com/fluent/fluent-bit-go/output
# github.com/fsnotify/fsnotify v1.7.0
## explicit; go 1.17
github.com/fsnotify/fsnotify
-# github.com/fsouza/fake-gcs-server v1.7.0
-## explicit
+# github.com/fsouza/fake-gcs-server v1.50.2
+## explicit; go 1.22
github.com/fsouza/fake-gcs-server/fakestorage
github.com/fsouza/fake-gcs-server/internal/backend
+github.com/fsouza/fake-gcs-server/internal/checksum
+github.com/fsouza/fake-gcs-server/internal/notification
# github.com/gabriel-vasile/mimetype v1.4.3
## explicit; go 1.20
github.com/gabriel-vasile/mimetype
@@ -977,6 +989,9 @@ github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/oauth1
github.com/gophercloud/gophercloud/openstack/identity/v3/tokens
github.com/gophercloud/gophercloud/openstack/utils
github.com/gophercloud/gophercloud/pagination
+# github.com/gorilla/handlers v1.5.2
+## explicit; go 1.20
+github.com/gorilla/handlers
# github.com/gorilla/mux v1.8.1
## explicit; go 1.20
github.com/gorilla/mux
@@ -1210,7 +1225,7 @@ github.com/json-iterator/go
# github.com/julienschmidt/httprouter v1.3.0
## explicit; go 1.7
github.com/julienschmidt/httprouter
-# github.com/klauspost/compress v1.17.10
+# github.com/klauspost/compress v1.17.11
## explicit; go 1.21
github.com/klauspost/compress
github.com/klauspost/compress/flate
@@ -1273,8 +1288,8 @@ github.com/miekg/dns
# github.com/minio/md5-simd v1.1.2
## explicit; go 1.14
github.com/minio/md5-simd
-# github.com/minio/minio-go/v7 v7.0.77
-## explicit; go 1.21
+# github.com/minio/minio-go/v7 v7.0.78
+## explicit; go 1.22
github.com/minio/minio-go/v7
github.com/minio/minio-go/v7/pkg/cors
github.com/minio/minio-go/v7/pkg/credentials
@@ -1383,6 +1398,9 @@ github.com/pkg/browser
# github.com/pkg/errors v0.9.1
## explicit
github.com/pkg/errors
+# github.com/pkg/xattr v0.4.10
+## explicit; go 1.14
+github.com/pkg/xattr
# github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10
## explicit; go 1.20
github.com/planetscale/vtprotobuf/protohelpers
@@ -1750,6 +1768,9 @@ go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp
# go.opentelemetry.io/collector/semconv v0.105.0
## explicit; go 1.21.0
go.opentelemetry.io/collector/semconv/v1.6.1
+# go.opentelemetry.io/contrib/detectors/gcp v1.29.0
+## explicit; go 1.21
+go.opentelemetry.io/contrib/detectors/gcp
# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0
## explicit; go 1.21
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc
@@ -1775,6 +1796,7 @@ go.opentelemetry.io/otel/semconv/v1.17.0
go.opentelemetry.io/otel/semconv/v1.18.0
go.opentelemetry.io/otel/semconv/v1.20.0
go.opentelemetry.io/otel/semconv/v1.21.0
+go.opentelemetry.io/otel/semconv/v1.24.0
go.opentelemetry.io/otel/semconv/v1.26.0
# go.opentelemetry.io/otel/metric v1.29.0
## explicit; go 1.21
@@ -1821,7 +1843,7 @@ go.uber.org/zap/zapgrpc
# go4.org/netipx v0.0.0-20230125063823-8449b0a6169f
## explicit; go 1.18
go4.org/netipx
-# golang.org/x/crypto v0.27.0
+# golang.org/x/crypto v0.28.0
## explicit; go 1.20
golang.org/x/crypto/argon2
golang.org/x/crypto/bcrypt
@@ -1848,7 +1870,7 @@ golang.org/x/exp/slices
# golang.org/x/mod v0.19.0
## explicit; go 1.18
golang.org/x/mod/semver
-# golang.org/x/net v0.29.0
+# golang.org/x/net v0.30.0
## explicit; go 1.18
golang.org/x/net/bpf
golang.org/x/net/context
@@ -1888,7 +1910,7 @@ golang.org/x/oauth2/jwt
golang.org/x/sync/errgroup
golang.org/x/sync/semaphore
golang.org/x/sync/singleflight
-# golang.org/x/sys v0.25.0
+# golang.org/x/sys v0.26.0
## explicit; go 1.18
golang.org/x/sys/cpu
golang.org/x/sys/plan9
@@ -1896,10 +1918,10 @@ golang.org/x/sys/unix
golang.org/x/sys/windows
golang.org/x/sys/windows/registry
golang.org/x/sys/windows/svc/eventlog
-# golang.org/x/term v0.24.0
+# golang.org/x/term v0.25.0
## explicit; go 1.18
golang.org/x/term
-# golang.org/x/text v0.18.0
+# golang.org/x/text v0.19.0
## explicit; go 1.18
golang.org/x/text/cases
golang.org/x/text/encoding
@@ -1923,7 +1945,7 @@ golang.org/x/text/secure/bidirule
golang.org/x/text/transform
golang.org/x/text/unicode/bidi
golang.org/x/text/unicode/norm
-# golang.org/x/time v0.6.0
+# golang.org/x/time v0.7.0
## explicit; go 1.18
golang.org/x/time/rate
# golang.org/x/tools v0.23.0
@@ -1944,7 +1966,7 @@ golang.org/x/tools/internal/stdlib
golang.org/x/tools/internal/tokeninternal
golang.org/x/tools/internal/typesinternal
golang.org/x/tools/internal/versions
-# google.golang.org/api v0.197.0
+# google.golang.org/api v0.201.0
## explicit; go 1.21
google.golang.org/api/cloudresourcemanager/v1
google.golang.org/api/compute/v1
@@ -1965,13 +1987,13 @@ google.golang.org/api/transport
google.golang.org/api/transport/grpc
google.golang.org/api/transport/http
google.golang.org/api/transport/http/internal/propagation
-# google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1
+# google.golang.org/genproto v0.0.0-20241007155032-5fefd90f89a9
## explicit; go 1.21
google.golang.org/genproto/googleapis/type/calendarperiod
google.golang.org/genproto/googleapis/type/date
google.golang.org/genproto/googleapis/type/expr
google.golang.org/genproto/protobuf/api
-# google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1
+# google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f
## explicit; go 1.21
google.golang.org/genproto/googleapis/api
google.golang.org/genproto/googleapis/api/annotations
@@ -1980,7 +2002,7 @@ google.golang.org/genproto/googleapis/api/expr/v1alpha1
google.golang.org/genproto/googleapis/api/label
google.golang.org/genproto/googleapis/api/metric
google.golang.org/genproto/googleapis/api/monitoredres
-# google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1
+# google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9
## explicit; go 1.21
google.golang.org/genproto/googleapis/rpc/code
google.golang.org/genproto/googleapis/rpc/errdetails
@@ -2119,8 +2141,12 @@ google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry
google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry/converter
google.golang.org/grpc/xds/internal/xdsclient/xdsresource
google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version
-# google.golang.org/protobuf v1.34.2
-## explicit; go 1.20
+# google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a
+## explicit; go 1.21
+google.golang.org/grpc/stats/opentelemetry
+google.golang.org/grpc/stats/opentelemetry/internal
+# google.golang.org/protobuf v1.35.1
+## explicit; go 1.21
google.golang.org/protobuf/encoding/protodelim
google.golang.org/protobuf/encoding/protojson
google.golang.org/protobuf/encoding/prototext
|
fix
|
update module github.com/fsouza/fake-gcs-server to v1.50.2 (#14313)
|
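The google.golang.org/protobuf bump to v1.35.1 in the diff above also widens structpb.NewValue: json.Number and the smaller integer kinds (int8, int16, uint8, uint16) are now converted to NumberValue. A minimal usage sketch of that new surface, relying only on the vendored structpb package itself:

package main

import (
	"encoding/json"
	"fmt"

	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	// json.Number is parsed through Float64() into a NumberValue.
	v, err := structpb.NewValue(json.Number("42.5"))
	if err != nil {
		panic(err)
	}
	fmt.Println(v.GetNumberValue()) // 42.5

	// Smaller integer kinds are accepted as of this version.
	w, _ := structpb.NewValue(int8(7))
	fmt.Println(w.GetNumberValue()) // 7
}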
4f534d7317fa0557251f16b76ebf790f079cf98e
|
2024-07-25 00:06:59
|
Sven Grossmann
|
feat(detected-labels): remove cardinality filter (#13652)
| false
|
diff --git a/pkg/querier/queryrange/roundtrip.go b/pkg/querier/queryrange/roundtrip.go
index 6d71bf587d5ec..3e658f18c86f4 100644
--- a/pkg/querier/queryrange/roundtrip.go
+++ b/pkg/querier/queryrange/roundtrip.go
@@ -318,9 +318,7 @@ func NewDetectedLabelsCardinalityFilter(rt queryrangebase.Handler) queryrangebas
var result []*logproto.DetectedLabel
for _, dl := range resp.Response.DetectedLabels {
- if dl.Cardinality > 1 && dl.Cardinality < 50 {
- result = append(result, &logproto.DetectedLabel{Label: dl.Label, Cardinality: dl.Cardinality})
- }
+ result = append(result, &logproto.DetectedLabel{Label: dl.Label, Cardinality: dl.Cardinality})
}
return &DetectedLabelsResponse{
Response: &logproto.DetectedLabelsResponse{DetectedLabels: result},
|
feat
|
remove cardinality filter (#13652)
|
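With the server-side bounds gone, callers that still want thresholding must apply it themselves. A hypothetical client-side equivalent of the removed filter (the helper name is invented, and the logproto import path is assumed from the Loki v3 layout; neither is part of the change above):

package example

import "github.com/grafana/loki/v3/pkg/logproto"

// filterDetectedLabels reproduces the removed server-side bounds
// (1 < cardinality < 50); it is a sketch, not part of the Loki codebase.
func filterDetectedLabels(in []*logproto.DetectedLabel) []*logproto.DetectedLabel {
	out := make([]*logproto.DetectedLabel, 0, len(in))
	for _, dl := range in {
		if dl.Cardinality > 1 && dl.Cardinality < 50 {
			out = append(out, dl)
		}
	}
	return out
}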
d9892b4481d088b122c74bb9f0050396e5bc7300
|
2025-02-10 22:57:37
|
Owen Diehl
|
fix(metastore): empty object corrections (#16147)
| false
|
diff --git a/go.mod b/go.mod
index 645a5ebd1f2e1..c5eb6dff9836d 100644
--- a/go.mod
+++ b/go.mod
@@ -409,4 +409,4 @@ replace github.com/grafana/loki/pkg/push => ./pkg/push
// leodido fork his project to continue support
replace github.com/influxdata/go-syslog/v3 => github.com/leodido/go-syslog/v4 v4.2.0
-replace github.com/thanos-io/objstore => github.com/grafana/objstore v0.0.0-20250203161329-90e33e9afde6
+replace github.com/thanos-io/objstore => github.com/grafana/objstore v0.0.0-20250210100727-533688b5600d
diff --git a/go.sum b/go.sum
index 2a7393bc82906..64eca14d983df 100644
--- a/go.sum
+++ b/go.sum
@@ -628,8 +628,8 @@ github.com/grafana/jsonparser v0.0.0-20241004153430-023329977675 h1:U94jQ2TQr1m3
github.com/grafana/jsonparser v0.0.0-20241004153430-023329977675/go.mod h1:796sq+UcONnSlzA3RtlBZ+b/hrerkZXiEmO8oMjyRwY=
github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe h1:yIXAAbLswn7VNWBIvM71O2QsgfgW9fRXZNR0DXe6pDU=
github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
-github.com/grafana/objstore v0.0.0-20250203161329-90e33e9afde6 h1:SlGPi1Sg15c/OzhGMAd7/EOnYJ03ZX6Wuql8lQ2pRU4=
-github.com/grafana/objstore v0.0.0-20250203161329-90e33e9afde6/go.mod h1:Quz9HUDjGidU0RQpoytzK4KqJ7kwzP+DMAm4K57/usM=
+github.com/grafana/objstore v0.0.0-20250210100727-533688b5600d h1:prt2nn03NfxwgXWZNmC8a7jahg/R6mtyGmfKY3sbd6E=
+github.com/grafana/objstore v0.0.0-20250210100727-533688b5600d/go.mod h1:Quz9HUDjGidU0RQpoytzK4KqJ7kwzP+DMAm4K57/usM=
github.com/grafana/pyroscope-go/godeltaprof v0.1.8 h1:iwOtYXeeVSAeYefJNaxDytgjKtUuKQbJqgAIjlnicKg=
github.com/grafana/pyroscope-go/godeltaprof v0.1.8/go.mod h1:2+l7K7twW49Ct4wFluZD3tZ6e0SjanjcUUBPVD/UuGU=
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248=
diff --git a/pkg/dataobj/metastore/metastore.go b/pkg/dataobj/metastore/metastore.go
index 0d6b8de85f20e..0db1c4ec5d581 100644
--- a/pkg/dataobj/metastore/metastore.go
+++ b/pkg/dataobj/metastore/metastore.go
@@ -12,6 +12,7 @@ import (
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/grafana/dskit/backoff"
+ "github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/thanos-io/objstore"
@@ -101,9 +102,14 @@ func (m *Manager) UpdateMetastore(ctx context.Context, dataobjPath string, flush
for m.backoff.Ongoing() {
err = m.bucket.GetAndReplace(ctx, metastorePath, func(existing io.Reader) (io.Reader, error) {
m.buf.Reset()
- _, err := io.Copy(m.buf, existing)
- if err != nil {
- return nil, err
+ if existing != nil {
+ level.Debug(m.logger).Log("msg", "found existing metastore, updating", "path", metastorePath)
+ _, err := io.Copy(m.buf, existing)
+ if err != nil {
+ return nil, errors.Wrap(err, "copying to local buffer")
+ }
+ } else {
+ level.Debug(m.logger).Log("msg", "no existing metastore found, creating new one", "path", metastorePath)
}
m.metastoreBuilder.Reset()
@@ -112,7 +118,7 @@ func (m *Manager) UpdateMetastore(ctx context.Context, dataobjPath string, flush
replayDuration := prometheus.NewTimer(m.metrics.metastoreReplayTime)
object := dataobj.FromReaderAt(bytes.NewReader(m.buf.Bytes()), int64(m.buf.Len()))
if err := m.readFromExisting(ctx, object); err != nil {
- return nil, err
+ return nil, errors.Wrap(err, "reading existing metastore version")
}
replayDuration.ObserveDuration()
}
@@ -120,28 +126,29 @@ func (m *Manager) UpdateMetastore(ctx context.Context, dataobjPath string, flush
encodingDuration := prometheus.NewTimer(m.metrics.metastoreEncodingTime)
ls := fmt.Sprintf("{__start__=\"%d\", __end__=\"%d\", __path__=\"%s\"}", minTimestamp.UnixNano(), maxTimestamp.UnixNano(), dataobjPath)
- err = m.metastoreBuilder.Append(logproto.Stream{
+ err := m.metastoreBuilder.Append(logproto.Stream{
Labels: ls,
Entries: []logproto.Entry{{Line: ""}},
})
if err != nil {
- return nil, err
+ return nil, errors.Wrap(err, "appending internal metadata stream")
}
m.buf.Reset()
_, err = m.metastoreBuilder.Flush(m.buf)
if err != nil {
- return nil, err
+ return nil, errors.Wrap(err, "flushing metastore builder")
}
encodingDuration.ObserveDuration()
return m.buf, nil
})
if err == nil {
level.Info(m.logger).Log("msg", "successfully merged & updated metastore", "metastore", metastorePath)
+ m.metrics.incMetastoreWrites(statusSuccess)
break
}
level.Error(m.logger).Log("msg", "failed to get and replace metastore object", "err", err, "metastore", metastorePath)
- m.metrics.incMetastoreWriteFailures()
+ m.metrics.incMetastoreWrites(statusFailure)
m.backoff.Wait()
}
// Reset at the end too so we don't leave our memory hanging around between calls.
@@ -155,7 +162,7 @@ func (m *Manager) readFromExisting(ctx context.Context, object *dataobj.Object)
// Fetch sections
si, err := object.Metadata(ctx)
if err != nil {
- return err
+ return errors.Wrap(err, "resolving object metadata")
}
// Read streams from existing metastore object and write them to the builder for the new object
@@ -164,7 +171,7 @@ func (m *Manager) readFromExisting(ctx context.Context, object *dataobj.Object)
streamsReader := dataobj.NewStreamsReader(object, i)
for n, err := streamsReader.Read(ctx, streams); n > 0; n, err = streamsReader.Read(ctx, streams) {
if err != nil && err != io.EOF {
- return err
+ return errors.Wrap(err, "reading streams")
}
for _, stream := range streams[:n] {
err = m.metastoreBuilder.Append(logproto.Stream{
@@ -172,7 +179,7 @@ func (m *Manager) readFromExisting(ctx context.Context, object *dataobj.Object)
Entries: []logproto.Entry{{Line: ""}},
})
if err != nil {
- return err
+ return errors.Wrap(err, "appending streams")
}
}
}
diff --git a/pkg/dataobj/metastore/metrics.go b/pkg/dataobj/metastore/metrics.go
index 424f1e27cccee..0adf6e441290e 100644
--- a/pkg/dataobj/metastore/metrics.go
+++ b/pkg/dataobj/metastore/metrics.go
@@ -6,11 +6,18 @@ import (
"github.com/prometheus/client_golang/prometheus"
)
+type status string
+
+const (
+ statusSuccess status = "success"
+ statusFailure status = "failure"
+)
+
type metastoreMetrics struct {
metastoreProcessingTime prometheus.Histogram
metastoreReplayTime prometheus.Histogram
metastoreEncodingTime prometheus.Histogram
- metastoreWriteFailures prometheus.Counter
+ metastoreWriteFailures *prometheus.CounterVec
}
func newMetastoreMetrics() *metastoreMetrics {
@@ -39,10 +46,10 @@ func newMetastoreMetrics() *metastoreMetrics {
NativeHistogramMaxBucketNumber: 100,
NativeHistogramMinResetDuration: 0,
}),
- metastoreWriteFailures: prometheus.NewCounter(prometheus.CounterOpts{
- Name: "loki_dataobj_consumer_metastore_write_failures_total",
+ metastoreWriteFailures: prometheus.NewCounterVec(prometheus.CounterOpts{
+ Name: "loki_dataobj_consumer_metastore_writes_total",
Help: "Total number of metastore write failures",
- }),
+ }, []string{"status"}),
}
return metrics
@@ -79,8 +86,8 @@ func (p *metastoreMetrics) unregister(reg prometheus.Registerer) {
}
}
-func (p *metastoreMetrics) incMetastoreWriteFailures() {
- p.metastoreWriteFailures.Inc()
+func (p *metastoreMetrics) incMetastoreWrites(status status) {
+ p.metastoreWriteFailures.WithLabelValues(string(status)).Inc()
}
func (p *metastoreMetrics) observeMetastoreReplay(recordTimestamp time.Time) {
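The metrics change above folds successes and failures into one counter family split by a status label. A self-contained sketch of that pattern (metric and label names mirror the diff; the test registry and readback are illustrative, not taken from the Loki code):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/testutil"
)

func main() {
	writes := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "loki_dataobj_consumer_metastore_writes_total",
		Help: "Total number of metastore writes",
	}, []string{"status"})

	reg := prometheus.NewRegistry()
	reg.MustRegister(writes)

	// Success and failure are now one series family, split by label.
	writes.WithLabelValues("success").Inc()
	writes.WithLabelValues("failure").Inc()

	fmt.Println(testutil.ToFloat64(writes.WithLabelValues("failure"))) // 1
}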
diff --git a/vendor/github.com/thanos-io/objstore/inmem.go b/vendor/github.com/thanos-io/objstore/inmem.go
index 50e0441ca5bf9..053e02bd32ae9 100644
--- a/vendor/github.com/thanos-io/objstore/inmem.go
+++ b/vendor/github.com/thanos-io/objstore/inmem.go
@@ -201,10 +201,6 @@ func (b *InMemBucket) GetAndReplace(ctx context.Context, name string, f func(io.
b.mtx.Lock()
defer b.mtx.Unlock()
- if reader == nil {
- reader = io.NopCloser(bytes.NewReader(nil))
- }
-
new, err := f(reader)
if err != nil {
return err
diff --git a/vendor/github.com/thanos-io/objstore/objstore.go b/vendor/github.com/thanos-io/objstore/objstore.go
index 77540d2817b91..8a6a207f22a0f 100644
--- a/vendor/github.com/thanos-io/objstore/objstore.go
+++ b/vendor/github.com/thanos-io/objstore/objstore.go
@@ -66,6 +66,7 @@ type Bucket interface {
// GetAndReplace an existing object with a new object
// If the previous object is created or updated before the new object is uploaded, then the call will fail with an error.
+ // The existing reader will be nil if the object did not previously exist.
GetAndReplace(ctx context.Context, name string, f func(existing io.Reader) (io.Reader, error)) error
// Delete removes the object with the given name.
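The interface note above, together with the provider changes that follow, tightens the GetAndReplace contract: the callback now receives a nil reader, rather than an empty one, when the object is missing, and the providers enforce optimistic concurrency on the replace (GCS generations, S3 ETags). A minimal caller sketch under that contract; the helper name, key, and payload are illustrative, and GetAndReplace exists only on the grafana/objstore fork pinned in modules.txt:

```go
package example

import (
	"bytes"
	"context"
	"io"

	"github.com/thanos-io/objstore"
)

// appendEntry is illustrative, not from the commit: it shows the nil-reader
// contract a GetAndReplace callback must now honor.
func appendEntry(ctx context.Context, bkt objstore.Bucket, key string, entry []byte) error {
	return bkt.GetAndReplace(ctx, key, func(existing io.Reader) (io.Reader, error) {
		var prev []byte
		if existing != nil { // nil means the object did not previously exist
			b, err := io.ReadAll(existing)
			if err != nil {
				return nil, err
			}
			prev = b
		}
		return bytes.NewReader(append(prev, entry...)), nil
	})
}
```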
diff --git a/vendor/github.com/thanos-io/objstore/providers/filesystem/filesystem.go b/vendor/github.com/thanos-io/objstore/providers/filesystem/filesystem.go
index f6538b894edb4..0da66f6fdc6f8 100644
--- a/vendor/github.com/thanos-io/objstore/providers/filesystem/filesystem.go
+++ b/vendor/github.com/thanos-io/objstore/providers/filesystem/filesystem.go
@@ -4,7 +4,6 @@
package filesystem
import (
- "bytes"
"context"
"fmt"
"io"
@@ -285,15 +284,24 @@ func (b *Bucket) GetAndReplace(ctx context.Context, name string, f func(io.Reade
}
defer fileLock.Unlock()
- var r io.ReadCloser
- r, err = os.Open(file)
- if err != nil && !os.IsNotExist(err) {
- return err
- } else if err == nil {
- defer r.Close()
+ var missing bool
+ openedFile, err := os.Open(file)
+ if err != nil {
+ if !os.IsNotExist(err) {
+ return err
+ }
+ missing = true
}
- newContent, err := f(wrapReader(r))
+ // assign the file to a plain io.Reader only when it exists: a typed nil
+ // (concrete type, no value) would defeat nil checks in the callback
+ var reader io.Reader
+ if !missing {
+ reader = openedFile
+ defer openedFile.Close()
+ }
+
+ newContent, err := f(reader)
if err != nil {
return err
}
@@ -306,13 +314,6 @@ func (b *Bucket) GetAndReplace(ctx context.Context, name string, f func(io.Reade
return os.WriteFile(file, content, 0600)
}
-func wrapReader(r io.Reader) io.Reader {
- if r == nil {
- return bytes.NewReader(nil)
- }
- return r
-}
-
func isDirEmpty(name string) (ok bool, err error) {
f, err := os.Open(filepath.Clean(name))
if os.IsNotExist(err) {
diff --git a/vendor/github.com/thanos-io/objstore/providers/gcs/gcs.go b/vendor/github.com/thanos-io/objstore/providers/gcs/gcs.go
index ee702963190e4..3d04decbb24e2 100644
--- a/vendor/github.com/thanos-io/objstore/providers/gcs/gcs.go
+++ b/vendor/github.com/thanos-io/objstore/providers/gcs/gcs.go
@@ -5,7 +5,6 @@
package gcs
import (
- "bytes"
"context"
"fmt"
"io"
@@ -369,38 +368,35 @@ func (b *Bucket) upload(ctx context.Context, name string, r io.Reader, generatio
}
func (b *Bucket) GetAndReplace(ctx context.Context, name string, f func(io.Reader) (io.Reader, error)) error {
- var mustNotExist bool
var generation int64
+ var missing bool
// Get the current object
storageReader, err := b.get(ctx, name)
- if err != nil && !errors.Is(err, storage.ErrObjectNotExist) {
- return err
- } else if errors.Is(err, storage.ErrObjectNotExist) {
- mustNotExist = true
+ if err != nil {
+ if !errors.Is(err, storage.ErrObjectNotExist) {
+ return err
+ }
+ missing = true
}
+ // assign the storage reader to a plain io.Reader only when the object
+ // exists: a typed nil (concrete type, no value) would defeat nil checks in the callback
+ var reader io.Reader
// If object exists, ensure we close the reader when done
- if storageReader != nil {
+ if !missing {
generation = storageReader.Attrs.Generation
+ reader = storageReader
defer storageReader.Close()
}
- newContent, err := f(wrapReader(storageReader))
+ newContent, err := f(reader)
if err != nil {
return err
}
// Upload with the previous generation, or mustNotExist for new objects
- return b.upload(ctx, name, newContent, generation, mustNotExist)
-}
-
-func wrapReader(r *storage.Reader) io.Reader {
- if r == nil {
- return bytes.NewReader(nil)
- }
-
- return r
+ return b.upload(ctx, name, newContent, generation, missing)
}
// Delete removes the object with the given name.
diff --git a/vendor/github.com/thanos-io/objstore/providers/s3/s3.go b/vendor/github.com/thanos-io/objstore/providers/s3/s3.go
index 5fbed6464c5c3..2b1696b7d5c8b 100644
--- a/vendor/github.com/thanos-io/objstore/providers/s3/s3.go
+++ b/vendor/github.com/thanos-io/objstore/providers/s3/s3.go
@@ -609,26 +609,35 @@ func (b *Bucket) upload(ctx context.Context, name string, r io.Reader, etag stri
// Upload the contents of the reader as an object into the bucket.
func (b *Bucket) GetAndReplace(ctx context.Context, name string, f func(io.Reader) (io.Reader, error)) error {
- var requireNewObject bool
+ var missing bool
originalContent, err := b.getRange(ctx, name, 0, -1)
- if err != nil && !b.IsObjNotFoundErr(err) {
- return err
- } else if b.IsObjNotFoundErr(err) {
- requireNewObject = true
+ if err != nil {
+ if !b.IsObjNotFoundErr(err) {
+ return err
+ }
+ missing = true
}
- // Call work function to get a new version of the file
- newContent, err := f(originalContent)
- if err != nil {
- return err
+ // assign originalContent to a plain io.Reader only when the object exists:
+ // a typed nil (concrete type, no value) would defeat nil checks in the callback
+ var reader io.Reader
+ var etag string
+ if !missing {
+ reader = originalContent
+ stats, err := originalContent.Stat()
+ if err != nil {
+ return err
+ }
+ etag = stats.ETag
}
- stats, err := originalContent.Stat()
+ // Call work function to get a new version of the file
+ newContent, err := f(reader)
if err != nil {
return err
}
- return b.upload(ctx, name, newContent, stats.ETag, requireNewObject)
+ return b.upload(ctx, name, newContent, etag, missing)
}
// Attributes returns information about the specified object.
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 4636a87382577..1983c56995324 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1633,7 +1633,7 @@ github.com/stretchr/testify/assert/yaml
github.com/stretchr/testify/mock
github.com/stretchr/testify/require
github.com/stretchr/testify/suite
-# github.com/thanos-io/objstore v0.0.0-20250115091151-a54d0f04b42a => github.com/grafana/objstore v0.0.0-20250203161329-90e33e9afde6
+# github.com/thanos-io/objstore v0.0.0-20250115091151-a54d0f04b42a => github.com/grafana/objstore v0.0.0-20250210100727-533688b5600d
## explicit; go 1.22
github.com/thanos-io/objstore
github.com/thanos-io/objstore/clientutil
@@ -2566,4 +2566,4 @@ sigs.k8s.io/yaml/goyaml.v2
# github.com/grafana/regexp => github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc
# github.com/grafana/loki/pkg/push => ./pkg/push
# github.com/influxdata/go-syslog/v3 => github.com/leodido/go-syslog/v4 v4.2.0
-# github.com/thanos-io/objstore => github.com/grafana/objstore v0.0.0-20250203161329-90e33e9afde6
+# github.com/thanos-io/objstore => github.com/grafana/objstore v0.0.0-20250210100727-533688b5600d
|
fix
|
empty object corrections (#16147)
|
bc9ad25a3838e9b398e64950891e06bfda3e43d9
|
2024-11-01 20:59:37
|
renovate[bot]
|
chore(deps): update grafana/grafana docker tag to v9.5.21 (#14697)
| false
|
diff --git a/production/docker/docker-compose.yaml b/production/docker/docker-compose.yaml
index 3200b4cab6e97..65aff80b945fc 100644
--- a/production/docker/docker-compose.yaml
+++ b/production/docker/docker-compose.yaml
@@ -26,7 +26,7 @@ services:
- loki
grafana:
- image: grafana/grafana:9.1.6
+ image: grafana/grafana:9.5.21
ports:
- "3000:3000"
environment:
|
chore
|
update grafana/grafana docker tag to v9.5.21 (#14697)
|
f2394355d81f33d7a5cec6490cc4523619e596e8
|
2023-06-09 18:30:31
|
Christophe Collot
|
feat(lambda-promtail): add cloudfront log file ingestion support (#9573)
| false
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 93f4818827397..3add6541557b8 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -6,6 +6,8 @@
##### Enhancements
+* [9573](https://github.com/grafana/loki/pull/9573) **CCOLLOT**: Lambda-Promtail: Add support for AWS CloudFront log ingestion.
+* [9497](https://github.com/grafana/loki/pull/9497) **CCOLLOT**: Lambda-Promtail: Add support for AWS CloudTrail log ingestion.
* [8886](https://github.com/grafana/loki/pull/8886) **MichelHollands**: Add new logql template function `unixToTime`
* [8067](https://github.com/grafana/loki/pull/9497) **CCOLLOT**: Lambda-Promtail: Add support for AWS CloudTrail log ingestion.
* [9515](https://github.com/grafana/loki/pull/9515) **MichelHollands**: Fix String() on vector aggregation LogQL expressions that contain `without ()`.
diff --git a/docs/sources/clients/lambda-promtail/_index.md b/docs/sources/clients/lambda-promtail/_index.md
index a42494700ce5a..f3fff1bcea110 100644
--- a/docs/sources/clients/lambda-promtail/_index.md
+++ b/docs/sources/clients/lambda-promtail/_index.md
@@ -109,9 +109,10 @@ This workflow allows ingesting AWS loadbalancer logs stored on S3 to Loki.
This workflow allows ingesting AWS Cloudtrail logs stored on S3 to Loki.
-### Cloudfront real-time logs
-
-Cloudfront [real-time logs](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/real-time-logs.html) can be sent to a Kinesis data stream. The data stream can be mapped to be an [event source](https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventsourcemapping.html) for lambda-promtail to deliver the logs to Loki.
+### Cloudfront logs
+Cloudfront logs can be either batched or streamed in real time to Loki:
++ Logging can be activated on a Cloudfront distribution with an S3 bucket as the destination. In this case, the workflow is the same as for other services (VPC Flow logs, Loadbalancer logs, Cloudtrail logs).
++ Cloudfront [real-time logs](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/real-time-logs.html) can be sent to a Kinesis data stream. The data stream can be mapped to be an [event source](https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventsourcemapping.html) for lambda-promtail to deliver the logs to Loki.
### Triggering Lambda-Promtail via SQS
For AWS services that support sending messages to SQS (for example, S3 with an S3 Notification to SQS), events can be processed through an [SQS queue using a lambda trigger](https://docs.aws.amazon.com/lambda/latest/dg/with-sqs.html) instead of directly configuring the source service to trigger lambda. Lambda-promtail will retrieve the nested events from the SQS messages' bodies and process them as if they came directly from the source service.
diff --git a/tools/lambda-promtail/lambda-promtail/s3.go b/tools/lambda-promtail/lambda-promtail/s3.go
index 93f3d8e9d89cc..bb7a94d032e46 100644
--- a/tools/lambda-promtail/lambda-promtail/s3.go
+++ b/tools/lambda-promtail/lambda-promtail/s3.go
@@ -21,11 +21,27 @@ import (
"github.com/aws/aws-sdk-go-v2/service/s3"
)
+type parserConfig struct {
+ // value to use for __aws_log_type label
+ logTypeLabel string
+ // regex matching the filename and exporting labels from it
+ filenameRegex *regexp.Regexp
+ // regex that extracts the timestamp from the log sample
+ timestampRegex *regexp.Regexp
+ // time format to use to convert the timestamp to time.Time
+ timestampFormat string
+ // how many lines or JSON tokens to skip at the beginning of the file
+ skipHeaderCount int
+ // key of the metadata label to use as a value for the __aws_<logType>_owner label
+ ownerLabelKey string
+}
+
const (
FLOW_LOG_TYPE string = "vpcflowlogs"
LB_LOG_TYPE string = "elasticloadbalancing"
CLOUDTRAIL_LOG_TYPE string = "CloudTrail"
CLOUDTRAIL_DIGEST_LOG_TYPE string = "CloudTrail-Digest"
+ CLOUDFRONT_LOG_TYPE string = "cloudfront"
)
var (
@@ -40,15 +56,45 @@ var (
// CloudTrail
// source: https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-log-file-examples.html#cloudtrail-log-filename-format
// example: 111122223333_CloudTrail_us-east-2_20150801T0210Z_Mu0KsOhtH1ar15ZZ.json.gz
- defaultFilenameRegex = regexp.MustCompile(`AWSLogs\/(?P<account_id>\d+)\/(?P<type>[a-zA-Z0-9_\-]+)\/(?P<region>[\w-]+)\/(?P<year>\d+)\/(?P<month>\d+)\/(?P<day>\d+)\/\d+\_(?:elasticloadbalancing|vpcflowlogs)\_\w+-\w+-\d_(?:(?:app|nlb|net)\.*?)?(?P<src>[a-zA-Z0-9\-]+)`)
- cloudtrailFilenameRegex = regexp.MustCompile(`AWSLogs\/(?P<account_id>\d+)\/(?P<type>[a-zA-Z0-9_\-]+)\/(?P<region>[\w-]+)\/(?P<year>\d+)\/(?P<month>\d+)\/(?P<day>\d+)\/\d+\_(?:CloudTrail|CloudTrail-Digest)\_\w+-\w+-\d_(?:(?:app|nlb|net)\.*?)?.+_(?P<src>[a-zA-Z0-9\-]+)`)
- filenameRegexes = map[string]*regexp.Regexp{
- FLOW_LOG_TYPE: defaultFilenameRegex,
- LB_LOG_TYPE: defaultFilenameRegex,
- CLOUDTRAIL_LOG_TYPE: cloudtrailFilenameRegex,
+ // CloudFront
+ // source https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/AccessLogs.html#AccessLogsFileNaming
+ // example: example-prefix/EMLARXS9EXAMPLE.2019-11-14-20.RT4KCN4SGK9.gz
+ defaultFilenameRegex = regexp.MustCompile(`AWSLogs\/(?P<account_id>\d+)\/(?P<type>[a-zA-Z0-9_\-]+)\/(?P<region>[\w-]+)\/(?P<year>\d+)\/(?P<month>\d+)\/(?P<day>\d+)\/\d+\_(?:elasticloadbalancing|vpcflowlogs)\_\w+-\w+-\d_(?:(?:app|nlb|net)\.*?)?(?P<src>[a-zA-Z0-9\-]+)`)
+ defaultTimestampRegex = regexp.MustCompile(`\w+ (?P<timestamp>\d+-\d+-\d+T\d+:\d+:\d+\.\d+Z)`)
+ cloudtrailFilenameRegex = regexp.MustCompile(`AWSLogs\/(?P<account_id>\d+)\/(?P<type>[a-zA-Z0-9_\-]+)\/(?P<region>[\w-]+)\/(?P<year>\d+)\/(?P<month>\d+)\/(?P<day>\d+)\/\d+\_(?:CloudTrail|CloudTrail-Digest)\_\w+-\w+-\d_(?:(?:app|nlb|net)\.*?)?.+_(?P<src>[a-zA-Z0-9\-]+)`)
+ cloudfrontFilenameRegex = regexp.MustCompile(`(?P<prefix>.*)\/(?P<src>[A-Z0-9]+)\.(?P<year>\d+)-(?P<month>\d+)-(?P<day>\d+)-(.+)`)
+ cloudfrontTimestampRegex = regexp.MustCompile(`(?P<timestamp>\d+-\d+-\d+\s\d+:\d+:\d+)`)
+ parsers = map[string]parserConfig{
+ FLOW_LOG_TYPE: {
+ logTypeLabel: "s3_vpc_flow",
+ filenameRegex: defaultFilenameRegex,
+ ownerLabelKey: "account_id",
+ timestampRegex: defaultTimestampRegex,
+ timestampFormat: time.RFC3339,
+ skipHeaderCount: 1,
+ },
+ LB_LOG_TYPE: {
+ logTypeLabel: "s3_lb",
+ filenameRegex: defaultFilenameRegex,
+ ownerLabelKey: "account_id",
+ timestampFormat: time.RFC3339,
+ timestampRegex: defaultTimestampRegex,
+ },
+ CLOUDTRAIL_LOG_TYPE: {
+ logTypeLabel: "s3_cloudtrail",
+ ownerLabelKey: "account_id",
+ skipHeaderCount: 3,
+ filenameRegex: cloudtrailFilenameRegex,
+ },
+ CLOUDFRONT_LOG_TYPE: {
+ logTypeLabel: "s3_cloudfront",
+ filenameRegex: cloudfrontFilenameRegex,
+ ownerLabelKey: "prefix",
+ timestampRegex: cloudfrontTimestampRegex,
+ timestampFormat: "2006-01-02\x0915:04:05",
+ skipHeaderCount: 2,
+ },
}
- // regex that extracts the timestamp (RFC3339) from message log
- timestampRegex = regexp.MustCompile(`\w+ (?P<timestamp>\d+-\d+-\d+T\d+:\d+:\d+\.\d+Z)`)
)
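The `\x09` in the CloudFront `timestampFormat` is easy to misread: CloudFront access logs separate the date and time fields with a tab, so the Go reference layout embeds a literal tab. A runnable sketch (the sample log line is illustrative, not from the commit):

```go
package main

import (
	"fmt"
	"regexp"
	"time"
)

func main() {
	// Same pattern as cloudfrontTimestampRegex above; \s matches the tab
	// between CloudFront's date and time fields.
	re := regexp.MustCompile(`(?P<timestamp>\d+-\d+-\d+\s\d+:\d+:\d+)`)
	line := "2019-12-04\t21:02:31\tLAX1\t392\t89.160.20.112\tGET" // illustrative
	if m := re.FindStringSubmatch(line); len(m) > 0 {
		ts, err := time.Parse("2006-01-02\x0915:04:05", m[1])
		fmt.Println(ts, err) // 2019-12-04 21:02:31 +0000 UTC <nil>
	}
}
```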
func getS3Client(ctx context.Context, region string) (*s3.Client, error) {
@@ -68,6 +114,13 @@ func getS3Client(ctx context.Context, region string) (*s3.Client, error) {
}
func parseS3Log(ctx context.Context, b *batch, labels map[string]string, obj io.ReadCloser) error {
+ parser, ok := parsers[labels["type"]]
+ if !ok {
+ if labels["type"] == CLOUDTRAIL_DIGEST_LOG_TYPE {
+ return nil
+ }
+ return fmt.Errorf("could not find parser for type %s", labels["type"])
+ }
gzreader, err := gzip.NewReader(obj)
if err != nil {
return err
@@ -75,25 +128,10 @@ func parseS3Log(ctx context.Context, b *batch, labels map[string]string, obj io.
scanner := bufio.NewScanner(gzreader)
- skipHeader := false
- logType := ""
- switch labels["type"] {
- case FLOW_LOG_TYPE:
- skipHeader = true
- logType = "s3_vpc_flow"
- case LB_LOG_TYPE:
- logType = "s3_lb"
- case CLOUDTRAIL_LOG_TYPE:
- logType = "s3_cloudtrail"
- case CLOUDTRAIL_DIGEST_LOG_TYPE:
- // do not ingest digest files' content
- return nil
- }
-
ls := model.LabelSet{
- model.LabelName("__aws_log_type"): model.LabelValue(logType),
- model.LabelName(fmt.Sprintf("__aws_%s", logType)): model.LabelValue(labels["src"]),
- model.LabelName(fmt.Sprintf("__aws_%s_owner", logType)): model.LabelValue(labels["account_id"]),
+ model.LabelName("__aws_log_type"): model.LabelValue(parser.logTypeLabel),
+ model.LabelName(fmt.Sprintf("__aws_%s", parser.logTypeLabel)): model.LabelValue(labels["src"]),
+ model.LabelName(fmt.Sprintf("__aws_%s_owner", parser.logTypeLabel)): model.LabelValue(labels[parser.ownerLabelKey]),
}
ls = applyExtraLabels(ls)
@@ -102,7 +140,7 @@ func parseS3Log(ctx context.Context, b *batch, labels map[string]string, obj io.
if labels["type"] == CLOUDTRAIL_LOG_TYPE {
records := make(chan Record)
jsonStream := NewJSONStream(records)
- go jsonStream.Start(gzreader, 3)
+ go jsonStream.Start(gzreader, parser.skipHeaderCount)
// Stream json file
for record := range jsonStream.records {
if record.Error != nil {
@@ -123,17 +161,17 @@ func parseS3Log(ctx context.Context, b *batch, labels map[string]string, obj io.
for scanner.Scan() {
log_line := scanner.Text()
lineCount++
- if lineCount == 1 && skipHeader {
+ if lineCount <= parser.skipHeaderCount {
continue
}
if printLogLine {
fmt.Println(log_line)
}
- match := timestampRegex.FindStringSubmatch(log_line)
timestamp := time.Now()
+ match := parser.timestampRegex.FindStringSubmatch(log_line)
if len(match) > 0 {
- timestamp, err = time.Parse(time.RFC3339, match[1])
+ timestamp, err = time.Parse(parser.timestampFormat, match[1])
if err != nil {
return err
}
@@ -151,24 +189,23 @@ func parseS3Log(ctx context.Context, b *batch, labels map[string]string, obj io.
}
func getLabels(record events.S3EventRecord) (map[string]string, error) {
+
labels := make(map[string]string)
labels["key"] = record.S3.Object.Key
labels["bucket"] = record.S3.Bucket.Name
labels["bucket_owner"] = record.S3.Bucket.OwnerIdentity.PrincipalID
labels["bucket_region"] = record.AWSRegion
- var matchingExp *regexp.Regexp
var matchingType *string
- for key, exp := range filenameRegexes {
- if exp.MatchString(labels["key"]) {
- matchingExp = exp
+ for key, p := range parsers {
+ if p.filenameRegex.MatchString(labels["key"]) {
matchingType = aws.String(key)
- }
- }
- match := matchingExp.FindStringSubmatch(labels["key"])
- for i, name := range matchingExp.SubexpNames() {
- if i != 0 && name != "" {
- labels[name] = match[i]
+ match := p.filenameRegex.FindStringSubmatch(labels["key"])
+ for i, name := range p.filenameRegex.SubexpNames() {
+ if i != 0 && name != "" {
+ labels[name] = match[i]
+ }
+ }
}
}
if labels["type"] == "" {
diff --git a/tools/lambda-promtail/lambda-promtail/s3_test.go b/tools/lambda-promtail/lambda-promtail/s3_test.go
index 83f1161b6d956..18f808825799b 100644
--- a/tools/lambda-promtail/lambda-promtail/s3_test.go
+++ b/tools/lambda-promtail/lambda-promtail/s3_test.go
@@ -89,6 +89,39 @@ func Test_getLabels(t *testing.T) {
},
wantErr: false,
},
+ {
+ name: "cloudtrail_digest_logs",
+ args: args{
+ record: events.S3EventRecord{
+ AWSRegion: "us-east-1",
+ S3: events.S3Entity{
+ Bucket: events.S3Bucket{
+ Name: "cloudtrail_digest_logs_test",
+ OwnerIdentity: events.S3UserIdentity{
+ PrincipalID: "test",
+ },
+ },
+ Object: events.S3Object{
+ Key: "my-bucket/AWSLogs/123456789012/CloudTrail-Digest/us-east-1/2022/01/24/123456789012_CloudTrail-Digest_us-east-1_20220124T0000Z_4jhzXFO2Jlvu2b3y.json.gz",
+ },
+ },
+ },
+ },
+ want: map[string]string{
+ "account_id": "123456789012",
+ "bucket": "cloudtrail_digest_logs_test",
+ "bucket_owner": "test",
+ "bucket_region": "us-east-1",
+ "day": "24",
+ "key": "my-bucket/AWSLogs/123456789012/CloudTrail-Digest/us-east-1/2022/01/24/123456789012_CloudTrail-Digest_us-east-1_20220124T0000Z_4jhzXFO2Jlvu2b3y.json.gz",
+ "month": "01",
+ "region": "us-east-1",
+ "src": "4jhzXFO2Jlvu2b3y",
+ "type": CLOUDTRAIL_DIGEST_LOG_TYPE,
+ "year": "2022",
+ },
+ wantErr: false,
+ },
{
name: "cloudtrail_logs",
args: args{
@@ -122,6 +155,38 @@ func Test_getLabels(t *testing.T) {
},
wantErr: false,
},
+ {
+ name: "s3_cloudfront",
+ args: args{
+ record: events.S3EventRecord{
+ AWSRegion: "us-east-1",
+ S3: events.S3Entity{
+ Bucket: events.S3Bucket{
+ Name: "cloudfront_logs_test",
+ OwnerIdentity: events.S3UserIdentity{
+ PrincipalID: "test",
+ },
+ },
+ Object: events.S3Object{
+ Key: "my/bucket/prefix/E2K2LNL5N3WR51.2022-07-18-12.a10a8496.gz",
+ },
+ },
+ },
+ },
+ want: map[string]string{
+ "bucket": "cloudfront_logs_test",
+ "bucket_owner": "test",
+ "bucket_region": "us-east-1",
+ "day": "18",
+ "key": "my/bucket/prefix/E2K2LNL5N3WR51.2022-07-18-12.a10a8496.gz",
+ "month": "07",
+ "prefix": "my/bucket/prefix",
+ "src": "E2K2LNL5N3WR51",
+ "type": CLOUDFRONT_LOG_TYPE,
+ "year": "2022",
+ },
+ wantErr: false,
+ },
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
@@ -149,6 +214,7 @@ func Test_parseS3Log(t *testing.T) {
name string
args args
wantErr bool
+ expectedLen int
expectedStream string
}{
{
@@ -165,6 +231,7 @@ func Test_parseS3Log(t *testing.T) {
"account_id": "123456789",
},
},
+ expectedLen: 1,
expectedStream: `{__aws_log_type="s3_vpc_flow", __aws_s3_vpc_flow="source", __aws_s3_vpc_flow_owner="123456789"}`,
wantErr: false,
},
@@ -182,6 +249,7 @@ func Test_parseS3Log(t *testing.T) {
"account_id": "123456789",
},
},
+ expectedLen: 1,
expectedStream: `{__aws_log_type="s3_lb", __aws_s3_lb="source", __aws_s3_lb_owner="123456789"}`,
wantErr: false,
},
@@ -199,9 +267,46 @@ func Test_parseS3Log(t *testing.T) {
"account_id": "123456789",
},
},
+ expectedLen: 1,
expectedStream: `{__aws_log_type="s3_cloudtrail", __aws_s3_cloudtrail="source", __aws_s3_cloudtrail_owner="123456789"}`,
wantErr: false,
},
+ {
+ name: "cloudtrail_digest_logs",
+ args: args{
+ batchSize: 131072, // Set large enough we don't try and send to promtail
+ filename: "../testdata/cloudtrail-log-file.json.gz",
+ b: &batch{
+ streams: map[string]*logproto.Stream{},
+ },
+ labels: map[string]string{
+ "type": CLOUDTRAIL_DIGEST_LOG_TYPE,
+ "src": "source",
+ "account_id": "123456789",
+ },
+ },
+ expectedLen: 0,
+ expectedStream: ``,
+ wantErr: false,
+ },
+ {
+ name: "cloudfrontlogs",
+ args: args{
+ batchSize: 131072, // Set large enough we don't try and send to promtail
+ filename: "../testdata/cloudfront.log.gz",
+ b: &batch{
+ streams: map[string]*logproto.Stream{},
+ },
+ labels: map[string]string{
+ "type": CLOUDFRONT_LOG_TYPE,
+ "src": "DISTRIBUTIONID",
+ "prefix": "path/to/file",
+ },
+ },
+ expectedLen: 1,
+ expectedStream: `{__aws_log_type="s3_cloudfront", __aws_s3_cloudfront="DISTRIBUTIONID", __aws_s3_cloudfront_owner="path/to/file"}`,
+ wantErr: false,
+ },
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
@@ -214,10 +319,12 @@ func Test_parseS3Log(t *testing.T) {
if err := parseS3Log(context.Background(), tt.args.b, tt.args.labels, tt.args.obj); (err != nil) != tt.wantErr {
t.Errorf("parseS3Log() error = %v, wantErr %v", err, tt.wantErr)
}
- require.Len(t, tt.args.b.streams, 1)
- stream, ok := tt.args.b.streams[tt.expectedStream]
- require.True(t, ok, "batch does not contain stream: %s", tt.expectedStream)
- require.NotNil(t, stream)
+ require.Len(t, tt.args.b.streams, tt.expectedLen)
+ if tt.expectedStream != "" {
+ stream, ok := tt.args.b.streams[tt.expectedStream]
+ require.True(t, ok, "batch does not contain stream: %s", tt.expectedStream)
+ require.NotNil(t, stream)
+ }
})
}
}
diff --git a/tools/lambda-promtail/testdata/cloudfront.log.gz b/tools/lambda-promtail/testdata/cloudfront.log.gz
new file mode 100644
index 0000000000000..c37a63d55a6c4
Binary files /dev/null and b/tools/lambda-promtail/testdata/cloudfront.log.gz differ
|
feat
|
add cloudfront log file ingestion support (#9573)
|
77cf6fa0cc2b0d93cfa7bb88b117def4123d093f
|
2024-06-20 15:37:09
|
George Robinson
|
chore: Update upgrade docs for -ruler.alertmanager-use-v2 (#13264)
| false
|
diff --git a/docs/sources/setup/upgrade/_index.md b/docs/sources/setup/upgrade/_index.md
index e5abde43173d7..547da559bb1fa 100644
--- a/docs/sources/setup/upgrade/_index.md
+++ b/docs/sources/setup/upgrade/_index.md
@@ -36,6 +36,8 @@ The output is incredibly verbose as it shows the entire internal config struct u
## Main / Unreleased
+Loki changes the default value of `-ruler.alertmanager-use-v2` from `false` to `true`. Alertmanager APIv1 was deprecated in Alertmanager 0.16.0 and is removed as of 0.27.0.
+
## 3.0.0
{{% admonition type="note" %}}
|
chore
|
Update upgrade docs for -ruler.alertmanager-use-v2 (#13264)
|
9621d5c5ee3975df8be36df9a637e315e9a5c9d2
|
2024-03-19 13:35:16
|
Bryan Boreham
|
test: Fix race condition in LogQL test (#12247)
| false
|
diff --git a/pkg/iter/sample_iterator.go b/pkg/iter/sample_iterator.go
index 4c17c473e9f0d..632ed9106df15 100644
--- a/pkg/iter/sample_iterator.go
+++ b/pkg/iter/sample_iterator.go
@@ -3,7 +3,6 @@ package iter
import (
"container/heap"
"context"
- "go.uber.org/atomic"
"io"
"sync"
@@ -522,7 +521,7 @@ func NewSampleQueryResponseIterator(resp *logproto.SampleQueryResponse) SampleIt
}
type seriesIterator struct {
- i *atomic.Int32
+ i int
series logproto.Series
}
@@ -568,14 +567,14 @@ func NewMultiSeriesIterator(series []logproto.Series) SampleIterator {
// NewSeriesIterator iterates over sample in a series.
func NewSeriesIterator(series logproto.Series) SampleIterator {
return &seriesIterator{
- i: atomic.NewInt32(-1),
+ i: -1,
series: series,
}
}
func (i *seriesIterator) Next() bool {
- tmp := i.i.Add(1)
- return int(tmp) < len(i.series.Samples)
+ i.i++
+ return i.i < len(i.series.Samples)
}
func (i *seriesIterator) Error() error {
@@ -591,7 +590,7 @@ func (i *seriesIterator) StreamHash() uint64 {
}
func (i *seriesIterator) Sample() logproto.Sample {
- return i.series.Samples[i.i.Load()]
+ return i.series.Samples[i.i]
}
func (i *seriesIterator) Close() error {
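Judging by the test diff below, the race lived in the fixture: iterators were built once and shared, so a query with two legs (for example `count_over_time(...) / count_over_time(...)`) could advance the same iterator from two goroutines, which the atomic index papered over rather than fixed. The closure change hands each call fresh iterators, so a plain int suffices. An illustrative single-consumer sketch (import paths assumed from the surrounding tree):

```go
package example

import (
	"github.com/grafana/loki/v3/pkg/iter"
	"github.com/grafana/loki/v3/pkg/logproto"
)

// consume is illustrative, not from the commit: an iterator is a
// single-consumer object, so once every consumer builds its own instance a
// plain int index is race-free.
func consume(series logproto.Series) error {
	it := iter.NewSeriesIterator(series)
	defer it.Close()
	for it.Next() {
		_ = it.Sample() // only this goroutine ever advances the iterator
	}
	return it.Error()
}
```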
diff --git a/pkg/logql/engine_test.go b/pkg/logql/engine_test.go
index 2dce4ba57ed41..1391b40ff4248 100644
--- a/pkg/logql/engine_test.go
+++ b/pkg/logql/engine_test.go
@@ -2383,16 +2383,16 @@ func TestEngine_LogsInstantQuery_Vector(t *testing.T) {
}
type errorIteratorQuerier struct {
- samples []iter.SampleIterator
- entries []iter.EntryIterator
+ samples func() []iter.SampleIterator
+ entries func() []iter.EntryIterator
}
func (e errorIteratorQuerier) SelectLogs(_ context.Context, p SelectLogParams) (iter.EntryIterator, error) {
- return iter.NewSortEntryIterator(e.entries, p.Direction), nil
+ return iter.NewSortEntryIterator(e.entries(), p.Direction), nil
}
func (e errorIteratorQuerier) SelectSamples(_ context.Context, _ SelectSampleParams) (iter.SampleIterator, error) {
- return iter.NewSortSampleIterator(e.samples), nil
+ return iter.NewSortSampleIterator(e.samples()), nil
}
func TestStepEvaluator_Error(t *testing.T) {
@@ -2406,9 +2406,11 @@ func TestStepEvaluator_Error(t *testing.T) {
"rangeAggEvaluator",
`count_over_time({app="foo"}[1m])`,
&errorIteratorQuerier{
- samples: []iter.SampleIterator{
- iter.NewSeriesIterator(newSeries(testSize, identity, `{app="foo"}`)),
- NewErrorSampleIterator(),
+ samples: func() []iter.SampleIterator {
+ return []iter.SampleIterator{
+ iter.NewSeriesIterator(newSeries(testSize, identity, `{app="foo"}`)),
+ NewErrorSampleIterator(),
+ }
},
},
ErrMock,
@@ -2417,9 +2419,11 @@ func TestStepEvaluator_Error(t *testing.T) {
"stream",
`{app="foo"}`,
&errorIteratorQuerier{
- entries: []iter.EntryIterator{
- iter.NewStreamIterator(newStream(testSize, identity, `{app="foo"}`)),
- NewErrorEntryIterator(),
+ entries: func() []iter.EntryIterator {
+ return []iter.EntryIterator{
+ iter.NewStreamIterator(newStream(testSize, identity, `{app="foo"}`)),
+ NewErrorEntryIterator(),
+ }
},
},
ErrMock,
@@ -2428,9 +2432,11 @@ func TestStepEvaluator_Error(t *testing.T) {
"binOpStepEvaluator",
`count_over_time({app="foo"}[1m]) / count_over_time({app="foo"}[1m])`,
&errorIteratorQuerier{
- samples: []iter.SampleIterator{
- iter.NewSeriesIterator(newSeries(testSize, identity, `{app="foo"}`)),
- NewErrorSampleIterator(),
+ samples: func() []iter.SampleIterator {
+ return []iter.SampleIterator{
+ iter.NewSeriesIterator(newSeries(testSize, identity, `{app="foo"}`)),
+ NewErrorSampleIterator(),
+ }
},
},
ErrMockMultiple,
|
test
|
Fix race condition in LogQL test (#12247)
|
990f71c5e90a1b9921820ac06555b9ecd1a6539c
|
2024-11-29 18:19:46
|
Kai Burjack
|
feat(helm): Compute ConfigMap/Secret checksum only over .data content (#15177)
| false
|
diff --git a/production/helm/loki/templates/_helpers.tpl b/production/helm/loki/templates/_helpers.tpl
index 29fdc3aa12747..acc66905ddc23 100644
--- a/production/helm/loki/templates/_helpers.tpl
+++ b/production/helm/loki/templates/_helpers.tpl
@@ -1083,7 +1083,7 @@ enableServiceLinks: false
{{- end }}
{{- define "loki.config.checksum" -}}
-checksum/config: {{ include (print .Template.BasePath "/config.yaml") . | sha256sum }}
+checksum/config: {{ include "loki.configMapOrSecretContentHash" (dict "ctx" . "name" "/config.yaml") }}
{{- end -}}
{{/*
@@ -1120,3 +1120,13 @@ Return the appropriate apiVersion for HorizontalPodAutoscaler.
{{- print "autoscaling/v2beta1" -}}
{{- end -}}
{{- end -}}
+
+{{/*
+Compute a ConfigMap or Secret checksum based only on its .data content.
+This function needs to be called with a context object containing the following keys:
+- ctx: the current Helm context (what '.' is at the call site)
+- name: the file name of the ConfigMap or Secret
+*/}}
+{{- define "loki.configMapOrSecretContentHash" -}}
+{{ get (include (print .ctx.Template.BasePath .name) .ctx | fromYaml) "data" | toYaml | sha256sum }}
+{{- end }}
diff --git a/production/helm/loki/templates/admin-api/deployment-admin-api.yaml b/production/helm/loki/templates/admin-api/deployment-admin-api.yaml
index 2d356882204b6..c5e97dd004c0a 100644
--- a/production/helm/loki/templates/admin-api/deployment-admin-api.yaml
+++ b/production/helm/loki/templates/admin-api/deployment-admin-api.yaml
@@ -32,7 +32,7 @@ spec:
{{- if .Values.useExternalConfig }}
checksum/config: {{ .Values.externalConfigVersion }}
{{- else }}
- checksum/config: {{ include (print $.Template.BasePath "/config.yaml") . | sha256sum }}
+ checksum/config: {{ include "loki.configMapOrSecretContentHash" (dict "ctx" . "name" "/config.yaml") }}
{{- end}}
{{- with .Values.adminApi.annotations }}
{{- toYaml . | nindent 8 }}
diff --git a/production/helm/loki/templates/backend/statefulset-backend.yaml b/production/helm/loki/templates/backend/statefulset-backend.yaml
index c20ce9f9a0552..bf2e18da80b4a 100644
--- a/production/helm/loki/templates/backend/statefulset-backend.yaml
+++ b/production/helm/loki/templates/backend/statefulset-backend.yaml
@@ -47,7 +47,7 @@ spec:
template:
metadata:
annotations:
- checksum/config: {{ include (print .Template.BasePath "/config.yaml") . | sha256sum }}
+ checksum/config: {{ include "loki.configMapOrSecretContentHash" (dict "ctx" . "name" "/config.yaml") }}
{{- with .Values.loki.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
diff --git a/production/helm/loki/templates/gateway/deployment-gateway-enterprise.yaml b/production/helm/loki/templates/gateway/deployment-gateway-enterprise.yaml
index eb089a1a60cfd..22bf5c1fd6183 100644
--- a/production/helm/loki/templates/gateway/deployment-gateway-enterprise.yaml
+++ b/production/helm/loki/templates/gateway/deployment-gateway-enterprise.yaml
@@ -32,7 +32,7 @@ spec:
{{- if .Values.useExternalConfig }}
checksum/config: {{ .Values.externalConfigVersion }}
{{- else }}
- checksum/config: {{ include (print $.Template.BasePath "/config.yaml") . | sha256sum }}
+ checksum/config: {{ include "loki.configMapOrSecretContentHash" (dict "ctx" . "name" "/config.yaml") }}
{{- end}}
{{- with .Values.enterpriseGateway.annotations }}
{{- toYaml . | nindent 8 }}
diff --git a/production/helm/loki/templates/gateway/deployment-gateway-nginx.yaml b/production/helm/loki/templates/gateway/deployment-gateway-nginx.yaml
index 2b2d4c7bd7bb7..94562ad165c97 100644
--- a/production/helm/loki/templates/gateway/deployment-gateway-nginx.yaml
+++ b/production/helm/loki/templates/gateway/deployment-gateway-nginx.yaml
@@ -30,7 +30,7 @@ spec:
template:
metadata:
annotations:
- checksum/config: {{ include (print .Template.BasePath "/gateway/configmap-gateway.yaml") . | sha256sum }}
+ checksum/config: {{ include "loki.configMapOrSecretContentHash" (dict "ctx" . "name" "/gateway/configmap-gateway.yaml") }}
{{- with .Values.loki.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
diff --git a/production/helm/loki/templates/read/deployment-read.yaml b/production/helm/loki/templates/read/deployment-read.yaml
index 245119cb44dca..f024a40fc6d5f 100644
--- a/production/helm/loki/templates/read/deployment-read.yaml
+++ b/production/helm/loki/templates/read/deployment-read.yaml
@@ -33,7 +33,7 @@ spec:
template:
metadata:
annotations:
- checksum/config: {{ include (print .Template.BasePath "/config.yaml") . | sha256sum }}
+ checksum/config: {{ include "loki.configMapOrSecretContentHash" (dict "ctx" . "name" "/config.yaml") }}
{{- with .Values.loki.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
diff --git a/production/helm/loki/templates/read/statefulset-read.yaml b/production/helm/loki/templates/read/statefulset-read.yaml
index 9d4213b174588..93527f9d616da 100644
--- a/production/helm/loki/templates/read/statefulset-read.yaml
+++ b/production/helm/loki/templates/read/statefulset-read.yaml
@@ -47,7 +47,7 @@ spec:
template:
metadata:
annotations:
- checksum/config: {{ include (print .Template.BasePath "/config.yaml") . | sha256sum }}
+ checksum/config: {{ include "loki.configMapOrSecretContentHash" (dict "ctx" . "name" "/config.yaml") }}
{{- with .Values.loki.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
diff --git a/production/helm/loki/templates/single-binary/statefulset.yaml b/production/helm/loki/templates/single-binary/statefulset.yaml
index 4acd406b9cfa5..54f3d925404db 100644
--- a/production/helm/loki/templates/single-binary/statefulset.yaml
+++ b/production/helm/loki/templates/single-binary/statefulset.yaml
@@ -41,7 +41,7 @@ spec:
template:
metadata:
annotations:
- checksum/config: {{ include (print .Template.BasePath "/config.yaml") . | sha256sum }}
+ checksum/config: {{ include "loki.configMapOrSecretContentHash" (dict "ctx" . "name" "/config.yaml") }}
{{- with .Values.loki.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
diff --git a/production/helm/loki/templates/table-manager/deployment-table-manager.yaml b/production/helm/loki/templates/table-manager/deployment-table-manager.yaml
index e3f6d0d94a696..770629be95bd2 100644
--- a/production/helm/loki/templates/table-manager/deployment-table-manager.yaml
+++ b/production/helm/loki/templates/table-manager/deployment-table-manager.yaml
@@ -21,7 +21,7 @@ spec:
template:
metadata:
annotations:
- checksum/config: {{ include (print .Template.BasePath "/config.yaml") . | sha256sum }}
+ checksum/config: {{ include "loki.configMapOrSecretContentHash" (dict "ctx" . "name" "/config.yaml") }}
{{- with .Values.loki.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
diff --git a/production/helm/loki/templates/write/statefulset-write.yaml b/production/helm/loki/templates/write/statefulset-write.yaml
index 4d6183b291c13..fc3b301354db8 100644
--- a/production/helm/loki/templates/write/statefulset-write.yaml
+++ b/production/helm/loki/templates/write/statefulset-write.yaml
@@ -47,7 +47,7 @@ spec:
template:
metadata:
annotations:
- checksum/config: {{ include (print .Template.BasePath "/config.yaml") . | sha256sum }}
+ checksum/config: {{ include "loki.configMapOrSecretContentHash" (dict "ctx" . "name" "/config.yaml") }}
{{- with .Values.loki.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
|
feat
|
Compute ConfigMap/Secret checksum only over .data content (#15177)
|
21dd4afdc76d7790e177d2dd364ecf5b629c8112
|
2024-05-30 19:04:05
|
Jack Baldry
|
docs: Republish the sizing calculator but don't list it in the table of contents and don't index it (#13070)
| false
|
diff --git a/docs/sources/setup/size/_index.md b/docs/sources/setup/size/_index.md
index 74dcb8e504964..162748eb9e3b8 100644
--- a/docs/sources/setup/size/_index.md
+++ b/docs/sources/setup/size/_index.md
@@ -1,4 +1,7 @@
---
+_build:
+ list: false
+noindex: true
title: Size the cluster
menuTitle: Size the cluster
description: Provides a tool that generates a Helm Chart values.yaml file based on expected ingestion, retention rate, and node type, to help size your Grafana deployment.
@@ -6,7 +9,6 @@ aliases:
- ../installation/sizing/
- ../installation/helm/generate
weight: 100
-draft: true
---
<link rel="stylesheet" href="../../query/analyzer/style.css">
|
docs
|
Republish the sizing calculator but don't list it in the table of contents and don't index it (#13070)
|
80aec2548203957dbb834ba69e6d734d9054416d
|
2024-11-01 22:53:36
|
Trevor Whitney
|
feat: Improve pattern ingester tracing (#14707)
| false
|
diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go
index 1ebbe17118bf7..de72997990347 100644
--- a/pkg/loki/modules.go
+++ b/pkg/loki/modules.go
@@ -682,6 +682,7 @@ func (t *Loki) initPatternIngesterTee() (services.Service, error) {
t.Cfg.Pattern,
t.Overrides,
t.PatternRingClient,
+ t.tenantConfigs,
t.Cfg.MetricsNamespace,
prometheus.DefaultRegisterer,
logger,
diff --git a/pkg/pattern/aggregation/push.go b/pkg/pattern/aggregation/push.go
index 649d71f92029c..a282913fe5081 100644
--- a/pkg/pattern/aggregation/push.go
+++ b/pkg/pattern/aggregation/push.go
@@ -8,12 +8,14 @@ import (
"io"
"net/http"
"net/url"
+ "strings"
"sync"
"time"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/golang/snappy"
+ "github.com/opentracing/opentracing-go"
"github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
@@ -160,7 +162,13 @@ func (p *Push) Stop() {
}
// buildPayload creates the snappy compressed protobuf to send to Loki
-func (p *Push) buildPayload() ([]byte, error) {
+func (p *Push) buildPayload(ctx context.Context) ([]byte, error) {
+ sp, _ := opentracing.StartSpanFromContext(
+ ctx,
+ "patternIngester.aggregation.Push.buildPayload",
+ )
+ defer sp.Finish()
+
entries := p.entries.reset()
entriesByStream := make(map[string][]logproto.Entry)
@@ -179,6 +187,14 @@ func (p *Push) buildPayload() ([]byte, error) {
}
streams := make([]logproto.Stream, 0, len(entriesByStream))
+
+ // limit the number of services to log to 1000
+ serviceLimit := len(entriesByStream)
+ if serviceLimit > 1000 {
+ serviceLimit = 1000
+ }
+
+ services := make([]string, 0, serviceLimit)
for s, entries := range entriesByStream {
lbls, err := syntax.ParseLabels(s)
if err != nil {
@@ -190,6 +206,10 @@ func (p *Push) buildPayload() ([]byte, error) {
Entries: entries,
Hash: lbls.Hash(),
})
+
+ if len(services) < serviceLimit {
+ services = append(services, lbls.Get(push.AggregatedMetricLabel))
+ }
}
req := &logproto.PushRequest{
@@ -202,6 +222,14 @@ func (p *Push) buildPayload() ([]byte, error) {
payload = snappy.Encode(nil, payload)
+ sp.LogKV(
+ "event", "build aggregated metrics payload",
+ "num_service", len(entriesByStream),
+ "first_1k_services", strings.Join(services, ","),
+ "num_streams", len(streams),
+ "num_entries", len(entries),
+ )
+
return payload, nil
}
@@ -221,7 +249,7 @@ func (p *Push) run(pushPeriod time.Duration) {
cancel()
return
case <-pushTicker.C:
- payload, err := p.buildPayload()
+ payload, err := p.buildPayload(ctx)
if err != nil {
level.Error(p.logger).Log("msg", "failed to build payload", "err", err)
continue
@@ -265,9 +293,14 @@ func (p *Push) send(ctx context.Context, payload []byte) (int, error) {
err error
resp *http.Response
)
+
// Set a timeout for the request
ctx, cancel := context.WithTimeout(ctx, p.httpClient.Timeout)
defer cancel()
+
+ sp, ctx := opentracing.StartSpanFromContext(ctx, "patternIngester.aggregation.Push.send")
+ defer sp.Finish()
+
req, err := http.NewRequestWithContext(ctx, "POST", p.lokiURL, bytes.NewReader(payload))
if err != nil {
return -1, fmt.Errorf("failed to create push request: %w", err)
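The same three-step tracing pattern recurs throughout this commit: derive a child span from the caller's context, attach structured key/values, and finish the span on exit. Distilled below; the span name and logged fields are illustrative, not from the source:

```go
package example

import (
	"context"

	"github.com/opentracing/opentracing-go"
)

// traceWork distills the pattern added in this commit; the name and fields
// are illustrative.
func traceWork(ctx context.Context, items int) {
	sp, ctx := opentracing.StartSpanFromContext(ctx, "patternIngester.example.traceWork")
	defer sp.Finish()

	// ... work that uses ctx so nested spans parent correctly ...

	sp.LogKV(
		"event", "work complete", // LogKV takes alternating key/value pairs
		"num_items", items,
	)
	_ = ctx
}
```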
diff --git a/pkg/pattern/instance.go b/pkg/pattern/instance.go
index 46c355a9bbcfc..7ec5fd3bdc391 100644
--- a/pkg/pattern/instance.go
+++ b/pkg/pattern/instance.go
@@ -12,6 +12,7 @@ import (
"github.com/grafana/dskit/httpgrpc"
"github.com/grafana/dskit/multierror"
"github.com/grafana/dskit/ring"
+ "github.com/opentracing/opentracing-go"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
@@ -95,7 +96,7 @@ func (i *instance) Push(ctx context.Context, req *logproto.PushRequest) error {
for _, reqStream := range req.Streams {
// All streams are observed for metrics
// TODO(twhitney): this would be better as a queue that drops in response to backpressure
- i.Observe(reqStream.Labels, reqStream.Entries)
+ i.Observe(ctx, reqStream.Labels, reqStream.Entries)
// But only owned streamed are processed for patterns
ownedStream, err := i.isOwnedStream(i.ingesterID, reqStream.Labels)
@@ -252,10 +253,22 @@ func (i *instance) removeStream(s *stream) {
}
}
-func (i *instance) Observe(stream string, entries []logproto.Entry) {
+func (i *instance) Observe(ctx context.Context, stream string, entries []logproto.Entry) {
i.aggMetricsLock.Lock()
defer i.aggMetricsLock.Unlock()
+ sp, _ := opentracing.StartSpanFromContext(
+ ctx,
+ "patternIngester.Observe",
+ )
+ defer sp.Finish()
+
+ sp.LogKV(
+ "event", "observe stream for metrics",
+ "stream", stream,
+ "entries", len(entries),
+ )
+
for _, entry := range entries {
lvl := constants.LogLevelUnknown
structuredMetadata := logproto.FromLabelAdaptersToLabels(entry.StructuredMetadata)
diff --git a/pkg/pattern/tee_service.go b/pkg/pattern/tee_service.go
index f94893ca6c91d..19584276f39ef 100644
--- a/pkg/pattern/tee_service.go
+++ b/pkg/pattern/tee_service.go
@@ -21,6 +21,8 @@ import (
"github.com/grafana/loki/v3/pkg/loghttp/push"
"github.com/grafana/loki/v3/pkg/logproto"
"github.com/grafana/loki/v3/pkg/logql/syntax"
+ "github.com/grafana/loki/v3/pkg/runtime"
+ "github.com/grafana/loki/v3/pkg/util/spanlogger"
ring_client "github.com/grafana/dskit/ring/client"
)
@@ -28,6 +30,7 @@ import (
type TeeService struct {
cfg Config
limits Limits
+ tenantCfgs *runtime.TenantConfigs
logger log.Logger
ringClient RingClient
wg *sync.WaitGroup
@@ -51,6 +54,7 @@ func NewTeeService(
cfg Config,
limits Limits,
ringClient RingClient,
+ tenantCfgs *runtime.TenantConfigs,
metricsNamespace string,
registerer prometheus.Registerer,
logger log.Logger,
@@ -86,6 +90,7 @@ func NewTeeService(
),
cfg: cfg,
limits: limits,
+ tenantCfgs: tenantCfgs,
ringClient: ringClient,
wg: &sync.WaitGroup{},
@@ -293,10 +298,11 @@ func (ts *TeeService) sendBatch(ctx context.Context, clientRequest clientRequest
// are gathered by this request
_ = instrument.CollectedRequest(
ctx,
- "FlushTeedLogsToPatternIngested",
+ "FlushTeedLogsToPatternIngester",
ts.sendDuration,
instrument.ErrorCode,
func(ctx context.Context) error {
+ sp := spanlogger.FromContext(ctx)
client, err := ts.ringClient.GetClientFor(clientRequest.ingesterAddr)
if err != nil {
return err
@@ -313,6 +319,41 @@ func (ts *TeeService) sendBatch(ctx context.Context, clientRequest clientRequest
// Success here means the stream will be processed for both metrics and patterns
ts.ingesterAppends.WithLabelValues(clientRequest.ingesterAddr, "success").Inc()
ts.ingesterMetricAppends.WithLabelValues("success").Inc()
+
+ // limit logged labels to 1000
+ labelsLimit := len(req.Streams)
+ if labelsLimit > 1000 {
+ labelsLimit = 1000
+ }
+
+ labels := make([]string, 0, labelsLimit)
+ for _, stream := range req.Streams {
+ if len(labels) >= 1000 {
+ break
+ }
+
+ labels = append(labels, stream.Labels)
+ }
+
+ sp.LogKV(
+ "event", "forwarded push request to pattern ingester",
+ "num_streams", len(req.Streams),
+ "first_1k_labels", strings.Join(labels, ", "),
+ "tenant", clientRequest.tenant,
+ )
+
+ // this is basically the same as logging push request streams,
+ // so put it behind the same flag
+ if ts.tenantCfgs.LogPushRequestStreams(clientRequest.tenant) {
+ level.Debug(ts.logger).
+ Log(
+ "msg", "forwarded push request to pattern ingester",
+ "num_streams", len(req.Streams),
+ "first_1k_labels", strings.Join(labels, ", "),
+ "tenant", clientRequest.tenant,
+ )
+ }
+
return nil
}
diff --git a/pkg/pattern/tee_service_test.go b/pkg/pattern/tee_service_test.go
index 0fb8a032f062a..ed6de3c90ce1a 100644
--- a/pkg/pattern/tee_service_test.go
+++ b/pkg/pattern/tee_service_test.go
@@ -15,6 +15,7 @@ import (
"github.com/grafana/loki/v3/pkg/distributor"
"github.com/grafana/loki/v3/pkg/logproto"
+ "github.com/grafana/loki/v3/pkg/runtime"
"github.com/grafana/loki/pkg/push"
)
@@ -51,6 +52,7 @@ func getTestTee(t *testing.T) (*TeeService, *mockPoolClient) {
metricAggregationEnabled: true,
},
ringClient,
+ runtime.DefaultTenantConfigs(),
"test",
nil,
log.NewNopLogger(),
|
feat
|
Improve pattern ingester tracing (#14707)
|
c515a4e2b465175dda50c3e334e9a4afa54a4f1e
|
2025-03-12 03:48:44
|
renovate[bot]
|
fix(deps): update dependency next-themes to v0.4.6 (main) (#16697)
| false
|
diff --git a/pkg/ui/frontend/package-lock.json b/pkg/ui/frontend/package-lock.json
index 0853a4736e1bd..8422fc3a057fc 100644
--- a/pkg/ui/frontend/package-lock.json
+++ b/pkg/ui/frontend/package-lock.json
@@ -5111,9 +5111,9 @@
"license": "MIT"
},
"node_modules/next-themes": {
- "version": "0.4.5",
- "resolved": "https://registry.npmjs.org/next-themes/-/next-themes-0.4.5.tgz",
- "integrity": "sha512-E8/gYKBxZknOXBiDk/sRokAvkOw35PTUD4Gxtq1eBhd0r4Dx5S42zU65/q8ozR5rcSG2ZlE1E3+ShlUpC7an+A==",
+ "version": "0.4.6",
+ "resolved": "https://registry.npmjs.org/next-themes/-/next-themes-0.4.6.tgz",
+ "integrity": "sha512-pZvgD5L0IEvX5/9GWyHMf3m8BKiVQwsCMHfoFosXtXBMnaS0ZnIJ9ST4b4NqLVKDEm8QBxoNNGNaBv2JNF6XNA==",
"license": "MIT",
"peerDependencies": {
"react": "^16.8 || ^17 || ^18 || ^19 || ^19.0.0-rc",
|
fix
|
update dependency next-themes to v0.4.6 (main) (#16697)
|
2d49c04571b9425b41b7bb9d6868e210590ac51b
|
2025-02-01 02:05:23
|
renovate[bot]
|
chore(deps): update dependency pytz to v2025 (main) (#16025)
| false
|
diff --git a/tools/bigtable-backup/requirements.txt b/tools/bigtable-backup/requirements.txt
index c7bb1c11d1df1..2c55a2546056b 100644
--- a/tools/bigtable-backup/requirements.txt
+++ b/tools/bigtable-backup/requirements.txt
@@ -1,2 +1,2 @@
-e git://github.com/prometheus/client_python.git@a8f5c80f651ea570577c364203e0edbef67db727#egg=prometheus_client
-pytz==2024.2
+pytz==2025.1
|
chore
|
update dependency pytz to v2025 (main) (#16025)
|
00185f8bbb018bd58e9b37b75bca6b669cd39b0a
|
2019-10-18 20:50:42
|
Robert Fratto
|
docs: add example and documentation about using JMESPath literals (#1176)
| false
|
diff --git a/docs/clients/promtail/stages/json.md b/docs/clients/promtail/stages/json.md
index 2f115ab25df84..ba20011ab59d7 100644
--- a/docs/clients/promtail/stages/json.md
+++ b/docs/clients/promtail/stages/json.md
@@ -10,6 +10,10 @@ json:
# Set of key/value pairs of JMESPath expressions. The key will be
# the key in the extracted data while the expression will be the value,
# evaluated as a JMESPath from the source data.
+ #
+ # Literal JMESPath expressions can be used by wrapping a key in
+ # double quotes, which then must be wrapped in single quotes in
+ # YAML so they get passed to the JMESPath parser.
expressions:
[ <string>: <string> ... ]
@@ -89,3 +93,33 @@ The second stage will parse the value of `extra` from the extracted data as JSON
and append the following key-value pairs to the set of extracted data:
- `user`: `marco`
+
+### Using a JMESPath Literal
+
+This pipeline uses a literal JMESPath expression to parse JSON fields with
+special characters in the name, such as `@` or `.`
+
+For the given pipeline:
+
+```yaml
+- json:
+ expressions:
+ output: log
+ stream: '"grpc.stream"'
+ timestamp: time
+```
+
+Given the following log line:
+
+```
+{"log":"log message\n","grpc.stream":"stderr","time":"2019-04-30T02:12:41.8443515Z"}
+```
+
+The following key-value pairs would be created in the set of extracted data:
+
+- `output`: `log message\n`
+- `stream`: `stderr`
+- `timestamp`: `2019-04-30T02:12:41.8443515`
+
+Note that referring to `grpc.stream` without the double quotes wrapped in
+single quotes will not extract the field properly.
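For readers verifying this behavior programmatically: promtail's json stage is built on github.com/jmespath/go-jmespath, so the quoted-identifier rule can be checked directly. A hedged sketch (the library dependency is assumed from promtail's vendoring):

```go
package main

import (
	"fmt"

	"github.com/jmespath/go-jmespath"
)

func main() {
	data := map[string]interface{}{"grpc.stream": "stderr"}

	// Double quotes make the whole key a single identifier literal...
	v, _ := jmespath.Search(`"grpc.stream"`, data)
	fmt.Println(v) // stderr

	// ...while the bare form is parsed as nested access (grpc, then stream).
	v, _ = jmespath.Search(`grpc.stream`, data)
	fmt.Println(v) // <nil>
}
```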
|
docs
|
add example and documentation about using JMESPath literals (#1176)
|
17b2ce0700d631a0e688e34adb68160ed06b0ffd
|
2019-12-06 05:00:51
|
Labesse Kévin
|
documentation: iam requirement for autoscaling (#1350)
| false
|
diff --git a/docs/operations/storage/README.md b/docs/operations/storage/README.md
index 56e92803b1b66..ce38c3ce23cd8 100644
--- a/docs/operations/storage/README.md
+++ b/docs/operations/storage/README.md
@@ -83,7 +83,9 @@ Resources: `arn:aws:dynamodb:<aws_region>:<aws_account_id>:table/<prefix>*`
* `application-autoscaling:DescribeScalableTargets`
* `application-autoscaling:DescribeScalingPolicies`
* `application-autoscaling:RegisterScalableTarget`
+* `application-autoscaling:DeregisterScalableTarget`
* `application-autoscaling:PutScalingPolicy`
+* `application-autoscaling:DeleteScalingPolicy`
Resources: `*`
|
documentation
|
iam requirement for autoscaling (#1350)
|
0f98b45d8dd16ada81ba39c30f27357ddb811c18
|
2023-04-01 01:38:03
|
Andrey Em
|
loki: Add UsageStatsURL to usagestats Config (#8779)
| false
|
diff --git a/docs/sources/configuration/_index.md b/docs/sources/configuration/_index.md
index 42cd103b9e690..9f02492f99ee3 100644
--- a/docs/sources/configuration/_index.md
+++ b/docs/sources/configuration/_index.md
@@ -2931,6 +2931,10 @@ Configuration for usage report.
# Enable anonymous usage reporting.
# CLI flag: -reporting.enabled
[reporting_enabled: <boolean> | default = true]
+
+# URL to which reports are sent
+# CLI flag: -reporting.usage-stats-url
+[usage_stats_url: <string> | default = "https://stats.grafana.org/loki-usage-report"]
```
### common
diff --git a/pkg/usagestats/reporter.go b/pkg/usagestats/reporter.go
index 2ed35cc4261f0..508380a57bda0 100644
--- a/pkg/usagestats/reporter.go
+++ b/pkg/usagestats/reporter.go
@@ -40,13 +40,15 @@ var (
)
type Config struct {
- Enabled bool `yaml:"reporting_enabled"`
- Leader bool `yaml:"-"`
+ Enabled bool `yaml:"reporting_enabled"`
+ Leader bool `yaml:"-"`
+ UsageStatsURL string `yaml:"usage_stats_url"`
}
// RegisterFlags adds the flags required to config this to the given FlagSet
func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
f.BoolVar(&cfg.Enabled, "reporting.enabled", true, "Enable anonymous usage reporting.")
+ f.StringVar(&cfg.UsageStatsURL, "reporting.usage-stats-url", usageStatsURL, "URL to which reports are sent")
}
type Reporter struct {
@@ -299,7 +301,7 @@ func (rep *Reporter) reportUsage(ctx context.Context, interval time.Time) error
})
var errs multierror.MultiError
for backoff.Ongoing() {
- if err := sendReport(ctx, rep.cluster, interval); err != nil {
+ if err := sendReport(ctx, rep.cluster, interval, rep.conf.UsageStatsURL); err != nil {
level.Info(rep.logger).Log("msg", "failed to send usage report", "retries", backoff.NumRetries(), "err", err)
errs.Add(err)
backoff.Wait()
diff --git a/pkg/usagestats/reporter_test.go b/pkg/usagestats/reporter_test.go
index e6a0ed9712a3a..5e215401413d5 100644
--- a/pkg/usagestats/reporter_test.go
+++ b/pkg/usagestats/reporter_test.go
@@ -79,14 +79,13 @@ func Test_ReportLoop(t *testing.T) {
clusterIDs = append(clusterIDs, received.ClusterID)
rw.WriteHeader(http.StatusOK)
}))
- usageStatsURL = server.URL
objectClient, err := local.NewFSObjectClient(local.FSConfig{
Directory: t.TempDir(),
})
require.NoError(t, err)
- r, err := NewReporter(Config{Leader: true, Enabled: true}, kv.Config{
+ r, err := NewReporter(Config{Leader: true, Enabled: true, UsageStatsURL: server.URL}, kv.Config{
Store: "inmemory",
}, objectClient, log.NewLogfmtLogger(os.Stdout), prometheus.NewPedanticRegistry())
require.NoError(t, err)
diff --git a/pkg/usagestats/stats.go b/pkg/usagestats/stats.go
index bee9c1f7e0ad7..6db6613d67ca1 100644
--- a/pkg/usagestats/stats.go
+++ b/pkg/usagestats/stats.go
@@ -45,13 +45,13 @@ type Report struct {
}
// sendReport sends the report to the stats server
-func sendReport(ctx context.Context, seed *ClusterSeed, interval time.Time) error {
+func sendReport(ctx context.Context, seed *ClusterSeed, interval time.Time, URL string) error {
report := buildReport(seed, interval)
out, err := jsoniter.MarshalIndent(report, "", " ")
if err != nil {
return err
}
- req, err := http.NewRequest(http.MethodPost, usageStatsURL, bytes.NewBuffer(out))
+ req, err := http.NewRequest(http.MethodPost, URL, bytes.NewBuffer(out))
if err != nil {
return err
}
|
loki
|
Add UsageStatsURL to usagestats Config (#8779)
|
9265b8eef55bb252bb3116d34eabebfa5eb5ed56
|
2022-09-21 20:51:38
|
Jarjar
|
helm: Fix ExternalLicence (#7189)
| false
|
diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml
index b6b7a5cdbf33a..d13a00712f1c7 100644
--- a/production/helm/loki/Chart.yaml
+++ b/production/helm/loki/Chart.yaml
@@ -3,7 +3,7 @@ name: loki
description: Helm chart for Grafana Loki in simple, scalable mode
type: application
appVersion: 2.6.1
-version: 3.0.8
+version: 3.0.9
home: https://grafana.github.io/helm-charts
sources:
- https://github.com/grafana/loki
diff --git a/production/helm/loki/README.md b/production/helm/loki/README.md
index 71e9c4802b95e..a7540d00d4428 100644
--- a/production/helm/loki/README.md
+++ b/production/helm/loki/README.md
@@ -1,6 +1,6 @@
# loki
-  
+  
Helm chart for Grafana Loki in simple, scalable mode
diff --git a/production/helm/loki/README.md.gotmpl b/production/helm/loki/README.md.gotmpl
index 7c9919b50af18..6aca37204a37e 100644
--- a/production/helm/loki/README.md.gotmpl
+++ b/production/helm/loki/README.md.gotmpl
@@ -29,7 +29,7 @@ As a result of this major change, upgrades from the charts this replaces might b
### Upgrading from `grafana/loki`
-The default installation of `grafana/loki` is a single instance backed by `filesystem` storage that is not highly available. As a result, this upgrade method will involve downtime. The upgrade will involve deleting the previously deployed loki stateful set, the running the `helm upgrade` which will create the new one with the same name, which should attach to the existing PVC or ephemeral storage, thus preserving you data. Will still highly recommend backing up all data before conducting the upgrade.
+The default installation of `grafana/loki` is a single instance backed by `filesystem` storage that is not highly available. As a result, this upgrade method will involve downtime. The upgrade will involve deleting the previously deployed loki stateful set, the running the `helm upgrade` which will create the new one with the same name, which should attach to the existing PVC or ephemeral storage, thus preserving your data. Will still highly recommend backing up all data before conducting the upgrade.
To upgrade, you will need at least the following in your `values.yaml`:
@@ -41,7 +41,7 @@ loki:
type: 'filesystem'
```
-You will need to 1. Update the grafana helm repo, 2. delete the exsiting stateful set, and 3. updgrade making sure to have the values above included in your `values.yaml`. If you installed `grafana/loki` as `loki` in namespace `loki`, the commands would be:
+You will need to 1. Update the grafana helm repo, 2. delete the exsiting stateful set, and 3. upgrade making sure to have the values above included in your `values.yaml`. If you installed `grafana/loki` as `loki` in namespace `loki`, the commands would be:
```console
helm repo update grafana
diff --git a/production/helm/loki/templates/read/statefulset-read.yaml b/production/helm/loki/templates/read/statefulset-read.yaml
index 5e5c24c0e8cfb..bcab2009c8e9c 100644
--- a/production/helm/loki/templates/read/statefulset-read.yaml
+++ b/production/helm/loki/templates/read/statefulset-read.yaml
@@ -129,8 +129,8 @@ spec:
{{- if .Values.enterprise.enabled }}
- name: license
secret:
- {{- if .Values.useExternalLicense }}
- secretName: {{ .Values.externalLicenseName }}
+ {{- if .Values.enterprise.useExternalLicense }}
+ secretName: {{ .Values.enterprise.externalLicenseName }}
{{- else }}
secretName: enterprise-logs-license
{{- end }}
diff --git a/production/helm/loki/templates/secret-license.yaml b/production/helm/loki/templates/secret-license.yaml
index 95f61045da7ae..31af72e80ca2d 100644
--- a/production/helm/loki/templates/secret-license.yaml
+++ b/production/helm/loki/templates/secret-license.yaml
@@ -1,4 +1,4 @@
-{{- if and (not .Values.useExternalLicense) .Values.enterprise.enabled -}}
+{{- if and (not .Values.enterprise.useExternalLicense) .Values.enterprise.enabled -}}
apiVersion: v1
kind: Secret
metadata:
diff --git a/production/helm/loki/templates/single-binary/statefulset.yaml b/production/helm/loki/templates/single-binary/statefulset.yaml
index 149ec570aea9c..0d6c924f7349d 100644
--- a/production/helm/loki/templates/single-binary/statefulset.yaml
+++ b/production/helm/loki/templates/single-binary/statefulset.yaml
@@ -116,8 +116,8 @@ spec:
{{- if .Values.enterprise.enabled }}
- name: license
secret:
- {{- if .Values.useExternalLicense }}
- secretName: {{ .Values.externalLicenseName }}
+ {{- if .Values.enterprise.useExternalLicense }}
+ secretName: {{ .Values.enterprise.externalLicenseName }}
{{- else }}
secretName: enterprise-logs-license
{{- end }}
diff --git a/production/helm/loki/templates/write/statefulset-write.yaml b/production/helm/loki/templates/write/statefulset-write.yaml
index f3cb14d81b1a8..652449bee5172 100644
--- a/production/helm/loki/templates/write/statefulset-write.yaml
+++ b/production/helm/loki/templates/write/statefulset-write.yaml
@@ -117,8 +117,8 @@ spec:
{{- if .Values.enterprise.enabled }}
- name: license
secret:
- {{- if .Values.useExternalLicense }}
- secretName: {{ .Values.externalLicenseName }}
+ {{- if .Values.enterprise.useExternalLicense }}
+ secretName: {{ .Values.enterprise.externalLicenseName }}
{{- else }}
secretName: enterprise-logs-license
{{- end }}
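For context, a minimal `values.yaml` sketch that exercises the corrected `.Values.enterprise.*` path in the templates above (the secret name is a hypothetical example):

```yaml
enterprise:
  enabled: true
  useExternalLicense: true
  # Name of a pre-created Secret holding the license; hypothetical value.
  externalLicenseName: my-license-secret
```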
|
helm
|
Fix ExternalLicence (#7189)
|
bcdc807dd34d7a4e2e1dcfdffecd869da66e6efe
|
2023-06-30 00:27:20
|
Periklis Tsirakidis
|
operator: Expose limits config setting cardinality_limit (#9830)
| false
|
diff --git a/operator/CHANGELOG.md b/operator/CHANGELOG.md
index 6ffa62f9dd0ab..7f7a7269c5c51 100644
--- a/operator/CHANGELOG.md
+++ b/operator/CHANGELOG.md
@@ -1,5 +1,6 @@
## Main
+- [9830](https://github.com/grafana/loki/pull/9830) **periklis**: Expose limits config setting cardinality_limit
- [9600](https://github.com/grafana/loki/pull/9600) **periklis**: Add rules labels filters for openshift-logging application tenant
- [9735](https://github.com/grafana/loki/pull/9735) **JoaoBraveCoding** Adjust 1x.extra-small resources according to findings
- [9689](https://github.com/grafana/loki/pull/9689) **xperimental**: Fix availability of demo LokiStack size
diff --git a/operator/apis/loki/v1/lokistack_types.go b/operator/apis/loki/v1/lokistack_types.go
index 70ed9bccd776c..21e8fde4e67e9 100644
--- a/operator/apis/loki/v1/lokistack_types.go
+++ b/operator/apis/loki/v1/lokistack_types.go
@@ -564,6 +564,13 @@ type QueryLimitSpec struct {
// +kubebuilder:default:="3m"
// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Query Timeout"
QueryTimeout string `json:"queryTimeout,omitempty"`
+
+ // CardinalityLimit defines the cardinality limit for index queries.
+ //
+ // +optional
+ // +kubebuilder:validation:Optional
+ // +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Cardinality Limit"
+ CardinalityLimit int32 `json:"cardinalityLimit,omitempty"`
}
// IngestionLimitSpec defines the limits applied at the ingestion path.
diff --git a/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml
index 806029fbc71af..e3ec545ee3de9 100644
--- a/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml
+++ b/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml
@@ -150,7 +150,7 @@ metadata:
categories: OpenShift Optional, Logging & Tracing
certified: "false"
containerImage: docker.io/grafana/loki-operator:main-ac1c1fd
- createdAt: "2023-06-12T17:36:46Z"
+ createdAt: "2023-06-29T17:58:18Z"
description: The Community Loki Operator provides Kubernetes native deployment
and management of Loki and related logging components.
operators.operatorframework.io/builder: operator-sdk-unknown
@@ -341,6 +341,11 @@ spec:
path: limits.global.ingestion.perStreamRateLimitBurst
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
+ - description: CardinalityLimit defines the cardinality limit for index queries.
+ displayName: Cardinality Limit
+ path: limits.global.queries.cardinalityLimit
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:number
- description: MaxChunksPerQuery defines the maximum number of chunks that can
be fetched by a single query.
displayName: Max Chunk per Query
@@ -420,6 +425,11 @@ spec:
path: limits.tenants.ingestion.perStreamRateLimitBurst
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
+ - description: CardinalityLimit defines the cardinality limit for index queries.
+ displayName: Cardinality Limit
+ path: limits.tenants.queries.cardinalityLimit
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:number
- description: MaxChunksPerQuery defines the maximum number of chunks that can
be fetched by a single query.
displayName: Max Chunk per Query
diff --git a/operator/bundle/community-openshift/manifests/loki.grafana.com_lokistacks.yaml b/operator/bundle/community-openshift/manifests/loki.grafana.com_lokistacks.yaml
index 14cb9648ed084..d6ffdede14083 100644
--- a/operator/bundle/community-openshift/manifests/loki.grafana.com_lokistacks.yaml
+++ b/operator/bundle/community-openshift/manifests/loki.grafana.com_lokistacks.yaml
@@ -145,6 +145,11 @@ spec:
description: QueryLimits defines the limit applied on querying
log streams.
properties:
+ cardinalityLimit:
+ description: CardinalityLimit defines the cardinality
+ limit for index queries.
+ format: int32
+ type: integer
maxChunksPerQuery:
description: MaxChunksPerQuery defines the maximum number
of chunks that can be fetched by a single query.
@@ -267,6 +272,11 @@ spec:
description: QueryLimits defines the limit applied on querying
log streams.
properties:
+ cardinalityLimit:
+ description: CardinalityLimit defines the cardinality
+ limit for index queries.
+ format: int32
+ type: integer
maxChunksPerQuery:
description: MaxChunksPerQuery defines the maximum number
of chunks that can be fetched by a single query.
diff --git a/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml
index e81f5bc1e47bc..aead97e4824b1 100644
--- a/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml
+++ b/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml
@@ -150,7 +150,7 @@ metadata:
categories: OpenShift Optional, Logging & Tracing
certified: "false"
containerImage: docker.io/grafana/loki-operator:main-ac1c1fd
- createdAt: "2023-06-12T17:36:41Z"
+ createdAt: "2023-06-29T17:58:15Z"
description: The Community Loki Operator provides Kubernetes native deployment
and management of Loki and related logging components.
operators.operatorframework.io/builder: operator-sdk-unknown
@@ -341,6 +341,11 @@ spec:
path: limits.global.ingestion.perStreamRateLimitBurst
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
+ - description: CardinalityLimit defines the cardinality limit for index queries.
+ displayName: Cardinality Limit
+ path: limits.global.queries.cardinalityLimit
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:number
- description: MaxChunksPerQuery defines the maximum number of chunks that can
be fetched by a single query.
displayName: Max Chunk per Query
@@ -420,6 +425,11 @@ spec:
path: limits.tenants.ingestion.perStreamRateLimitBurst
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
+ - description: CardinalityLimit defines the cardinality limit for index queries.
+ displayName: Cardinality Limit
+ path: limits.tenants.queries.cardinalityLimit
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:number
- description: MaxChunksPerQuery defines the maximum number of chunks that can
be fetched by a single query.
displayName: Max Chunk per Query
diff --git a/operator/bundle/community/manifests/loki.grafana.com_lokistacks.yaml b/operator/bundle/community/manifests/loki.grafana.com_lokistacks.yaml
index 780822dd489cb..4c933b347ee89 100644
--- a/operator/bundle/community/manifests/loki.grafana.com_lokistacks.yaml
+++ b/operator/bundle/community/manifests/loki.grafana.com_lokistacks.yaml
@@ -145,6 +145,11 @@ spec:
description: QueryLimits defines the limit applied on querying
log streams.
properties:
+ cardinalityLimit:
+ description: CardinalityLimit defines the cardinality
+ limit for index queries.
+ format: int32
+ type: integer
maxChunksPerQuery:
description: MaxChunksPerQuery defines the maximum number
of chunks that can be fetched by a single query.
@@ -267,6 +272,11 @@ spec:
description: QueryLimits defines the limit applied on querying
log streams.
properties:
+ cardinalityLimit:
+ description: CardinalityLimit defines the cardinality
+ limit for index queries.
+ format: int32
+ type: integer
maxChunksPerQuery:
description: MaxChunksPerQuery defines the maximum number
of chunks that can be fetched by a single query.
diff --git a/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml
index 0fe1378643fa6..14a37978a5f21 100644
--- a/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml
+++ b/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml
@@ -150,7 +150,7 @@ metadata:
categories: OpenShift Optional, Logging & Tracing
certified: "false"
containerImage: quay.io/openshift-logging/loki-operator:v0.1.0
- createdAt: "2023-06-12T17:36:51Z"
+ createdAt: "2023-06-29T17:58:20Z"
description: |
The Loki Operator for OCP provides a means for configuring and managing a Loki stack for cluster logging.
## Prerequisites and Requirements
@@ -354,6 +354,11 @@ spec:
path: limits.global.ingestion.perStreamRateLimitBurst
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
+ - description: CardinalityLimit defines the cardinality limit for index queries.
+ displayName: Cardinality Limit
+ path: limits.global.queries.cardinalityLimit
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:number
- description: MaxChunksPerQuery defines the maximum number of chunks that can
be fetched by a single query.
displayName: Max Chunk per Query
@@ -433,6 +438,11 @@ spec:
path: limits.tenants.ingestion.perStreamRateLimitBurst
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
+ - description: CardinalityLimit defines the cardinality limit for index queries.
+ displayName: Cardinality Limit
+ path: limits.tenants.queries.cardinalityLimit
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:number
- description: MaxChunksPerQuery defines the maximum number of chunks that can
be fetched by a single query.
displayName: Max Chunk per Query
diff --git a/operator/bundle/openshift/manifests/loki.grafana.com_lokistacks.yaml b/operator/bundle/openshift/manifests/loki.grafana.com_lokistacks.yaml
index bde43a4b27f5a..ae149b380d0cc 100644
--- a/operator/bundle/openshift/manifests/loki.grafana.com_lokistacks.yaml
+++ b/operator/bundle/openshift/manifests/loki.grafana.com_lokistacks.yaml
@@ -145,6 +145,11 @@ spec:
description: QueryLimits defines the limit applied on querying
log streams.
properties:
+ cardinalityLimit:
+ description: CardinalityLimit defines the cardinality
+ limit for index queries.
+ format: int32
+ type: integer
maxChunksPerQuery:
description: MaxChunksPerQuery defines the maximum number
of chunks that can be fetched by a single query.
@@ -267,6 +272,11 @@ spec:
description: QueryLimits defines the limit applied on querying
log streams.
properties:
+ cardinalityLimit:
+ description: CardinalityLimit defines the cardinality
+ limit for index queries.
+ format: int32
+ type: integer
maxChunksPerQuery:
description: MaxChunksPerQuery defines the maximum number
of chunks that can be fetched by a single query.
diff --git a/operator/config/crd/bases/loki.grafana.com_lokistacks.yaml b/operator/config/crd/bases/loki.grafana.com_lokistacks.yaml
index 78e49cd1d0a28..b56e02fd9b9d4 100644
--- a/operator/config/crd/bases/loki.grafana.com_lokistacks.yaml
+++ b/operator/config/crd/bases/loki.grafana.com_lokistacks.yaml
@@ -128,6 +128,11 @@ spec:
description: QueryLimits defines the limit applied on querying
log streams.
properties:
+ cardinalityLimit:
+ description: CardinalityLimit defines the cardinality
+ limit for index queries.
+ format: int32
+ type: integer
maxChunksPerQuery:
description: MaxChunksPerQuery defines the maximum number
of chunks that can be fetched by a single query.
@@ -250,6 +255,11 @@ spec:
description: QueryLimits defines the limit applied on querying
log streams.
properties:
+ cardinalityLimit:
+ description: CardinalityLimit defines the cardinality
+ limit for index queries.
+ format: int32
+ type: integer
maxChunksPerQuery:
description: MaxChunksPerQuery defines the maximum number
of chunks that can be fetched by a single query.
diff --git a/operator/config/manifests/community-openshift/bases/loki-operator.clusterserviceversion.yaml b/operator/config/manifests/community-openshift/bases/loki-operator.clusterserviceversion.yaml
index ad8e8b303d1f9..e5cb07855cd3a 100644
--- a/operator/config/manifests/community-openshift/bases/loki-operator.clusterserviceversion.yaml
+++ b/operator/config/manifests/community-openshift/bases/loki-operator.clusterserviceversion.yaml
@@ -254,6 +254,11 @@ spec:
path: limits.global.ingestion.perStreamRateLimitBurst
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
+ - description: CardinalityLimit defines the cardinality limit for index queries.
+ displayName: Cardinality Limit
+ path: limits.global.queries.cardinalityLimit
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:number
- description: MaxChunksPerQuery defines the maximum number of chunks that can
be fetched by a single query.
displayName: Max Chunk per Query
@@ -333,6 +338,11 @@ spec:
path: limits.tenants.ingestion.perStreamRateLimitBurst
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
+ - description: CardinalityLimit defines the cardinality limit for index queries.
+ displayName: Cardinality Limit
+ path: limits.tenants.queries.cardinalityLimit
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:number
- description: MaxChunksPerQuery defines the maximum number of chunks that can
be fetched by a single query.
displayName: Max Chunk per Query
diff --git a/operator/config/manifests/community/bases/loki-operator.clusterserviceversion.yaml b/operator/config/manifests/community/bases/loki-operator.clusterserviceversion.yaml
index cef4c8043c7d6..209eabc5d2bd7 100644
--- a/operator/config/manifests/community/bases/loki-operator.clusterserviceversion.yaml
+++ b/operator/config/manifests/community/bases/loki-operator.clusterserviceversion.yaml
@@ -254,6 +254,11 @@ spec:
path: limits.global.ingestion.perStreamRateLimitBurst
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
+ - description: CardinalityLimit defines the cardinality limit for index queries.
+ displayName: Cardinality Limit
+ path: limits.global.queries.cardinalityLimit
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:number
- description: MaxChunksPerQuery defines the maximum number of chunks that can
be fetched by a single query.
displayName: Max Chunk per Query
@@ -333,6 +338,11 @@ spec:
path: limits.tenants.ingestion.perStreamRateLimitBurst
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
+ - description: CardinalityLimit defines the cardinality limit for index queries.
+ displayName: Cardinality Limit
+ path: limits.tenants.queries.cardinalityLimit
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:number
- description: MaxChunksPerQuery defines the maximum number of chunks that can
be fetched by a single query.
displayName: Max Chunk per Query
diff --git a/operator/config/manifests/openshift/bases/loki-operator.clusterserviceversion.yaml b/operator/config/manifests/openshift/bases/loki-operator.clusterserviceversion.yaml
index da6266ec0a293..5ca4fdffc6ea7 100644
--- a/operator/config/manifests/openshift/bases/loki-operator.clusterserviceversion.yaml
+++ b/operator/config/manifests/openshift/bases/loki-operator.clusterserviceversion.yaml
@@ -266,6 +266,11 @@ spec:
path: limits.global.ingestion.perStreamRateLimitBurst
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
+ - description: CardinalityLimit defines the cardinality limit for index queries.
+ displayName: Cardinality Limit
+ path: limits.global.queries.cardinalityLimit
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:number
- description: MaxChunksPerQuery defines the maximum number of chunks that can
be fetched by a single query.
displayName: Max Chunk per Query
@@ -345,6 +350,11 @@ spec:
path: limits.tenants.ingestion.perStreamRateLimitBurst
x-descriptors:
- urn:alm:descriptor:com.tectonic.ui:number
+ - description: CardinalityLimit defines the cardinality limit for index queries.
+ displayName: Cardinality Limit
+ path: limits.tenants.queries.cardinalityLimit
+ x-descriptors:
+ - urn:alm:descriptor:com.tectonic.ui:number
- description: MaxChunksPerQuery defines the maximum number of chunks that can
be fetched by a single query.
displayName: Max Chunk per Query
diff --git a/operator/docs/operator/api.md b/operator/docs/operator/api.md
index 8fa7cd56ae0d7..17939f6712fe8 100644
--- a/operator/docs/operator/api.md
+++ b/operator/docs/operator/api.md
@@ -2641,6 +2641,18 @@ string
<p>Timeout when querying ingesters or storage during the execution of a query request.</p>
</td>
</tr>
+<tr>
+<td>
+<code>cardinalityLimit</code><br/>
+<em>
+int32
+</em>
+</td>
+<td>
+<em>(Optional)</em>
+<p>CardinalityLimit defines the cardinality limit for index queries.</p>
+</td>
+</tr>
</tbody>
</table>
diff --git a/operator/internal/manifests/internal/config/build_test.go b/operator/internal/manifests/internal/config/build_test.go
index 659a9be0fd335..dfdd0b45a155e 100644
--- a/operator/internal/manifests/internal/config/build_test.go
+++ b/operator/internal/manifests/internal/config/build_test.go
@@ -198,6 +198,7 @@ overrides:
MaxChunksPerQuery: 2000000,
MaxQuerySeries: 500,
QueryTimeout: "1m",
+ CardinalityLimit: 100000,
},
},
},
@@ -454,6 +455,7 @@ overrides:
MaxChunksPerQuery: 2000000,
MaxQuerySeries: 500,
QueryTimeout: "1m",
+ CardinalityLimit: 100000,
},
},
Tenants: map[string]lokiv1.LimitsTemplateSpec{
@@ -863,6 +865,7 @@ overrides:
MaxChunksPerQuery: 2000000,
MaxQuerySeries: 500,
QueryTimeout: "1m",
+ CardinalityLimit: 100000,
},
},
},
@@ -1215,6 +1218,7 @@ overrides:
MaxChunksPerQuery: 2000000,
MaxQuerySeries: 500,
QueryTimeout: "1m",
+ CardinalityLimit: 100000,
},
},
},
@@ -1581,6 +1585,7 @@ overrides:
MaxChunksPerQuery: 2000000,
MaxQuerySeries: 500,
QueryTimeout: "1m",
+ CardinalityLimit: 100000,
},
},
},
@@ -1915,6 +1920,7 @@ overrides:
MaxChunksPerQuery: 2000000,
MaxQuerySeries: 500,
QueryTimeout: "1m",
+ CardinalityLimit: 100000,
},
Retention: &lokiv1.RetentionLimitSpec{
Days: 15,
@@ -2305,6 +2311,7 @@ overrides:
MaxChunksPerQuery: 2000000,
MaxQuerySeries: 500,
QueryTimeout: "2m",
+ CardinalityLimit: 100000,
},
},
},
@@ -2696,6 +2703,7 @@ overrides:
MaxChunksPerQuery: 2000000,
MaxQuerySeries: 500,
QueryTimeout: "1m",
+ CardinalityLimit: 100000,
},
},
},
@@ -3086,6 +3094,7 @@ overrides:
MaxChunksPerQuery: 2000000,
MaxQuerySeries: 500,
QueryTimeout: "2m",
+ CardinalityLimit: 100000,
},
},
},
@@ -3470,6 +3479,7 @@ overrides:
MaxChunksPerQuery: 2000000,
MaxQuerySeries: 500,
QueryTimeout: "1m",
+ CardinalityLimit: 100000,
},
},
},
diff --git a/operator/internal/manifests/internal/config/loki-config.yaml b/operator/internal/manifests/internal/config/loki-config.yaml
index fd296a18d9d13..71748b64ad68c 100644
--- a/operator/internal/manifests/internal/config/loki-config.yaml
+++ b/operator/internal/manifests/internal/config/loki-config.yaml
@@ -162,7 +162,7 @@ limits_config:
max_query_length: 721h
max_query_parallelism: 32
max_query_series: {{ .Stack.Limits.Global.QueryLimits.MaxQuerySeries }}
- cardinality_limit: 100000
+ cardinality_limit: {{ .Stack.Limits.Global.QueryLimits.CardinalityLimit }}
max_streams_matchers_per_query: 1000
query_timeout: {{ .Stack.Limits.Global.QueryLimits.QueryTimeout }}
{{- if .Retention.Enabled }}{{- with .Stack.Limits.Global.Retention }}
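As an illustration, with the size-table defaults shown later in this diff the templated block above would render roughly as follows (a sketch; only the lines relevant to this change are shown):

```yaml
limits_config:
  max_query_series: 500
  cardinality_limit: 100000
  query_timeout: 3m
```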
diff --git a/operator/internal/manifests/internal/config/loki-runtime-config.yaml b/operator/internal/manifests/internal/config/loki-runtime-config.yaml
index 524eb3970226d..ca62d0a783db9 100644
--- a/operator/internal/manifests/internal/config/loki-runtime-config.yaml
+++ b/operator/internal/manifests/internal/config/loki-runtime-config.yaml
@@ -46,6 +46,9 @@ overrides:
{{- if $spec.QueryLimits.QueryTimeout }}
query_timeout: {{ $spec.QueryLimits.QueryTimeout }}
{{- end }}
+ {{- if $spec.QueryLimits.CardinalityLimit }}
+ cardinality_limit: {{ $spec.QueryLimits.CardinalityLimit }}
+ {{- end }}
{{- end -}}
{{- with $spec.Retention }}
retention_period: {{ .Days }}d
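And a sketch of the per-tenant block the runtime-config template above would emit when a tenant sets the new limit (tenant name and values are hypothetical, taken from the test fixtures in this diff):

```yaml
overrides:
  tenant-a:
    query_timeout: 2m
    cardinality_limit: 100000
```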
diff --git a/operator/internal/manifests/internal/sizes.go b/operator/internal/manifests/internal/sizes.go
index 03915b198908f..dd56b2b0cb001 100644
--- a/operator/internal/manifests/internal/sizes.go
+++ b/operator/internal/manifests/internal/sizes.go
@@ -250,6 +250,7 @@ var StackSizeTable = map[lokiv1.LokiStackSizeType]lokiv1.LokiStackSpec{
MaxChunksPerQuery: 2000000,
MaxQuerySeries: 500,
QueryTimeout: "3m",
+ CardinalityLimit: 100000,
},
},
},
@@ -304,6 +305,7 @@ var StackSizeTable = map[lokiv1.LokiStackSizeType]lokiv1.LokiStackSpec{
MaxChunksPerQuery: 2000000,
MaxQuerySeries: 500,
QueryTimeout: "3m",
+ CardinalityLimit: 100000,
},
},
},
@@ -361,6 +363,7 @@ var StackSizeTable = map[lokiv1.LokiStackSizeType]lokiv1.LokiStackSpec{
MaxChunksPerQuery: 2000000,
MaxQuerySeries: 500,
QueryTimeout: "3m",
+ CardinalityLimit: 100000,
},
},
},
@@ -418,6 +421,7 @@ var StackSizeTable = map[lokiv1.LokiStackSizeType]lokiv1.LokiStackSpec{
MaxChunksPerQuery: 2000000,
MaxQuerySeries: 500,
QueryTimeout: "3m",
+ CardinalityLimit: 100000,
},
},
},
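Putting it together, a minimal LokiStack manifest sketch that sets the new field at the CSV path exposed above (`limits.global.queries.cardinalityLimit`); the metadata name, storage class, and secret name are illustrative:

```yaml
apiVersion: loki.grafana.com/v1
kind: LokiStack
metadata:
  name: lokistack-sample      # hypothetical name
spec:
  size: 1x.small
  storageClassName: standard  # hypothetical storage class
  storage:
    secret:
      name: object-storage    # hypothetical secret
      type: s3
  limits:
    global:
      queries:
        cardinalityLimit: 100000
```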
|
operator
|
Expose limits config setting cardinality_limit (#9830)
|
65f90c75b501a9c8394b9d526db69c90d0aa2636
|
2025-03-18 06:06:13
|
renovate[bot]
|
fix(deps): update module go.opentelemetry.io/collector/pdata to v1.28.0 (main) (#16797)
| false
|
diff --git a/go.mod b/go.mod
index a2ec8739ecf34..c077c50916bbd 100644
--- a/go.mod
+++ b/go.mod
@@ -150,7 +150,7 @@ require (
github.com/twmb/franz-go/plugin/kotel v1.5.0
github.com/twmb/franz-go/plugin/kprom v1.1.0
github.com/willf/bloom v2.0.3+incompatible
- go.opentelemetry.io/collector/pdata v1.27.0
+ go.opentelemetry.io/collector/pdata v1.28.0
go4.org/netipx v0.0.0-20230125063823-8449b0a6169f
golang.org/x/oauth2 v0.28.0
golang.org/x/text v0.23.0
diff --git a/go.sum b/go.sum
index fe57a709632e9..64f8c8c6a8100 100644
--- a/go.sum
+++ b/go.sum
@@ -1296,8 +1296,8 @@ go.opentelemetry.io/collector/consumer/consumertest v0.118.0 h1:8AAS9ejQapP1zqt0
go.opentelemetry.io/collector/consumer/consumertest v0.118.0/go.mod h1:spRM2wyGr4QZzqMHlLmZnqRCxqXN4Wd0piogC4Qb5PQ=
go.opentelemetry.io/collector/consumer/xconsumer v0.118.0 h1:guWnzzRqgCInjnYlOQ1BPrimppNGIVvnknAjlIbWXuY=
go.opentelemetry.io/collector/consumer/xconsumer v0.118.0/go.mod h1:C5V2d6Ys/Fi6k3tzjBmbdZ9v3J/rZSAMlhx4KVcMIIg=
-go.opentelemetry.io/collector/pdata v1.27.0 h1:66yI7FYkUDia74h48Fd2/KG2Vk8DxZnGw54wRXykCEU=
-go.opentelemetry.io/collector/pdata v1.27.0/go.mod h1:18e8/xDZsqyj00h/5HM5GLdJgBzzG9Ei8g9SpNoiMtI=
+go.opentelemetry.io/collector/pdata v1.28.0 h1:xSZyvTOOc2Wmz4PoxrVqeQfodLgs9k7gowLAnzZN0eU=
+go.opentelemetry.io/collector/pdata v1.28.0/go.mod h1:asKE8MD/4SOKz1mCrGdAz4VO2U2HUNg8A6094uK7pq0=
go.opentelemetry.io/collector/pdata/pprofile v0.118.0 h1:VK/fr65VFOwEhsSGRPj5c3lCv0yIK1Kt0sZxv9WZBb8=
go.opentelemetry.io/collector/pdata/pprofile v0.118.0/go.mod h1:eJyP/vBm179EghV3dPSnamGAWQwLyd+4z/3yG54YFoQ=
go.opentelemetry.io/collector/pdata/testdata v0.118.0 h1:5N0w1SX9KIRkwvtkrpzQgXy9eGk3vfNG0ds6mhEPMIM=
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_byteslice.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_byteslice.go
index cbb64987d2baf..f730a185ca8b6 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_byteslice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_byteslice.go
@@ -7,6 +7,9 @@
package pcommon
import (
+ "iter"
+ "slices"
+
"go.opentelemetry.io/collector/pdata/internal"
)
@@ -55,6 +58,17 @@ func (ms ByteSlice) At(i int) byte {
return (*ms.getOrig())[i]
}
+// All returns an iterator over index-value pairs in the slice.
+func (ms ByteSlice) All() iter.Seq2[int, byte] {
+ return func(yield func(int, byte) bool) {
+ for i := 0; i < ms.Len(); i++ {
+ if !yield(i, ms.At(i)) {
+ return
+ }
+ }
+ }
+}
+
// SetAt sets byte item at particular index.
// Equivalent of byteSlice[i] = val
func (ms ByteSlice) SetAt(i int, val byte) {
@@ -102,6 +116,11 @@ func (ms ByteSlice) CopyTo(dest ByteSlice) {
*dest.getOrig() = copyByteSlice(*dest.getOrig(), *ms.getOrig())
}
+// Equal checks equality with another ByteSlice
+func (ms ByteSlice) Equal(val ByteSlice) bool {
+ return slices.Equal(*ms.getOrig(), *val.getOrig())
+}
+
func copyByteSlice(dst, src []byte) []byte {
dst = dst[:0]
return append(dst, src...)
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_float64slice.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_float64slice.go
index 83a07ccf483db..7cd0a7b63a2b4 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_float64slice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_float64slice.go
@@ -7,6 +7,9 @@
package pcommon
import (
+ "iter"
+ "slices"
+
"go.opentelemetry.io/collector/pdata/internal"
)
@@ -55,6 +58,17 @@ func (ms Float64Slice) At(i int) float64 {
return (*ms.getOrig())[i]
}
+// All returns an iterator over index-value pairs in the slice.
+func (ms Float64Slice) All() iter.Seq2[int, float64] {
+ return func(yield func(int, float64) bool) {
+ for i := 0; i < ms.Len(); i++ {
+ if !yield(i, ms.At(i)) {
+ return
+ }
+ }
+ }
+}
+
// SetAt sets float64 item at particular index.
// Equivalent of float64Slice[i] = val
func (ms Float64Slice) SetAt(i int, val float64) {
@@ -102,6 +116,11 @@ func (ms Float64Slice) CopyTo(dest Float64Slice) {
*dest.getOrig() = copyFloat64Slice(*dest.getOrig(), *ms.getOrig())
}
+// Equal checks equality with another Float64Slice
+func (ms Float64Slice) Equal(val Float64Slice) bool {
+ return slices.Equal(*ms.getOrig(), *val.getOrig())
+}
+
func copyFloat64Slice(dst, src []float64) []float64 {
dst = dst[:0]
return append(dst, src...)
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_int32slice.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_int32slice.go
index 35a40bd079cf1..a8ac6a3b4f828 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_int32slice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_int32slice.go
@@ -7,6 +7,9 @@
package pcommon
import (
+ "iter"
+ "slices"
+
"go.opentelemetry.io/collector/pdata/internal"
)
@@ -55,6 +58,17 @@ func (ms Int32Slice) At(i int) int32 {
return (*ms.getOrig())[i]
}
+// All returns an iterator over index-value pairs in the slice.
+func (ms Int32Slice) All() iter.Seq2[int, int32] {
+ return func(yield func(int, int32) bool) {
+ for i := 0; i < ms.Len(); i++ {
+ if !yield(i, ms.At(i)) {
+ return
+ }
+ }
+ }
+}
+
// SetAt sets int32 item at particular index.
// Equivalent of int32Slice[i] = val
func (ms Int32Slice) SetAt(i int, val int32) {
@@ -102,6 +116,11 @@ func (ms Int32Slice) CopyTo(dest Int32Slice) {
*dest.getOrig() = copyInt32Slice(*dest.getOrig(), *ms.getOrig())
}
+// Equal checks equality with another Int32Slice
+func (ms Int32Slice) Equal(val Int32Slice) bool {
+ return slices.Equal(*ms.getOrig(), *val.getOrig())
+}
+
func copyInt32Slice(dst, src []int32) []int32 {
dst = dst[:0]
return append(dst, src...)
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_int64slice.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_int64slice.go
index e50cd3cc3a52e..a8e5f453b75a3 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_int64slice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_int64slice.go
@@ -7,6 +7,9 @@
package pcommon
import (
+ "iter"
+ "slices"
+
"go.opentelemetry.io/collector/pdata/internal"
)
@@ -55,6 +58,17 @@ func (ms Int64Slice) At(i int) int64 {
return (*ms.getOrig())[i]
}
+// All returns an iterator over index-value pairs in the slice.
+func (ms Int64Slice) All() iter.Seq2[int, int64] {
+ return func(yield func(int, int64) bool) {
+ for i := 0; i < ms.Len(); i++ {
+ if !yield(i, ms.At(i)) {
+ return
+ }
+ }
+ }
+}
+
// SetAt sets int64 item at particular index.
// Equivalent of int64Slice[i] = val
func (ms Int64Slice) SetAt(i int, val int64) {
@@ -102,6 +116,11 @@ func (ms Int64Slice) CopyTo(dest Int64Slice) {
*dest.getOrig() = copyInt64Slice(*dest.getOrig(), *ms.getOrig())
}
+// Equal checks equality with another Int64Slice
+func (ms Int64Slice) Equal(val Int64Slice) bool {
+ return slices.Equal(*ms.getOrig(), *val.getOrig())
+}
+
func copyInt64Slice(dst, src []int64) []int64 {
dst = dst[:0]
return append(dst, src...)
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_stringslice.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_stringslice.go
index 02a75c7a65c23..b165645708c59 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_stringslice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_stringslice.go
@@ -7,6 +7,9 @@
package pcommon
import (
+ "iter"
+ "slices"
+
"go.opentelemetry.io/collector/pdata/internal"
)
@@ -55,6 +58,17 @@ func (ms StringSlice) At(i int) string {
return (*ms.getOrig())[i]
}
+// All returns an iterator over index-value pairs in the slice.
+func (ms StringSlice) All() iter.Seq2[int, string] {
+ return func(yield func(int, string) bool) {
+ for i := 0; i < ms.Len(); i++ {
+ if !yield(i, ms.At(i)) {
+ return
+ }
+ }
+ }
+}
+
// SetAt sets string item at particular index.
// Equivalent of stringSlice[i] = val
func (ms StringSlice) SetAt(i int, val string) {
@@ -102,6 +116,11 @@ func (ms StringSlice) CopyTo(dest StringSlice) {
*dest.getOrig() = copyStringSlice(*dest.getOrig(), *ms.getOrig())
}
+// Equal checks equality with another StringSlice
+func (ms StringSlice) Equal(val StringSlice) bool {
+ return slices.Equal(*ms.getOrig(), *val.getOrig())
+}
+
func copyStringSlice(dst, src []string) []string {
dst = dst[:0]
return append(dst, src...)
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_uint64slice.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_uint64slice.go
index 1344ca35bcf40..937d64e4a4027 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_uint64slice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_uint64slice.go
@@ -7,6 +7,9 @@
package pcommon
import (
+ "iter"
+ "slices"
+
"go.opentelemetry.io/collector/pdata/internal"
)
@@ -55,6 +58,17 @@ func (ms UInt64Slice) At(i int) uint64 {
return (*ms.getOrig())[i]
}
+// All returns an iterator over index-value pairs in the slice.
+func (ms UInt64Slice) All() iter.Seq2[int, uint64] {
+ return func(yield func(int, uint64) bool) {
+ for i := 0; i < ms.Len(); i++ {
+ if !yield(i, ms.At(i)) {
+ return
+ }
+ }
+ }
+}
+
// SetAt sets uint64 item at particular index.
// Equivalent of uInt64Slice[i] = val
func (ms UInt64Slice) SetAt(i int, val uint64) {
@@ -102,6 +116,11 @@ func (ms UInt64Slice) CopyTo(dest UInt64Slice) {
*dest.getOrig() = copyUInt64Slice(*dest.getOrig(), *ms.getOrig())
}
+// Equal checks equality with another UInt64Slice
+func (ms UInt64Slice) Equal(val UInt64Slice) bool {
+ return slices.Equal(*ms.getOrig(), *val.getOrig())
+}
+
func copyUInt64Slice(dst, src []uint64) []uint64 {
dst = dst[:0]
return append(dst, src...)
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/map.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/map.go
index 0b7b2fd442066..ccbf3a55d7328 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/map.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/map.go
@@ -4,6 +4,8 @@
package pcommon // import "go.opentelemetry.io/collector/pdata/pcommon"
import (
+ "iter"
+
"go.uber.org/multierr"
"go.opentelemetry.io/collector/pdata/internal"
@@ -225,6 +227,22 @@ func (m Map) Range(f func(k string, v Value) bool) {
}
}
+// All returns an iterator over key-value pairs in the Map.
+//
+// for k, v := range es.All() {
+// ... // Do something with key-value pair
+// }
+func (m Map) All() iter.Seq2[string, Value] {
+ return func(yield func(string, Value) bool) {
+ for i := range *m.getOrig() {
+ kv := &(*m.getOrig())[i]
+ if !yield(kv.Key, Value(internal.NewValue(&kv.Value, m.getState()))) {
+ return
+ }
+ }
+ }
+}
+
// MoveTo moves all key/values from the current map overriding the destination and
// resetting the current instance to its zero value
func (m Map) MoveTo(dest Map) {
@@ -290,3 +308,26 @@ func (m Map) FromRaw(rawMap map[string]any) error {
*m.getOrig() = origs
return errs
}
+
+// Equal checks equality with another Map
+func (m Map) Equal(val Map) bool {
+ if m.Len() != val.Len() {
+ return false
+ }
+
+ fullEqual := true
+
+ m.Range(func(k string, v Value) bool {
+ vv, ok := val.Get(k)
+ if !ok {
+ fullEqual = false
+ return fullEqual
+ }
+
+ if !v.Equal(vv) {
+ fullEqual = false
+ }
+ return fullEqual
+ })
+ return fullEqual
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/slice.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/slice.go
index 7434f467aed6a..7e8037df3226e 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/slice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/slice.go
@@ -4,6 +4,8 @@
package pcommon // import "go.opentelemetry.io/collector/pdata/pcommon"
import (
+ "iter"
+
"go.uber.org/multierr"
"go.opentelemetry.io/collector/pdata/internal"
@@ -58,6 +60,21 @@ func (es Slice) At(ix int) Value {
return newValue(&(*es.getOrig())[ix], es.getState())
}
+// All returns an iterator over index-value pairs in the slice.
+//
+// for i, v := range es.All() {
+// ... // Do something with index-value pair
+// }
+func (es Slice) All() iter.Seq2[int, Value] {
+ return func(yield func(int, Value) bool) {
+ for i := 0; i < es.Len(); i++ {
+ if !yield(i, es.At(i)) {
+ return
+ }
+ }
+ }
+}
+
// CopyTo copies all elements from the current slice overriding the destination.
func (es Slice) CopyTo(dest Slice) {
dest.getState().AssertMutable()
@@ -164,3 +181,17 @@ func (es Slice) FromRaw(rawSlice []any) error {
*es.getOrig() = origs
return errs
}
+
+// Equal checks equality with another Slice
+func (es Slice) Equal(val Slice) bool {
+ if es.Len() != val.Len() {
+ return false
+ }
+
+ for i := 0; i < es.Len(); i++ {
+ if !es.At(i).Equal(val.At(i)) {
+ return false
+ }
+ }
+ return true
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/value.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/value.go
index 73a95bcf2e280..5c7972ced87e4 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/value.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/value.go
@@ -452,6 +452,33 @@ func (v Value) AsRaw() any {
return fmt.Sprintf("<Unknown OpenTelemetry value type %q>", v.Type())
}
+func (v Value) Equal(c Value) bool {
+ if v.Type() != c.Type() {
+ return false
+ }
+
+ switch v.Type() {
+ case ValueTypeEmpty:
+ return true
+ case ValueTypeStr:
+ return v.Str() == c.Str()
+ case ValueTypeBool:
+ return v.Bool() == c.Bool()
+ case ValueTypeDouble:
+ return v.Double() == c.Double()
+ case ValueTypeInt:
+ return v.Int() == c.Int()
+ case ValueTypeBytes:
+ return v.Bytes().Equal(c.Bytes())
+ case ValueTypeMap:
+ return v.Map().Equal(c.Map())
+ case ValueTypeSlice:
+ return v.Slice().Equal(c.Slice())
+ }
+
+ return false
+}
+
func newKeyValueString(k string, v string) otlpcommon.KeyValue {
orig := otlpcommon.KeyValue{Key: k}
state := internal.StateMutable
diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/generated_logrecordslice.go b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_logrecordslice.go
index a900b4e1c7ef3..771233711c039 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/plog/generated_logrecordslice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_logrecordslice.go
@@ -7,6 +7,7 @@
package plog
import (
+ "iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
@@ -56,6 +57,21 @@ func (es LogRecordSlice) At(i int) LogRecord {
return newLogRecord((*es.orig)[i], es.state)
}
+// All returns an iterator over index-value pairs in the slice.
+//
+// for i, v := range es.All() {
+// ... // Do something with index-value pair
+// }
+func (es LogRecordSlice) All() iter.Seq2[int, LogRecord] {
+ return func(yield func(int, LogRecord) bool) {
+ for i := 0; i < es.Len(); i++ {
+ if !yield(i, es.At(i)) {
+ return
+ }
+ }
+ }
+}
+
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If the newCap <= cap then no change in capacity.
// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/generated_resourcelogsslice.go b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_resourcelogsslice.go
index d2fc54de80bab..3e574539ed55e 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/plog/generated_resourcelogsslice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_resourcelogsslice.go
@@ -7,6 +7,7 @@
package plog
import (
+ "iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
@@ -56,6 +57,21 @@ func (es ResourceLogsSlice) At(i int) ResourceLogs {
return newResourceLogs((*es.orig)[i], es.state)
}
+// All returns an iterator over index-value pairs in the slice.
+//
+// for i, v := range es.All() {
+// ... // Do something with index-value pair
+// }
+func (es ResourceLogsSlice) All() iter.Seq2[int, ResourceLogs] {
+ return func(yield func(int, ResourceLogs) bool) {
+ for i := 0; i < es.Len(); i++ {
+ if !yield(i, es.At(i)) {
+ return
+ }
+ }
+ }
+}
+
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If the newCap <= cap then no change in capacity.
// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/generated_scopelogsslice.go b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_scopelogsslice.go
index 5bae8d9f9c95a..b4791995846e3 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/plog/generated_scopelogsslice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_scopelogsslice.go
@@ -7,6 +7,7 @@
package plog
import (
+ "iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
@@ -56,6 +57,21 @@ func (es ScopeLogsSlice) At(i int) ScopeLogs {
return newScopeLogs((*es.orig)[i], es.state)
}
+// All returns an iterator over index-value pairs in the slice.
+//
+// for i, v := range es.All() {
+// ... // Do something with index-value pair
+// }
+func (es ScopeLogsSlice) All() iter.Seq2[int, ScopeLogs] {
+ return func(yield func(int, ScopeLogs) bool) {
+ for i := 0; i < es.Len(); i++ {
+ if !yield(i, es.At(i)) {
+ return
+ }
+ }
+ }
+}
+
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If the newCap <= cap then no change in capacity.
// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exemplarslice.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exemplarslice.go
index 15d70a6edeb4c..4ee31367fc426 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exemplarslice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exemplarslice.go
@@ -7,6 +7,8 @@
package pmetric
import (
+ "iter"
+
"go.opentelemetry.io/collector/pdata/internal"
otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
)
@@ -54,6 +56,21 @@ func (es ExemplarSlice) At(i int) Exemplar {
return newExemplar(&(*es.orig)[i], es.state)
}
+// All returns an iterator over index-value pairs in the slice.
+//
+// for i, v := range es.All() {
+// ... // Do something with index-value pair
+// }
+func (es ExemplarSlice) All() iter.Seq2[int, Exemplar] {
+ return func(yield func(int, Exemplar) bool) {
+ for i := 0; i < es.Len(); i++ {
+ if !yield(i, es.At(i)) {
+ return
+ }
+ }
+ }
+}
+
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If the newCap <= cap then no change in capacity.
// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogramdatapointslice.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogramdatapointslice.go
index a466a7c185b40..e37ccd9b67b20 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogramdatapointslice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogramdatapointslice.go
@@ -7,6 +7,7 @@
package pmetric
import (
+ "iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
@@ -56,6 +57,21 @@ func (es ExponentialHistogramDataPointSlice) At(i int) ExponentialHistogramDataP
return newExponentialHistogramDataPoint((*es.orig)[i], es.state)
}
+// All returns an iterator over index-value pairs in the slice.
+//
+// for i, v := range es.All() {
+// ... // Do something with index-value pair
+// }
+func (es ExponentialHistogramDataPointSlice) All() iter.Seq2[int, ExponentialHistogramDataPoint] {
+ return func(yield func(int, ExponentialHistogramDataPoint) bool) {
+ for i := 0; i < es.Len(); i++ {
+ if !yield(i, es.At(i)) {
+ return
+ }
+ }
+ }
+}
+
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If the newCap <= cap then no change in capacity.
// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_histogramdatapointslice.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_histogramdatapointslice.go
index 7ee6ef737f83a..ac9e49671095f 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_histogramdatapointslice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_histogramdatapointslice.go
@@ -7,6 +7,7 @@
package pmetric
import (
+ "iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
@@ -56,6 +57,21 @@ func (es HistogramDataPointSlice) At(i int) HistogramDataPoint {
return newHistogramDataPoint((*es.orig)[i], es.state)
}
+// All returns an iterator over index-value pairs in the slice.
+//
+// for i, v := range es.All() {
+// ... // Do something with index-value pair
+// }
+func (es HistogramDataPointSlice) All() iter.Seq2[int, HistogramDataPoint] {
+ return func(yield func(int, HistogramDataPoint) bool) {
+ for i := 0; i < es.Len(); i++ {
+ if !yield(i, es.At(i)) {
+ return
+ }
+ }
+ }
+}
+
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If the newCap <= cap then no change in capacity.
// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_metricslice.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_metricslice.go
index 13f05a0ecbc2b..bf77dbaf77d66 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_metricslice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_metricslice.go
@@ -7,6 +7,7 @@
package pmetric
import (
+ "iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
@@ -56,6 +57,21 @@ func (es MetricSlice) At(i int) Metric {
return newMetric((*es.orig)[i], es.state)
}
+// All returns an iterator over index-value pairs in the slice.
+//
+// for i, v := range es.All() {
+// ... // Do something with index-value pair
+// }
+func (es MetricSlice) All() iter.Seq2[int, Metric] {
+ return func(yield func(int, Metric) bool) {
+ for i := 0; i < es.Len(); i++ {
+ if !yield(i, es.At(i)) {
+ return
+ }
+ }
+ }
+}
+
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If the newCap <= cap then no change in capacity.
// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_numberdatapointslice.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_numberdatapointslice.go
index 57cdd11743ea8..89a82d28297b7 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_numberdatapointslice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_numberdatapointslice.go
@@ -7,6 +7,7 @@
package pmetric
import (
+ "iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
@@ -56,6 +57,21 @@ func (es NumberDataPointSlice) At(i int) NumberDataPoint {
return newNumberDataPoint((*es.orig)[i], es.state)
}
+// All returns an iterator over index-value pairs in the slice.
+//
+// for i, v := range es.All() {
+// ... // Do something with index-value pair
+// }
+func (es NumberDataPointSlice) All() iter.Seq2[int, NumberDataPoint] {
+ return func(yield func(int, NumberDataPoint) bool) {
+ for i := 0; i < es.Len(); i++ {
+ if !yield(i, es.At(i)) {
+ return
+ }
+ }
+ }
+}
+
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If the newCap <= cap then no change in capacity.
// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_resourcemetricsslice.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_resourcemetricsslice.go
index 55217ea27d577..a8605660c77d5 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_resourcemetricsslice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_resourcemetricsslice.go
@@ -7,6 +7,7 @@
package pmetric
import (
+ "iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
@@ -56,6 +57,21 @@ func (es ResourceMetricsSlice) At(i int) ResourceMetrics {
return newResourceMetrics((*es.orig)[i], es.state)
}
+// All returns an iterator over index-value pairs in the slice.
+//
+// for i, v := range es.All() {
+// ... // Do something with index-value pair
+// }
+func (es ResourceMetricsSlice) All() iter.Seq2[int, ResourceMetrics] {
+ return func(yield func(int, ResourceMetrics) bool) {
+ for i := 0; i < es.Len(); i++ {
+ if !yield(i, es.At(i)) {
+ return
+ }
+ }
+ }
+}
+
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If the newCap <= cap then no change in capacity.
// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_scopemetricsslice.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_scopemetricsslice.go
index a86eb0b48154a..9514a30301c65 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_scopemetricsslice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_scopemetricsslice.go
@@ -7,6 +7,7 @@
package pmetric
import (
+ "iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
@@ -56,6 +57,21 @@ func (es ScopeMetricsSlice) At(i int) ScopeMetrics {
return newScopeMetrics((*es.orig)[i], es.state)
}
+// All returns an iterator over index-value pairs in the slice.
+//
+// for i, v := range es.All() {
+// ... // Do something with index-value pair
+// }
+func (es ScopeMetricsSlice) All() iter.Seq2[int, ScopeMetrics] {
+ return func(yield func(int, ScopeMetrics) bool) {
+ for i := 0; i < es.Len(); i++ {
+ if !yield(i, es.At(i)) {
+ return
+ }
+ }
+ }
+}
+
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If the newCap <= cap then no change in capacity.
// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointslice.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointslice.go
index f915500963f35..2ff9ceb0c096f 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointslice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointslice.go
@@ -7,6 +7,7 @@
package pmetric
import (
+ "iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
@@ -56,6 +57,21 @@ func (es SummaryDataPointSlice) At(i int) SummaryDataPoint {
return newSummaryDataPoint((*es.orig)[i], es.state)
}
+// All returns an iterator over index-value pairs in the slice.
+//
+// for i, v := range es.All() {
+// ... // Do something with index-value pair
+// }
+func (es SummaryDataPointSlice) All() iter.Seq2[int, SummaryDataPoint] {
+ return func(yield func(int, SummaryDataPoint) bool) {
+ for i := 0; i < es.Len(); i++ {
+ if !yield(i, es.At(i)) {
+ return
+ }
+ }
+ }
+}
+
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If the newCap <= cap then no change in capacity.
// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointvalueatquantileslice.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointvalueatquantileslice.go
index ed899050ac680..6a82f5d906de5 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointvalueatquantileslice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointvalueatquantileslice.go
@@ -7,6 +7,7 @@
package pmetric
import (
+ "iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
@@ -56,6 +57,21 @@ func (es SummaryDataPointValueAtQuantileSlice) At(i int) SummaryDataPointValueAt
return newSummaryDataPointValueAtQuantile((*es.orig)[i], es.state)
}
+// All returns an iterator over index-value pairs in the slice.
+//
+// for i, v := range es.All() {
+// ... // Do something with index-value pair
+// }
+func (es SummaryDataPointValueAtQuantileSlice) All() iter.Seq2[int, SummaryDataPointValueAtQuantile] {
+ return func(yield func(int, SummaryDataPointValueAtQuantile) bool) {
+ for i := 0; i < es.Len(); i++ {
+ if !yield(i, es.At(i)) {
+ return
+ }
+ }
+ }
+}
+
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If the newCap <= cap then no change in capacity.
// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/pb.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/pb.go
index 580f555d7a9cd..775a96f6a7ed2 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pmetric/pb.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/pb.go
@@ -22,6 +22,34 @@ func (e *ProtoMarshaler) MetricsSize(md Metrics) int {
return pb.Size()
}
+func (e *ProtoMarshaler) ResourceMetricsSize(rm ResourceMetrics) int {
+ return rm.orig.Size()
+}
+
+func (e *ProtoMarshaler) ScopeMetricsSize(sm ScopeMetrics) int {
+ return sm.orig.Size()
+}
+
+func (e *ProtoMarshaler) MetricSize(m Metric) int {
+ return m.orig.Size()
+}
+
+func (e *ProtoMarshaler) NumberDataPointSize(ndp NumberDataPoint) int {
+ return ndp.orig.Size()
+}
+
+func (e *ProtoMarshaler) SummaryDataPointSize(sdps SummaryDataPoint) int {
+ return sdps.orig.Size()
+}
+
+func (e *ProtoMarshaler) HistogramDataPointSize(hdp HistogramDataPoint) int {
+ return hdp.orig.Size()
+}
+
+func (e *ProtoMarshaler) ExponentialHistogramDataPointSize(ehdp ExponentialHistogramDataPoint) int {
+ return ehdp.orig.Size()
+}
+
type ProtoUnmarshaler struct{}
func (d *ProtoUnmarshaler) UnmarshalMetrics(buf []byte) (Metrics, error) {
diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_resourcespansslice.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_resourcespansslice.go
index da79ef4a342a7..351978c78fd32 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_resourcespansslice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_resourcespansslice.go
@@ -7,6 +7,7 @@
package ptrace
import (
+ "iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
@@ -56,6 +57,21 @@ func (es ResourceSpansSlice) At(i int) ResourceSpans {
return newResourceSpans((*es.orig)[i], es.state)
}
+// All returns an iterator over index-value pairs in the slice.
+//
+// for i, v := range es.All() {
+// ... // Do something with index-value pair
+// }
+func (es ResourceSpansSlice) All() iter.Seq2[int, ResourceSpans] {
+ return func(yield func(int, ResourceSpans) bool) {
+ for i := 0; i < es.Len(); i++ {
+ if !yield(i, es.At(i)) {
+ return
+ }
+ }
+ }
+}
+
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If the newCap <= cap then no change in capacity.
// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_scopespansslice.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_scopespansslice.go
index 8fd0b4b8e9995..0050b91795975 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_scopespansslice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_scopespansslice.go
@@ -7,6 +7,7 @@
package ptrace
import (
+ "iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
@@ -56,6 +57,21 @@ func (es ScopeSpansSlice) At(i int) ScopeSpans {
return newScopeSpans((*es.orig)[i], es.state)
}
+// All returns an iterator over index-value pairs in the slice.
+//
+// for i, v := range es.All() {
+// ... // Do something with index-value pair
+// }
+func (es ScopeSpansSlice) All() iter.Seq2[int, ScopeSpans] {
+ return func(yield func(int, ScopeSpans) bool) {
+ for i := 0; i < es.Len(); i++ {
+ if !yield(i, es.At(i)) {
+ return
+ }
+ }
+ }
+}
+
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If the newCap <= cap then no change in capacity.
// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spaneventslice.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spaneventslice.go
index ffde17c83a2e1..bed84fe0bcee7 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spaneventslice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spaneventslice.go
@@ -7,6 +7,7 @@
package ptrace
import (
+ "iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
@@ -56,6 +57,21 @@ func (es SpanEventSlice) At(i int) SpanEvent {
return newSpanEvent((*es.orig)[i], es.state)
}
+// All returns an iterator over index-value pairs in the slice.
+//
+// for i, v := range es.All() {
+// ... // Do something with index-value pair
+// }
+func (es SpanEventSlice) All() iter.Seq2[int, SpanEvent] {
+ return func(yield func(int, SpanEvent) bool) {
+ for i := 0; i < es.Len(); i++ {
+ if !yield(i, es.At(i)) {
+ return
+ }
+ }
+ }
+}
+
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If the newCap <= cap then no change in capacity.
// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanlinkslice.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanlinkslice.go
index 164038b8bed8b..79a8ad3bc513a 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanlinkslice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanlinkslice.go
@@ -7,6 +7,7 @@
package ptrace
import (
+ "iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
@@ -56,6 +57,21 @@ func (es SpanLinkSlice) At(i int) SpanLink {
return newSpanLink((*es.orig)[i], es.state)
}
+// All returns an iterator over index-value pairs in the slice.
+//
+// for i, v := range es.All() {
+// ... // Do something with index-value pair
+// }
+func (es SpanLinkSlice) All() iter.Seq2[int, SpanLink] {
+ return func(yield func(int, SpanLink) bool) {
+ for i := 0; i < es.Len(); i++ {
+ if !yield(i, es.At(i)) {
+ return
+ }
+ }
+ }
+}
+
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If the newCap <= cap then no change in capacity.
// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanslice.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanslice.go
index 654a547523f7a..e250b8a4acd1a 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanslice.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/generated_spanslice.go
@@ -7,6 +7,7 @@
package ptrace
import (
+ "iter"
"sort"
"go.opentelemetry.io/collector/pdata/internal"
@@ -56,6 +57,21 @@ func (es SpanSlice) At(i int) Span {
return newSpan((*es.orig)[i], es.state)
}
+// All returns an iterator over index-value pairs in the slice.
+//
+// for i, v := range es.All() {
+// ... // Do something with index-value pair
+// }
+func (es SpanSlice) All() iter.Seq2[int, Span] {
+ return func(yield func(int, Span) bool) {
+ for i := 0; i < es.Len(); i++ {
+ if !yield(i, es.At(i)) {
+ return
+ }
+ }
+ }
+}
+
// EnsureCapacity is an operation that ensures the slice has at least the specified capacity.
// 1. If the newCap <= cap then no change in capacity.
// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap.
diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/pb.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/pb.go
index e0b2168884a38..a3c78be27c13f 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/pb.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/pb.go
@@ -22,6 +22,18 @@ func (e *ProtoMarshaler) TracesSize(td Traces) int {
return pb.Size()
}
+func (e *ProtoMarshaler) ResourceSpansSize(rs ResourceSpans) int {
+ return rs.orig.Size()
+}
+
+func (e *ProtoMarshaler) ScopeSpansSize(ss ScopeSpans) int {
+ return ss.orig.Size()
+}
+
+func (e *ProtoMarshaler) SpanSize(span Span) int {
+ return span.orig.Size()
+}
+
type ProtoUnmarshaler struct{}
func (d *ProtoUnmarshaler) UnmarshalTraces(buf []byte) (Traces, error) {
diff --git a/vendor/modules.txt b/vendor/modules.txt
index b4984fa709dbd..2b545378a9cb9 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1913,7 +1913,7 @@ go.opentelemetry.io/collector/config/configtelemetry
## explicit; go 1.22.0
go.opentelemetry.io/collector/consumer
go.opentelemetry.io/collector/consumer/internal
-# go.opentelemetry.io/collector/pdata v1.27.0
+# go.opentelemetry.io/collector/pdata v1.28.0
## explicit; go 1.23.0
go.opentelemetry.io/collector/pdata/internal
go.opentelemetry.io/collector/pdata/internal/data
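For context on what this record vendors in: the generated `All()` methods are Go 1.23 push iterators (`iter.Seq2`), and `ProtoMarshaler` gains per-element size helpers. Below is a minimal, hedged usage sketch assuming only the pdata v1.28.0 API shown in the diff and a Go 1.23+ toolchain; the span names are illustrative.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/ptrace"
)

func main() {
	// Build a small trace payload to iterate over.
	td := ptrace.NewTraces()
	ss := td.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty()
	ss.Spans().AppendEmpty().SetName("first")
	ss.Spans().AppendEmpty().SetName("second")

	// Range directly over the generated iterator (Go 1.23+ range-over-func).
	for i, span := range ss.Spans().All() {
		fmt.Printf("span %d: %s\n", i, span.Name())
	}

	// The new size helpers report the proto-encoded size without marshaling.
	m := &ptrace.ProtoMarshaler{}
	fmt.Println("first span size:", m.SpanSize(ss.Spans().At(0)))
}
```

Because `All()` is yield-based, a `break` in the loop body simply stops the generator; no intermediate slice copy is made.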
|
fix
|
update module go.opentelemetry.io/collector/pdata to v1.28.0 (main) (#16797)
|
142d3564e613557ff5deaf11d48c8e7fc6db8580
|
2024-01-08 22:37:14
|
zry98
|
promtail: show a clearer reason in "disable watchConfig" log message when server is disabled (#11420)
| false
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index bd48775e19275..2ff7b66890f46 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -77,6 +77,7 @@
* [10677](https://github.com/grafana/loki/pull/10677) **chaudum** Remove deprecated `stream_lag_labels` setting from both the `options` and `client` configuration sections.
* [10689](https://github.com/grafana/loki/pull/10689) **dylanguedes**: Ingester: Make jitter to be 20% of flush check period instead of 1%.
+* [11420](https://github.com/grafana/loki/pull/11420) **zry98**: Show a clearer reason in "disable watchConfig" log message when server is disabled.
##### Fixes
diff --git a/clients/pkg/promtail/promtail.go b/clients/pkg/promtail/promtail.go
index 1586e2d971153..1ef3368a697e5 100644
--- a/clients/pkg/promtail/promtail.go
+++ b/clients/pkg/promtail/promtail.go
@@ -255,25 +255,29 @@ func (p *Promtail) watchConfig() {
level.Warn(p.logger).Log("msg", "disable watchConfig", "reason", "Promtail newConfig func is Empty")
return
}
- promtailServer, ok := p.server.(*server.PromtailServer)
- if !ok {
- level.Warn(p.logger).Log("msg", "disable watchConfig", "reason", "promtailServer cast fail")
+ switch srv := p.server.(type) {
+ case *server.NoopServer:
+ level.Warn(p.logger).Log("msg", "disable watchConfig", "reason", "Promtail server is disabled")
return
- }
- level.Warn(p.logger).Log("msg", "enable watchConfig")
- hup := make(chan os.Signal, 1)
- signal.Notify(hup, syscall.SIGHUP)
- for {
- select {
- case <-hup:
- _ = p.reload()
- case rc := <-promtailServer.Reload():
- if err := p.reload(); err != nil {
- rc <- err
- } else {
- rc <- nil
+ case *server.PromtailServer:
+ level.Warn(p.logger).Log("msg", "enable watchConfig")
+ hup := make(chan os.Signal, 1)
+ signal.Notify(hup, syscall.SIGHUP)
+ for {
+ select {
+ case <-hup:
+ _ = p.reload()
+ case rc := <-srv.Reload():
+ if err := p.reload(); err != nil {
+ rc <- err
+ } else {
+ rc <- nil
+ }
}
}
+ default:
+ level.Warn(p.logger).Log("msg", "disable watchConfig", "reason", "Unknown Promtail server type")
+ return
}
}
diff --git a/clients/pkg/promtail/server/server.go b/clients/pkg/promtail/server/server.go
index 4eb61361d10a0..1b47247630e05 100644
--- a/clients/pkg/promtail/server/server.go
+++ b/clients/pkg/promtail/server/server.go
@@ -321,25 +321,25 @@ func computeExternalURL(u string, port int) (*url.URL, error) {
return eu, nil
}
-type noopServer struct {
+type NoopServer struct {
log log.Logger
sigs chan os.Signal
}
-func newNoopServer(log log.Logger) *noopServer {
- return &noopServer{
+func newNoopServer(log log.Logger) *NoopServer {
+ return &NoopServer{
log: log,
sigs: make(chan os.Signal, 1),
}
}
-func (s *noopServer) Run() error {
+func (s *NoopServer) Run() error {
signal.Notify(s.sigs, syscall.SIGINT, syscall.SIGTERM)
sig := <-s.sigs
level.Info(s.log).Log("msg", "received shutdown signal", "sig", sig)
return nil
}
-func (s *noopServer) Shutdown() {
+func (s *NoopServer) Shutdown() {
s.sigs <- syscall.SIGTERM
}
|
promtail
|
show a clearer reason in "disable watchConfig" log message when server is disabled (#11420)
|
5bb928e4955bf3f6e1564ce9ef74c20e81efa2e0
|
2024-08-08 18:22:50
|
Paul Rogers
|
chore: Turn off renovate on non-main branches (#13803)
| false
|
diff --git a/.github/renovate.json b/.github/renovate.json
index f817e9acb8ca7..e0a417fddf132 100644
--- a/.github/renovate.json
+++ b/.github/renovate.json
@@ -5,7 +5,7 @@
],
"labels": ["dependencies"],
"prHourlyLimit": 4,
- "baseBranches": ["main", "release-2.9.x", "release-2.8.x"],
+ "baseBranches": ["main"],
"packageRules": [
{
"matchBaseBranches": ["release-2.9.x","release-2.8.x"],
|
chore
|
Turn off renovate on non-main branches (#13803)
|
06553d8b96b4cef4d6dcf461abc3320b1000e60b
|
2025-03-15 02:30:34
|
renovate[bot]
|
fix(deps): update dependency cmdk to v1.1.1 (main) (#16766)
| false
|
diff --git a/pkg/ui/frontend/package-lock.json b/pkg/ui/frontend/package-lock.json
index 07084adf3e352..b8dd4455133a8 100644
--- a/pkg/ui/frontend/package-lock.json
+++ b/pkg/ui/frontend/package-lock.json
@@ -3536,15 +3536,15 @@
}
},
"node_modules/cmdk": {
- "version": "1.0.4",
- "resolved": "https://registry.npmjs.org/cmdk/-/cmdk-1.0.4.tgz",
- "integrity": "sha512-AnsjfHyHpQ/EFeAnG216WY7A5LiYCoZzCSygiLvfXC3H3LFGCprErteUcszaVluGOhuOTbJS3jWHrSDYPBBygg==",
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/cmdk/-/cmdk-1.1.1.tgz",
+ "integrity": "sha512-Vsv7kFaXm+ptHDMZ7izaRsP70GgrW9NBNGswt9OZaVBLlE0SNpDq8eu/VGXyF9r7M0azK3Wy7OlYXsuyYLFzHg==",
"license": "MIT",
"dependencies": {
- "@radix-ui/react-dialog": "^1.1.2",
+ "@radix-ui/react-compose-refs": "^1.1.1",
+ "@radix-ui/react-dialog": "^1.1.6",
"@radix-ui/react-id": "^1.1.0",
- "@radix-ui/react-primitive": "^2.0.0",
- "use-sync-external-store": "^1.2.2"
+ "@radix-ui/react-primitive": "^2.0.2"
},
"peerDependencies": {
"react": "^18 || ^19 || ^19.0.0-rc",
@@ -6533,15 +6533,6 @@
}
}
},
- "node_modules/use-sync-external-store": {
- "version": "1.4.0",
- "resolved": "https://registry.npmjs.org/use-sync-external-store/-/use-sync-external-store-1.4.0.tgz",
- "integrity": "sha512-9WXSPC5fMv61vaupRkCKCxsPxBocVnwakBEkMIHHpkTTg6icbJtg6jzgtLDm4bl3cSHAca52rYWih0k4K3PfHw==",
- "license": "MIT",
- "peerDependencies": {
- "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0"
- }
- },
"node_modules/util-deprecate": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
|
fix
|
update dependency cmdk to v1.1.1 (main) (#16766)
|
96515e31166f79cf552dd9d5ef226ccedebfa562
|
2020-07-20 20:04:45
|
Jeroen Op 't Eynde
|
refactor: moved jaeger-agent-mixin (#2377)
| false
|
diff --git a/production/ksonnet/loki-canary/jsonnetfile.json b/production/ksonnet/loki-canary/jsonnetfile.json
index c903ac17c07c9..44f04bb797bba 100644
--- a/production/ksonnet/loki-canary/jsonnetfile.json
+++ b/production/ksonnet/loki-canary/jsonnetfile.json
@@ -1,14 +1,15 @@
{
- "dependencies": [
- {
- "name": "ksonnet-util",
- "source": {
- "git": {
- "remote": "https://github.com/grafana/jsonnet-libs",
- "subdir": "ksonnet-util"
- }
- },
- "version": "master"
+ "version": 1,
+ "dependencies": [
+ {
+ "source": {
+ "git": {
+ "remote": "https://github.com/grafana/jsonnet-libs.git",
+ "subdir": "ksonnet-util"
}
- ]
+ },
+ "version": "master"
+ }
+ ],
+ "legacyImports": true
}
diff --git a/production/ksonnet/loki/jsonnetfile.json b/production/ksonnet/loki/jsonnetfile.json
index 7f40dd6152e7d..c46c23f64def6 100644
--- a/production/ksonnet/loki/jsonnetfile.json
+++ b/production/ksonnet/loki/jsonnetfile.json
@@ -1,34 +1,42 @@
{
- "dependencies": [
- {
- "name": "ksonnet-util",
- "source": {
- "git": {
- "remote": "https://github.com/grafana/jsonnet-libs",
- "subdir": "ksonnet-util"
- }
- },
- "version": "master"
- },
- {
- "name": "consul",
- "source": {
- "git": {
- "remote": "https://github.com/grafana/jsonnet-libs",
- "subdir": "consul"
- }
- },
- "version": "master"
- },
- {
- "name": "memcached",
- "source": {
- "git": {
- "remote": "https://github.com/grafana/jsonnet-libs",
- "subdir": "memcached"
- }
- },
- "version": "master"
+ "version": 1,
+ "dependencies": [
+ {
+ "source": {
+ "git": {
+ "remote": "https://github.com/grafana/jsonnet-libs.git",
+ "subdir": "consul"
}
- ]
+ },
+ "version": "master"
+ },
+ {
+ "source": {
+ "git": {
+ "remote": "https://github.com/grafana/jsonnet-libs.git",
+ "subdir": "jaeger-agent-mixin"
+ }
+ },
+ "version": "master"
+ },
+ {
+ "source": {
+ "git": {
+ "remote": "https://github.com/grafana/jsonnet-libs.git",
+ "subdir": "ksonnet-util"
+ }
+ },
+ "version": "master"
+ },
+ {
+ "source": {
+ "git": {
+ "remote": "https://github.com/grafana/jsonnet-libs.git",
+ "subdir": "memcached"
+ }
+ },
+ "version": "master"
+ }
+ ],
+ "legacyImports": true
}
diff --git a/production/ksonnet/loki/loki.libsonnet b/production/ksonnet/loki/loki.libsonnet
index 6cc7add7cdbaa..7714b6ee4300f 100644
--- a/production/ksonnet/loki/loki.libsonnet
+++ b/production/ksonnet/loki/loki.libsonnet
@@ -1,5 +1,5 @@
(import 'ksonnet-util/kausal.libsonnet') +
-(import 'ksonnet-util/jaeger.libsonnet') +
+(import 'jaeger-agent-mixin/jaeger.libsonnet') +
(import 'images.libsonnet') +
(import 'common.libsonnet') +
(import 'config.libsonnet') +
diff --git a/production/ksonnet/promtail/jsonnetfile.json b/production/ksonnet/promtail/jsonnetfile.json
index c903ac17c07c9..44f04bb797bba 100644
--- a/production/ksonnet/promtail/jsonnetfile.json
+++ b/production/ksonnet/promtail/jsonnetfile.json
@@ -1,14 +1,15 @@
{
- "dependencies": [
- {
- "name": "ksonnet-util",
- "source": {
- "git": {
- "remote": "https://github.com/grafana/jsonnet-libs",
- "subdir": "ksonnet-util"
- }
- },
- "version": "master"
+ "version": 1,
+ "dependencies": [
+ {
+ "source": {
+ "git": {
+ "remote": "https://github.com/grafana/jsonnet-libs.git",
+ "subdir": "ksonnet-util"
}
- ]
+ },
+ "version": "master"
+ }
+ ],
+ "legacyImports": true
}
|
refactor
|
moved jaeger-agent-mixin (#2377)
|