| hash (string, 40 chars) | date (2018-12-11 14:31:19 to 2025-03-22 02:45:31) | author (280 values) | commit_message (string, 14 to 176 chars) | is_merge (bool, 1 class) | git_diff (string, 198 to 25.8M chars, nullable) | type (83 values) | masked_commit_message (string, 8 to 170 chars) |
|---|---|---|---|---|---|---|---|
3509edd98dd166c00764834ed35b56dd3e9e3d24
|
2023-06-20 23:56:31
|
Joao Marcal
|
operator: fix replica count for 1x.ExtraSmall (#9751)
| false
|
diff --git a/operator/internal/manifests/internal/sizes.go b/operator/internal/manifests/internal/sizes.go
index c5b121e18bf93..03915b198908f 100644
--- a/operator/internal/manifests/internal/sizes.go
+++ b/operator/internal/manifests/internal/sizes.go
@@ -312,25 +312,25 @@ var StackSizeTable = map[lokiv1.LokiStackSizeType]lokiv1.LokiStackSpec{
Replicas: 1,
},
Distributor: &lokiv1.LokiComponentSpec{
- Replicas: 1,
+ Replicas: 2,
},
Ingester: &lokiv1.LokiComponentSpec{
- Replicas: 1,
+ Replicas: 2,
},
Querier: &lokiv1.LokiComponentSpec{
- Replicas: 1,
+ Replicas: 2,
},
QueryFrontend: &lokiv1.LokiComponentSpec{
- Replicas: 1,
+ Replicas: 2,
},
Gateway: &lokiv1.LokiComponentSpec{
Replicas: 2,
},
IndexGateway: &lokiv1.LokiComponentSpec{
- Replicas: 1,
+ Replicas: 2,
},
Ruler: &lokiv1.LokiComponentSpec{
- Replicas: 1,
+ Replicas: 2,
},
},
},
|
operator
|
fix replica count for 1x.ExtraSmall (#9751)
|
4e61c6029616facd54d254fab2ce6c4189934964
|
2022-10-11 16:29:21
|
Dennis Szczepanski
|
loki: Renames metric 'loki_log_messages_total' to 'loki_internal_log_messages_total' (#7361)
| false
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index c41e8df5b5266..33d526c0f4797 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -40,6 +40,7 @@
* [6349](https://github.com/grafana/loki/pull/6349) **simonswine**: Update the default HTTP listen port from 80 to 3100. Make sure to configure the port explicitly if you are using port 80.
* [6835](https://github.com/grafana/loki/pull/6835) **DylanGuedes**: Add new per-tenant query timeout configuration and remove engine query timeout.
* [7212](https://github.com/grafana/loki/pull/7212) **Juneezee**: Replaces deprecated `io/ioutil` with `io` and `os`.
+* [7361](https://github.com/grafana/loki/pull/7361) **szczepad**: Renames metric `loki_log_messages_total` to `loki_internal_log_messages_total`
#### Promtail
diff --git a/docs/sources/operations/observability.md b/docs/sources/operations/observability.md
index ebad8fd6d6ac6..df39210a5c68a 100644
--- a/docs/sources/operations/observability.md
+++ b/docs/sources/operations/observability.md
@@ -12,10 +12,11 @@ for more information.
All components of Loki expose the following metrics:
-| Metric Name | Metric Type | Description |
-| ------------------------------- | ----------- | ---------------------------------------- |
-| `loki_log_messages_total` | Counter | Total number of messages logged by Loki. |
-| `loki_request_duration_seconds` | Histogram | Number of received HTTP requests. |
+| Metric Name | Metric Type | Description |
+| ---------------------------------- | ----------- | ---------------------------------------------------------------------------------------------------------------------------- |
+| `loki_log_messages_total` | Counter | DEPRECATED. Use internal_log_messages_total for the same functionality. Total number of log messages created by loki itself. |
+| `loki_internal_log_messages_total` | Counter | Total number of log messages created by loki itself. |
+| `loki_request_duration_seconds` | Histogram | Number of received HTTP requests. |
The Loki Distributors expose the following metrics:
diff --git a/docs/sources/upgrading/_index.md b/docs/sources/upgrading/_index.md
index 1900696408811..e43f20089ff81 100644
--- a/docs/sources/upgrading/_index.md
+++ b/docs/sources/upgrading/_index.md
@@ -98,6 +98,10 @@ The global `deletion_mode` option in the compactor configuration moved to runtim
- The `deletion_mode` global override needs to be set to the desired mode: `disabled`, `filter-only`, or `filter-and-delete`. By default, `filter-and-delete` is enabled.
- Any `allow_delete` per-tenant overrides need to be removed or changed to `deletion_mode` overrides with the desired mode.
+#### Metric name for `loki_log_messages_total` changed
+
+The name of this metric was changed to `loki_internal_log_messages_total` to reduce ambiguity. The previous name is still present but is deprecated.
+
### Promtail
#### `gcp_push_target_parsing_errors_total` has a new `reason` label
diff --git a/pkg/util/log/log.go b/pkg/util/log/log.go
index cec0969c77758..fcf0419b525a8 100644
--- a/pkg/util/log/log.go
+++ b/pkg/util/log/log.go
@@ -39,9 +39,10 @@ func InitLogger(cfg *server.Config, reg prometheus.Registerer, buffered bool, sy
// prometheusLogger exposes Prometheus counters for each of go-kit's log levels.
type prometheusLogger struct {
- logger log.Logger
- logMessages *prometheus.CounterVec
- logFlushes prometheus.Histogram
+ logger log.Logger
+ logMessages *prometheus.CounterVec
+ internalLogMessages *prometheus.CounterVec
+ logFlushes prometheus.Histogram
useBufferedLogger bool
useSyncLogger bool
@@ -61,7 +62,12 @@ func newPrometheusLogger(l logging.Level, format logging.Format, reg prometheus.
logMessages := promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Namespace: "loki",
Name: "log_messages_total",
- Help: "Total number of log messages.",
+ Help: "DEPRECATED. Use internal_log_messages_total for the same functionality. Total number of log messages created by Loki itself.",
+ }, []string{"level"})
+ internalLogMessages := promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
+ Namespace: "loki",
+ Name: "internal_log_messages_total",
+ Help: "Total number of log messages created by Loki itself.",
}, []string{"level"})
logFlushes := promauto.With(reg).NewHistogram(prometheus.HistogramOpts{
Namespace: "loki",
@@ -96,9 +102,10 @@ func newPrometheusLogger(l logging.Level, format logging.Format, reg prometheus.
logger = level.NewFilter(logger, levelFilter(l.String()))
plogger := &prometheusLogger{
- logger: logger,
- logMessages: logMessages,
- logFlushes: logFlushes,
+ logger: logger,
+ logMessages: logMessages,
+ internalLogMessages: internalLogMessages,
+ logFlushes: logFlushes,
}
// Initialise counters for all supported levels:
supportedLevels := []level.Value{
@@ -109,6 +116,7 @@ func newPrometheusLogger(l logging.Level, format logging.Format, reg prometheus.
}
for _, level := range supportedLevels {
plogger.logMessages.WithLabelValues(level.String())
+ plogger.internalLogMessages.WithLabelValues(level.String())
}
// return a Logger without caller information, shouldn't use directly
@@ -126,6 +134,7 @@ func (pl *prometheusLogger) Log(kv ...interface{}) error {
}
}
pl.logMessages.WithLabelValues(l).Inc()
+ pl.internalLogMessages.WithLabelValues(l).Inc()
return nil
}
|
loki
|
Renames metric 'loki_log_messages_total' to 'loki_internal_log_messages_total' (#7361)
|
658fcb710302f1ce844ed0c2dacaf6eb84db5c82
|
2024-04-09 01:51:53
|
Trevor Whitney
|
ci: remove 3.0 release off main (#12522)
| false
|
diff --git a/.github/release-workflows.jsonnet b/.github/release-workflows.jsonnet
index 27d72bcf477fb..bf13bb5da1bbb 100644
--- a/.github/release-workflows.jsonnet
+++ b/.github/release-workflows.jsonnet
@@ -20,7 +20,7 @@ local imageJobs = {
querytee: build.image('loki-query-tee', 'cmd/querytee', platform=['linux/amd64']),
};
-local buildImage = 'grafana/loki-build-image:0.33.0';
+local buildImage = 'grafana/loki-build-image:0.33.1';
local golangCiLintVersion = 'v1.55.1';
local imageBuildTimeoutMin = 40;
@@ -65,25 +65,6 @@ local imagePrefix = 'grafana';
name: 'Prepare Minor Release PR from Weekly',
}, false, false
),
- 'three-zero-release.yml': std.manifestYamlDoc(
- lokiRelease.releasePRWorkflow(
- branches=['main'],
- buildImage=buildImage,
- checkTemplate=checkTemplate,
- golangCiLintVersion=golangCiLintVersion,
- imageBuildTimeoutMin=imageBuildTimeoutMin,
- imageJobs=imageJobs,
- imagePrefix=imagePrefix,
- releaseLibRef=releaseLibRef,
- releaseRepo='grafana/loki',
- skipArm=false,
- skipValidation=false,
- useGitHubAppToken=true,
- releaseAs='3.0.0-rc.1',
- ) + {
- name: 'Prepare Loki 3.0 release',
- }, false, false
- ),
'release.yml': std.manifestYamlDoc(
lokiRelease.releaseWorkflow(
branches=['release-[0-9]+.[0-9]+.x', 'k[0-9]+', 'main'],
diff --git a/.github/workflows/three-0-release.yml b/.github/workflows/three-0-release.yml
deleted file mode 100644
index e69de29bb2d1d..0000000000000
diff --git a/.github/workflows/three-zero-release.yml b/.github/workflows/three-zero-release.yml
deleted file mode 100644
index 9a1b8c9e40b08..0000000000000
--- a/.github/workflows/three-zero-release.yml
+++ /dev/null
@@ -1,830 +0,0 @@
-concurrency:
- group: "create-release-pr-${{ github.sha }}"
-env:
- BUILD_ARTIFACTS_BUCKET: "loki-build-artifacts"
- BUILD_TIMEOUT: 40
- CHANGELOG_PATH: "CHANGELOG.md"
- DOCKER_USERNAME: "grafana"
- DRY_RUN: false
- IMAGE_PREFIX: "grafana"
- RELEASE_AS: "3.0.0-rc.1"
- RELEASE_LIB_REF: "main"
- RELEASE_REPO: "grafana/loki"
- SKIP_VALIDATION: false
- USE_GITHUB_APP_TOKEN: true
- VERSIONING_STRATEGY: "always-bump-patch"
-jobs:
- check:
- uses: "grafana/loki-release/.github/workflows/check.yml@main"
- with:
- build_image: "grafana/loki-build-image:0.33.1"
- golang_ci_lint_version: "v1.55.1"
- release_lib_ref: "main"
- skip_validation: false
- use_github_app_token: true
- create-release-pr:
- needs:
- - "dist"
- - "fluent-bit"
- - "fluentd"
- - "logcli"
- - "logstash"
- - "loki"
- - "loki-canary"
- - "loki-canary-boringcrypto"
- - "promtail"
- - "querytee"
- runs-on: "ubuntu-latest"
- steps:
- - name: "pull code to release"
- uses: "actions/checkout@v4"
- with:
- path: "release"
- repository: "${{ env.RELEASE_REPO }}"
- - name: "pull release library code"
- uses: "actions/checkout@v4"
- with:
- path: "lib"
- ref: "${{ env.RELEASE_LIB_REF }}"
- repository: "grafana/loki-release"
- - name: "setup node"
- uses: "actions/setup-node@v4"
- with:
- node-version: 20
- - id: "extract_branch"
- name: "extract branch name"
- run: |
- echo "branch=${GITHUB_HEAD_REF:-${GITHUB_REF#refs/heads/}}" >> $GITHUB_OUTPUT
- working-directory: "release"
- - id: "get_github_app_token"
- if: "${{ fromJSON(env.USE_GITHUB_APP_TOKEN) }}"
- name: "get github app token"
- uses: "actions/github-app-token@v1"
- with:
- app-id: "${{ secrets.APP_ID }}"
- owner: "${{ github.repository_owner }}"
- private-key: "${{ secrets.APP_PRIVATE_KEY }}"
- - id: "github_app_token"
- name: "set github token"
- run: |
- if [[ "${USE_GITHUB_APP_TOKEN}" == "true" ]]; then
- echo "token=${{ steps.get_github_app_token.outputs.token }}" >> $GITHUB_OUTPUT
- else
- echo "token=${{ secrets.GH_TOKEN }}" >> $GITHUB_OUTPUT
- fi
- - env:
- SHA: "${{ github.sha }}"
- id: "release"
- name: "release please"
- run: |
- npm install
- npm exec -- release-please release-pr \
- --changelog-path "${CHANGELOG_PATH}" \
- --consider-all-branches \
- --group-pull-request-title-pattern "chore\${scope}: release\${component} \${version}" \
- --label "backport main,autorelease: pending,product-approved" \
- --manifest-file .release-please-manifest.json \
- --pull-request-footer "Merging this PR will release the [artifacts](https://console.cloud.google.com/storage/browser/${BUILD_ARTIFACTS_BUCKET}/${SHA}) of ${SHA}" \
- --pull-request-title-pattern "chore\${scope}: release\${component} \${version}" \
- --release-as "${{ needs.dist.outputs.version }}" \
- --release-type simple \
- --repo-url "${{ env.RELEASE_REPO }}" \
- --separate-pull-requests false \
- --target-branch "${{ steps.extract_branch.outputs.branch }}" \
- --token "${{ steps.github_app_token.outputs.token }}" \
- --dry-run ${{ fromJSON(env.DRY_RUN) }}
-
- working-directory: "lib"
- dist:
- needs:
- - "version"
- outputs:
- version: "${{ needs.version.outputs.version }}"
- runs-on: "ubuntu-latest"
- steps:
- - name: "pull code to release"
- uses: "actions/checkout@v4"
- with:
- path: "release"
- repository: "${{ env.RELEASE_REPO }}"
- - name: "auth gcs"
- uses: "google-github-actions/auth@v2"
- with:
- credentials_json: "${{ secrets.GCS_SERVICE_ACCOUNT_KEY }}"
- - name: "Set up Cloud SDK"
- uses: "google-github-actions/setup-gcloud@v2"
- with:
- version: ">= 452.0.0"
- - id: "get-secrets"
- name: "get nfpm signing keys"
- uses: "grafana/shared-workflows/actions/get-vault-secrets@main"
- with:
- common_secrets: |
- NFPM_SIGNING_KEY=packages-gpg:private-key
- NFPM_PASSPHRASE=packages-gpg:passphrase
- - env:
- BUILD_IN_CONTAINER: false
- DRONE_TAG: "${{ needs.version.outputs.version }}"
- IMAGE_TAG: "${{ needs.version.outputs.version }}"
- NFPM_SIGNING_KEY_FILE: "nfpm-private-key.key"
- SKIP_ARM: false
- if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
- name: "build artifacts"
- run: |
- cat <<EOF | docker run \
- --interactive \
- --env BUILD_IN_CONTAINER \
- --env DRONE_TAG \
- --env IMAGE_TAG \
- --env NFPM_PASSPHRASE \
- --env NFPM_SIGNING_KEY \
- --env NFPM_SIGNING_KEY_FILE \
- --env SKIP_ARM \
- --volume .:/src/loki \
- --workdir /src/loki \
- --entrypoint /bin/sh "grafana/loki-build-image:0.33.1"
- git config --global --add safe.directory /src/loki
- echo "${NFPM_SIGNING_KEY}" > $NFPM_SIGNING_KEY_FILE
- make dist packages
- EOF
- working-directory: "release"
- - if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
- name: "upload artifacts"
- uses: "google-github-actions/upload-cloud-storage@v2"
- with:
- destination: "${{ env.BUILD_ARTIFACTS_BUCKET }}/${{ github.sha }}"
- path: "release/dist"
- process_gcloudignore: false
- fluent-bit:
- needs:
- - "version"
- runs-on: "ubuntu-latest"
- steps:
- - name: "pull release library code"
- uses: "actions/checkout@v4"
- with:
- path: "lib"
- ref: "${{ env.RELEASE_LIB_REF }}"
- repository: "grafana/loki-release"
- - name: "pull code to release"
- uses: "actions/checkout@v4"
- with:
- path: "release"
- repository: "${{ env.RELEASE_REPO }}"
- - name: "setup node"
- uses: "actions/setup-node@v4"
- with:
- node-version: 20
- - name: "auth gcs"
- uses: "google-github-actions/auth@v2"
- with:
- credentials_json: "${{ secrets.GCS_SERVICE_ACCOUNT_KEY }}"
- - name: "Set up QEMU"
- uses: "docker/setup-qemu-action@v3"
- - name: "set up docker buildx"
- uses: "docker/setup-buildx-action@v3"
- - id: "platform"
- name: "parse image platform"
- run: |
- mkdir -p images
-
- platform="$(echo "${{ matrix.platform}}" | sed "s/\(.*\)\/\(.*\)/\1-\2/")"
- echo "platform=${platform}" >> $GITHUB_OUTPUT
- echo "platform_short=$(echo ${{ matrix.platform }} | cut -d / -f 2)" >> $GITHUB_OUTPUT
- working-directory: "release"
- - env:
- IMAGE_TAG: "${{ needs.version.outputs.version }}"
- if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
- name: "Build and export"
- timeout-minutes: "${{ fromJSON(env.BUILD_TIMEOUT) }}"
- uses: "docker/build-push-action@v5"
- with:
- build-args: "IMAGE_TAG=${{ needs.version.outputs.version }}"
- context: "release"
- file: "release/clients/cmd/fluent-bit/Dockerfile"
- outputs: "type=docker,dest=release/images/fluent-bit-plugin-loki-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
- platforms: "${{ matrix.platform }}"
- tags: "${{ env.IMAGE_PREFIX }}/fluent-bit-plugin-loki:${{ needs.version.outputs.version }}-${{ steps.platform.outputs.platform_short }}"
- - if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
- name: "upload artifacts"
- uses: "google-github-actions/upload-cloud-storage@v2"
- with:
- destination: "${{ env.BUILD_ARTIFACTS_BUCKET }}/${{ github.sha }}/images"
- path: "release/images/fluent-bit-plugin-loki-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
- process_gcloudignore: false
- strategy:
- fail-fast: true
- matrix:
- platform:
- - "linux/amd64"
- fluentd:
- needs:
- - "version"
- runs-on: "ubuntu-latest"
- steps:
- - name: "pull release library code"
- uses: "actions/checkout@v4"
- with:
- path: "lib"
- ref: "${{ env.RELEASE_LIB_REF }}"
- repository: "grafana/loki-release"
- - name: "pull code to release"
- uses: "actions/checkout@v4"
- with:
- path: "release"
- repository: "${{ env.RELEASE_REPO }}"
- - name: "setup node"
- uses: "actions/setup-node@v4"
- with:
- node-version: 20
- - name: "auth gcs"
- uses: "google-github-actions/auth@v2"
- with:
- credentials_json: "${{ secrets.GCS_SERVICE_ACCOUNT_KEY }}"
- - name: "Set up QEMU"
- uses: "docker/setup-qemu-action@v3"
- - name: "set up docker buildx"
- uses: "docker/setup-buildx-action@v3"
- - id: "platform"
- name: "parse image platform"
- run: |
- mkdir -p images
-
- platform="$(echo "${{ matrix.platform}}" | sed "s/\(.*\)\/\(.*\)/\1-\2/")"
- echo "platform=${platform}" >> $GITHUB_OUTPUT
- echo "platform_short=$(echo ${{ matrix.platform }} | cut -d / -f 2)" >> $GITHUB_OUTPUT
- working-directory: "release"
- - env:
- IMAGE_TAG: "${{ needs.version.outputs.version }}"
- if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
- name: "Build and export"
- timeout-minutes: "${{ fromJSON(env.BUILD_TIMEOUT) }}"
- uses: "docker/build-push-action@v5"
- with:
- build-args: "IMAGE_TAG=${{ needs.version.outputs.version }}"
- context: "release"
- file: "release/clients/cmd/fluentd/Dockerfile"
- outputs: "type=docker,dest=release/images/fluent-plugin-loki-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
- platforms: "${{ matrix.platform }}"
- tags: "${{ env.IMAGE_PREFIX }}/fluent-plugin-loki:${{ needs.version.outputs.version }}-${{ steps.platform.outputs.platform_short }}"
- - if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
- name: "upload artifacts"
- uses: "google-github-actions/upload-cloud-storage@v2"
- with:
- destination: "${{ env.BUILD_ARTIFACTS_BUCKET }}/${{ github.sha }}/images"
- path: "release/images/fluent-plugin-loki-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
- process_gcloudignore: false
- strategy:
- fail-fast: true
- matrix:
- platform:
- - "linux/amd64"
- logcli:
- needs:
- - "version"
- runs-on: "ubuntu-latest"
- steps:
- - name: "pull release library code"
- uses: "actions/checkout@v4"
- with:
- path: "lib"
- ref: "${{ env.RELEASE_LIB_REF }}"
- repository: "grafana/loki-release"
- - name: "pull code to release"
- uses: "actions/checkout@v4"
- with:
- path: "release"
- repository: "${{ env.RELEASE_REPO }}"
- - name: "setup node"
- uses: "actions/setup-node@v4"
- with:
- node-version: 20
- - name: "auth gcs"
- uses: "google-github-actions/auth@v2"
- with:
- credentials_json: "${{ secrets.GCS_SERVICE_ACCOUNT_KEY }}"
- - name: "Set up QEMU"
- uses: "docker/setup-qemu-action@v3"
- - name: "set up docker buildx"
- uses: "docker/setup-buildx-action@v3"
- - id: "platform"
- name: "parse image platform"
- run: |
- mkdir -p images
-
- platform="$(echo "${{ matrix.platform}}" | sed "s/\(.*\)\/\(.*\)/\1-\2/")"
- echo "platform=${platform}" >> $GITHUB_OUTPUT
- echo "platform_short=$(echo ${{ matrix.platform }} | cut -d / -f 2)" >> $GITHUB_OUTPUT
- working-directory: "release"
- - env:
- IMAGE_TAG: "${{ needs.version.outputs.version }}"
- if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
- name: "Build and export"
- timeout-minutes: "${{ fromJSON(env.BUILD_TIMEOUT) }}"
- uses: "docker/build-push-action@v5"
- with:
- build-args: "IMAGE_TAG=${{ needs.version.outputs.version }}"
- context: "release"
- file: "release/cmd/logcli/Dockerfile"
- outputs: "type=docker,dest=release/images/logcli-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
- platforms: "${{ matrix.platform }}"
- tags: "${{ env.IMAGE_PREFIX }}/logcli:${{ needs.version.outputs.version }}-${{ steps.platform.outputs.platform_short }}"
- - if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
- name: "upload artifacts"
- uses: "google-github-actions/upload-cloud-storage@v2"
- with:
- destination: "${{ env.BUILD_ARTIFACTS_BUCKET }}/${{ github.sha }}/images"
- path: "release/images/logcli-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
- process_gcloudignore: false
- strategy:
- fail-fast: true
- matrix:
- platform:
- - "linux/amd64"
- - "linux/arm64"
- - "linux/arm"
- logstash:
- needs:
- - "version"
- runs-on: "ubuntu-latest"
- steps:
- - name: "pull release library code"
- uses: "actions/checkout@v4"
- with:
- path: "lib"
- ref: "${{ env.RELEASE_LIB_REF }}"
- repository: "grafana/loki-release"
- - name: "pull code to release"
- uses: "actions/checkout@v4"
- with:
- path: "release"
- repository: "${{ env.RELEASE_REPO }}"
- - name: "setup node"
- uses: "actions/setup-node@v4"
- with:
- node-version: 20
- - name: "auth gcs"
- uses: "google-github-actions/auth@v2"
- with:
- credentials_json: "${{ secrets.GCS_SERVICE_ACCOUNT_KEY }}"
- - name: "Set up QEMU"
- uses: "docker/setup-qemu-action@v3"
- - name: "set up docker buildx"
- uses: "docker/setup-buildx-action@v3"
- - id: "platform"
- name: "parse image platform"
- run: |
- mkdir -p images
-
- platform="$(echo "${{ matrix.platform}}" | sed "s/\(.*\)\/\(.*\)/\1-\2/")"
- echo "platform=${platform}" >> $GITHUB_OUTPUT
- echo "platform_short=$(echo ${{ matrix.platform }} | cut -d / -f 2)" >> $GITHUB_OUTPUT
- working-directory: "release"
- - env:
- IMAGE_TAG: "${{ needs.version.outputs.version }}"
- if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
- name: "Build and export"
- timeout-minutes: "${{ fromJSON(env.BUILD_TIMEOUT) }}"
- uses: "docker/build-push-action@v5"
- with:
- build-args: "IMAGE_TAG=${{ needs.version.outputs.version }}"
- context: "release"
- file: "release/clients/cmd/logstash/Dockerfile"
- outputs: "type=docker,dest=release/images/logstash-output-loki-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
- platforms: "${{ matrix.platform }}"
- tags: "${{ env.IMAGE_PREFIX }}/logstash-output-loki:${{ needs.version.outputs.version }}-${{ steps.platform.outputs.platform_short }}"
- - if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
- name: "upload artifacts"
- uses: "google-github-actions/upload-cloud-storage@v2"
- with:
- destination: "${{ env.BUILD_ARTIFACTS_BUCKET }}/${{ github.sha }}/images"
- path: "release/images/logstash-output-loki-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
- process_gcloudignore: false
- strategy:
- fail-fast: true
- matrix:
- platform:
- - "linux/amd64"
- loki:
- needs:
- - "version"
- runs-on: "ubuntu-latest"
- steps:
- - name: "pull release library code"
- uses: "actions/checkout@v4"
- with:
- path: "lib"
- ref: "${{ env.RELEASE_LIB_REF }}"
- repository: "grafana/loki-release"
- - name: "pull code to release"
- uses: "actions/checkout@v4"
- with:
- path: "release"
- repository: "${{ env.RELEASE_REPO }}"
- - name: "setup node"
- uses: "actions/setup-node@v4"
- with:
- node-version: 20
- - name: "auth gcs"
- uses: "google-github-actions/auth@v2"
- with:
- credentials_json: "${{ secrets.GCS_SERVICE_ACCOUNT_KEY }}"
- - name: "Set up QEMU"
- uses: "docker/setup-qemu-action@v3"
- - name: "set up docker buildx"
- uses: "docker/setup-buildx-action@v3"
- - id: "platform"
- name: "parse image platform"
- run: |
- mkdir -p images
-
- platform="$(echo "${{ matrix.platform}}" | sed "s/\(.*\)\/\(.*\)/\1-\2/")"
- echo "platform=${platform}" >> $GITHUB_OUTPUT
- echo "platform_short=$(echo ${{ matrix.platform }} | cut -d / -f 2)" >> $GITHUB_OUTPUT
- working-directory: "release"
- - env:
- IMAGE_TAG: "${{ needs.version.outputs.version }}"
- if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
- name: "Build and export"
- timeout-minutes: "${{ fromJSON(env.BUILD_TIMEOUT) }}"
- uses: "docker/build-push-action@v5"
- with:
- build-args: "IMAGE_TAG=${{ needs.version.outputs.version }}"
- context: "release"
- file: "release/cmd/loki/Dockerfile"
- outputs: "type=docker,dest=release/images/loki-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
- platforms: "${{ matrix.platform }}"
- tags: "${{ env.IMAGE_PREFIX }}/loki:${{ needs.version.outputs.version }}-${{ steps.platform.outputs.platform_short }}"
- - if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
- name: "upload artifacts"
- uses: "google-github-actions/upload-cloud-storage@v2"
- with:
- destination: "${{ env.BUILD_ARTIFACTS_BUCKET }}/${{ github.sha }}/images"
- path: "release/images/loki-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
- process_gcloudignore: false
- strategy:
- fail-fast: true
- matrix:
- platform:
- - "linux/amd64"
- - "linux/arm64"
- - "linux/arm"
- loki-canary:
- needs:
- - "version"
- runs-on: "ubuntu-latest"
- steps:
- - name: "pull release library code"
- uses: "actions/checkout@v4"
- with:
- path: "lib"
- ref: "${{ env.RELEASE_LIB_REF }}"
- repository: "grafana/loki-release"
- - name: "pull code to release"
- uses: "actions/checkout@v4"
- with:
- path: "release"
- repository: "${{ env.RELEASE_REPO }}"
- - name: "setup node"
- uses: "actions/setup-node@v4"
- with:
- node-version: 20
- - name: "auth gcs"
- uses: "google-github-actions/auth@v2"
- with:
- credentials_json: "${{ secrets.GCS_SERVICE_ACCOUNT_KEY }}"
- - name: "Set up QEMU"
- uses: "docker/setup-qemu-action@v3"
- - name: "set up docker buildx"
- uses: "docker/setup-buildx-action@v3"
- - id: "platform"
- name: "parse image platform"
- run: |
- mkdir -p images
-
- platform="$(echo "${{ matrix.platform}}" | sed "s/\(.*\)\/\(.*\)/\1-\2/")"
- echo "platform=${platform}" >> $GITHUB_OUTPUT
- echo "platform_short=$(echo ${{ matrix.platform }} | cut -d / -f 2)" >> $GITHUB_OUTPUT
- working-directory: "release"
- - env:
- IMAGE_TAG: "${{ needs.version.outputs.version }}"
- if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
- name: "Build and export"
- timeout-minutes: "${{ fromJSON(env.BUILD_TIMEOUT) }}"
- uses: "docker/build-push-action@v5"
- with:
- build-args: "IMAGE_TAG=${{ needs.version.outputs.version }}"
- context: "release"
- file: "release/cmd/loki-canary/Dockerfile"
- outputs: "type=docker,dest=release/images/loki-canary-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
- platforms: "${{ matrix.platform }}"
- tags: "${{ env.IMAGE_PREFIX }}/loki-canary:${{ needs.version.outputs.version }}-${{ steps.platform.outputs.platform_short }}"
- - if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
- name: "upload artifacts"
- uses: "google-github-actions/upload-cloud-storage@v2"
- with:
- destination: "${{ env.BUILD_ARTIFACTS_BUCKET }}/${{ github.sha }}/images"
- path: "release/images/loki-canary-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
- process_gcloudignore: false
- strategy:
- fail-fast: true
- matrix:
- platform:
- - "linux/amd64"
- - "linux/arm64"
- - "linux/arm"
- loki-canary-boringcrypto:
- needs:
- - "version"
- runs-on: "ubuntu-latest"
- steps:
- - name: "pull release library code"
- uses: "actions/checkout@v4"
- with:
- path: "lib"
- ref: "${{ env.RELEASE_LIB_REF }}"
- repository: "grafana/loki-release"
- - name: "pull code to release"
- uses: "actions/checkout@v4"
- with:
- path: "release"
- repository: "${{ env.RELEASE_REPO }}"
- - name: "setup node"
- uses: "actions/setup-node@v4"
- with:
- node-version: 20
- - name: "auth gcs"
- uses: "google-github-actions/auth@v2"
- with:
- credentials_json: "${{ secrets.GCS_SERVICE_ACCOUNT_KEY }}"
- - name: "Set up QEMU"
- uses: "docker/setup-qemu-action@v3"
- - name: "set up docker buildx"
- uses: "docker/setup-buildx-action@v3"
- - id: "platform"
- name: "parse image platform"
- run: |
- mkdir -p images
-
- platform="$(echo "${{ matrix.platform}}" | sed "s/\(.*\)\/\(.*\)/\1-\2/")"
- echo "platform=${platform}" >> $GITHUB_OUTPUT
- echo "platform_short=$(echo ${{ matrix.platform }} | cut -d / -f 2)" >> $GITHUB_OUTPUT
- working-directory: "release"
- - env:
- IMAGE_TAG: "${{ needs.version.outputs.version }}"
- if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
- name: "Build and export"
- timeout-minutes: "${{ fromJSON(env.BUILD_TIMEOUT) }}"
- uses: "docker/build-push-action@v5"
- with:
- build-args: "IMAGE_TAG=${{ needs.version.outputs.version }}"
- context: "release"
- file: "release/cmd/loki-canary-boringcrypto/Dockerfile"
- outputs: "type=docker,dest=release/images/loki-canary-boringcrypto-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
- platforms: "${{ matrix.platform }}"
- tags: "${{ env.IMAGE_PREFIX }}/loki-canary-boringcrypto:${{ needs.version.outputs.version }}-${{ steps.platform.outputs.platform_short }}"
- - if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
- name: "upload artifacts"
- uses: "google-github-actions/upload-cloud-storage@v2"
- with:
- destination: "${{ env.BUILD_ARTIFACTS_BUCKET }}/${{ github.sha }}/images"
- path: "release/images/loki-canary-boringcrypto-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
- process_gcloudignore: false
- strategy:
- fail-fast: true
- matrix:
- platform:
- - "linux/amd64"
- - "linux/arm64"
- - "linux/arm"
- promtail:
- needs:
- - "version"
- runs-on: "ubuntu-latest"
- steps:
- - name: "pull release library code"
- uses: "actions/checkout@v4"
- with:
- path: "lib"
- ref: "${{ env.RELEASE_LIB_REF }}"
- repository: "grafana/loki-release"
- - name: "pull code to release"
- uses: "actions/checkout@v4"
- with:
- path: "release"
- repository: "${{ env.RELEASE_REPO }}"
- - name: "setup node"
- uses: "actions/setup-node@v4"
- with:
- node-version: 20
- - name: "auth gcs"
- uses: "google-github-actions/auth@v2"
- with:
- credentials_json: "${{ secrets.GCS_SERVICE_ACCOUNT_KEY }}"
- - name: "Set up QEMU"
- uses: "docker/setup-qemu-action@v3"
- - name: "set up docker buildx"
- uses: "docker/setup-buildx-action@v3"
- - id: "platform"
- name: "parse image platform"
- run: |
- mkdir -p images
-
- platform="$(echo "${{ matrix.platform}}" | sed "s/\(.*\)\/\(.*\)/\1-\2/")"
- echo "platform=${platform}" >> $GITHUB_OUTPUT
- echo "platform_short=$(echo ${{ matrix.platform }} | cut -d / -f 2)" >> $GITHUB_OUTPUT
- working-directory: "release"
- - env:
- IMAGE_TAG: "${{ needs.version.outputs.version }}"
- if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
- name: "Build and export"
- timeout-minutes: "${{ fromJSON(env.BUILD_TIMEOUT) }}"
- uses: "docker/build-push-action@v5"
- with:
- build-args: "IMAGE_TAG=${{ needs.version.outputs.version }}"
- context: "release"
- file: "release/clients/cmd/promtail/Dockerfile"
- outputs: "type=docker,dest=release/images/promtail-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
- platforms: "${{ matrix.platform }}"
- tags: "${{ env.IMAGE_PREFIX }}/promtail:${{ needs.version.outputs.version }}-${{ steps.platform.outputs.platform_short }}"
- - if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
- name: "upload artifacts"
- uses: "google-github-actions/upload-cloud-storage@v2"
- with:
- destination: "${{ env.BUILD_ARTIFACTS_BUCKET }}/${{ github.sha }}/images"
- path: "release/images/promtail-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
- process_gcloudignore: false
- strategy:
- fail-fast: true
- matrix:
- platform:
- - "linux/amd64"
- - "linux/arm64"
- - "linux/arm"
- querytee:
- needs:
- - "version"
- runs-on: "ubuntu-latest"
- steps:
- - name: "pull release library code"
- uses: "actions/checkout@v4"
- with:
- path: "lib"
- ref: "${{ env.RELEASE_LIB_REF }}"
- repository: "grafana/loki-release"
- - name: "pull code to release"
- uses: "actions/checkout@v4"
- with:
- path: "release"
- repository: "${{ env.RELEASE_REPO }}"
- - name: "setup node"
- uses: "actions/setup-node@v4"
- with:
- node-version: 20
- - name: "auth gcs"
- uses: "google-github-actions/auth@v2"
- with:
- credentials_json: "${{ secrets.GCS_SERVICE_ACCOUNT_KEY }}"
- - name: "Set up QEMU"
- uses: "docker/setup-qemu-action@v3"
- - name: "set up docker buildx"
- uses: "docker/setup-buildx-action@v3"
- - id: "platform"
- name: "parse image platform"
- run: |
- mkdir -p images
-
- platform="$(echo "${{ matrix.platform}}" | sed "s/\(.*\)\/\(.*\)/\1-\2/")"
- echo "platform=${platform}" >> $GITHUB_OUTPUT
- echo "platform_short=$(echo ${{ matrix.platform }} | cut -d / -f 2)" >> $GITHUB_OUTPUT
- working-directory: "release"
- - env:
- IMAGE_TAG: "${{ needs.version.outputs.version }}"
- if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
- name: "Build and export"
- timeout-minutes: "${{ fromJSON(env.BUILD_TIMEOUT) }}"
- uses: "docker/build-push-action@v5"
- with:
- build-args: "IMAGE_TAG=${{ needs.version.outputs.version }}"
- context: "release"
- file: "release/cmd/querytee/Dockerfile"
- outputs: "type=docker,dest=release/images/loki-query-tee-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
- platforms: "${{ matrix.platform }}"
- tags: "${{ env.IMAGE_PREFIX }}/loki-query-tee:${{ needs.version.outputs.version }}-${{ steps.platform.outputs.platform_short }}"
- - if: "${{ fromJSON(needs.version.outputs.pr_created) }}"
- name: "upload artifacts"
- uses: "google-github-actions/upload-cloud-storage@v2"
- with:
- destination: "${{ env.BUILD_ARTIFACTS_BUCKET }}/${{ github.sha }}/images"
- path: "release/images/loki-query-tee-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}.tar"
- process_gcloudignore: false
- strategy:
- fail-fast: true
- matrix:
- platform:
- - "linux/amd64"
- version:
- needs:
- - "check"
- outputs:
- pr_created: "${{ steps.version.outputs.pr_created }}"
- version: "${{ steps.version.outputs.version }}"
- runs-on: "ubuntu-latest"
- steps:
- - name: "pull release library code"
- uses: "actions/checkout@v4"
- with:
- path: "lib"
- ref: "${{ env.RELEASE_LIB_REF }}"
- repository: "grafana/loki-release"
- - name: "pull code to release"
- uses: "actions/checkout@v4"
- with:
- path: "release"
- repository: "${{ env.RELEASE_REPO }}"
- - name: "setup node"
- uses: "actions/setup-node@v4"
- with:
- node-version: 20
- - id: "extract_branch"
- name: "extract branch name"
- run: |
- echo "branch=${GITHUB_HEAD_REF:-${GITHUB_REF#refs/heads/}}" >> $GITHUB_OUTPUT
- working-directory: "release"
- - id: "get_github_app_token"
- if: "${{ fromJSON(env.USE_GITHUB_APP_TOKEN) }}"
- name: "get github app token"
- uses: "actions/github-app-token@v1"
- with:
- app-id: "${{ secrets.APP_ID }}"
- owner: "${{ github.repository_owner }}"
- private-key: "${{ secrets.APP_PRIVATE_KEY }}"
- - id: "github_app_token"
- name: "set github token"
- run: |
- if [[ "${USE_GITHUB_APP_TOKEN}" == "true" ]]; then
- echo "token=${{ steps.get_github_app_token.outputs.token }}" >> $GITHUB_OUTPUT
- else
- echo "token=${{ secrets.GH_TOKEN }}" >> $GITHUB_OUTPUT
- fi
- - id: "version"
- name: "get release version"
- run: |
- npm install
-
- if [[ -z "${{ env.RELEASE_AS }}" ]]; then
- npm exec -- release-please release-pr \
- --consider-all-branches \
- --dry-run \
- --dry-run-output release.json \
- --group-pull-request-title-pattern "chore\${scope}: release\${component} \${version}" \
- --manifest-file .release-please-manifest.json \
- --pull-request-title-pattern "chore\${scope}: release\${component} \${version}" \
- --release-type simple \
- --repo-url "${{ env.RELEASE_REPO }}" \
- --separate-pull-requests false \
- --target-branch "${{ steps.extract_branch.outputs.branch }}" \
- --token "${{ steps.github_app_token.outputs.token }}" \
- --versioning-strategy "${{ env.VERSIONING_STRATEGY }}"
- else
- npm exec -- release-please release-pr \
- --consider-all-branches \
- --dry-run \
- --dry-run-output release.json \
- --group-pull-request-title-pattern "chore\${scope}: release\${component} \${version}" \
- --manifest-file .release-please-manifest.json \
- --pull-request-title-pattern "chore\${scope}: release\${component} \${version}" \
- --release-type simple \
- --repo-url "${{ env.RELEASE_REPO }}" \
- --separate-pull-requests false \
- --target-branch "${{ steps.extract_branch.outputs.branch }}" \
- --token "${{ steps.github_app_token.outputs.token }}" \
- --release-as "${{ env.RELEASE_AS }}"
- fi
-
- cat release.json
-
- if [[ `jq length release.json` -gt 1 ]]; then
- echo 'release-please would create more than 1 PR, so cannot determine correct version'
- echo "pr_created=false" >> $GITHUB_OUTPUT
- exit 1
- fi
-
- if [[ `jq length release.json` -eq 0 ]]; then
- echo "pr_created=false" >> $GITHUB_OUTPUT
- else
- version="$(npm run --silent get-version)"
- echo "Parsed version: ${version}"
- echo "version=${version}" >> $GITHUB_OUTPUT
- echo "pr_created=true" >> $GITHUB_OUTPUT
- fi
- working-directory: "lib"
-name: "Prepare Loki 3.0 release"
-"on":
- push:
- branches:
- - "main"
-permissions:
- contents: "write"
- id-token: "write"
- pull-requests: "write"
|
ci
|
remove 3.0 release off main (#12522)
|
0e519b46e00d15933c680c5e357d1b3469536d26
|
2021-07-22 13:30:37
|
Karen Miller
|
docs: correct fluentbit config value for DqueSync (#4026)
| false
|
diff --git a/docs/sources/clients/fluentbit/_index.md b/docs/sources/clients/fluentbit/_index.md
index d3f22989f8a86..a405fa0f25169 100644
--- a/docs/sources/clients/fluentbit/_index.md
+++ b/docs/sources/clients/fluentbit/_index.md
@@ -85,7 +85,7 @@ You can also adapt your plugins.conf, removing the need to change the command li
| BufferType | Specify the buffering mechanism to use (currently only dque is implemented). | dque |
| DqueDir | Path to the directory for queued logs | /tmp/flb-storage/loki |
| DqueSegmentSize | Segment size in terms of number of records per segment | 500 |
-| DqueSync | Whether to fsync each queue change | false |
+| DqueSync | Whether to fsync each queue change. Specify no fsync with "normal", and fsync with "full". | "normal" |
| DqueName | Queue name, must be uniq per output | dque |
### Labels
|
docs
|
correct fluentbit config value for DqueSync (#4026)
|
a26416d0782c96e5cfb983fd2b7961d8137e9cfa
|
2024-09-19 00:10:23
|
Vladyslav Diachenko
|
ci(helm): update Kube Diff workflow (#14173)
| false
|
diff --git a/.github/workflows/helm-loki-ci.yml b/.github/workflows/helm-loki-ci.yml
index 0ebc712c99e95..ca4daaaeb467b 100644
--- a/.github/workflows/helm-loki-ci.yml
+++ b/.github/workflows/helm-loki-ci.yml
@@ -32,12 +32,14 @@ jobs:
uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.base.ref }}
+ repository: ${{ github.event.pull_request.base.repo.full_name }}
path: ${{ github.workspace }}/base
- name: Checkout PR branch to 'pr' folder within workspace
uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.ref }}
+ repository: ${{ github.event.pull_request.head.repo.full_name }}
path: ${{ github.workspace }}/pr
- name: Render Helm chart for each scenario in the base branch
|
ci
|
update Kube Diff workflow (#14173)
|
0f0553ff1c1e0339da96ddff96978b4fe7836870
|
2023-02-21 16:21:30
|
a5r0n
|
helm: make multi tenant easy: use tenant id from nginx $remote_user (#8404)
| false
|
diff --git a/docs/sources/installation/helm/reference.md b/docs/sources/installation/helm/reference.md
index 88b8534f5e4a5..ba70724923066 100644
--- a/docs/sources/installation/helm/reference.md
+++ b/docs/sources/installation/helm/reference.md
@@ -803,9 +803,9 @@ null
<tr>
<td>gateway.basicAuth.htpasswd</td>
<td>string</td>
- <td>Uses the specified username and password to compute a htpasswd using Sprig's `htpasswd` function. The value is templated using `tpl`. Override this to use a custom htpasswd, e.g. in case the default causes high CPU load.</td>
+ <td>Uses the specified users from the `loki.tenants` list to create the htpasswd file if `loki.tenants` is not set, the `gateway.basicAuth.username` and `gateway.basicAuth.password` are used The value is templated using `tpl`. Override this to use a custom htpasswd, e.g. in case the default causes high CPU load.</td>
<td><pre lang="json">
-"{{ htpasswd (required \"'gateway.basicAuth.username' is required\" .Values.gateway.basicAuth.username) (required \"'gateway.basicAuth.password' is required\" .Values.gateway.basicAuth.password) }}"
+"{{ if .Values.loki.tenants }}\n {{- range $t := .Values.loki.tenants }}\n{{ htpasswd (required \"All tenants must have a 'name' set\" $t.name) (required \"All tenants must have a 'password' set\" $t.password) }}\n {{- end }}\n{{ else }} {{ htpasswd (required \"'gateway.basicAuth.username' is required\" .Values.gateway.basicAuth.username) (required \"'gateway.basicAuth.password' is required\" .Values.gateway.basicAuth.password) }} {{ end }}"
</pre>
</td>
</tr>
@@ -1054,9 +1054,9 @@ See values.yaml
<tr>
<td>gateway.nginxConfig.httpSnippet</td>
<td>string</td>
- <td>Allows appending custom configuration to the http block</td>
+ <td>Allows appending custom configuration to the http block, passed through the `tpl` function to allow templating</td>
<td><pre lang="json">
-""
+"{{ if .Values.loki.tenants }}proxy_set_header X-Scope-OrgID $remote_user;{{ end }}"
</pre>
</td>
</tr>
@@ -1935,6 +1935,15 @@ null
<td><pre lang="json">
{}
</pre>
+</td>
+ </tr>
+ <tr>
+ <td>loki.tenants</td>
+ <td>list</td>
+ <td>Tenants list to be created on nginx htpasswd file, with name and password keys</td>
+ <td><pre lang="json">
+[]
+</pre>
</td>
</tr>
<tr>
diff --git a/production/helm/loki/templates/_helpers.tpl b/production/helm/loki/templates/_helpers.tpl
index 2f837ade33d7e..4d0ca34e1a006 100644
--- a/production/helm/loki/templates/_helpers.tpl
+++ b/production/helm/loki/templates/_helpers.tpl
@@ -556,7 +556,7 @@ http {
resolver {{ .Values.global.dnsService }}.{{ .Values.global.dnsNamespace }}.svc.{{ .Values.global.clusterDomain }}.;
{{- with .Values.gateway.nginxConfig.httpSnippet }}
- {{ . | nindent 2 }}
+ {{- tpl . $ | nindent 2 }}
{{- end }}
server {
diff --git a/production/helm/loki/values.yaml b/production/helm/loki/values.yaml
index ac047d114a8aa..2fc86c4ebf997 100644
--- a/production/helm/loki/values.yaml
+++ b/production/helm/loki/values.yaml
@@ -202,6 +202,8 @@ loki:
# Should authentication be enabled
auth_enabled: true
+ # -- Tenants list to be created on nginx htpasswd file, with name and password keys
+ tenants: []
# -- Check https://grafana.com/docs/loki/latest/configuration/#server for more info on the server configuration.
server:
@@ -1171,12 +1173,18 @@ gateway:
username: null
# -- The basic auth password for the gateway
password: null
- # -- Uses the specified username and password to compute a htpasswd using Sprig's `htpasswd` function.
+ # -- Uses the specified users from the `loki.tenants` list to create the htpasswd file
+ # if `loki.tenants` is not set, the `gateway.basicAuth.username` and `gateway.basicAuth.password` are used
# The value is templated using `tpl`. Override this to use a custom htpasswd, e.g. in case the default causes
# high CPU load.
htpasswd: >-
+ {{ if .Values.loki.tenants }}
+ {{- range $t := .Values.loki.tenants }}
+ {{ htpasswd (required "All tenants must have a 'name' set" $t.name) (required "All tenants must have a 'password' set" $t.password) }}
+ {{- end }}
+ {{ else }}
{{ htpasswd (required "'gateway.basicAuth.username' is required" .Values.gateway.basicAuth.username) (required "'gateway.basicAuth.password' is required" .Values.gateway.basicAuth.password) }}
-
+ {{ end }}
# -- Existing basic auth secret to use. Must contain '.htpasswd'
existingSecret: null
# Configures the readiness probe for the gateway
@@ -1194,8 +1202,9 @@ gateway:
'"$http_user_agent" "$http_x_forwarded_for"';
# -- Allows appending custom configuration to the server block
serverSnippet: ""
- # -- Allows appending custom configuration to the http block
- httpSnippet: ""
+ # -- Allows appending custom configuration to the http block, passed through the `tpl` function to allow templating
+ httpSnippet: >-
+ {{ if .Values.loki.tenants }}proxy_set_header X-Scope-OrgID $remote_user;{{ end }}
# -- Override Read URL
customReadUrl: null
# -- Override Write URL
|
helm
|
make multi tenant easy: use tenant id from nginx $remote_user (#8404)
|
63c88489d009929f704dabc9fdcd18662214a174
|
2024-03-15 13:40:16
|
Salva Corts
|
refactor: Add RF and Tokens to Loki ring Cfg and allow overwriting docs. (#12142)
| false
|
diff --git a/docs/sources/configure/_index.md b/docs/sources/configure/_index.md
index 18fcc83bb5929..c327e919f059f 100644
--- a/docs/sources/configure/_index.md
+++ b/docs/sources/configure/_index.md
@@ -1763,6 +1763,12 @@ ring:
# CLI flag: -index-gateway.ring.zone-awareness-enabled
[zone_awareness_enabled: <boolean> | default = false]
+ # Deprecated: How many index gateway instances are assigned to each tenant.
+ # Use -index-gateway.shard-size instead. The shard size is also a per-tenant
+ # setting.
+ # CLI flag: -replication-factor
+ [replication_factor: <int> | default = 3]
+
# Instance ID to register in the ring.
# CLI flag: -index-gateway.ring.instance-id
[instance_id: <string> | default = "<hostname>"]
@@ -1787,12 +1793,6 @@ ring:
# Enable using a IPv6 instance address.
# CLI flag: -index-gateway.ring.instance-enable-ipv6
[instance_enable_ipv6: <boolean> | default = false]
-
- # Deprecated: How many index gateway instances are assigned to each tenant.
- # Use -index-gateway.shard-size instead. The shard size is also a per-tenant
- # setting.
- # CLI flag: -replication-factor
- [replication_factor: <int> | default = 3]
```
### bloom_gateway
@@ -1860,6 +1860,17 @@ ring:
# CLI flag: -bloom-gateway.ring.zone-awareness-enabled
[zone_awareness_enabled: <boolean> | default = false]
+ # Number of tokens to use in the ring. The bigger the number of tokens, the
+ # more fingerprint ranges the compactor will own, but the smaller these ranges
+ # will be. Bigger number of tokens means that more but smaller requests will
+ # be handled by each gateway.
+ # CLI flag: -bloom-gateway.ring.tokens
+ [num_tokens: <int> | default = 16]
+
+ # Factor for data replication.
+ # CLI flag: -bloom-gateway.ring.replication-factor
+ [replication_factor: <int> | default = 3]
+
# Instance ID to register in the ring.
# CLI flag: -bloom-gateway.ring.instance-id
[instance_id: <string> | default = "<hostname>"]
@@ -1885,17 +1896,6 @@ ring:
# CLI flag: -bloom-gateway.ring.instance-enable-ipv6
[instance_enable_ipv6: <boolean> | default = false]
- # Factor for data replication.
- # CLI flag: -bloom-gateway.replication-factor
- [replication_factor: <int> | default = 3]
-
- # Number of tokens to use in the ring. The bigger the number of tokens, the
- # more fingerprint ranges the compactor will own, but the smaller these ranges
- # will be. Bigger number of tokens means that more but smaller requests will
- # be handled by each gateway.
- # CLI flag: -bloom-gateway.ring.tokens
- [tokens: <int> | default = 16]
-
# Flag to enable or disable the bloom gateway component globally.
# CLI flag: -bloom-gateway.enabled
[enabled: <boolean> | default = false]
@@ -2654,6 +2654,11 @@ ring:
# CLI flag: -bloom-compactor.ring.zone-awareness-enabled
[zone_awareness_enabled: <boolean> | default = false]
+ # Number of tokens to use in the ring per compactor. Higher number of tokens
+ # will result in more and smaller files (metas and blocks.)
+ # CLI flag: -bloom-compactor.ring.num-tokens
+ [num_tokens: <int> | default = 10]
+
# Instance ID to register in the ring.
# CLI flag: -bloom-compactor.ring.instance-id
[instance_id: <string> | default = "<hostname>"]
@@ -2679,13 +2684,6 @@ ring:
# CLI flag: -bloom-compactor.ring.instance-enable-ipv6
[instance_enable_ipv6: <boolean> | default = false]
- # Number of tokens to use in the ring. The bigger the number of tokens, the
- # more fingerprint ranges the compactor will own, but the smaller these ranges
- # will be. Bigger number of tokens will result in more and smaller metas and
- # blocks.
- # CLI flag: -bloom-compactor.ring.tokens
- [tokens: <int> | default = 10]
-
# Flag to enable or disable the usage of the bloom-compactor component.
# CLI flag: -bloom-compactor.enabled
[enabled: <boolean> | default = false]
@@ -3836,6 +3834,14 @@ ring:
# CLI flag: -common.storage.ring.zone-awareness-enabled
[zone_awareness_enabled: <boolean> | default = false]
+ # Number of tokens to own in the ring.
+ # CLI flag: -common.storage.ring.num-tokens
+ [num_tokens: <int> | default = 128]
+
+ # Factor for data replication.
+ # CLI flag: -common.storage.ring.replication-factor
+ [replication_factor: <int> | default = 3]
+
# Instance ID to register in the ring.
# CLI flag: -common.storage.ring.instance-id
[instance_id: <string> | default = "<hostname>"]
diff --git a/pkg/bloomcompactor/bloomcompactor_test.go b/pkg/bloomcompactor/bloomcompactor_test.go
index 71d5b843ca04b..70e76d41e9856 100644
--- a/pkg/bloomcompactor/bloomcompactor_test.go
+++ b/pkg/bloomcompactor/bloomcompactor_test.go
@@ -68,22 +68,20 @@ func TestCompactor_ownsTenant(t *testing.T) {
var ringManagers []*lokiring.RingManager
var compactors []*Compactor
for i := 0; i < tc.compactors; i++ {
- var ringCfg RingConfig
- ringCfg.RegisterFlagsWithPrefix("", "", flag.NewFlagSet("ring", flag.PanicOnError))
- ringCfg.KVStore.Store = "inmemory"
- ringCfg.InstanceID = fmt.Sprintf("bloom-compactor-%d", i)
- ringCfg.InstanceAddr = fmt.Sprintf("localhost-%d", i)
+ var cfg Config
+ cfg.RegisterFlags(flag.NewFlagSet("ring", flag.PanicOnError))
+ cfg.Ring.KVStore.Store = "inmemory"
+ cfg.Ring.InstanceID = fmt.Sprintf("bloom-compactor-%d", i)
+ cfg.Ring.InstanceAddr = fmt.Sprintf("localhost-%d", i)
- ringManager, err := lokiring.NewRingManager("bloom-compactor", lokiring.ServerMode, ringCfg.RingConfig, 1, ringCfg.Tokens, util_log.Logger, prometheus.NewRegistry())
+ ringManager, err := lokiring.NewRingManager("bloom-compactor", lokiring.ServerMode, cfg.Ring, 1, cfg.Ring.NumTokens, util_log.Logger, prometheus.NewRegistry())
require.NoError(t, err)
require.NoError(t, ringManager.StartAsync(context.Background()))
shuffleSharding := util_ring.NewTenantShuffleSharding(ringManager.Ring, ringManager.RingLifecycler, tc.limits.BloomCompactorShardSize)
compactor := &Compactor{
- cfg: Config{
- Ring: ringCfg,
- },
+ cfg: cfg,
sharding: shuffleSharding,
limits: tc.limits,
}
diff --git a/pkg/bloomcompactor/config.go b/pkg/bloomcompactor/config.go
index b887493c1a867..fee457767647b 100644
--- a/pkg/bloomcompactor/config.go
+++ b/pkg/bloomcompactor/config.go
@@ -3,18 +3,23 @@ package bloomcompactor
import (
"flag"
"fmt"
+ "github.com/pkg/errors"
"time"
"github.com/grafana/loki/pkg/storage/stores/shipper/indexshipper/downloads"
"github.com/grafana/loki/pkg/util/ring"
)
+const (
+ ringReplicationFactor = 1
+)
+
// Config configures the bloom-compactor component.
type Config struct {
// Ring configures the ring store used to save and retrieve the different Bloom-Compactor instances.
// In case it isn't explicitly set, it follows the same behavior of the other rings (ex: using the common configuration
// section and the ingester configuration by default).
- Ring RingConfig `yaml:"ring,omitempty" doc:"description=Defines the ring to be used by the bloom-compactor servers. In case this isn't configured, this block supports inheriting configuration from the common ring section."`
+ Ring ring.RingConfig `yaml:"ring,omitempty" doc:"description=Defines the ring to be used by the bloom-compactor servers. In case this isn't configured, this block supports inheriting configuration from the common ring section."`
// Enabled configures whether bloom-compactors should be used to compact index values into bloomfilters
Enabled bool `yaml:"enabled"`
CompactionInterval time.Duration `yaml:"compaction_interval"`
@@ -30,7 +35,6 @@ type Config struct {
// RegisterFlags registers flags for the Bloom-Compactor configuration.
func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
- cfg.Ring.RegisterFlagsWithPrefix("bloom-compactor.", "collectors/", f)
f.BoolVar(&cfg.Enabled, "bloom-compactor.enabled", false, "Flag to enable or disable the usage of the bloom-compactor component.")
f.DurationVar(&cfg.CompactionInterval, "bloom-compactor.compaction-interval", 10*time.Minute, "Interval at which to re-run the compaction operation.")
f.IntVar(&cfg.WorkerParallelism, "bloom-compactor.worker-parallelism", 1, "Number of workers to run in parallel for compaction.")
@@ -48,26 +52,29 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
f.DurationVar(&cfg.RetryMaxBackoff, "bloom-compactor.compaction-retries-max-backoff", time.Minute, "Maximum backoff time between retries.")
f.IntVar(&cfg.CompactionRetries, "bloom-compactor.compaction-retries", 3, "Number of retries to perform when compaction fails.")
f.IntVar(&cfg.MaxCompactionParallelism, "bloom-compactor.max-compaction-parallelism", 1, "Maximum number of tables to compact in parallel. While increasing this value, please make sure compactor has enough disk space allocated to be able to store and compact as many tables.")
+
+ // Ring
+ skipFlags := []string{
+ "bloom-compactor.ring.num-tokens",
+ "bloom-compactor.ring.replication-factor",
+ }
+ cfg.Ring.RegisterFlagsWithPrefix("bloom-compactor.", "collectors/", f, skipFlags...)
+ // Overrides
+ f.IntVar(&cfg.Ring.NumTokens, "bloom-compactor.ring.num-tokens", 10, "Number of tokens to use in the ring per compactor. Higher number of tokens will result in more and smaller files (metas and blocks.)")
+ // Ignored
+ f.IntVar(&cfg.Ring.ReplicationFactor, "bloom-compactor.ring.replication-factor", ringReplicationFactor, fmt.Sprintf("IGNORED: Replication factor is fixed to %d", ringReplicationFactor))
}
func (cfg *Config) Validate() error {
if cfg.MinTableCompactionPeriod > cfg.MaxTableCompactionPeriod {
return fmt.Errorf("min_compaction_age must be less than or equal to max_compaction_age")
}
+ if cfg.Ring.ReplicationFactor != ringReplicationFactor {
+ return errors.New("Replication factor must not be changed as it will not take effect")
+ }
return nil
}
-type RingConfig struct {
- ring.RingConfig `yaml:",inline"`
-
- Tokens int `yaml:"tokens"`
-}
-
-func (cfg *RingConfig) RegisterFlagsWithPrefix(flagsPrefix, storePrefix string, f *flag.FlagSet) {
- cfg.RingConfig.RegisterFlagsWithPrefix(flagsPrefix, storePrefix, f)
- f.IntVar(&cfg.Tokens, flagsPrefix+"ring.tokens", 10, "Number of tokens to use in the ring. The bigger the number of tokens, the more fingerprint ranges the compactor will own, but the smaller these ranges will be. Bigger number of tokens will result in more and smaller metas and blocks.")
-}
-
type Limits interface {
downloads.Limits
BloomCompactorShardSize(tenantID string) int
diff --git a/pkg/bloomgateway/bloomgateway_test.go b/pkg/bloomgateway/bloomgateway_test.go
index 54651596f9b22..2318fdb535d05 100644
--- a/pkg/bloomgateway/bloomgateway_test.go
+++ b/pkg/bloomgateway/bloomgateway_test.go
@@ -102,16 +102,12 @@ func TestBloomGateway_StartStopService(t *testing.T) {
cfg := Config{
Enabled: true,
- Ring: RingConfig{
- RingConfigWithRF: lokiring.RingConfigWithRF{
- RingConfig: lokiring.RingConfig{
- KVStore: kv.Config{
- Mock: kvStore,
- },
- },
- ReplicationFactor: 1,
+ Ring: lokiring.RingConfig{
+ KVStore: kv.Config{
+ Mock: kvStore,
},
- Tokens: 16,
+ ReplicationFactor: 1,
+ NumTokens: 16,
},
WorkerConcurrency: 4,
MaxOutstandingPerTenant: 1024,
@@ -147,16 +143,12 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) {
cfg := Config{
Enabled: true,
- Ring: RingConfig{
- RingConfigWithRF: lokiring.RingConfigWithRF{
- RingConfig: lokiring.RingConfig{
- KVStore: kv.Config{
- Mock: kvStore,
- },
- },
- ReplicationFactor: 1,
+ Ring: lokiring.RingConfig{
+ KVStore: kv.Config{
+ Mock: kvStore,
},
- Tokens: 16,
+ ReplicationFactor: 1,
+ NumTokens: 16,
},
WorkerConcurrency: 4,
MaxOutstandingPerTenant: 1024,
diff --git a/pkg/bloomgateway/config.go b/pkg/bloomgateway/config.go
index 42c476e00d41a..ad5d2928728a6 100644
--- a/pkg/bloomgateway/config.go
+++ b/pkg/bloomgateway/config.go
@@ -11,7 +11,7 @@ type Config struct {
// Ring configures the ring store used to save and retrieve the different Bloom Gateway instances.
// In case it isn't explicitly set, it follows the same behavior of the other rings (ex: using the common configuration
// section and the ingester configuration by default).
- Ring RingConfig `yaml:"ring,omitempty" doc:"description=Defines the ring to be used by the bloom gateway servers and clients. In case this isn't configured, this block supports inheriting configuration from the common ring section."`
+ Ring ring.RingConfig `yaml:"ring,omitempty" doc:"description=Defines the ring to be used by the bloom gateway servers and clients. In case this isn't configured, this block supports inheriting configuration from the common ring section."`
// Enabled is the global switch to configures whether Bloom Gateways should be used to filter chunks.
Enabled bool `yaml:"enabled"`
// Client configures the Bloom Gateway client
@@ -29,7 +29,6 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
// RegisterFlagsWithPrefix registers flags for the Bloom Gateway configuration with a common prefix.
func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
- cfg.Ring.RegisterFlagsWithPrefix(prefix, "collectors/", f)
f.BoolVar(&cfg.Enabled, prefix+"enabled", false, "Flag to enable or disable the bloom gateway component globally.")
f.IntVar(&cfg.WorkerConcurrency, prefix+"worker-concurrency", 4, "Number of workers to use for filtering chunks concurrently.")
f.IntVar(&cfg.MaxOutstandingPerTenant, prefix+"max-outstanding-per-tenant", 1024, "Maximum number of outstanding tasks per tenant.")
@@ -37,17 +36,13 @@ func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
// TODO(chaudum): Figure out what the better place is for registering flags
// -bloom-gateway.client.* or -bloom-gateway-client.*
cfg.Client.RegisterFlags(f)
-}
-
-type RingConfig struct {
- ring.RingConfigWithRF `yaml:",inline"`
-
- Tokens int `yaml:"tokens"`
-}
-func (cfg *RingConfig) RegisterFlagsWithPrefix(flagsPrefix, storePrefix string, f *flag.FlagSet) {
- cfg.RingConfigWithRF.RegisterFlagsWithPrefix(flagsPrefix, storePrefix, f)
- f.IntVar(&cfg.Tokens, flagsPrefix+"ring.tokens", 16, "Number of tokens to use in the ring. The bigger the number of tokens, the more fingerprint ranges the compactor will own, but the smaller these ranges will be. Bigger number of tokens means that more but smaller requests will be handled by each gateway.")
+ // Ring
+ skipFlags := []string{
+ prefix + "ring.tokens",
+ }
+ cfg.Ring.RegisterFlagsWithPrefix(prefix, "collectors/", f, skipFlags...)
+	f.IntVar(&cfg.Ring.NumTokens, prefix+"ring.tokens", 16, "Number of tokens to use in the ring. The bigger the number of tokens, the more fingerprint ranges the gateway will own, but the smaller these ranges will be. Bigger number of tokens means that more but smaller requests will be handled by each gateway.")
}
type Limits interface {
diff --git a/pkg/compactor/compactor.go b/pkg/compactor/compactor.go
index f5062f2d6e33e..75bd575e2c77c 100644
--- a/pkg/compactor/compactor.go
+++ b/pkg/compactor/compactor.go
@@ -110,7 +110,14 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
f.IntVar(&cfg.TablesToCompact, "compactor.tables-to-compact", 0, "Number of tables that compactor will try to compact. Newer tables are chosen when this is less than the number of tables available.")
f.IntVar(&cfg.SkipLatestNTables, "compactor.skip-latest-n-tables", 0, "Do not compact N latest tables. Together with -compactor.run-once and -compactor.tables-to-compact, this is useful when clearing compactor backlogs.")
- cfg.CompactorRing.RegisterFlagsWithPrefix("compactor.", "collectors/", f)
+ // Ring
+ skipFlags := []string{
+ "compactor.ring.num-tokens",
+ "compactor.ring.replication-factor",
+ }
+ cfg.CompactorRing.RegisterFlagsWithPrefix("compactor.", "collectors/", f, skipFlags...)
+ f.IntVar(&cfg.CompactorRing.NumTokens, "compactor.ring.num-tokens", ringNumTokens, fmt.Sprintf("IGNORED: Num tokens is fixed to %d", ringNumTokens))
+ f.IntVar(&cfg.CompactorRing.ReplicationFactor, "compactor.ring.replication-factor", ringReplicationFactor, fmt.Sprintf("IGNORED: Replication factor is fixed to %d", ringReplicationFactor))
}
// Validate verifies the config does not contain inappropriate values
@@ -119,6 +126,14 @@ func (cfg *Config) Validate() error {
return errors.New("max compaction parallelism must be >= 1")
}
+ if cfg.CompactorRing.NumTokens != ringNumTokens {
+ return errors.New("Num tokens must not be changed as it will not take effect")
+ }
+
+ if cfg.CompactorRing.ReplicationFactor != ringReplicationFactor {
+ return errors.New("Replication factor must not be changed as it will not take effect")
+ }
+
if cfg.RetentionEnabled {
if cfg.DeleteRequestStore == "" {
return fmt.Errorf("compactor.delete-request-store should be configured when retention is enabled")
diff --git a/pkg/loki/config_wrapper.go b/pkg/loki/config_wrapper.go
index f76e0f75da9f7..8a5f6c6811250 100644
--- a/pkg/loki/config_wrapper.go
+++ b/pkg/loki/config_wrapper.go
@@ -178,10 +178,11 @@ func applyInstanceConfigs(r, defaults *ConfigWrapper) {
}
}
-// applyCommonReplicationFactor apply the common replication factor to the Index Gateway ring.
+// applyCommonReplicationFactor applies the common replication factor to the Index Gateway and Bloom Gateway rings.
func applyCommonReplicationFactor(r, defaults *ConfigWrapper) {
if !reflect.DeepEqual(r.Common.ReplicationFactor, defaults.Common.ReplicationFactor) {
r.IndexGateway.Ring.ReplicationFactor = r.Common.ReplicationFactor
+ r.BloomGateway.Ring.ReplicationFactor = r.Common.ReplicationFactor
}
}
@@ -314,6 +315,7 @@ func applyConfigToRings(r, defaults *ConfigWrapper, rc lokiring.RingConfig, merg
r.BloomCompactor.Ring.InstanceZone = rc.InstanceZone
r.BloomCompactor.Ring.ZoneAwarenessEnabled = rc.ZoneAwarenessEnabled
r.BloomCompactor.Ring.KVStore = rc.KVStore
+ r.BloomCompactor.Ring.NumTokens = rc.NumTokens
}
// BloomGateway
@@ -327,6 +329,7 @@ func applyConfigToRings(r, defaults *ConfigWrapper, rc lokiring.RingConfig, merg
r.BloomGateway.Ring.InstanceZone = rc.InstanceZone
r.BloomGateway.Ring.ZoneAwarenessEnabled = rc.ZoneAwarenessEnabled
r.BloomGateway.Ring.KVStore = rc.KVStore
+ r.BloomGateway.Ring.NumTokens = rc.NumTokens
}
}
diff --git a/pkg/loki/config_wrapper_test.go b/pkg/loki/config_wrapper_test.go
index 60c9223732d05..41705f012f020 100644
--- a/pkg/loki/config_wrapper_test.go
+++ b/pkg/loki/config_wrapper_test.go
@@ -1174,7 +1174,7 @@ func Test_applyIngesterRingConfig(t *testing.T) {
assert.Equal(t, 9,
reflect.TypeOf(distributor.RingConfig{}).NumField(),
fmt.Sprintf(msgf, reflect.TypeOf(distributor.RingConfig{}).String()))
- assert.Equal(t, 13,
+ assert.Equal(t, 15,
reflect.TypeOf(lokiring.RingConfig{}).NumField(),
fmt.Sprintf(msgf, reflect.TypeOf(lokiring.RingConfig{}).String()))
})
diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go
index dac605dacb7d4..f44c079e51dff 100644
--- a/pkg/loki/loki.go
+++ b/pkg/loki/loki.go
@@ -223,6 +223,9 @@ func (c *Config) Validate() error {
if err := c.Querier.Validate(); err != nil {
return errors.Wrap(err, "invalid querier config")
}
+ if err := c.QueryScheduler.Validate(); err != nil {
+ return errors.Wrap(err, "invalid query_scheduler config")
+ }
if err := c.TableManager.Validate(); err != nil {
return errors.Wrap(err, "invalid tablemanager config")
}
@@ -241,6 +244,9 @@ func (c *Config) Validate() error {
if err := c.StorageConfig.BoltDBShipperConfig.Validate(); err != nil {
return errors.Wrap(err, "invalid boltdb-shipper config")
}
+ if err := c.IndexGateway.Validate(); err != nil {
+ return errors.Wrap(err, "invalid index_gateway config")
+ }
if err := c.CompactorConfig.Validate(); err != nil {
return errors.Wrap(err, "invalid compactor config")
}
diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go
index e5ca22dd65344..0761331f6b689 100644
--- a/pkg/loki/modules.go
+++ b/pkg/loki/modules.go
@@ -1345,7 +1345,7 @@ func (t *Loki) initBloomGatewayRing() (services.Service, error) {
if t.Cfg.isModuleEnabled(BloomGateway) || t.Cfg.isModuleEnabled(Backend) || legacyReadMode {
mode = lokiring.ServerMode
}
- manager, err := lokiring.NewRingManager(bloomGatewayRingKey, mode, t.Cfg.BloomGateway.Ring.RingConfig, t.Cfg.BloomGateway.Ring.ReplicationFactor, t.Cfg.BloomGateway.Ring.Tokens, util_log.Logger, prometheus.DefaultRegisterer)
+ manager, err := lokiring.NewRingManager(bloomGatewayRingKey, mode, t.Cfg.BloomGateway.Ring, t.Cfg.BloomGateway.Ring.ReplicationFactor, t.Cfg.BloomGateway.Ring.NumTokens, util_log.Logger, prometheus.DefaultRegisterer)
if err != nil {
return nil, gerrors.Wrap(err, "error initializing bloom gateway ring manager")
}
@@ -1442,7 +1442,7 @@ func (t *Loki) initIndexGatewayRing() (_ services.Service, err error) {
if t.Cfg.isModuleEnabled(IndexGateway) || legacyReadMode || t.Cfg.isModuleEnabled(Backend) {
managerMode = lokiring.ServerMode
}
- rm, err := lokiring.NewRingManager(indexGatewayRingKey, managerMode, t.Cfg.IndexGateway.Ring.RingConfig, t.Cfg.IndexGateway.Ring.ReplicationFactor, 128, util_log.Logger, prometheus.DefaultRegisterer)
+ rm, err := lokiring.NewRingManager(indexGatewayRingKey, managerMode, t.Cfg.IndexGateway.Ring, t.Cfg.IndexGateway.Ring.ReplicationFactor, indexgateway.NumTokens, util_log.Logger, prometheus.DefaultRegisterer)
if err != nil {
return nil, gerrors.Wrap(err, "new index gateway ring manager")
@@ -1493,7 +1493,7 @@ func (t *Loki) initBloomCompactorRing() (services.Service, error) {
// is LegacyMode needed?
// legacyReadMode := t.Cfg.LegacyReadTarget && t.isModuleActive(Read)
- rm, err := lokiring.NewRingManager(bloomCompactorRingKey, lokiring.ServerMode, t.Cfg.BloomCompactor.Ring.RingConfig, 1, t.Cfg.BloomCompactor.Ring.Tokens, util_log.Logger, prometheus.DefaultRegisterer)
+ rm, err := lokiring.NewRingManager(bloomCompactorRingKey, lokiring.ServerMode, t.Cfg.BloomCompactor.Ring, 1, t.Cfg.BloomCompactor.Ring.NumTokens, util_log.Logger, prometheus.DefaultRegisterer)
if err != nil {
return nil, gerrors.Wrap(err, "error initializing bloom-compactor ring manager")
}
@@ -1534,9 +1534,7 @@ func (t *Loki) initQuerySchedulerRing() (_ services.Service, err error) {
if t.Cfg.isModuleEnabled(QueryScheduler) || t.Cfg.isModuleEnabled(Backend) || t.Cfg.isModuleEnabled(All) || (t.Cfg.LegacyReadTarget && t.Cfg.isModuleEnabled(Read)) {
managerMode = lokiring.ServerMode
}
- rf := 2 // ringReplicationFactor should be 2 because we want 2 schedulers.
- tokens := 1 // we only need to insert 1 token to be used for leader election purposes.
- rm, err := lokiring.NewRingManager(schedulerRingKey, managerMode, t.Cfg.QueryScheduler.SchedulerRing, rf, tokens, util_log.Logger, prometheus.DefaultRegisterer)
+ rm, err := lokiring.NewRingManager(schedulerRingKey, managerMode, t.Cfg.QueryScheduler.SchedulerRing, scheduler.ReplicationFactor, scheduler.NumTokens, util_log.Logger, prometheus.DefaultRegisterer)
if err != nil {
return nil, gerrors.Wrap(err, "new scheduler ring manager")
diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go
index 5cd163ff0ffa1..4c26becce7a63 100644
--- a/pkg/scheduler/scheduler.go
+++ b/pkg/scheduler/scheduler.go
@@ -39,6 +39,13 @@ import (
lokiring "github.com/grafana/loki/pkg/util/ring"
)
+const (
+ // NumTokens is 1 since we only need to insert 1 token to be used for leader election purposes.
+ NumTokens = 1
+ // ReplicationFactor should be 2 because we want 2 schedulers.
+ ReplicationFactor = 2
+)
+
var errSchedulerIsNotRunning = errors.New("scheduler is not running")
// Scheduler is responsible for queueing and dispatching queries to Queriers.
@@ -111,7 +118,25 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
f.DurationVar(&cfg.QuerierForgetDelay, "query-scheduler.querier-forget-delay", 0, "If a querier disconnects without sending notification about graceful shutdown, the query-scheduler will keep the querier in the tenant's shard until the forget delay has passed. This feature is useful to reduce the blast radius when shuffle-sharding is enabled.")
cfg.GRPCClientConfig.RegisterFlagsWithPrefix("query-scheduler.grpc-client-config", f)
f.BoolVar(&cfg.UseSchedulerRing, "query-scheduler.use-scheduler-ring", false, "Set to true to have the query schedulers create and place themselves in a ring. If no frontend_address or scheduler_address are present anywhere else in the configuration, Loki will toggle this value to true.")
- cfg.SchedulerRing.RegisterFlagsWithPrefix("query-scheduler.", "collectors/", f)
+
+ // Ring
+ skipFlags := []string{
+ "query-scheduler.ring.num-tokens",
+ "query-scheduler.ring.replication-factor",
+ }
+ cfg.SchedulerRing.RegisterFlagsWithPrefix("query-scheduler.", "collectors/", f, skipFlags...)
+ f.IntVar(&cfg.SchedulerRing.NumTokens, "query-scheduler.ring.num-tokens", NumTokens, fmt.Sprintf("IGNORED: Num tokens is fixed to %d", NumTokens))
+ f.IntVar(&cfg.SchedulerRing.ReplicationFactor, "query-scheduler.ring.replication-factor", ReplicationFactor, fmt.Sprintf("IGNORED: Replication factor is fixed to %d", ReplicationFactor))
+}
+
+func (cfg *Config) Validate() error {
+ if cfg.SchedulerRing.NumTokens != NumTokens {
+ return errors.New("Num tokens must not be changed as it will not take effect")
+ }
+ if cfg.SchedulerRing.ReplicationFactor != ReplicationFactor {
+ return errors.New("Replication factor must not be changed as it will not take effect")
+ }
+ return nil
}
// NewScheduler creates a new Scheduler.
diff --git a/pkg/storage/stores/shipper/indexshipper/indexgateway/config.go b/pkg/storage/stores/shipper/indexshipper/indexgateway/config.go
index 137ef5dfa05be..884d29bf9e37c 100644
--- a/pkg/storage/stores/shipper/indexshipper/indexgateway/config.go
+++ b/pkg/storage/stores/shipper/indexshipper/indexgateway/config.go
@@ -3,10 +3,16 @@ package indexgateway
import (
"flag"
"fmt"
+ "github.com/pkg/errors"
"github.com/grafana/loki/pkg/util/ring"
)
+const (
+ NumTokens = 128
+ ReplicationFactor = 3
+)
+
// Mode represents in which mode an Index Gateway instance is running.
//
// Right now, two modes are supported: simple mode (default) and ring mode.
@@ -47,26 +53,6 @@ const (
RingMode Mode = "ring"
)
-// RingCfg is identical to ring.RingConfigWithRF with the difference that the
-// ReplicationFactor field is deprecated.
-type RingCfg struct {
- // InternalRingCfg configures the Index Gateway ring.
- ring.RingConfig `yaml:",inline"`
-
- // ReplicationFactor defines how many Index Gateway instances are assigned to each tenant.
- //
- // Whenever the store queries the ring key-value store for the Index Gateway instance responsible for tenant X,
- // multiple Index Gateway instances are expected to be returned as Index Gateway might be busy/locked for specific
- // reasons (this is assured by the spikey behavior of Index Gateway latencies).
- ReplicationFactor int `yaml:"replication_factor"`
-}
-
-// RegisterFlagsWithPrefix register all Index Gateway flags related to its ring but with a proper store prefix to avoid conflicts.
-func (cfg *RingCfg) RegisterFlags(prefix, storePrefix string, f *flag.FlagSet) {
- cfg.RegisterFlagsWithPrefix(prefix, storePrefix, f)
- f.IntVar(&cfg.ReplicationFactor, "replication-factor", 3, "Deprecated: How many index gateway instances are assigned to each tenant. Use -index-gateway.shard-size instead. The shard size is also a per-tenant setting.")
-}
-
// Config configures an Index Gateway server.
type Config struct {
// Mode configures in which mode the client will be running when querying and communicating with an Index Gateway instance.
@@ -76,11 +62,31 @@ type Config struct {
//
// In case it isn't explicitly set, it follows the same behavior of the other rings (ex: using the common configuration
// section and the ingester configuration by default).
- Ring RingCfg `yaml:"ring,omitempty" doc:"description=Defines the ring to be used by the index gateway servers and clients in case the servers are configured to run in 'ring' mode. In case this isn't configured, this block supports inheriting configuration from the common ring section."`
+ Ring ring.RingConfig `yaml:"ring,omitempty" doc:"description=Defines the ring to be used by the index gateway servers and clients in case the servers are configured to run in 'ring' mode. In case this isn't configured, this block supports inheriting configuration from the common ring section."`
}
// RegisterFlags register all IndexGatewayClientConfig flags and all the flags of its subconfigs but with a prefix (ex: shipper).
func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
- cfg.Ring.RegisterFlags("index-gateway.", "collectors/", f)
f.StringVar((*string)(&cfg.Mode), "index-gateway.mode", SimpleMode.String(), "Defines in which mode the index gateway server will operate (default to 'simple'). It supports two modes:\n- 'simple': an index gateway server instance is responsible for handling, storing and returning requests for all indices for all tenants.\n- 'ring': an index gateway server instance is responsible for a subset of tenants instead of all tenants.")
+
+ // Ring
+ skipFlags := []string{
+ "index-gateway.ring.num-tokens",
+ "index-gateway.ring.replication-factor",
+ }
+ cfg.Ring.RegisterFlagsWithPrefix("index-gateway.", "collectors/", f, skipFlags...)
+ f.IntVar(&cfg.Ring.NumTokens, "index-gateway.ring.num-tokens", NumTokens, fmt.Sprintf("IGNORED: Num tokens is fixed to %d", NumTokens))
+ // ReplicationFactor defines how many Index Gateway instances are assigned to each tenant.
+ //
+ // Whenever the store queries the ring key-value store for the Index Gateway instance responsible for tenant X,
+ // multiple Index Gateway instances are expected to be returned as Index Gateway might be busy/locked for specific
+ // reasons (this is assured by the spikey behavior of Index Gateway latencies).
+ f.IntVar(&cfg.Ring.ReplicationFactor, "replication-factor", ReplicationFactor, "Deprecated: How many index gateway instances are assigned to each tenant. Use -index-gateway.shard-size instead. The shard size is also a per-tenant setting.")
+}
+
+func (cfg *Config) Validate() error {
+ if cfg.Ring.NumTokens != NumTokens {
+ return errors.New("Num tokens must not be changed as it will not take effect")
+ }
+ return nil
}
diff --git a/pkg/util/flagext/flagsetskip.go b/pkg/util/flagext/flagsetskip.go
new file mode 100644
index 0000000000000..c10e05d6969a6
--- /dev/null
+++ b/pkg/util/flagext/flagsetskip.go
@@ -0,0 +1,55 @@
+package flagext
+
+import (
+ "flag"
+ "time"
+)
+
+type FlagSetWithSkip struct {
+ *flag.FlagSet
+ skip map[string]struct{}
+}
+
+func NewFlagSetWithSkip(f *flag.FlagSet, skip []string) *FlagSetWithSkip {
+ skipMap := make(map[string]struct{}, len(skip))
+ for _, s := range skip {
+ skipMap[s] = struct{}{}
+ }
+ return &FlagSetWithSkip{f, skipMap}
+}
+
+func (f *FlagSetWithSkip) ToFlagSet() *flag.FlagSet {
+ return f.FlagSet
+}
+
+func (f *FlagSetWithSkip) DurationVar(p *time.Duration, name string, value time.Duration, usage string) {
+ if _, ok := f.skip[name]; !ok {
+ f.FlagSet.DurationVar(p, name, value, usage)
+ }
+}
+
+func (f *FlagSetWithSkip) StringVar(p *string, name string, value string, usage string) {
+ if _, ok := f.skip[name]; !ok {
+ f.FlagSet.StringVar(p, name, value, usage)
+ }
+}
+
+func (f *FlagSetWithSkip) BoolVar(p *bool, name string, value bool, usage string) {
+ if _, ok := f.skip[name]; !ok {
+ f.FlagSet.BoolVar(p, name, value, usage)
+ }
+}
+
+func (f *FlagSetWithSkip) IntVar(p *int, name string, value int, usage string) {
+ if _, ok := f.skip[name]; !ok {
+ f.FlagSet.IntVar(p, name, value, usage)
+ }
+}
+
+func (f *FlagSetWithSkip) Var(value flag.Value, name string, usage string) {
+ if _, ok := f.skip[name]; !ok {
+ f.FlagSet.Var(value, name, usage)
+ }
+}
+
+// TODO: Add more methods as needed.
diff --git a/pkg/util/ring/ring_config.go b/pkg/util/ring/ring_config.go
index eb9945ffcb3ec..779c40f4dad5c 100644
--- a/pkg/util/ring/ring_config.go
+++ b/pkg/util/ring/ring_config.go
@@ -15,6 +15,7 @@ import (
"github.com/grafana/dskit/netutil"
"github.com/grafana/dskit/ring"
+ util_flagext "github.com/grafana/loki/pkg/util/flagext"
util_log "github.com/grafana/loki/pkg/util/log"
)
@@ -28,6 +29,8 @@ type RingConfig struct { // nolint:revive
HeartbeatTimeout time.Duration `yaml:"heartbeat_timeout"`
TokensFilePath string `yaml:"tokens_file_path"`
ZoneAwarenessEnabled bool `yaml:"zone_awareness_enabled"`
+ NumTokens int `yaml:"num_tokens"`
+ ReplicationFactor int `yaml:"replication_factor"`
// Instance details
InstanceID string `yaml:"instance_id" doc:"default=<hostname>"`
@@ -45,7 +48,9 @@ type RingConfig struct { // nolint:revive
// RegisterFlagsWithPrefix adds the flags required to config this to the given FlagSet
// storePrefix is used to set the path in the KVStore and should end with a /
-func (cfg *RingConfig) RegisterFlagsWithPrefix(flagsPrefix, storePrefix string, f *flag.FlagSet) {
+func (cfg *RingConfig) RegisterFlagsWithPrefix(flagsPrefix, storePrefix string, fs *flag.FlagSet, skip ...string) {
+ f := util_flagext.NewFlagSetWithSkip(fs, skip)
+
hostname, err := os.Hostname()
if err != nil {
level.Error(util_log.Logger).Log("msg", "failed to get hostname", "err", err)
@@ -53,11 +58,13 @@ func (cfg *RingConfig) RegisterFlagsWithPrefix(flagsPrefix, storePrefix string,
}
// Ring flags
- cfg.KVStore.RegisterFlagsWithPrefix(flagsPrefix+"ring.", storePrefix, f)
+ cfg.KVStore.RegisterFlagsWithPrefix(flagsPrefix+"ring.", storePrefix, f.ToFlagSet())
f.DurationVar(&cfg.HeartbeatPeriod, flagsPrefix+"ring.heartbeat-period", 15*time.Second, "Period at which to heartbeat to the ring. 0 = disabled.")
f.DurationVar(&cfg.HeartbeatTimeout, flagsPrefix+"ring.heartbeat-timeout", time.Minute, "The heartbeat timeout after which compactors are considered unhealthy within the ring. 0 = never (timeout disabled).")
f.StringVar(&cfg.TokensFilePath, flagsPrefix+"ring.tokens-file-path", "", "File path where tokens are stored. If empty, tokens are not stored at shutdown and restored at startup.")
f.BoolVar(&cfg.ZoneAwarenessEnabled, flagsPrefix+"ring.zone-awareness-enabled", false, "True to enable zone-awareness and replicate blocks across different availability zones.")
+ f.IntVar(&cfg.NumTokens, flagsPrefix+"ring.num-tokens", 128, "Number of tokens to own in the ring.")
+ f.IntVar(&cfg.ReplicationFactor, flagsPrefix+"ring.replication-factor", 3, "Factor for data replication.")
// Instance flags
cfg.InstanceInterfaceNames = netutil.PrivateNetworkInterfacesWithFallback([]string{"eth0", "en0"}, util_log.Logger)
@@ -123,18 +130,3 @@ func (cfg *RingConfig) ToRingConfig(replicationFactor int) ring.Config {
return rc
}
-
-// RingConfigWithRF is a wrapper for our internally used ring configuration plus the replication factor.
-type RingConfigWithRF struct { // nolint:revive
- // RingConfig configures the ring.
- RingConfig `yaml:",inline"`
-
- // ReplicationFactor defines how many replicas store a single data shard.
- ReplicationFactor int `yaml:"replication_factor"`
-}
-
-// RegisterFlagsWithPrefix registers all Bloom Gateway CLI flags.
-func (cfg *RingConfigWithRF) RegisterFlagsWithPrefix(prefix, storePrefix string, f *flag.FlagSet) {
- cfg.RingConfig.RegisterFlagsWithPrefix(prefix, storePrefix, f)
- f.IntVar(&cfg.ReplicationFactor, prefix+"replication-factor", 3, "Factor for data replication.")
-}
diff --git a/tools/doc-generator/writer.go b/tools/doc-generator/writer.go
index a13613c7fbffd..f182d0a8600c5 100644
--- a/tools/doc-generator/writer.go
+++ b/tools/doc-generator/writer.go
@@ -27,18 +27,20 @@ func (w *specWriter) writeConfigBlock(b *parse.ConfigBlock, indent int) {
return
}
+ var written bool
for i, entry := range b.Entries {
// Add a new line to separate from the previous entry
- if i > 0 {
+ if written && i > 0 {
w.out.WriteString("\n")
}
- w.writeConfigEntry(entry, indent)
+ written = w.writeConfigEntry(entry, indent)
}
}
// nolint:goconst
-func (w *specWriter) writeConfigEntry(e *parse.ConfigEntry, indent int) {
+func (w *specWriter) writeConfigEntry(e *parse.ConfigEntry, indent int) (written bool) {
+ written = true
if e.Kind == parse.KindBlock {
// If the block is a root block it will have its dedicated section in the doc,
// so here we've just to write down the reference without re-iterating on it.
@@ -64,6 +66,11 @@ func (w *specWriter) writeConfigEntry(e *parse.ConfigEntry, indent int) {
}
if e.Kind == parse.KindField || e.Kind == parse.KindSlice || e.Kind == parse.KindMap {
+ if strings.HasPrefix(e.Description(), "IGNORED:") {
+ // We skip documenting any field whose description starts with "IGNORED:".
+ return false
+ }
+
// Description
w.writeComment(e.Description(), indent, 0)
w.writeExample(e.FieldExample, indent)
@@ -87,6 +94,8 @@ func (w *specWriter) writeConfigEntry(e *parse.ConfigEntry, indent int) {
w.out.WriteString(pad(indent) + "[" + e.Name + ": <" + e.FieldType + ">" + defaultValue + "]\n")
}
}
+
+ return written
}
func (w *specWriter) writeFlag(name string, indent int) {
|
refactor
|
Add RF and Tokens to Loki ring Cfg and allow overwriting docs. (#12142)
|
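Editor's note: the diff above moves the per-component `Tokens`/`ReplicationFactor` wrappers into a shared `RingConfig` and introduces `FlagSetWithSkip`, so a component can suppress the generic `ring.num-tokens`/`ring.replication-factor` flags and re-register them with its own default. Below is a minimal, self-contained sketch of that flag-skip pattern; it is not code from the Loki tree, and `flagSetWithSkip`/`registerRingFlags` are illustrative names only.

```go
package main

import (
	"flag"
	"fmt"
)

// flagSetWithSkip mirrors the idea of FlagSetWithSkip from the diff above:
// flags whose names are in the skip set are silently not registered, so a
// caller can re-register them later with a different default and help text.
type flagSetWithSkip struct {
	*flag.FlagSet
	skip map[string]struct{}
}

func (f *flagSetWithSkip) IntVar(p *int, name string, value int, usage string) {
	if _, ok := f.skip[name]; !ok {
		f.FlagSet.IntVar(p, name, value, usage)
	}
}

// registerRingFlags stands in for a generic ring RegisterFlagsWithPrefix.
func registerRingFlags(f *flagSetWithSkip, prefix string, numTokens *int) {
	f.IntVar(numTokens, prefix+"ring.num-tokens", 128, "Number of tokens to own in the ring.")
}

func main() {
	fs := flag.NewFlagSet("example", flag.ExitOnError)
	skip := map[string]struct{}{"bloom-gateway.ring.num-tokens": {}}

	var numTokens int
	// The generic registration is skipped for num-tokens...
	registerRingFlags(&flagSetWithSkip{FlagSet: fs, skip: skip}, "bloom-gateway.", &numTokens)
	// ...so the component can claim the flag itself with its own default (16 here).
	fs.IntVar(&numTokens, "bloom-gateway.ring.num-tokens", 16, "Number of tokens to use in the ring.")

	_ = fs.Parse(nil)
	fmt.Println(numTokens) // prints 16
}
```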
9803eab94749ad7bcb5fb647f76ab5fc85eeb5ea
|
2020-05-15 01:04:29
|
Ed Welch
|
loki: Allow configuring query_store_max_look_back_period when running a filesystem store and boltdb-shipper (#2073)
| false
|
diff --git a/docs/configuration/README.md b/docs/configuration/README.md
index 0fec5b745742e..ce7047539bec2 100644
--- a/docs/configuration/README.md
+++ b/docs/configuration/README.md
@@ -317,6 +317,13 @@ The `ingester_config` block configures Ingesters.
# The maximum duration of a timeseries chunk in memory. If a timeseries runs for longer than this the current chunk will be flushed to the store and a new chunk created.
[max_chunk_age: <duration> | default = 1h]
+# How far in the past an ingester is allowed to query the store for data.
+# This is only useful when running multiple Loki binaries with a shared ring and a `filesystem` store which is NOT shared between the binaries
+# When using any "shared" object store like S3 or GCS, this value must always be left as 0
+# It is an error to configure this to a non-zero value when using any object store other than `filesystem`
+# Use a value of -1 to allow the ingester to query the store infinitely far back in time.
+[query_store_max_look_back_period: <duration> | default = 0]
+
```
### lifecycler_config
diff --git a/docs/operations/storage/README.md b/docs/operations/storage/README.md
index d8b36c57acef4..effa70216166f 100644
--- a/docs/operations/storage/README.md
+++ b/docs/operations/storage/README.md
@@ -26,6 +26,7 @@ The following are supported for the index:
* [Google Bigtable](https://cloud.google.com/bigtable)
* [Apache Cassandra](https://cassandra.apache.org)
* [BoltDB](https://github.com/boltdb/bolt) (doesn't work when clustering Loki)
+* [BoltDB Shipper](boltdb-shipper.md) EXPERIMENTAL index store which stores boltdb index files in the object store
The following are supported for the chunks:
@@ -34,7 +35,7 @@ The following are supported for the chunks:
* [Apache Cassandra](https://cassandra.apache.org)
* [Amazon S3](https://aws.amazon.com/s3)
* [Google Cloud Storage](https://cloud.google.com/storage/)
-* Filesystem (doesn't work when clustering Loki)
+* [Filesystem](filesystem.md) (please read more about the filesystem to understand the pros/cons before using with production data)
## Cloud Storage Permissions
diff --git a/docs/operations/storage/filesystem.md b/docs/operations/storage/filesystem.md
new file mode 100644
index 0000000000000..7a161ef256929
--- /dev/null
+++ b/docs/operations/storage/filesystem.md
@@ -0,0 +1,141 @@
+# Filesystem Object Store
+
+The filesystem object store is the easiest way to get started with Loki, but there are some pros/cons to this approach.
+
+Very simply, it stores all the objects (chunks) in the specified directory:
+
+```yaml
+storage_config:
+ filesystem:
+ directory: /tmp/loki/
+```
+
+A folder is created for every tenant; all the chunks for one tenant are stored in that directory.
+
+If Loki is run in single-tenant mode, all the chunks are put in a folder named `fake`, which is the synthesized tenant name used for single-tenant mode.
+
+See [multi-tenancy](../multi-tenancy.md) for more information.
+
+## Pros
+
+Very simple, no additional software required to use Loki when paired with the BoltDB index store.
+
+Great for low volume applications, proof of concepts, and just playing around with Loki.
+
+## Cons
+
+### Scaling
+
+At some point there is a limit to how many chunks can be stored in a single directory. For example, see [this issue](https://github.com/grafana/loki/issues/1502), which explains how a Loki user ran into a strange error with about **5.5 million chunk files** in their file store (and also a workaround for the problem).
+
+However, if you keep the number of streams low (remember Loki writes a chunk per stream), configs like `chunk_target_size` (around 1MB), `max_chunk_age` (increase beyond 1h), and `chunk_idle_period` (increase to match `max_chunk_age`) can be tweaked to reduce the number of chunks flushed (although they will trade for more memory consumption).
+
+It's still very possible to store terabytes of log data with the filestore, but realize there are limitations to how many files a filesystem will want to store in a single directory.
+
+### Durability
+
+The durability of the objects is at the mercy of the filesystem itself where other object stores like S3/GCS do a lot behind the scenes to offer extremely high durability to your data.
+
+### High Availability
+
+Running Loki clustered is not possible with the filesystem store unless the filesystem is shared in some fashion (NFS for example). However, using shared filesystems is likely going to be a bad experience with Loki, just as it is for almost every other application.
+
+## New AND VERY EXPERIMENTAL in 1.5.0: Horizontal scaling of the filesystem store
+
+**WARNING** as the title suggests, this is very new and potentially buggy, and it is also very likely configs around this feature will change over time.
+
+With that warning out of the way, the addition of the [boltdb-shipper](boltdb-shipper.md) index store makes it possible to overcome many of the limitations listed above, specifically running Loki with the filesystem store on separate machines while still operating as a cluster that supports replication and write distribution via the hash ring.
+
+As mentioned in the title, this is very alpha at this point but we would love for people to try this and help us flush out bugs.
+
+Here is an example config to run with Loki:
+
+Use this config on multiple computers (or containers); do not run more than one instance on the same computer, as Loki uses the hostname as the ID in the ring.
+
+Do not use a shared filesystem such as NFS for this; each machine should have its own filesystem.
+
+```yaml
+auth_enabled: false # single tenant mode
+
+server:
+ http_listen_port: 3100
+
+ingester:
+ max_transfer_retries: 0 # Disable blocks transfers on ingesters shutdown or rollout.
+ chunk_idle_period: 2h # Let chunks sit idle for at least 2h before flushing, this helps to reduce total chunks in store
+ max_chunk_age: 2h # Let chunks get at least 2h old before flushing due to age, this helps to reduce total chunks in store
+ chunk_target_size: 1048576 # Target chunks of 1MB, this helps to reduce total chunks in store
+ chunk_retain_period: 30s
+
+ query_store_max_look_back_period: -1 # This will allow the ingesters to query the store for all data
+ lifecycler:
+ heartbeat_period: 5s
+ interface_names:
+ - eth0
+ join_after: 30s
+ num_tokens: 512
+ ring:
+ heartbeat_timeout: 1m
+ kvstore:
+ consul:
+ consistent_reads: true
+ host: localhost:8500
+ http_client_timeout: 20s
+ store: consul
+ replication_factor: 1 # This can be increased and probably should if you are running multiple machines!
+
+schema_config:
+ configs:
+ - from: 2018-04-15
+ store: boltdb-shipper
+ object_store: filesystem
+ schema: v11
+ index:
+ prefix: index_
+ period: 168h
+
+storage_config:
+ boltdb_shipper:
+ shared_store: filesystem
+ active_index_directory: /tmp/loki/index
+ cache_location: /tmp/loki/boltdb-cache
+ filesystem:
+ directory: /tmp/loki/chunks
+
+limits_config:
+ enforce_metric_name: false
+ reject_old_samples: true
+ reject_old_samples_max_age: 168h
+
+chunk_store_config:
+ max_look_back_period: 0s # No limit how far we can look back in the store
+
+table_manager:
+ retention_deletes_enabled: false
+ retention_period: 0s # No deletions, infinite retention
+```
+
+It does require Consul to be running for the ring (any of the ring stores will work: consul, etcd, or memberlist; Consul is used in this example).
+
+It is also required that Consul be available from each machine. This example only specifies `host: localhost:8500`; you will likely need to change this to the correct hostname/IP and port of your Consul server.
+
+**The config needs to be the same on every Loki instance!**
+
+The important piece of this config is `query_store_max_look_back_period: -1`: this tells Loki to allow the ingesters to look in the store for all the data.
+
+Traffic can be sent to any of the Loki servers, and it can be round-robin load balanced if desired.
+
+Each Loki instance will use Consul to properly route both read and write data to the correct Loki instance.
+
+Scaling up is as easy as adding more Loki instances and letting them talk to the same ring.
+
+Scaling down is harder but possible. You would need to shut down a Loki server and then take everything in:
+
+```yaml
+ filesystem:
+ directory: /tmp/loki/chunks
+```
+
+And copy it to the same directory on another Loki server. There is currently no way to split the chunks between servers; you must move them all. We expect to provide more options here in the future.
+
+
diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go
index 41e5fa8cdf02d..d97f7c9e4a659 100644
--- a/pkg/ingester/ingester.go
+++ b/pkg/ingester/ingester.go
@@ -10,6 +10,7 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
+ "github.com/prometheus/common/model"
"github.com/weaveworks/common/user"
"google.golang.org/grpc/health/grpc_health_v1"
@@ -25,6 +26,7 @@ import (
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/logql"
"github.com/grafana/loki/pkg/logql/stats"
+ listutil "github.com/grafana/loki/pkg/util"
"github.com/grafana/loki/pkg/util/validation"
)
@@ -64,7 +66,7 @@ type Config struct {
ingesterClientFactory func(cfg client.Config, addr string) (client.HealthAndIngesterClient, error)
QueryStore bool `yaml:"-"`
- QueryStoreMaxLookBackPeriod time.Duration `yaml:"-"`
+ QueryStoreMaxLookBackPeriod time.Duration `yaml:"query_store_max_look_back_period"`
}
// RegisterFlags registers the flags.
@@ -84,6 +86,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
f.Float64Var(&cfg.SyncMinUtilization, "ingester.sync-min-utilization", 0, "Minimum utilization of chunk when doing synchronization.")
f.IntVar(&cfg.MaxReturnedErrors, "ingester.max-ignored-stream-errors", 10, "Maximum number of ignored stream errors to return. 0 to return all errors.")
f.DurationVar(&cfg.MaxChunkAge, "ingester.max-chunk-age", time.Hour, "Maximum chunk age before flushing.")
+ f.DurationVar(&cfg.QueryStoreMaxLookBackPeriod, "ingester.query-store-max-look-back-period", 0, "How far back should an ingester be allowed to query the store for data, for use only with boltdb-shipper index and filesystem object store. -1 for infinite.")
}
// Ingester builds chunks for incoming log streams.
@@ -311,7 +314,50 @@ func (i *Ingester) Label(ctx context.Context, req *logproto.LabelRequest) (*logp
}
instance := i.getOrCreateInstance(instanceID)
- return instance.Label(ctx, req)
+ resp, err := instance.Label(ctx, req)
+ if err != nil {
+ return nil, err
+ }
+
+ // Only continue if we should query the store for labels
+ if !i.cfg.QueryStore {
+ return resp, nil
+ }
+
+ // Only continue if the store is a chunk.Store
+ var cs chunk.Store
+ var ok bool
+ if cs, ok = i.store.(chunk.Store); !ok {
+ return resp, nil
+ }
+
+ userID, err := user.ExtractOrgID(ctx)
+ if err != nil {
+ return nil, err
+ }
+ // Adjust the start time based on QueryStoreMaxLookBackPeriod.
+ start := adjustQueryStartTime(i.cfg, *req.Start)
+ if start.After(*req.End) {
+ // The request is older than we are allowed to query the store, just return what we have.
+ return resp, nil
+ }
+ from, through := model.TimeFromUnixNano(start.UnixNano()), model.TimeFromUnixNano(req.End.UnixNano())
+ var storeValues []string
+ if req.Values {
+ storeValues, err = cs.LabelValuesForMetricName(ctx, userID, from, through, "logs", req.Name)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ storeValues, err = cs.LabelNamesForMetricName(ctx, userID, from, through, "logs")
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return &logproto.LabelResponse{
+ Values: listutil.MergeStringLists(resp.Values, storeValues),
+ }, nil
}
// Series queries the ingester for log stream identifiers (label sets) matching a set of matchers
@@ -414,12 +460,7 @@ func buildStoreRequest(cfg Config, req *logproto.QueryRequest) *logproto.QueryRe
}
start := req.Start
end := req.End
- if cfg.QueryStoreMaxLookBackPeriod != 0 {
- oldestStartTime := time.Now().Add(-cfg.QueryStoreMaxLookBackPeriod)
- if oldestStartTime.After(req.Start) {
- start = oldestStartTime
- }
- }
+ start = adjustQueryStartTime(cfg, start)
if start.After(end) {
return nil
@@ -431,3 +472,13 @@ func buildStoreRequest(cfg Config, req *logproto.QueryRequest) *logproto.QueryRe
return &newRequest
}
+
+func adjustQueryStartTime(cfg Config, start time.Time) time.Time {
+ if cfg.QueryStoreMaxLookBackPeriod > 0 {
+ oldestStartTime := time.Now().Add(-cfg.QueryStoreMaxLookBackPeriod)
+ if oldestStartTime.After(start) {
+ start = oldestStartTime
+ }
+ }
+ return start
+}
diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go
index dc85e06995b86..07f22d4cd474b 100644
--- a/pkg/loki/modules.go
+++ b/pkg/loki/modules.go
@@ -1,6 +1,7 @@
package loki
import (
+ "errors"
"fmt"
"net/http"
"os"
@@ -163,7 +164,9 @@ func (t *Loki) initQuerier() (services.Service, error) {
if err != nil {
return nil, err
}
-
+ if t.cfg.Ingester.QueryStoreMaxLookBackPeriod != 0 {
+ t.cfg.Querier.IngesterQueryStoreMaxLookback = t.cfg.Ingester.QueryStoreMaxLookBackPeriod
+ }
t.querier, err = querier.New(t.cfg.Querier, t.cfg.IngesterClient, t.ring, t.store, t.overrides)
if err != nil {
return nil, err
@@ -198,10 +201,14 @@ func (t *Loki) initIngester() (_ services.Service, err error) {
t.cfg.Ingester.LifecyclerConfig.ListenPort = t.cfg.Server.GRPCListenPort
// We want ingester to also query the store when using boltdb-shipper
- if activeIndexType(t.cfg.SchemaConfig) == local.BoltDBShipperType {
+ pc := activePeriodConfig(t.cfg.SchemaConfig)
+ if pc.IndexType == local.BoltDBShipperType {
t.cfg.Ingester.QueryStore = true
- // When using shipper, limit max look back for query to MaxChunkAge + upload interval by shipper + 15 mins to query only data whose index is not pushed yet
- t.cfg.Ingester.QueryStoreMaxLookBackPeriod = t.cfg.Ingester.MaxChunkAge + local.ShipperFileUploadInterval + (15 * time.Minute)
+ mlb, err := calculateMaxLookBack(pc, t.cfg.Ingester.QueryStoreMaxLookBackPeriod, t.cfg.Ingester.MaxChunkAge)
+ if err != nil {
+ return nil, err
+ }
+ t.cfg.Ingester.QueryStoreMaxLookBackPeriod = mlb
}
t.ingester, err = ingester.New(t.cfg.Ingester, t.cfg.IngesterClient, t.store, t.overrides)
@@ -256,7 +263,7 @@ func (t *Loki) initTableManager() (services.Service, error) {
}
func (t *Loki) initStore() (_ services.Service, err error) {
- if activeIndexType(t.cfg.SchemaConfig) == local.BoltDBShipperType {
+ if activePeriodConfig(t.cfg.SchemaConfig).IndexType == local.BoltDBShipperType {
t.cfg.StorageConfig.BoltDBShipperConfig.IngesterName = t.cfg.Ingester.LifecyclerConfig.ID
switch t.cfg.Target {
case Ingester:
@@ -483,9 +490,9 @@ var modules = map[moduleName]module{
},
}
-// activeIndexType type returns index type which would be applicable to logs that would be pushed starting now
+// activePeriodConfig returns the PeriodConfig which would be applicable to logs that would be pushed starting now
// Note: Another periodic config can be applicable in future which can change index type
-func activeIndexType(cfg chunk.SchemaConfig) string {
+func activePeriodConfig(cfg chunk.SchemaConfig) chunk.PeriodConfig {
now := model.Now()
i := sort.Search(len(cfg.Configs), func(i int) bool {
return cfg.Configs[i].From.Time > now
@@ -493,5 +500,25 @@ func activeIndexType(cfg chunk.SchemaConfig) string {
if i > 0 {
i--
}
- return cfg.Configs[i].IndexType
+ return cfg.Configs[i]
+}
+
+func calculateMaxLookBack(pc chunk.PeriodConfig, maxLookBackConfig, maxChunkAge time.Duration) (time.Duration, error) {
+ if pc.ObjectType != local.FilesystemObjectStoreType && maxLookBackConfig.Nanoseconds() != 0 {
+ return 0, errors.New("it is an error to specify a non zero `query_store_max_look_back_period` value when using any object store other than `filesystem`")
+ }
+ // When using shipper, limit max look back for query to MaxChunkAge + upload interval by shipper + 15 mins to query only data whose index is not pushed yet
+ defaultMaxLookBack := maxChunkAge + local.ShipperFileUploadInterval + (15 * time.Minute)
+
+ if maxLookBackConfig == 0 {
+		// If the QueryStoreMaxLookBackPeriod is still its default value of 0, set it to the default calculated value.
+ return defaultMaxLookBack, nil
+ } else if maxLookBackConfig > 0 && maxLookBackConfig < defaultMaxLookBack {
+ // If the QueryStoreMaxLookBackPeriod is > 0 (-1 is allowed for infinite), make sure it's at least greater than the default or throw an error
+ return 0, fmt.Errorf("the configured query_store_max_look_back_period of '%v' is less than the calculated default of '%v' "+
+			"which is calculated based on the max_chunk_age + 15 minute boltdb-shipper interval + 15 min additional buffer. Increase this value to be "+
+ "greater than the default or remove it from the configuration to use the default", maxLookBackConfig, defaultMaxLookBack)
+
+ }
+ return maxLookBackConfig, nil
}
diff --git a/pkg/loki/modules_test.go b/pkg/loki/modules_test.go
index c306247388200..a997a307cb923 100644
--- a/pkg/loki/modules_test.go
+++ b/pkg/loki/modules_test.go
@@ -48,20 +48,95 @@ func TestActiveIndexType(t *testing.T) {
IndexType: "first",
}}
- assert.Equal(t, "first", activeIndexType(cfg))
+ assert.Equal(t, cfg.Configs[0], activePeriodConfig(cfg))
// add a newer PeriodConfig in the past which should be considered
cfg.Configs = append(cfg.Configs, chunk.PeriodConfig{
From: chunk.DayTime{Time: model.Now().Add(-12 * time.Hour)},
IndexType: "second",
})
- assert.Equal(t, "second", activeIndexType(cfg))
+ assert.Equal(t, cfg.Configs[1], activePeriodConfig(cfg))
// add a newer PeriodConfig in the future which should not be considered
cfg.Configs = append(cfg.Configs, chunk.PeriodConfig{
From: chunk.DayTime{Time: model.Now().Add(time.Hour)},
IndexType: "third",
})
- assert.Equal(t, "second", activeIndexType(cfg))
+ assert.Equal(t, cfg.Configs[1], activePeriodConfig(cfg))
}
+
+func Test_calculateMaxLookBack(t *testing.T) {
+ type args struct {
+ pc chunk.PeriodConfig
+ maxLookBackConfig time.Duration
+ maxChunkAge time.Duration
+ }
+ tests := []struct {
+ name string
+ args args
+ want time.Duration
+ wantErr bool
+ }{
+ {
+ name: "default",
+ args: args{
+ pc: chunk.PeriodConfig{
+ ObjectType: "filesystem",
+ },
+ maxLookBackConfig: 0,
+ maxChunkAge: 1 * time.Hour,
+ },
+ want: 90 * time.Minute,
+ wantErr: false,
+ },
+ {
+ name: "infinite",
+ args: args{
+ pc: chunk.PeriodConfig{
+ ObjectType: "filesystem",
+ },
+ maxLookBackConfig: -1,
+ maxChunkAge: 1 * time.Hour,
+ },
+ want: -1,
+ wantErr: false,
+ },
+ {
+ name: "invalid store type",
+ args: args{
+ pc: chunk.PeriodConfig{
+ ObjectType: "gcs",
+ },
+ maxLookBackConfig: -1,
+ maxChunkAge: 1 * time.Hour,
+ },
+ want: 0,
+ wantErr: true,
+ },
+ {
+ name: "less than default",
+ args: args{
+ pc: chunk.PeriodConfig{
+ ObjectType: "filesystem",
+ },
+ maxLookBackConfig: 1 * time.Hour,
+ maxChunkAge: 1 * time.Hour,
+ },
+ want: 0,
+ wantErr: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := calculateMaxLookBack(tt.args.pc, tt.args.maxLookBackConfig, tt.args.maxChunkAge)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("calculateMaxLookBack() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if got != tt.want {
+ t.Errorf("calculateMaxLookBack() got = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go
index 5c445b5c9431b..f90be200d9a13 100644
--- a/pkg/querier/querier.go
+++ b/pkg/querier/querier.go
@@ -26,6 +26,7 @@ import (
"github.com/grafana/loki/pkg/logql"
"github.com/grafana/loki/pkg/logql/stats"
"github.com/grafana/loki/pkg/storage"
+ listutil "github.com/grafana/loki/pkg/util"
"github.com/grafana/loki/pkg/util/validation"
)
@@ -38,12 +39,13 @@ const (
// Config for a querier.
type Config struct {
- QueryTimeout time.Duration `yaml:"query_timeout"`
- TailMaxDuration time.Duration `yaml:"tail_max_duration"`
- ExtraQueryDelay time.Duration `yaml:"extra_query_delay,omitempty"`
- IngesterMaxQueryLookback time.Duration `yaml:"query_ingesters_within,omitempty"`
- Engine logql.EngineOpts `yaml:"engine,omitempty"`
- MaxConcurrent int `yaml:"max_concurrent"`
+ QueryTimeout time.Duration `yaml:"query_timeout"`
+ TailMaxDuration time.Duration `yaml:"tail_max_duration"`
+ ExtraQueryDelay time.Duration `yaml:"extra_query_delay,omitempty"`
+ QueryIngestersWithin time.Duration `yaml:"query_ingesters_within,omitempty"`
+ IngesterQueryStoreMaxLookback time.Duration `yaml:"-"`
+ Engine logql.EngineOpts `yaml:"engine,omitempty"`
+ MaxConcurrent int `yaml:"max_concurrent"`
}
// RegisterFlags register flags.
@@ -51,7 +53,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
f.DurationVar(&cfg.TailMaxDuration, "querier.tail-max-duration", 1*time.Hour, "Limit the duration for which live tailing request would be served")
f.DurationVar(&cfg.QueryTimeout, "querier.query_timeout", 1*time.Minute, "Timeout when querying backends (ingesters or storage) during the execution of a query request")
f.DurationVar(&cfg.ExtraQueryDelay, "distributor.extra-query-delay", 0, "Time to wait before sending more than the minimum successful query requests.")
- f.DurationVar(&cfg.IngesterMaxQueryLookback, "querier.query-ingesters-within", 0, "Maximum lookback beyond which queries are not sent to ingester. 0 means all queries are sent to ingester.")
+ f.DurationVar(&cfg.QueryIngestersWithin, "querier.query-ingesters-within", 0, "Maximum lookback beyond which queries are not sent to ingester. 0 means all queries are sent to ingester.")
f.IntVar(&cfg.MaxConcurrent, "querier.max-concurrent", 20, "The maximum number of concurrent queries.")
}
@@ -144,14 +146,42 @@ func (q *Querier) Select(ctx context.Context, params logql.SelectParams) (iter.E
return nil, err
}
- chunkStoreIter, err := q.store.LazyQuery(ctx, params)
- if err != nil {
- return nil, err
+ var chunkStoreIter iter.EntryIterator
+
+ if q.cfg.IngesterQueryStoreMaxLookback == 0 {
+ // IngesterQueryStoreMaxLookback is zero, the default state, query the store normally
+ chunkStoreIter, err = q.store.LazyQuery(ctx, params)
+ if err != nil {
+ return nil, err
+ }
+ } else if q.cfg.IngesterQueryStoreMaxLookback > 0 {
+ // IngesterQueryStoreMaxLookback is greater than zero
+ // Adjust the store query range to only query for data ingesters are not already querying for
+ adjustedEnd := params.End.Add(-q.cfg.IngesterQueryStoreMaxLookback)
+ if params.Start.After(adjustedEnd) {
+ chunkStoreIter = iter.NoopIterator
+ } else {
+ // Make a copy of the request before modifying
+ // because the initial request is used below to query ingesters
+ queryRequestCopy := *params.QueryRequest
+ newParams := logql.SelectParams{
+ QueryRequest: &queryRequestCopy,
+ }
+ newParams.End = adjustedEnd
+ chunkStoreIter, err = q.store.LazyQuery(ctx, newParams)
+ if err != nil {
+ return nil, err
+ }
+ }
+ } else {
+ // IngesterQueryStoreMaxLookback is less than zero
+ // ingesters will be querying all the way back in time so there is no reason to query the store
+ chunkStoreIter = iter.NoopIterator
}
- // skip ingester queries only when IngesterMaxQueryLookback is enabled (not the zero value) and
+ // skip ingester queries only when QueryIngestersWithin is enabled (not the zero value) and
// the end of the query is earlier than the lookback
- if lookback := time.Now().Add(-q.cfg.IngesterMaxQueryLookback); q.cfg.IngesterMaxQueryLookback != 0 && params.GetEnd().Before(lookback) {
+ if lookback := time.Now().Add(-q.cfg.QueryIngestersWithin); q.cfg.QueryIngestersWithin != 0 && params.GetEnd().Before(lookback) {
return chunkStoreIter, nil
}
@@ -217,7 +247,7 @@ func (q *Querier) Label(ctx context.Context, req *logproto.LabelRequest) (*logpr
results = append(results, storeValues)
return &logproto.LabelResponse{
- Values: mergeLists(results...),
+ Values: listutil.MergeStringLists(results...),
}, nil
}
@@ -226,45 +256,6 @@ func (*Querier) Check(_ context.Context, _ *grpc_health_v1.HealthCheckRequest) (
return &grpc_health_v1.HealthCheckResponse{Status: grpc_health_v1.HealthCheckResponse_SERVING}, nil
}
-func mergeLists(ss ...[]string) []string {
- switch len(ss) {
- case 0:
- return nil
- case 1:
- return ss[0]
- case 2:
- return mergePair(ss[0], ss[1])
- default:
- n := len(ss) / 2
- return mergePair(mergeLists(ss[:n]...), mergeLists(ss[n:]...))
- }
-}
-
-func mergePair(s1, s2 []string) []string {
- i, j := 0, 0
- result := make([]string, 0, len(s1)+len(s2))
- for i < len(s1) && j < len(s2) {
- if s1[i] < s2[j] {
- result = append(result, s1[i])
- i++
- } else if s1[i] > s2[j] {
- result = append(result, s2[j])
- j++
- } else {
- result = append(result, s1[i])
- i++
- j++
- }
- }
- for ; i < len(s1); i++ {
- result = append(result, s1[i])
- }
- for ; j < len(s2); j++ {
- result = append(result, s2[j])
- }
- return result
-}
-
// Tail keeps getting matching logs from all ingesters for given query
func (q *Querier) Tail(ctx context.Context, req *logproto.TailRequest) (*Tailer, error) {
err := q.checkTailRequestLimit(ctx)
diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go
index b6746adf80da5..a38463ce6c76d 100644
--- a/pkg/querier/querier_test.go
+++ b/pkg/querier/querier_test.go
@@ -496,7 +496,7 @@ func TestQuerier_IngesterMaxQueryLookback(t *testing.T) {
store.On("LazyQuery", mock.Anything, mock.Anything).Return(mockStreamIterator(0, 1), nil)
conf := mockQuerierConfig()
- conf.IngesterMaxQueryLookback = tc.lookback
+ conf.QueryIngestersWithin = tc.lookback
q, err := newQuerier(
conf,
mockIngesterClientConfig(),
diff --git a/pkg/storage/stores/local/shipper.go b/pkg/storage/stores/local/shipper.go
index bd60ea2be1ba4..1833f81af1617 100644
--- a/pkg/storage/stores/local/shipper.go
+++ b/pkg/storage/stores/local/shipper.go
@@ -34,6 +34,9 @@ const (
// BoltDBShipperType holds the index type for using boltdb with shipper which keeps flushing them to a shared storage
BoltDBShipperType = "boltdb-shipper"
+ // FilesystemObjectStoreType holds the periodic config type for the filesystem store
+ FilesystemObjectStoreType = "filesystem"
+
cacheCleanupInterval = 24 * time.Hour
storageKeyPrefix = "index/"
)
@@ -128,7 +131,7 @@ func NewShipper(cfg ShipperConfig, storageClient chunk.ObjectClient, boltDBGette
// avoid uploading same files again with different name. If the file does not exist we would create one with uploader name set to
// ingester name and startup timestamp so that we randomise the name and do not override files from other ingesters.
func (s *Shipper) getUploaderName() (string, error) {
- uploader := fmt.Sprintf("%s-%d", s.cfg.IngesterName, time.Now().Unix())
+ uploader := fmt.Sprintf("%s-%d", s.cfg.IngesterName, time.Now().UnixNano())
uploaderFilePath := path.Join(s.cfg.ActiveIndexDirectory, "uploader", "name")
if err := chunk_util.EnsureDirectory(path.Dir(uploaderFilePath)); err != nil {
diff --git a/pkg/util/list.go b/pkg/util/list.go
new file mode 100644
index 0000000000000..0fba89940eed1
--- /dev/null
+++ b/pkg/util/list.go
@@ -0,0 +1,40 @@
+package util
+
+func MergeStringLists(ss ...[]string) []string {
+ switch len(ss) {
+ case 0:
+ return nil
+ case 1:
+ return ss[0]
+ case 2:
+ return MergeStringPair(ss[0], ss[1])
+ default:
+ n := len(ss) / 2
+ return MergeStringPair(MergeStringLists(ss[:n]...), MergeStringLists(ss[n:]...))
+ }
+}
+
+func MergeStringPair(s1, s2 []string) []string {
+ i, j := 0, 0
+ result := make([]string, 0, len(s1)+len(s2))
+ for i < len(s1) && j < len(s2) {
+ if s1[i] < s2[j] {
+ result = append(result, s1[i])
+ i++
+ } else if s1[i] > s2[j] {
+ result = append(result, s2[j])
+ j++
+ } else {
+ result = append(result, s1[i])
+ i++
+ j++
+ }
+ }
+ for ; i < len(s1); i++ {
+ result = append(result, s1[i])
+ }
+ for ; j < len(s2); j++ {
+ result = append(result, s2[j])
+ }
+ return result
+}
|
loki
|
Allow configuring query_store_max_look_back_period when running a filesystem store and boltdb-shipper (#2073)
|
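Editor's note: the commit above lets the ingester query the store within a bounded window via `query_store_max_look_back_period`, clamping the query start in `adjustQueryStartTime` and validating the setting in `calculateMaxLookBack`. The sketch below shows only the clamping rule, under the assumption that a positive look-back moves the start forward while 0 (default) and -1 (infinite) leave it untouched; `adjustStart` is an illustrative name, not the function from the tree.

```go
package main

import (
	"fmt"
	"time"
)

// adjustStart sketches the look-back clamping from the diff above: a positive
// look-back period limits how far back in time the store is queried, while
// 0 (default) and -1 (infinite) leave the requested start time unchanged.
func adjustStart(lookBack time.Duration, start time.Time) time.Time {
	if lookBack > 0 {
		oldest := time.Now().Add(-lookBack)
		if oldest.After(start) {
			return oldest
		}
	}
	return start
}

func main() {
	start := time.Now().Add(-24 * time.Hour)
	fmt.Println(adjustStart(90*time.Minute, start)) // clamped to roughly 90 minutes ago
	fmt.Println(adjustStart(-1, start))             // unchanged: infinite look-back
	fmt.Println(adjustStart(0, start))              // unchanged: default behaviour
}
```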
95c49a36775c91daa3f11d30f3850bc0db8f1d2c
|
2024-07-19 18:55:21
|
George Robinson
|
chore: replace pkg/errors with errors (#13581)
| false
|
diff --git a/pkg/ingester-rf1/ingester.go b/pkg/ingester-rf1/ingester.go
index 9c3f4da561ce8..546a42a56d93d 100644
--- a/pkg/ingester-rf1/ingester.go
+++ b/pkg/ingester-rf1/ingester.go
@@ -3,6 +3,7 @@ package ingesterrf1
import (
"bytes"
"context"
+ "errors"
"flag"
"fmt"
"io"
@@ -34,7 +35,6 @@ import (
"github.com/grafana/dskit/ring"
"github.com/grafana/dskit/services"
"github.com/grafana/dskit/tenant"
- "github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"google.golang.org/grpc/health/grpc_health_v1"
@@ -386,7 +386,7 @@ func (i *Ingester) starting(ctx context.Context) error {
shutdownMarkerPath := path.Join(i.cfg.ShutdownMarkerPath, shutdownMarkerFilename)
shutdownMarker, err := shutdownMarkerExists(shutdownMarkerPath)
if err != nil {
- return errors.Wrap(err, "failed to check ingester shutdown marker")
+ return fmt.Errorf("failed to check ingester shutdown marker: %w", err)
}
if shutdownMarker {
diff --git a/pkg/ingester-rf1/metastore/metastore.go b/pkg/ingester-rf1/metastore/metastore.go
index dfec6a567de78..6d999b8edd518 100644
--- a/pkg/ingester-rf1/metastore/metastore.go
+++ b/pkg/ingester-rf1/metastore/metastore.go
@@ -2,6 +2,7 @@ package metastore
import (
"context"
+ "errors"
"flag"
"fmt"
"net"
@@ -17,7 +18,6 @@ import (
"github.com/grafana/dskit/services"
"github.com/hashicorp/raft"
raftwal "github.com/hashicorp/raft-wal"
- "github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/grafana/loki/v3/pkg/ingester-rf1/metastore/health"
diff --git a/pkg/ingester-rf1/stream.go b/pkg/ingester-rf1/stream.go
index f0c556e341088..b0b47a2c8ff93 100644
--- a/pkg/ingester-rf1/stream.go
+++ b/pkg/ingester-rf1/stream.go
@@ -3,13 +3,13 @@ package ingesterrf1
import (
"bytes"
"context"
+ "errors"
"fmt"
"net/http"
"time"
"github.com/grafana/dskit/httpgrpc"
"github.com/opentracing/opentracing-go"
- "github.com/pkg/errors"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
|
chore
|
replace pkg/errors with errors (#13581)
|
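Editor's note: the commit above replaces `github.com/pkg/errors` with the standard library. The sketch below, not taken from the Loki tree, shows the equivalent wrapping style: `fmt.Errorf` with `%w` keeps the cause inspectable via `errors.Is`/`errors.As`, which is what `errors.Wrap` previously provided. The `readMarker` helper and the path are hypothetical.

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

// readMarker wraps the underlying error with %w, mirroring the style the
// diff above migrates to (fmt.Errorf instead of pkg/errors.Wrap).
func readMarker(path string) error {
	if _, err := os.Stat(path); err != nil {
		return fmt.Errorf("failed to check ingester shutdown marker: %w", err)
	}
	return nil
}

func main() {
	err := readMarker("/definitely/not/a/real/shutdown-marker")
	fmt.Println(err)
	fmt.Println(errors.Is(err, fs.ErrNotExist)) // true: the wrapped cause is preserved
}
```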
2659e3597c4380c5a67c053f32159ef9bc9df905
|
2024-03-13 14:11:12
|
Sandeep Sukhani
|
fix: fix middlewares being applied for series request when querier is running as a standalone service (#12194)
| false
|
diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go
index a3d9937734c83..e5ca22dd65344 100644
--- a/pkg/loki/modules.go
+++ b/pkg/loki/modules.go
@@ -415,6 +415,7 @@ func (t *Loki) initQuerier() (services.Service, error) {
indexStatsHTTPMiddleware := querier.WrapQuerySpanAndTimeout("query.IndexStats", t.Overrides)
volumeHTTPMiddleware := querier.WrapQuerySpanAndTimeout("query.VolumeInstant", t.Overrides)
volumeRangeHTTPMiddleware := querier.WrapQuerySpanAndTimeout("query.VolumeRange", t.Overrides)
+ seriesHTTPMiddleware := querier.WrapQuerySpanAndTimeout("query.Series", t.Overrides)
if t.supportIndexDeleteRequest() && t.Cfg.CompactorConfig.RetentionEnabled {
toMerge = append(
@@ -465,6 +466,7 @@ func (t *Loki) initQuerier() (services.Service, error) {
indexStatsHTTPMiddleware = middleware.Merge(httpMiddleware, indexStatsHTTPMiddleware)
volumeHTTPMiddleware = middleware.Merge(httpMiddleware, volumeHTTPMiddleware)
volumeRangeHTTPMiddleware = middleware.Merge(httpMiddleware, volumeRangeHTTPMiddleware)
+ seriesHTTPMiddleware = middleware.Merge(httpMiddleware, seriesHTTPMiddleware)
// First, register the internal querier handler with the external HTTP server
router := t.Server.HTTP
@@ -490,7 +492,7 @@ func (t *Loki) initQuerier() (services.Service, error) {
router.Path("/loki/api/v1/labels").Methods("GET", "POST").Handler(labelsHTTPMiddleware.Wrap(httpHandler))
router.Path("/loki/api/v1/label/{name}/values").Methods("GET", "POST").Handler(labelsHTTPMiddleware.Wrap(httpHandler))
- router.Path("/loki/api/v1/series").Methods("GET", "POST").Handler(querier.WrapQuerySpanAndTimeout("query.Series", t.Overrides).Wrap(httpHandler))
+ router.Path("/loki/api/v1/series").Methods("GET", "POST").Handler(seriesHTTPMiddleware.Wrap(httpHandler))
router.Path("/loki/api/v1/index/stats").Methods("GET", "POST").Handler(indexStatsHTTPMiddleware.Wrap(httpHandler))
router.Path("/loki/api/v1/index/volume").Methods("GET", "POST").Handler(volumeHTTPMiddleware.Wrap(httpHandler))
router.Path("/loki/api/v1/index/volume_range").Methods("GET", "POST").Handler(volumeRangeHTTPMiddleware.Wrap(httpHandler))
@@ -504,7 +506,7 @@ func (t *Loki) initQuerier() (services.Service, error) {
router.Path("/api/prom/label").Methods("GET", "POST").Handler(labelsHTTPMiddleware.Wrap(httpHandler))
router.Path("/api/prom/label/{name}/values").Methods("GET", "POST").Handler(labelsHTTPMiddleware.Wrap(httpHandler))
- router.Path("/api/prom/series").Methods("GET", "POST").Handler(querier.WrapQuerySpanAndTimeout("query.Series", t.Overrides).Wrap(httpHandler))
+ router.Path("/api/prom/series").Methods("GET", "POST").Handler(seriesHTTPMiddleware.Wrap(httpHandler))
}
// We always want to register tail routes externally, tail requests are different from normal queries, they
|
fix
|
fix middlewares being applied for series request when querier is running as a standalone service (#12194)
|
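Editor's note: the fix above builds a dedicated `seriesHTTPMiddleware` and merges the common `httpMiddleware` into it, the same way the other query middlewares are handled, so the series endpoints get the shared middleware chain even when the querier runs standalone. The sketch below illustrates the general merge-and-wrap idea with a simplified stand-in for dskit's `middleware.Merge`; the `middleware` type, `merge`, and `logging` are illustrative, not the actual dskit API.

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// middleware is the usual func(http.Handler) http.Handler shape.
type middleware func(http.Handler) http.Handler

// merge applies middlewares so the first argument ends up outermost,
// i.e. merge(a, b)(h) behaves like a(b(h)).
func merge(ms ...middleware) middleware {
	return func(next http.Handler) http.Handler {
		for i := len(ms) - 1; i >= 0; i-- {
			next = ms[i](next)
		}
		return next
	}
}

// logging is a toy middleware that records the order of wrapping.
func logging(name string) middleware {
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			fmt.Println("enter", name)
			next.ServeHTTP(w, r)
		})
	}
}

func main() {
	handler := merge(logging("common"), logging("query.Series timeout"))(
		http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			fmt.Println("series handler")
		}),
	)
	req := httptest.NewRequest(http.MethodGet, "/loki/api/v1/series", nil)
	handler.ServeHTTP(httptest.NewRecorder(), req)
	// Prints: enter common, enter query.Series timeout, series handler
}
```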
319503643589163edce0b939de0beac074006a9f
|
2024-05-21 16:42:24
|
Salva Corts
|
refactor(bloom planner): Compute gaps and build tasks from metas and TSDBs (#12994)
| false
|
diff --git a/docs/sources/shared/configuration.md b/docs/sources/shared/configuration.md
index f59f6501c94e5..d30dce2f7775b 100644
--- a/docs/sources/shared/configuration.md
+++ b/docs/sources/shared/configuration.md
@@ -333,6 +333,23 @@ bloom_build:
[enabled: <boolean> | default = false]
planner:
+ # Interval at which to re-run the bloom creation planning.
+ # CLI flag: -bloom-build.planner.interval
+ [planning_interval: <duration> | default = 8h]
+
+ # Newest day-table offset (from today, inclusive) to build blooms for.
+ # Increase to lower cost by not re-writing data to object storage too
+ # frequently since recent data changes more often at the cost of not having
+ # blooms available as quickly.
+ # CLI flag: -bloom-build.planner.min-table-offset
+ [min_table_offset: <int> | default = 1]
+
+ # Oldest day-table offset (from today, inclusive) to compact. This can be
+ # used to lower cost by not trying to compact older data which doesn't
+ # change. This can be optimized by aligning it with the maximum
+ # `reject_old_samples_max_age` setting of any tenant.
+ # CLI flag: -bloom-build.planner.max-table-offset
+ [max_table_offset: <int> | default = 2]
builder:
@@ -3382,6 +3399,16 @@ shard_streams:
# CLI flag: -bloom-compactor.max-bloom-size
[bloom_compactor_max_bloom_size: <int> | default = 128MB]
+# Experimental. Whether to create blooms for the tenant.
+# CLI flag: -bloom-build.enable
+[bloom_creation_enabled: <boolean> | default = false]
+
+# Experimental. Number of splits to create for the series keyspace when building
+# blooms. The series keyspace is split into this many parts to parallelize bloom
+# creation.
+# CLI flag: -bloom-build.split-keyspace-by
+[bloom_split_series_keyspace_by: <int> | default = 256]
+
# Experimental. Length of the n-grams created when computing blooms from log
# lines.
# CLI flag: -bloom-compactor.ngram-length
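
To make the planner's min/max table offset settings documented above concrete, here is a hedged sketch of the from/through computation. A 24h table period and the exact truncation are assumptions that mirror the planner code further down in this diff, not the literal Loki implementation:

```go
package main

import (
	"fmt"
	"time"
)

// Assumed to match ObjectStorageIndexRequiredPeriod (one day-table per 24h).
const tablePeriod = 24 * time.Hour

// dayRange shows how the min/max table offsets translate into a [from, through)
// window of day-tables, truncated to table boundaries.
func dayRange(now time.Time, minOffset, maxOffset int) (from, through time.Time) {
	truncate := func(t time.Time) time.Time {
		return time.Unix(0, t.UnixNano()/int64(tablePeriod)*int64(tablePeriod)).UTC()
	}
	// The minimum offset is made inclusive by subtracting one, as in the planner code below.
	from = truncate(now.Add(-time.Duration(maxOffset) * tablePeriod))
	through = truncate(now.Add(-time.Duration(minOffset-1) * tablePeriod))
	return from, through
}

func main() {
	now := time.Date(2024, 5, 21, 15, 0, 0, 0, time.UTC)
	from, through := dayRange(now, 1, 2) // the documented defaults
	// With the defaults the planner iterates the day-tables in [from, through),
	// i.e. yesterday and the day before.
	fmt.Println(from, through)
}
```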
diff --git a/pkg/bloombuild/planner/config.go b/pkg/bloombuild/planner/config.go
index dd8cb315d9345..47b01c0b286e0 100644
--- a/pkg/bloombuild/planner/config.go
+++ b/pkg/bloombuild/planner/config.go
@@ -1,21 +1,40 @@
package planner
-import "flag"
+import (
+ "flag"
+ "fmt"
+ "time"
+)
// Config configures the bloom-planner component.
type Config struct {
- // TODO: Add config
+ PlanningInterval time.Duration `yaml:"planning_interval"`
+ MinTableOffset int `yaml:"min_table_offset"`
+ MaxTableOffset int `yaml:"max_table_offset"`
}
// RegisterFlagsWithPrefix registers flags for the bloom-planner configuration.
-func (cfg *Config) RegisterFlagsWithPrefix(_ string, _ *flag.FlagSet) {
- // TODO: Register flags with flagsPrefix
+func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
+ f.DurationVar(&cfg.PlanningInterval, prefix+".interval", 8*time.Hour, "Interval at which to re-run the bloom creation planning.")
+ f.IntVar(&cfg.MinTableOffset, prefix+".min-table-offset", 1, "Newest day-table offset (from today, inclusive) to build blooms for. Increase to lower cost by not re-writing data to object storage too frequently since recent data changes more often at the cost of not having blooms available as quickly.")
+ // TODO(owen-d): ideally we'd set this per tenant based on their `reject_old_samples_max_age` setting,
+ // but due to how we need to discover tenants, we can't do that yet. Tenant+Period discovery is done by
+ // iterating the table periods in object storage and looking for tenants within that period.
+ // In order to have this done dynamically, we'd need to account for tenant specific overrides, which are also
+ // dynamically reloaded.
+ // I'm doing it the simple way for now.
+ f.IntVar(&cfg.MaxTableOffset, prefix+".max-table-offset", 2, "Oldest day-table offset (from today, inclusive) to compact. This can be used to lower cost by not trying to compact older data which doesn't change. This can be optimized by aligning it with the maximum `reject_old_samples_max_age` setting of any tenant.")
}
func (cfg *Config) Validate() error {
+ if cfg.MinTableOffset > cfg.MaxTableOffset {
+ return fmt.Errorf("min-table-offset (%d) must be less than or equal to max-table-offset (%d)", cfg.MinTableOffset, cfg.MaxTableOffset)
+ }
+
return nil
}
type Limits interface {
- // TODO: Add limits
+ BloomCreationEnabled(tenantID string) bool
+ BloomSplitSeriesKeyspaceBy(tenantID string) int
}
diff --git a/pkg/bloombuild/planner/metrics.go b/pkg/bloombuild/planner/metrics.go
index e9a9035e14df0..c0028237d9b1d 100644
--- a/pkg/bloombuild/planner/metrics.go
+++ b/pkg/bloombuild/planner/metrics.go
@@ -8,10 +8,19 @@ import (
const (
metricsNamespace = "loki"
metricsSubsystem = "bloomplanner"
+
+ statusSuccess = "success"
+ statusFailure = "failure"
)
type Metrics struct {
running prometheus.Gauge
+
+ buildStarted prometheus.Counter
+ buildCompleted *prometheus.CounterVec
+ buildTime *prometheus.HistogramVec
+
+ tenantsDiscovered prometheus.Counter
}
func NewMetrics(r prometheus.Registerer) *Metrics {
@@ -22,5 +31,32 @@ func NewMetrics(r prometheus.Registerer) *Metrics {
Name: "running",
Help: "Value will be 1 if bloom planner is currently running on this instance",
}),
+
+ buildStarted: promauto.With(r).NewCounter(prometheus.CounterOpts{
+ Namespace: metricsNamespace,
+ Subsystem: metricsSubsystem,
+ Name: "build_started_total",
+ Help: "Total number of builds started",
+ }),
+ buildCompleted: promauto.With(r).NewCounterVec(prometheus.CounterOpts{
+ Namespace: metricsNamespace,
+ Subsystem: metricsSubsystem,
+ Name: "build_completed_total",
+ Help: "Total number of builds completed",
+ }, []string{"status"}),
+ buildTime: promauto.With(r).NewHistogramVec(prometheus.HistogramOpts{
+ Namespace: metricsNamespace,
+ Subsystem: metricsSubsystem,
+ Name: "build_time_seconds",
+			Help:      "Time spent during a build cycle.",
+ Buckets: prometheus.DefBuckets,
+ }, []string{"status"}),
+
+ tenantsDiscovered: promauto.With(r).NewCounter(prometheus.CounterOpts{
+ Namespace: metricsNamespace,
+ Subsystem: metricsSubsystem,
+ Name: "tenants_discovered_total",
+ Help: "Number of tenants discovered during the current build iteration",
+ }),
}
}
diff --git a/pkg/bloombuild/planner/planner.go b/pkg/bloombuild/planner/planner.go
index 7732d180b0bb8..0be853a2f604a 100644
--- a/pkg/bloombuild/planner/planner.go
+++ b/pkg/bloombuild/planner/planner.go
@@ -2,33 +2,63 @@ package planner
import (
"context"
+ "fmt"
+ "sort"
+ "time"
"github.com/go-kit/log"
+ "github.com/go-kit/log/level"
"github.com/grafana/dskit/services"
"github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/common/model"
+ "github.com/grafana/loki/v3/pkg/storage"
+ v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1"
+ "github.com/grafana/loki/v3/pkg/storage/config"
+ "github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper"
+ "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb"
utillog "github.com/grafana/loki/v3/pkg/util/log"
)
type Planner struct {
services.Service
- cfg Config
+ cfg Config
+ limits Limits
+ schemaCfg config.SchemaConfig
+
+ tsdbStore TSDBStore
+ bloomStore bloomshipper.Store
+
metrics *Metrics
logger log.Logger
}
func New(
cfg Config,
+ limits Limits,
+ schemaCfg config.SchemaConfig,
+ storeCfg storage.Config,
+ storageMetrics storage.ClientMetrics,
+ bloomStore bloomshipper.Store,
logger log.Logger,
r prometheus.Registerer,
) (*Planner, error) {
utillog.WarnExperimentalUse("Bloom Planner", logger)
+ tsdbStore, err := NewTSDBStores(schemaCfg, storeCfg, storageMetrics, logger)
+ if err != nil {
+ return nil, fmt.Errorf("error creating TSDB store: %w", err)
+ }
+
p := &Planner{
- cfg: cfg,
- metrics: NewMetrics(r),
- logger: logger,
+ cfg: cfg,
+ limits: limits,
+ schemaCfg: schemaCfg,
+ tsdbStore: tsdbStore,
+ bloomStore: bloomStore,
+ metrics: NewMetrics(r),
+ logger: logger,
}
p.Service = services.NewBasicService(p.starting, p.running, p.stopping)
@@ -45,6 +75,373 @@ func (p *Planner) stopping(_ error) error {
return nil
}
-func (p *Planner) running(_ context.Context) error {
+func (p *Planner) running(ctx context.Context) error {
+ // run once at beginning
+ if err := p.runOne(ctx); err != nil {
+ level.Error(p.logger).Log("msg", "bloom build iteration failed for the first time", "err", err)
+ }
+
+ ticker := time.NewTicker(p.cfg.PlanningInterval)
+ defer ticker.Stop()
+ for {
+ select {
+ case <-ctx.Done():
+ err := ctx.Err()
+ level.Debug(p.logger).Log("msg", "planner context done", "err", err)
+ return err
+
+ case <-ticker.C:
+ if err := p.runOne(ctx); err != nil {
+ level.Error(p.logger).Log("msg", "bloom build iteration failed", "err", err)
+ }
+ }
+ }
+}
+
+func (p *Planner) runOne(ctx context.Context) error {
+ var (
+ start = time.Now()
+ status = statusFailure
+ )
+ defer func() {
+ p.metrics.buildCompleted.WithLabelValues(status).Inc()
+ p.metrics.buildTime.WithLabelValues(status).Observe(time.Since(start).Seconds())
+ }()
+
+ p.metrics.buildStarted.Inc()
+ level.Info(p.logger).Log("msg", "running bloom build planning")
+
+ tables := p.tables(time.Now())
+ level.Debug(p.logger).Log("msg", "loaded tables", "tables", tables.TotalDays())
+
+ work, err := p.loadWork(ctx, tables)
+ if err != nil {
+ level.Error(p.logger).Log("msg", "error loading work", "err", err)
+ return fmt.Errorf("error loading work: %w", err)
+ }
+
+ // TODO: Enqueue instead of buffering here
+ // This is just a placeholder for now
+ var tasks []Task
+
+ for _, w := range work {
+ gaps, err := p.findGapsForBounds(ctx, w.tenant, w.table, w.ownershipRange)
+ if err != nil {
+ level.Error(p.logger).Log("msg", "error finding gaps", "err", err, "tenant", w.tenant, "table", w.table, "ownership", w.ownershipRange.String())
+ return fmt.Errorf("error finding gaps for tenant (%s) in table (%s) for bounds (%s): %w", w.tenant, w.table, w.ownershipRange, err)
+ }
+
+ for _, gap := range gaps {
+ tasks = append(tasks, Task{
+ table: w.table.Addr(),
+ tenant: w.tenant,
+ OwnershipBounds: w.ownershipRange,
+ tsdb: gap.tsdb,
+ gaps: gap.gaps,
+ })
+ }
+ }
+
+ status = statusSuccess
+ level.Info(p.logger).Log(
+ "msg", "bloom build iteration completed",
+ "duration", time.Since(start).Seconds(),
+ "tasks", len(tasks),
+ )
return nil
}
+
+func (p *Planner) tables(ts time.Time) *dayRangeIterator {
+ // adjust the minimum by one to make it inclusive, which is more intuitive
+ // for a configuration variable
+ adjustedMin := p.cfg.MinTableOffset - 1
+ minCompactionDelta := time.Duration(adjustedMin) * config.ObjectStorageIndexRequiredPeriod
+ maxCompactionDelta := time.Duration(p.cfg.MaxTableOffset) * config.ObjectStorageIndexRequiredPeriod
+
+ from := ts.Add(-maxCompactionDelta).UnixNano() / int64(config.ObjectStorageIndexRequiredPeriod) * int64(config.ObjectStorageIndexRequiredPeriod)
+ through := ts.Add(-minCompactionDelta).UnixNano() / int64(config.ObjectStorageIndexRequiredPeriod) * int64(config.ObjectStorageIndexRequiredPeriod)
+
+ fromDay := config.NewDayTime(model.TimeFromUnixNano(from))
+ throughDay := config.NewDayTime(model.TimeFromUnixNano(through))
+ level.Debug(p.logger).Log("msg", "loaded tables for compaction", "from", fromDay, "through", throughDay)
+ return newDayRangeIterator(fromDay, throughDay, p.schemaCfg)
+}
+
+type tenantTableRange struct {
+ tenant string
+ table config.DayTable
+ ownershipRange v1.FingerprintBounds
+
+ // TODO: Add tracking
+ //finished bool
+ //queueTime, startTime, endTime time.Time
+}
+
+func (p *Planner) loadWork(
+ ctx context.Context,
+ tables *dayRangeIterator,
+) ([]tenantTableRange, error) {
+ var work []tenantTableRange
+
+ for tables.Next() && tables.Err() == nil && ctx.Err() == nil {
+ table := tables.At()
+ level.Debug(p.logger).Log("msg", "loading work for table", "table", table)
+
+ tenants, err := p.tenants(ctx, table)
+ if err != nil {
+ return nil, fmt.Errorf("error loading tenants: %w", err)
+ }
+ level.Debug(p.logger).Log("msg", "loaded tenants", "table", table, "tenants", tenants.Len())
+
+ for tenants.Next() && tenants.Err() == nil && ctx.Err() == nil {
+ p.metrics.tenantsDiscovered.Inc()
+ tenant := tenants.At()
+
+ if !p.limits.BloomCreationEnabled(tenant) {
+ continue
+ }
+
+ splitFactor := p.limits.BloomSplitSeriesKeyspaceBy(tenant)
+ bounds := SplitFingerprintKeyspaceByFactor(splitFactor)
+
+ for _, bounds := range bounds {
+ work = append(work, tenantTableRange{
+ tenant: tenant,
+ table: table,
+ ownershipRange: bounds,
+ })
+ }
+
+ level.Debug(p.logger).Log("msg", "loading work for tenant", "table", table, "tenant", tenant, "splitFactor", splitFactor)
+ }
+ if err := tenants.Err(); err != nil {
+ level.Error(p.logger).Log("msg", "error iterating tenants", "err", err)
+ return nil, fmt.Errorf("error iterating tenants: %w", err)
+ }
+
+ }
+ if err := tables.Err(); err != nil {
+ level.Error(p.logger).Log("msg", "error iterating tables", "err", err)
+ return nil, fmt.Errorf("error iterating tables: %w", err)
+ }
+
+ return work, ctx.Err()
+}
+
+func (p *Planner) tenants(ctx context.Context, table config.DayTable) (*v1.SliceIter[string], error) {
+ tenants, err := p.tsdbStore.UsersForPeriod(ctx, table)
+ if err != nil {
+ return nil, fmt.Errorf("error loading tenants for table (%s): %w", table, err)
+ }
+
+ return v1.NewSliceIter(tenants), nil
+}
+
+/*
+Planning works as follows, split across many functions for clarity:
+ 1. Fetch all meta.jsons for the given tenant and table which overlap the ownership range of this compactor.
+ 2. Load current TSDBs for this tenant/table.
+ 3. For each live TSDB (there should be only 1, but this works with multiple), find any gaps
+ (fingerprint ranges) which are not up-to-date, determined by checking other meta.json files and comparing
+ the TSDBs they were generated from as well as their ownership ranges.
+*/
+func (p *Planner) findGapsForBounds(
+ ctx context.Context,
+ tenant string,
+ table config.DayTable,
+ ownershipRange v1.FingerprintBounds,
+) ([]blockPlan, error) {
+ logger := log.With(p.logger, "org_id", tenant, "table", table.Addr(), "ownership", ownershipRange.String())
+
+ // Fetch source metas to be used in both build and cleanup of out-of-date metas+blooms
+ metas, err := p.bloomStore.FetchMetas(
+ ctx,
+ bloomshipper.MetaSearchParams{
+ TenantID: tenant,
+ Interval: bloomshipper.NewInterval(table.Bounds()),
+ Keyspace: ownershipRange,
+ },
+ )
+ if err != nil {
+ level.Error(logger).Log("msg", "failed to get metas", "err", err)
+ return nil, fmt.Errorf("failed to get metas: %w", err)
+ }
+
+ level.Debug(logger).Log("msg", "found relevant metas", "metas", len(metas))
+
+ // Find gaps in the TSDBs for this tenant/table
+ gaps, err := p.findOutdatedGaps(ctx, tenant, table, ownershipRange, metas, logger)
+ if err != nil {
+ return nil, fmt.Errorf("failed to find outdated gaps: %w", err)
+ }
+
+ return gaps, nil
+}
+
+// blockPlan is a plan for all the work needed to build a meta.json
+// It includes:
+// - the tsdb (source of truth) which contains all the series+chunks
+// we need to ensure are indexed in bloom blocks
+// - a list of gaps that are out of date and need to be checked+built
+// - within each gap, a list of block refs which overlap the gap are included
+// so we can use them to accelerate bloom generation. They likely contain many
+// of the same chunks we need to ensure are indexed, just from previous tsdb iterations.
+// This is a performance optimization to avoid expensive re-reindexing
+type blockPlan struct {
+ tsdb tsdb.SingleTenantTSDBIdentifier
+ gaps []GapWithBlocks
+}
+
+func (p *Planner) findOutdatedGaps(
+ ctx context.Context,
+ tenant string,
+ table config.DayTable,
+ ownershipRange v1.FingerprintBounds,
+ metas []bloomshipper.Meta,
+ logger log.Logger,
+) ([]blockPlan, error) {
+ // Resolve TSDBs
+ tsdbs, err := p.tsdbStore.ResolveTSDBs(ctx, table, tenant)
+ if err != nil {
+ level.Error(logger).Log("msg", "failed to resolve tsdbs", "err", err)
+ return nil, fmt.Errorf("failed to resolve tsdbs: %w", err)
+ }
+
+ if len(tsdbs) == 0 {
+ return nil, nil
+ }
+
+ // Determine which TSDBs have gaps in the ownership range and need to
+ // be processed.
+ tsdbsWithGaps, err := gapsBetweenTSDBsAndMetas(ownershipRange, tsdbs, metas)
+ if err != nil {
+ level.Error(logger).Log("msg", "failed to find gaps", "err", err)
+ return nil, fmt.Errorf("failed to find gaps: %w", err)
+ }
+
+ if len(tsdbsWithGaps) == 0 {
+ level.Debug(logger).Log("msg", "blooms exist for all tsdbs")
+ return nil, nil
+ }
+
+ work, err := blockPlansForGaps(tsdbsWithGaps, metas)
+ if err != nil {
+ level.Error(logger).Log("msg", "failed to create plan", "err", err)
+ return nil, fmt.Errorf("failed to create plan: %w", err)
+ }
+
+ return work, nil
+}
+
+// Used to signal the gaps that need to be populated for a tsdb
+type tsdbGaps struct {
+ tsdb tsdb.SingleTenantTSDBIdentifier
+ gaps []v1.FingerprintBounds
+}
+
+// gapsBetweenTSDBsAndMetas returns the gaps where the metas are not up-to-date with the TSDBs. This is determined by asserting
+// that for each TSDB, there are metas covering the entire ownership range which were generated from that specific TSDB.
+func gapsBetweenTSDBsAndMetas(
+ ownershipRange v1.FingerprintBounds,
+ tsdbs []tsdb.SingleTenantTSDBIdentifier,
+ metas []bloomshipper.Meta,
+) (res []tsdbGaps, err error) {
+ for _, db := range tsdbs {
+ id := db.Name()
+
+ relevantMetas := make([]v1.FingerprintBounds, 0, len(metas))
+ for _, meta := range metas {
+ for _, s := range meta.Sources {
+ if s.Name() == id {
+ relevantMetas = append(relevantMetas, meta.Bounds)
+ }
+ }
+ }
+
+ gaps, err := FindGapsInFingerprintBounds(ownershipRange, relevantMetas)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(gaps) > 0 {
+ res = append(res, tsdbGaps{
+ tsdb: db,
+ gaps: gaps,
+ })
+ }
+ }
+
+ return res, err
+}
+
+// blockPlansForGaps groups tsdb gaps we wish to fill with overlapping but out of date blocks.
+// This allows us to expedite bloom generation by using existing blocks to fill in the gaps
+// since many will contain the same chunks.
+func blockPlansForGaps(tsdbs []tsdbGaps, metas []bloomshipper.Meta) ([]blockPlan, error) {
+ plans := make([]blockPlan, 0, len(tsdbs))
+
+ for _, idx := range tsdbs {
+ plan := blockPlan{
+ tsdb: idx.tsdb,
+ gaps: make([]GapWithBlocks, 0, len(idx.gaps)),
+ }
+
+ for _, gap := range idx.gaps {
+ planGap := GapWithBlocks{
+ bounds: gap,
+ }
+
+ for _, meta := range metas {
+
+ if meta.Bounds.Intersection(gap) == nil {
+ // this meta doesn't overlap the gap, skip
+ continue
+ }
+
+ for _, block := range meta.Blocks {
+ if block.Bounds.Intersection(gap) == nil {
+ // this block doesn't overlap the gap, skip
+ continue
+ }
+ // this block overlaps the gap, add it to the plan
+ // for this gap
+ planGap.blocks = append(planGap.blocks, block)
+ }
+ }
+
+ // ensure we sort blocks so deduping iterator works as expected
+ sort.Slice(planGap.blocks, func(i, j int) bool {
+ return planGap.blocks[i].Bounds.Less(planGap.blocks[j].Bounds)
+ })
+
+ peekingBlocks := v1.NewPeekingIter[bloomshipper.BlockRef](
+ v1.NewSliceIter[bloomshipper.BlockRef](
+ planGap.blocks,
+ ),
+ )
+ // dedupe blocks which could be in multiple metas
+ itr := v1.NewDedupingIter[bloomshipper.BlockRef, bloomshipper.BlockRef](
+ func(a, b bloomshipper.BlockRef) bool {
+ return a == b
+ },
+ v1.Identity[bloomshipper.BlockRef],
+ func(a, _ bloomshipper.BlockRef) bloomshipper.BlockRef {
+ return a
+ },
+ peekingBlocks,
+ )
+
+ deduped, err := v1.Collect[bloomshipper.BlockRef](itr)
+ if err != nil {
+ return nil, fmt.Errorf("failed to dedupe blocks: %w", err)
+ }
+ planGap.blocks = deduped
+
+ plan.gaps = append(plan.gaps, planGap)
+ }
+
+ plans = append(plans, plan)
+ }
+
+ return plans, nil
+}
diff --git a/pkg/bloombuild/planner/planner_test.go b/pkg/bloombuild/planner/planner_test.go
new file mode 100644
index 0000000000000..346bd145ab8dc
--- /dev/null
+++ b/pkg/bloombuild/planner/planner_test.go
@@ -0,0 +1,321 @@
+package planner
+
+import (
+ "testing"
+ "time"
+
+ "github.com/prometheus/common/model"
+ "github.com/stretchr/testify/require"
+
+ v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1"
+ "github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper"
+ "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb"
+)
+
+func tsdbID(n int) tsdb.SingleTenantTSDBIdentifier {
+ return tsdb.SingleTenantTSDBIdentifier{
+ TS: time.Unix(int64(n), 0),
+ }
+}
+
+func genMeta(min, max model.Fingerprint, sources []int, blocks []bloomshipper.BlockRef) bloomshipper.Meta {
+ m := bloomshipper.Meta{
+ MetaRef: bloomshipper.MetaRef{
+ Ref: bloomshipper.Ref{
+ Bounds: v1.NewBounds(min, max),
+ },
+ },
+ Blocks: blocks,
+ }
+ for _, source := range sources {
+ m.Sources = append(m.Sources, tsdbID(source))
+ }
+ return m
+}
+
+func Test_gapsBetweenTSDBsAndMetas(t *testing.T) {
+
+ for _, tc := range []struct {
+ desc string
+ err bool
+ exp []tsdbGaps
+ ownershipRange v1.FingerprintBounds
+ tsdbs []tsdb.SingleTenantTSDBIdentifier
+ metas []bloomshipper.Meta
+ }{
+ {
+ desc: "non-overlapping tsdbs and metas",
+ err: true,
+ ownershipRange: v1.NewBounds(0, 10),
+ tsdbs: []tsdb.SingleTenantTSDBIdentifier{tsdbID(0)},
+ metas: []bloomshipper.Meta{
+ genMeta(11, 20, []int{0}, nil),
+ },
+ },
+ {
+ desc: "single tsdb",
+ ownershipRange: v1.NewBounds(0, 10),
+ tsdbs: []tsdb.SingleTenantTSDBIdentifier{tsdbID(0)},
+ metas: []bloomshipper.Meta{
+ genMeta(4, 8, []int{0}, nil),
+ },
+ exp: []tsdbGaps{
+ {
+ tsdb: tsdbID(0),
+ gaps: []v1.FingerprintBounds{
+ v1.NewBounds(0, 3),
+ v1.NewBounds(9, 10),
+ },
+ },
+ },
+ },
+ {
+ desc: "multiple tsdbs with separate blocks",
+ ownershipRange: v1.NewBounds(0, 10),
+ tsdbs: []tsdb.SingleTenantTSDBIdentifier{tsdbID(0), tsdbID(1)},
+ metas: []bloomshipper.Meta{
+ genMeta(0, 5, []int{0}, nil),
+ genMeta(6, 10, []int{1}, nil),
+ },
+ exp: []tsdbGaps{
+ {
+ tsdb: tsdbID(0),
+ gaps: []v1.FingerprintBounds{
+ v1.NewBounds(6, 10),
+ },
+ },
+ {
+ tsdb: tsdbID(1),
+ gaps: []v1.FingerprintBounds{
+ v1.NewBounds(0, 5),
+ },
+ },
+ },
+ },
+ {
+ desc: "multiple tsdbs with the same blocks",
+ ownershipRange: v1.NewBounds(0, 10),
+ tsdbs: []tsdb.SingleTenantTSDBIdentifier{tsdbID(0), tsdbID(1)},
+ metas: []bloomshipper.Meta{
+ genMeta(0, 5, []int{0, 1}, nil),
+ genMeta(6, 8, []int{1}, nil),
+ },
+ exp: []tsdbGaps{
+ {
+ tsdb: tsdbID(0),
+ gaps: []v1.FingerprintBounds{
+ v1.NewBounds(6, 10),
+ },
+ },
+ {
+ tsdb: tsdbID(1),
+ gaps: []v1.FingerprintBounds{
+ v1.NewBounds(9, 10),
+ },
+ },
+ },
+ },
+ } {
+ t.Run(tc.desc, func(t *testing.T) {
+ gaps, err := gapsBetweenTSDBsAndMetas(tc.ownershipRange, tc.tsdbs, tc.metas)
+ if tc.err {
+ require.Error(t, err)
+ return
+ }
+ require.Equal(t, tc.exp, gaps)
+ })
+ }
+}
+
+func genBlockRef(min, max model.Fingerprint) bloomshipper.BlockRef {
+ bounds := v1.NewBounds(min, max)
+ return bloomshipper.BlockRef{
+ Ref: bloomshipper.Ref{
+ Bounds: bounds,
+ },
+ }
+}
+
+func Test_blockPlansForGaps(t *testing.T) {
+ for _, tc := range []struct {
+ desc string
+ ownershipRange v1.FingerprintBounds
+ tsdbs []tsdb.SingleTenantTSDBIdentifier
+ metas []bloomshipper.Meta
+ err bool
+ exp []blockPlan
+ }{
+ {
+ desc: "single overlapping meta+no overlapping block",
+ ownershipRange: v1.NewBounds(0, 10),
+ tsdbs: []tsdb.SingleTenantTSDBIdentifier{tsdbID(0)},
+ metas: []bloomshipper.Meta{
+ genMeta(5, 20, []int{1}, []bloomshipper.BlockRef{genBlockRef(11, 20)}),
+ },
+ exp: []blockPlan{
+ {
+ tsdb: tsdbID(0),
+ gaps: []GapWithBlocks{
+ {
+ bounds: v1.NewBounds(0, 10),
+ },
+ },
+ },
+ },
+ },
+ {
+ desc: "single overlapping meta+one overlapping block",
+ ownershipRange: v1.NewBounds(0, 10),
+ tsdbs: []tsdb.SingleTenantTSDBIdentifier{tsdbID(0)},
+ metas: []bloomshipper.Meta{
+ genMeta(5, 20, []int{1}, []bloomshipper.BlockRef{genBlockRef(9, 20)}),
+ },
+ exp: []blockPlan{
+ {
+ tsdb: tsdbID(0),
+ gaps: []GapWithBlocks{
+ {
+ bounds: v1.NewBounds(0, 10),
+ blocks: []bloomshipper.BlockRef{genBlockRef(9, 20)},
+ },
+ },
+ },
+ },
+ },
+ {
+ // the range which needs to be generated doesn't overlap with existing blocks
+			// from other tsdb versions since there's an up-to-date tsdb version block,
+ // but we can trim the range needing generation
+ desc: "trims up to date area",
+ ownershipRange: v1.NewBounds(0, 10),
+ tsdbs: []tsdb.SingleTenantTSDBIdentifier{tsdbID(0)},
+ metas: []bloomshipper.Meta{
+ genMeta(9, 20, []int{0}, []bloomshipper.BlockRef{genBlockRef(9, 20)}), // block for same tsdb
+ genMeta(9, 20, []int{1}, []bloomshipper.BlockRef{genBlockRef(9, 20)}), // block for different tsdb
+ },
+ exp: []blockPlan{
+ {
+ tsdb: tsdbID(0),
+ gaps: []GapWithBlocks{
+ {
+ bounds: v1.NewBounds(0, 8),
+ },
+ },
+ },
+ },
+ },
+ {
+ desc: "uses old block for overlapping range",
+ ownershipRange: v1.NewBounds(0, 10),
+ tsdbs: []tsdb.SingleTenantTSDBIdentifier{tsdbID(0)},
+ metas: []bloomshipper.Meta{
+ genMeta(9, 20, []int{0}, []bloomshipper.BlockRef{genBlockRef(9, 20)}), // block for same tsdb
+ genMeta(5, 20, []int{1}, []bloomshipper.BlockRef{genBlockRef(5, 20)}), // block for different tsdb
+ },
+ exp: []blockPlan{
+ {
+ tsdb: tsdbID(0),
+ gaps: []GapWithBlocks{
+ {
+ bounds: v1.NewBounds(0, 8),
+ blocks: []bloomshipper.BlockRef{genBlockRef(5, 20)},
+ },
+ },
+ },
+ },
+ },
+ {
+ desc: "multi case",
+ ownershipRange: v1.NewBounds(0, 10),
+ tsdbs: []tsdb.SingleTenantTSDBIdentifier{tsdbID(0), tsdbID(1)}, // generate for both tsdbs
+ metas: []bloomshipper.Meta{
+ genMeta(0, 2, []int{0}, []bloomshipper.BlockRef{
+ genBlockRef(0, 1),
+ genBlockRef(1, 2),
+ }), // tsdb_0
+ genMeta(6, 8, []int{0}, []bloomshipper.BlockRef{genBlockRef(6, 8)}), // tsdb_0
+
+ genMeta(3, 5, []int{1}, []bloomshipper.BlockRef{genBlockRef(3, 5)}), // tsdb_1
+ genMeta(8, 10, []int{1}, []bloomshipper.BlockRef{genBlockRef(8, 10)}), // tsdb_1
+ },
+ exp: []blockPlan{
+ {
+ tsdb: tsdbID(0),
+ gaps: []GapWithBlocks{
+ // tsdb (id=0) can source chunks from the blocks built from tsdb (id=1)
+ {
+ bounds: v1.NewBounds(3, 5),
+ blocks: []bloomshipper.BlockRef{genBlockRef(3, 5)},
+ },
+ {
+ bounds: v1.NewBounds(9, 10),
+ blocks: []bloomshipper.BlockRef{genBlockRef(8, 10)},
+ },
+ },
+ },
+ // tsdb (id=1) can source chunks from the blocks built from tsdb (id=0)
+ {
+ tsdb: tsdbID(1),
+ gaps: []GapWithBlocks{
+ {
+ bounds: v1.NewBounds(0, 2),
+ blocks: []bloomshipper.BlockRef{
+ genBlockRef(0, 1),
+ genBlockRef(1, 2),
+ },
+ },
+ {
+ bounds: v1.NewBounds(6, 7),
+ blocks: []bloomshipper.BlockRef{genBlockRef(6, 8)},
+ },
+ },
+ },
+ },
+ },
+ {
+ desc: "dedupes block refs",
+ ownershipRange: v1.NewBounds(0, 10),
+ tsdbs: []tsdb.SingleTenantTSDBIdentifier{tsdbID(0)},
+ metas: []bloomshipper.Meta{
+ genMeta(9, 20, []int{1}, []bloomshipper.BlockRef{
+ genBlockRef(1, 4),
+ genBlockRef(9, 20),
+ }), // blocks for first diff tsdb
+ genMeta(5, 20, []int{2}, []bloomshipper.BlockRef{
+ genBlockRef(5, 10),
+ genBlockRef(9, 20), // same block references in prior meta (will be deduped)
+ }), // block for second diff tsdb
+ },
+ exp: []blockPlan{
+ {
+ tsdb: tsdbID(0),
+ gaps: []GapWithBlocks{
+ {
+ bounds: v1.NewBounds(0, 10),
+ blocks: []bloomshipper.BlockRef{
+ genBlockRef(1, 4),
+ genBlockRef(5, 10),
+ genBlockRef(9, 20),
+ },
+ },
+ },
+ },
+ },
+ },
+ } {
+ t.Run(tc.desc, func(t *testing.T) {
+ // we reuse the gapsBetweenTSDBsAndMetas function to generate the gaps as this function is tested
+ // separately and it's used to generate input in our regular code path (easier to write tests this way).
+ gaps, err := gapsBetweenTSDBsAndMetas(tc.ownershipRange, tc.tsdbs, tc.metas)
+ require.NoError(t, err)
+
+ plans, err := blockPlansForGaps(gaps, tc.metas)
+ if tc.err {
+ require.Error(t, err)
+ return
+ }
+ require.Equal(t, tc.exp, plans)
+
+ })
+ }
+}
diff --git a/pkg/bloombuild/planner/tableIterator.go b/pkg/bloombuild/planner/tableIterator.go
new file mode 100644
index 0000000000000..c17458a04806c
--- /dev/null
+++ b/pkg/bloombuild/planner/tableIterator.go
@@ -0,0 +1,50 @@
+package planner
+
+import (
+ "fmt"
+
+ "github.com/grafana/loki/v3/pkg/storage/config"
+)
+
+type dayRangeIterator struct {
+ min, max, cur config.DayTime
+ curPeriod config.PeriodConfig
+ schemaCfg config.SchemaConfig
+ err error
+}
+
+func newDayRangeIterator(min, max config.DayTime, schemaCfg config.SchemaConfig) *dayRangeIterator {
+ return &dayRangeIterator{min: min, max: max, cur: min.Dec(), schemaCfg: schemaCfg}
+}
+
+func (r *dayRangeIterator) TotalDays() int {
+ offset := r.cur
+ if r.cur.Before(r.min) {
+ offset = r.min
+ }
+ return int(r.max.Sub(offset.Time) / config.ObjectStorageIndexRequiredPeriod)
+}
+
+func (r *dayRangeIterator) Next() bool {
+ r.cur = r.cur.Inc()
+ if !r.cur.Before(r.max) {
+ return false
+ }
+
+ period, err := r.schemaCfg.SchemaForTime(r.cur.ModelTime())
+ if err != nil {
+ r.err = fmt.Errorf("getting schema for time (%s): %w", r.cur, err)
+ return false
+ }
+ r.curPeriod = period
+
+ return true
+}
+
+func (r *dayRangeIterator) At() config.DayTable {
+ return config.NewDayTable(r.cur, r.curPeriod.IndexTables.Prefix)
+}
+
+func (r *dayRangeIterator) Err() error {
+	return r.err
+}
diff --git a/pkg/bloombuild/planner/task.go b/pkg/bloombuild/planner/task.go
new file mode 100644
index 0000000000000..80f730c4fb6dd
--- /dev/null
+++ b/pkg/bloombuild/planner/task.go
@@ -0,0 +1,22 @@
+package planner
+
+import (
+ v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1"
+ "github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper"
+ "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb"
+)
+
+// TODO: Extract this definition to a proto file at pkg/bloombuild/protos/protos.proto
+
+type GapWithBlocks struct {
+ bounds v1.FingerprintBounds
+ blocks []bloomshipper.BlockRef
+}
+
+type Task struct {
+ table string
+ tenant string
+ OwnershipBounds v1.FingerprintBounds
+ tsdb tsdb.SingleTenantTSDBIdentifier
+ gaps []GapWithBlocks
+}
diff --git a/pkg/bloombuild/planner/tsdb.go b/pkg/bloombuild/planner/tsdb.go
new file mode 100644
index 0000000000000..7c15c43306db2
--- /dev/null
+++ b/pkg/bloombuild/planner/tsdb.go
@@ -0,0 +1,261 @@
+package planner
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "math"
+ "path"
+ "strings"
+
+ "github.com/go-kit/log"
+ "github.com/go-kit/log/level"
+ "github.com/pkg/errors"
+ "github.com/prometheus/common/model"
+ "github.com/prometheus/prometheus/model/labels"
+
+ "github.com/grafana/loki/v3/pkg/chunkenc"
+ baseStore "github.com/grafana/loki/v3/pkg/storage"
+ v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1"
+ "github.com/grafana/loki/v3/pkg/storage/config"
+ "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/storage"
+ "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb"
+ "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb/index"
+ "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb/sharding"
+ "github.com/grafana/loki/v3/pkg/storage/types"
+)
+
+const (
+ gzipExtension = ".gz"
+)
+
+type TSDBStore interface {
+ UsersForPeriod(ctx context.Context, table config.DayTable) ([]string, error)
+ ResolveTSDBs(ctx context.Context, table config.DayTable, tenant string) ([]tsdb.SingleTenantTSDBIdentifier, error)
+ LoadTSDB(
+ ctx context.Context,
+ table config.DayTable,
+ tenant string,
+ id tsdb.Identifier,
+ bounds v1.FingerprintBounds,
+ ) (v1.Iterator[*v1.Series], error)
+}
+
+// BloomTSDBStore is a wrapper around the storage.Client interface which
+// implements the TSDBStore interface for this pkg.
+type BloomTSDBStore struct {
+ storage storage.Client
+ logger log.Logger
+}
+
+func NewBloomTSDBStore(storage storage.Client, logger log.Logger) *BloomTSDBStore {
+ return &BloomTSDBStore{
+ storage: storage,
+ logger: logger,
+ }
+}
+
+func (b *BloomTSDBStore) UsersForPeriod(ctx context.Context, table config.DayTable) ([]string, error) {
+ _, users, err := b.storage.ListFiles(ctx, table.Addr(), true) // bypass cache for ease of testing
+ return users, err
+}
+
+func (b *BloomTSDBStore) ResolveTSDBs(ctx context.Context, table config.DayTable, tenant string) ([]tsdb.SingleTenantTSDBIdentifier, error) {
+ indices, err := b.storage.ListUserFiles(ctx, table.Addr(), tenant, true) // bypass cache for ease of testing
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to list user files")
+ }
+
+ ids := make([]tsdb.SingleTenantTSDBIdentifier, 0, len(indices))
+ for _, index := range indices {
+ key := index.Name
+ if decompress := storage.IsCompressedFile(index.Name); decompress {
+ key = strings.TrimSuffix(key, gzipExtension)
+ }
+
+ id, ok := tsdb.ParseSingleTenantTSDBPath(path.Base(key))
+ if !ok {
+ return nil, errors.Errorf("failed to parse single tenant tsdb path: %s", key)
+ }
+
+ ids = append(ids, id)
+
+ }
+ return ids, nil
+}
+
+func (b *BloomTSDBStore) LoadTSDB(
+ ctx context.Context,
+ table config.DayTable,
+ tenant string,
+ id tsdb.Identifier,
+ bounds v1.FingerprintBounds,
+) (v1.Iterator[*v1.Series], error) {
+ withCompression := id.Name() + gzipExtension
+
+ data, err := b.storage.GetUserFile(ctx, table.Addr(), tenant, withCompression)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to get file")
+ }
+ defer data.Close()
+
+ decompressorPool := chunkenc.GetReaderPool(chunkenc.EncGZIP)
+ decompressor, err := decompressorPool.GetReader(data)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to get decompressor")
+ }
+ defer decompressorPool.PutReader(decompressor)
+
+ buf, err := io.ReadAll(decompressor)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to read file")
+ }
+
+ reader, err := index.NewReader(index.RealByteSlice(buf))
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to create index reader")
+ }
+
+ idx := tsdb.NewTSDBIndex(reader)
+ defer func() {
+ if err := idx.Close(); err != nil {
+ level.Error(b.logger).Log("msg", "failed to close index", "err", err)
+ }
+ }()
+
+ return NewTSDBSeriesIter(ctx, tenant, idx, bounds)
+}
+
+func NewTSDBSeriesIter(ctx context.Context, user string, f sharding.ForSeries, bounds v1.FingerprintBounds) (v1.Iterator[*v1.Series], error) {
+ // TODO(salvacorts): Create a pool
+ series := make([]*v1.Series, 0, 100)
+
+ if err := f.ForSeries(
+ ctx,
+ user,
+ bounds,
+ 0, math.MaxInt64,
+ func(_ labels.Labels, fp model.Fingerprint, chks []index.ChunkMeta) (stop bool) {
+ select {
+ case <-ctx.Done():
+ return true
+ default:
+ res := &v1.Series{
+ Fingerprint: fp,
+ Chunks: make(v1.ChunkRefs, 0, len(chks)),
+ }
+ for _, chk := range chks {
+ res.Chunks = append(res.Chunks, v1.ChunkRef{
+ From: model.Time(chk.MinTime),
+ Through: model.Time(chk.MaxTime),
+ Checksum: chk.Checksum,
+ })
+ }
+
+ series = append(series, res)
+ return false
+ }
+ },
+ labels.MustNewMatcher(labels.MatchEqual, "", ""),
+ ); err != nil {
+ return nil, err
+ }
+
+ select {
+ case <-ctx.Done():
+ return v1.NewEmptyIter[*v1.Series](), ctx.Err()
+ default:
+ return v1.NewCancelableIter[*v1.Series](ctx, v1.NewSliceIter[*v1.Series](series)), nil
+ }
+}
+
+type TSDBStores struct {
+ schemaCfg config.SchemaConfig
+ stores []TSDBStore
+}
+
+func NewTSDBStores(
+ schemaCfg config.SchemaConfig,
+ storeCfg baseStore.Config,
+ clientMetrics baseStore.ClientMetrics,
+ logger log.Logger,
+) (*TSDBStores, error) {
+ res := &TSDBStores{
+ schemaCfg: schemaCfg,
+ stores: make([]TSDBStore, len(schemaCfg.Configs)),
+ }
+
+ for i, cfg := range schemaCfg.Configs {
+ if cfg.IndexType == types.TSDBType {
+
+ c, err := baseStore.NewObjectClient(cfg.ObjectType, storeCfg, clientMetrics)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to create object client")
+ }
+ res.stores[i] = NewBloomTSDBStore(storage.NewIndexStorageClient(c, cfg.IndexTables.PathPrefix), logger)
+ }
+ }
+
+ return res, nil
+}
+
+func (s *TSDBStores) storeForPeriod(table config.DayTime) (TSDBStore, error) {
+ for i := len(s.schemaCfg.Configs) - 1; i >= 0; i-- {
+ period := s.schemaCfg.Configs[i]
+
+ if !table.Before(period.From) {
+ // we have the desired period config
+
+ if s.stores[i] != nil {
+ // valid: it's of tsdb type
+ return s.stores[i], nil
+ }
+
+ // invalid
+ return nil, errors.Errorf(
+ "store for period is not of TSDB type (%s) while looking up store for (%v)",
+ period.IndexType,
+ table,
+ )
+ }
+
+ }
+
+ return nil, fmt.Errorf(
+		"no matching period found for table (%v) -- too early",
+ table,
+ )
+}
+
+func (s *TSDBStores) UsersForPeriod(ctx context.Context, table config.DayTable) ([]string, error) {
+ store, err := s.storeForPeriod(table.DayTime)
+ if err != nil {
+ return nil, err
+ }
+
+ return store.UsersForPeriod(ctx, table)
+}
+
+func (s *TSDBStores) ResolveTSDBs(ctx context.Context, table config.DayTable, tenant string) ([]tsdb.SingleTenantTSDBIdentifier, error) {
+ store, err := s.storeForPeriod(table.DayTime)
+ if err != nil {
+ return nil, err
+ }
+
+ return store.ResolveTSDBs(ctx, table, tenant)
+}
+
+func (s *TSDBStores) LoadTSDB(
+ ctx context.Context,
+ table config.DayTable,
+ tenant string,
+ id tsdb.Identifier,
+ bounds v1.FingerprintBounds,
+) (v1.Iterator[*v1.Series], error) {
+ store, err := s.storeForPeriod(table.DayTime)
+ if err != nil {
+ return nil, err
+ }
+
+ return store.LoadTSDB(ctx, table, tenant, id, bounds)
+}
diff --git a/pkg/bloombuild/planner/tsdb_test.go b/pkg/bloombuild/planner/tsdb_test.go
new file mode 100644
index 0000000000000..f47c193c2cd18
--- /dev/null
+++ b/pkg/bloombuild/planner/tsdb_test.go
@@ -0,0 +1,105 @@
+package planner
+
+import (
+ "context"
+ "math"
+ "testing"
+
+ "github.com/prometheus/common/model"
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/stretchr/testify/require"
+
+ v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1"
+ "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb/index"
+)
+
+type forSeriesTestImpl []*v1.Series
+
+func (f forSeriesTestImpl) ForSeries(
+ _ context.Context,
+ _ string,
+ _ index.FingerprintFilter,
+ _ model.Time,
+ _ model.Time,
+ fn func(labels.Labels, model.Fingerprint, []index.ChunkMeta) bool,
+ _ ...*labels.Matcher,
+) error {
+ for i := range f {
+ unmapped := make([]index.ChunkMeta, 0, len(f[i].Chunks))
+ for _, c := range f[i].Chunks {
+ unmapped = append(unmapped, index.ChunkMeta{
+ MinTime: int64(c.From),
+ MaxTime: int64(c.Through),
+ Checksum: c.Checksum,
+ })
+ }
+
+ fn(nil, f[i].Fingerprint, unmapped)
+ }
+ return nil
+}
+
+func (f forSeriesTestImpl) Close() error {
+ return nil
+}
+
+func TestTSDBSeriesIter(t *testing.T) {
+ input := []*v1.Series{
+ {
+ Fingerprint: 1,
+ Chunks: []v1.ChunkRef{
+ {
+ From: 0,
+ Through: 1,
+ Checksum: 2,
+ },
+ {
+ From: 3,
+ Through: 4,
+ Checksum: 5,
+ },
+ },
+ },
+ }
+ srcItr := v1.NewSliceIter(input)
+ itr, err := NewTSDBSeriesIter(context.Background(), "", forSeriesTestImpl(input), v1.NewBounds(0, math.MaxUint64))
+ require.NoError(t, err)
+
+ v1.EqualIterators[*v1.Series](
+ t,
+ func(a, b *v1.Series) {
+ require.Equal(t, a, b)
+ },
+ itr,
+ srcItr,
+ )
+}
+
+func TestTSDBSeriesIter_Expiry(t *testing.T) {
+ t.Run("expires on creation", func(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ cancel()
+ itr, err := NewTSDBSeriesIter(ctx, "", forSeriesTestImpl{
+ {}, // a single entry
+ }, v1.NewBounds(0, math.MaxUint64))
+ require.Error(t, err)
+ require.False(t, itr.Next())
+ })
+
+ t.Run("expires during consumption", func(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ itr, err := NewTSDBSeriesIter(ctx, "", forSeriesTestImpl{
+ {},
+ {},
+ }, v1.NewBounds(0, math.MaxUint64))
+ require.NoError(t, err)
+
+ require.True(t, itr.Next())
+ require.NoError(t, itr.Err())
+
+ cancel()
+ require.False(t, itr.Next())
+ require.Error(t, itr.Err())
+ })
+
+}
diff --git a/pkg/bloombuild/planner/util.go b/pkg/bloombuild/planner/util.go
new file mode 100644
index 0000000000000..f9a97587f802f
--- /dev/null
+++ b/pkg/bloombuild/planner/util.go
@@ -0,0 +1,125 @@
+package planner
+
+import (
+ "fmt"
+ "math"
+
+ "github.com/prometheus/common/model"
+
+ v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1"
+)
+
+// SplitFingerprintKeyspaceByFactor splits the keyspace covered by model.Fingerprint into contiguous non-overlapping ranges.
+func SplitFingerprintKeyspaceByFactor(factor int) []v1.FingerprintBounds {
+ if factor <= 0 {
+ return nil
+ }
+
+ bounds := make([]v1.FingerprintBounds, 0, factor)
+
+ // The keyspace of a Fingerprint is from 0 to max uint64.
+ keyspaceSize := uint64(math.MaxUint64)
+
+ // Calculate the size of each range.
+ rangeSize := keyspaceSize / uint64(factor)
+
+ for i := 0; i < factor; i++ {
+ // Calculate the start and end of the range.
+ start := uint64(i) * rangeSize
+ end := start + rangeSize - 1
+
+ // For the last range, make sure it ends at the end of the keyspace.
+ if i == factor-1 {
+ end = keyspaceSize
+ }
+
+ // Create a FingerprintBounds for the range and add it to the slice.
+ bounds = append(bounds, v1.FingerprintBounds{
+ Min: model.Fingerprint(start),
+ Max: model.Fingerprint(end),
+ })
+ }
+
+ return bounds
+}
+
+func FindGapsInFingerprintBounds(ownershipRange v1.FingerprintBounds, metas []v1.FingerprintBounds) (gaps []v1.FingerprintBounds, err error) {
+ if len(metas) == 0 {
+ return []v1.FingerprintBounds{ownershipRange}, nil
+ }
+
+ // turn the available metas into a list of non-overlapping metas
+ // for easier processing
+ var nonOverlapping []v1.FingerprintBounds
+ // First, we reduce the metas into a smaller set by combining overlaps. They must be sorted.
+ var cur *v1.FingerprintBounds
+ for i := 0; i < len(metas); i++ {
+ j := i + 1
+
+ // first iteration (i == 0), set the current meta
+ if cur == nil {
+ cur = &metas[i]
+ }
+
+ if j >= len(metas) {
+ // We've reached the end of the list. Add the last meta to the non-overlapping set.
+ nonOverlapping = append(nonOverlapping, *cur)
+ break
+ }
+
+ combined := cur.Union(metas[j])
+ if len(combined) == 1 {
+ // There was an overlap between the two tested ranges. Combine them and keep going.
+ cur = &combined[0]
+ continue
+ }
+
+ // There was no overlap between the two tested ranges. Add the first to the non-overlapping set.
+ // and keep the second for the next iteration.
+ nonOverlapping = append(nonOverlapping, combined[0])
+ cur = &combined[1]
+ }
+
+ // Now, detect gaps between the non-overlapping metas and the ownership range.
+ // The left bound of the ownership range will be adjusted as we go.
+ leftBound := ownershipRange.Min
+ for _, meta := range nonOverlapping {
+
+ clippedMeta := meta.Intersection(ownershipRange)
+ // should never happen as long as we are only combining metas
+ // that intersect with the ownership range
+ if clippedMeta == nil {
+ return nil, fmt.Errorf("meta is not within ownership range: %v", meta)
+ }
+
+ searchRange := ownershipRange.Slice(leftBound, clippedMeta.Max)
+ // update the left bound for the next iteration
+		// We use max() to prevent the bound from overflowing from MaxUint64 to 0
+ leftBound = min(
+ max(clippedMeta.Max+1, clippedMeta.Max),
+ max(ownershipRange.Max+1, ownershipRange.Max),
+ )
+
+ // since we've already ensured that the meta is within the ownership range,
+ // we know the xor will be of length zero (when the meta is equal to the ownership range)
+ // or 1 (when the meta is a subset of the ownership range)
+ xors := searchRange.Unless(*clippedMeta)
+ if len(xors) == 0 {
+ // meta is equal to the ownership range. This means the meta
+ // covers this entire section of the ownership range.
+ continue
+ }
+
+ gaps = append(gaps, xors[0])
+ }
+
+	// If the leftBound is less than the ownership range max, and it's smaller than MaxUint64,
+ // There is a gap between the last meta and the end of the ownership range.
+ // Note: we check `leftBound < math.MaxUint64` since in the loop above we clamp the
+ // leftBound to MaxUint64 to prevent an overflow to 0: `max(clippedMeta.Max+1, clippedMeta.Max)`
+ if leftBound < math.MaxUint64 && leftBound <= ownershipRange.Max {
+ gaps = append(gaps, v1.NewBounds(leftBound, ownershipRange.Max))
+ }
+
+ return gaps, nil
+}
diff --git a/pkg/bloombuild/planner/util_test.go b/pkg/bloombuild/planner/util_test.go
new file mode 100644
index 0000000000000..6755478ef7290
--- /dev/null
+++ b/pkg/bloombuild/planner/util_test.go
@@ -0,0 +1,172 @@
+package planner
+
+import (
+ "math"
+ "testing"
+
+ "github.com/prometheus/common/model"
+ "github.com/stretchr/testify/require"
+
+ v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1"
+)
+
+func TestSplitFingerprintKeyspaceByFactor(t *testing.T) {
+ for _, tt := range []struct {
+ name string
+ factor int
+ }{
+ {
+ name: "Factor is 0",
+ factor: 0,
+ },
+ {
+ name: "Factor is 1",
+ factor: 1,
+ },
+ {
+ name: "Factor is 256",
+ factor: 256,
+ },
+ } {
+ t.Run(tt.name, func(t *testing.T) {
+ got := SplitFingerprintKeyspaceByFactor(tt.factor)
+
+ if tt.factor == 0 {
+ require.Empty(t, got)
+ return
+ }
+
+ // Check overall min and max values of the ranges.
+ require.Equal(t, model.Fingerprint(math.MaxUint64), got[len(got)-1].Max)
+ require.Equal(t, model.Fingerprint(0), got[0].Min)
+
+ // For each range, check that the max value of the previous range is one less than the min value of the current range.
+ for i := 1; i < len(got); i++ {
+ require.Equal(t, got[i-1].Max+1, got[i].Min)
+ }
+ })
+ }
+}
+
+func Test_FindGapsInFingerprintBounds(t *testing.T) {
+ for _, tc := range []struct {
+ desc string
+ err bool
+ exp []v1.FingerprintBounds
+ ownershipRange v1.FingerprintBounds
+ metas []v1.FingerprintBounds
+ }{
+ {
+ desc: "error nonoverlapping metas",
+ err: true,
+ exp: nil,
+ ownershipRange: v1.NewBounds(0, 10),
+ metas: []v1.FingerprintBounds{v1.NewBounds(11, 20)},
+ },
+ {
+ desc: "one meta with entire ownership range",
+ err: false,
+ exp: nil,
+ ownershipRange: v1.NewBounds(0, 10),
+ metas: []v1.FingerprintBounds{v1.NewBounds(0, 10)},
+ },
+ {
+ desc: "two non-overlapping metas with entire ownership range",
+ err: false,
+ exp: nil,
+ ownershipRange: v1.NewBounds(0, 10),
+ metas: []v1.FingerprintBounds{
+ v1.NewBounds(0, 5),
+ v1.NewBounds(6, 10),
+ },
+ },
+ {
+ desc: "two overlapping metas with entire ownership range",
+ err: false,
+ exp: nil,
+ ownershipRange: v1.NewBounds(0, 10),
+ metas: []v1.FingerprintBounds{
+ v1.NewBounds(0, 6),
+ v1.NewBounds(4, 10),
+ },
+ },
+ {
+ desc: "one meta with partial ownership range",
+ err: false,
+ exp: []v1.FingerprintBounds{
+ v1.NewBounds(6, 10),
+ },
+ ownershipRange: v1.NewBounds(0, 10),
+ metas: []v1.FingerprintBounds{
+ v1.NewBounds(0, 5),
+ },
+ },
+ {
+ desc: "smaller subsequent meta with partial ownership range",
+ err: false,
+ exp: []v1.FingerprintBounds{
+ v1.NewBounds(8, 10),
+ },
+ ownershipRange: v1.NewBounds(0, 10),
+ metas: []v1.FingerprintBounds{
+ v1.NewBounds(0, 7),
+ v1.NewBounds(3, 4),
+ },
+ },
+ {
+ desc: "hole in the middle",
+ err: false,
+ exp: []v1.FingerprintBounds{
+ v1.NewBounds(4, 5),
+ },
+ ownershipRange: v1.NewBounds(0, 10),
+ metas: []v1.FingerprintBounds{
+ v1.NewBounds(0, 3),
+ v1.NewBounds(6, 10),
+ },
+ },
+ {
+ desc: "holes on either end",
+ err: false,
+ exp: []v1.FingerprintBounds{
+ v1.NewBounds(0, 2),
+ v1.NewBounds(8, 10),
+ },
+ ownershipRange: v1.NewBounds(0, 10),
+ metas: []v1.FingerprintBounds{
+ v1.NewBounds(3, 5),
+ v1.NewBounds(6, 7),
+ },
+ },
+ {
+ desc: "full ownership range with single meta",
+ err: false,
+ exp: nil,
+ ownershipRange: v1.NewBounds(0, math.MaxUint64),
+ metas: []v1.FingerprintBounds{
+ v1.NewBounds(0, math.MaxUint64),
+ },
+ },
+ {
+ desc: "full ownership range with multiple metas",
+ err: false,
+ exp: nil,
+ ownershipRange: v1.NewBounds(0, math.MaxUint64),
+ // Three metas covering the whole 0 - MaxUint64
+ metas: []v1.FingerprintBounds{
+ v1.NewBounds(0, math.MaxUint64/3),
+ v1.NewBounds(math.MaxUint64/3+1, math.MaxUint64/2),
+ v1.NewBounds(math.MaxUint64/2+1, math.MaxUint64),
+ },
+ },
+ } {
+ t.Run(tc.desc, func(t *testing.T) {
+ gaps, err := FindGapsInFingerprintBounds(tc.ownershipRange, tc.metas)
+ if tc.err {
+ require.Error(t, err)
+ return
+ }
+ require.Equal(t, tc.exp, gaps)
+ })
+ }
+}
diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go
index a563e80f789fe..e73369aca2d72 100644
--- a/pkg/loki/modules.go
+++ b/pkg/loki/modules.go
@@ -1566,6 +1566,11 @@ func (t *Loki) initBloomPlanner() (services.Service, error) {
return planner.New(
t.Cfg.BloomBuild.Planner,
+ t.Overrides,
+ t.Cfg.SchemaConfig,
+ t.Cfg.StorageConfig,
+ t.ClientMetrics,
+ t.BloomStore,
logger,
prometheus.DefaultRegisterer,
)
diff --git a/pkg/util/limiter/combined_limits.go b/pkg/util/limiter/combined_limits.go
index 39684c7b43e8e..92caf2c19d681 100644
--- a/pkg/util/limiter/combined_limits.go
+++ b/pkg/util/limiter/combined_limits.go
@@ -1,6 +1,8 @@
package limiter
import (
+ bloombuilder "github.com/grafana/loki/v3/pkg/bloombuild/builder"
+ bloomplanner "github.com/grafana/loki/v3/pkg/bloombuild/planner"
"github.com/grafana/loki/v3/pkg/bloomcompactor"
"github.com/grafana/loki/v3/pkg/bloomgateway"
"github.com/grafana/loki/v3/pkg/compactor"
@@ -26,4 +28,6 @@ type CombinedLimits interface {
indexgateway.Limits
bloomgateway.Limits
bloomcompactor.Limits
+ bloomplanner.Limits
+ bloombuilder.Limits
}
diff --git a/pkg/validation/limits.go b/pkg/validation/limits.go
index ca33d1f4bf425..b0660686f5c1b 100644
--- a/pkg/validation/limits.go
+++ b/pkg/validation/limits.go
@@ -205,6 +205,9 @@ type Limits struct {
BloomCompactorMaxBlockSize flagext.ByteSize `yaml:"bloom_compactor_max_block_size" json:"bloom_compactor_max_block_size" category:"experimental"`
BloomCompactorMaxBloomSize flagext.ByteSize `yaml:"bloom_compactor_max_bloom_size" json:"bloom_compactor_max_bloom_size" category:"experimental"`
+ BloomCreationEnabled bool `yaml:"bloom_creation_enabled" json:"bloom_creation_enabled" category:"experimental"`
+ BloomSplitSeriesKeyspaceBy int `yaml:"bloom_split_series_keyspace_by" json:"bloom_split_series_keyspace_by" category:"experimental"`
+
BloomNGramLength int `yaml:"bloom_ngram_length" json:"bloom_ngram_length" category:"experimental"`
BloomNGramSkip int `yaml:"bloom_ngram_skip" json:"bloom_ngram_skip" category:"experimental"`
BloomFalsePositiveRate float64 `yaml:"bloom_false_positive_rate" json:"bloom_false_positive_rate" category:"experimental"`
@@ -380,6 +383,9 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) {
),
)
+ f.BoolVar(&l.BloomCreationEnabled, "bloom-build.enable", false, "Experimental. Whether to create blooms for the tenant.")
+ f.IntVar(&l.BloomSplitSeriesKeyspaceBy, "bloom-build.split-keyspace-by", 256, "Experimental. Number of splits to create for the series keyspace when building blooms. The series keyspace is split into this many parts to parallelize bloom creation.")
+
_ = l.BloomCompactorMaxBloomSize.Set(defaultBloomCompactorMaxBloomSize)
f.Var(&l.BloomCompactorMaxBloomSize, "bloom-compactor.max-bloom-size",
fmt.Sprintf(
@@ -973,6 +979,14 @@ func (o *Overrides) BloomCompactorEnabled(userID string) bool {
return o.getOverridesForUser(userID).BloomCompactorEnabled
}
+func (o *Overrides) BloomCreationEnabled(userID string) bool {
+ return o.getOverridesForUser(userID).BloomCreationEnabled
+}
+
+func (o *Overrides) BloomSplitSeriesKeyspaceBy(userID string) int {
+ return o.getOverridesForUser(userID).BloomSplitSeriesKeyspaceBy
+}
+
func (o *Overrides) BloomNGramLength(userID string) int {
return o.getOverridesForUser(userID).BloomNGramLength
}
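
To tie the new per-tenant limits to the planner's keyspace splitting, a small illustrative sketch follows. The override lookups are faked, and the split helper simply mirrors SplitFingerprintKeyspaceByFactor from the diff above:

```go
package main

import (
	"fmt"
	"math"
)

type bounds struct{ Min, Max uint64 }

// splitKeyspace cuts the 0..MaxUint64 fingerprint space into `factor`
// contiguous, non-overlapping ranges, like SplitFingerprintKeyspaceByFactor.
func splitKeyspace(factor int) []bounds {
	if factor <= 0 {
		return nil
	}
	out := make([]bounds, 0, factor)
	size := uint64(math.MaxUint64) / uint64(factor)
	for i := 0; i < factor; i++ {
		lo := uint64(i) * size
		hi := lo + size - 1
		if i == factor-1 {
			hi = math.MaxUint64
		}
		out = append(out, bounds{Min: lo, Max: hi})
	}
	return out
}

func main() {
	// Pretend per-tenant settings, standing in for the BloomCreationEnabled and
	// BloomSplitSeriesKeyspaceBy overrides added above.
	enabled, splitBy := true, 4
	if !enabled {
		return
	}
	for _, b := range splitKeyspace(splitBy) {
		fmt.Printf("plan work for fingerprints [%d, %d]\n", b.Min, b.Max)
	}
}
```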
|
refactor
|
Compute gaps and build tasks from metas and TSDBs (#12994)
|
8c8fc33d7f1e8a3a345630483556b50397feb79b
|
2019-09-26 18:34:38
|
Xiang Dai
|
doc: skip jb init when using Tanka (#1068)
| false
|
diff --git a/docs/installation/tanka.md b/docs/installation/tanka.md
index 99beee8223207..67cc48bac1ca2 100644
--- a/docs/installation/tanka.md
+++ b/docs/installation/tanka.md
@@ -27,7 +27,6 @@ Grab the Loki module using `jb`:
```bash
$ go get -u github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb
-$ jb init
$ jb install github.com/grafana/loki/production/ksonnet/loki
```
|
doc
|
skip jb init when using Tanka (#1068)
|
6dce98870d8c5c7054b3444d2fe4e66dad262a53
|
2024-04-18 20:01:26
|
Michel Hollands
|
fix: Fix the lokitool imports (#12673)
| false
|
diff --git a/cmd/lokitool/main.go b/cmd/lokitool/main.go
index 155705b07afa7..6b52fb0a3d657 100644
--- a/cmd/lokitool/main.go
+++ b/cmd/lokitool/main.go
@@ -8,7 +8,7 @@ import (
"github.com/prometheus/common/version"
- "github.com/grafana/loki/pkg/tool/commands"
+ "github.com/grafana/loki/v3/pkg/tool/commands"
)
var (
diff --git a/pkg/tool/client/rules.go b/pkg/tool/client/rules.go
index 40dd0e1a292be..d662794d81254 100644
--- a/pkg/tool/client/rules.go
+++ b/pkg/tool/client/rules.go
@@ -10,7 +10,7 @@ import (
log "github.com/sirupsen/logrus"
"gopkg.in/yaml.v3"
- "github.com/grafana/loki/pkg/tool/rules/rwrulefmt"
+ "github.com/grafana/loki/v3/pkg/tool/rules/rwrulefmt"
)
// CreateRuleGroup creates a new rule group
diff --git a/pkg/tool/commands/rules.go b/pkg/tool/commands/rules.go
index d1e16c026b2a6..4abc14162eddd 100644
--- a/pkg/tool/commands/rules.go
+++ b/pkg/tool/commands/rules.go
@@ -15,10 +15,10 @@ import (
"gopkg.in/alecthomas/kingpin.v2"
yamlv3 "gopkg.in/yaml.v3"
- "github.com/grafana/loki/pkg/tool/client"
- "github.com/grafana/loki/pkg/tool/printer"
- "github.com/grafana/loki/pkg/tool/rules"
- "github.com/grafana/loki/pkg/tool/rules/rwrulefmt"
+ "github.com/grafana/loki/v3/pkg/tool/client"
+ "github.com/grafana/loki/v3/pkg/tool/printer"
+ "github.com/grafana/loki/v3/pkg/tool/rules"
+ "github.com/grafana/loki/v3/pkg/tool/rules/rwrulefmt"
)
const (
diff --git a/pkg/tool/commands/rules_test.go b/pkg/tool/commands/rules_test.go
index d1878f856cf5c..fe27da35f9d37 100644
--- a/pkg/tool/commands/rules_test.go
+++ b/pkg/tool/commands/rules_test.go
@@ -7,7 +7,7 @@ import (
"github.com/stretchr/testify/assert"
"gopkg.in/yaml.v3"
- "github.com/grafana/loki/pkg/tool/rules/rwrulefmt"
+ "github.com/grafana/loki/v3/pkg/tool/rules/rwrulefmt"
)
func TestCheckDuplicates(t *testing.T) {
diff --git a/pkg/tool/printer/printer.go b/pkg/tool/printer/printer.go
index f85bd835a85de..084d483a07a45 100644
--- a/pkg/tool/printer/printer.go
+++ b/pkg/tool/printer/printer.go
@@ -13,8 +13,8 @@ import (
"github.com/mitchellh/colorstring"
"gopkg.in/yaml.v3"
- "github.com/grafana/loki/pkg/tool/rules"
- "github.com/grafana/loki/pkg/tool/rules/rwrulefmt"
+ "github.com/grafana/loki/v3/pkg/tool/rules"
+ "github.com/grafana/loki/v3/pkg/tool/rules/rwrulefmt"
)
// Printer is used for printing formatted output from the cortextool
diff --git a/pkg/tool/printer/printer_test.go b/pkg/tool/printer/printer_test.go
index 5c9a84899cd35..c8650d9bd6101 100644
--- a/pkg/tool/printer/printer_test.go
+++ b/pkg/tool/printer/printer_test.go
@@ -9,7 +9,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- "github.com/grafana/loki/pkg/tool/rules/rwrulefmt"
+ "github.com/grafana/loki/v3/pkg/tool/rules/rwrulefmt"
)
func TestPrintRuleSet(t *testing.T) {
diff --git a/pkg/tool/rules/compare.go b/pkg/tool/rules/compare.go
index 728726037acbd..2d64c534e88d1 100644
--- a/pkg/tool/rules/compare.go
+++ b/pkg/tool/rules/compare.go
@@ -10,7 +10,7 @@ import (
"github.com/prometheus/prometheus/model/rulefmt"
yaml "gopkg.in/yaml.v3"
- "github.com/grafana/loki/pkg/tool/rules/rwrulefmt"
+ "github.com/grafana/loki/v3/pkg/tool/rules/rwrulefmt"
)
var (
diff --git a/pkg/tool/rules/compare_test.go b/pkg/tool/rules/compare_test.go
index 0dfda624489b8..4df1aa2ee67af 100644
--- a/pkg/tool/rules/compare_test.go
+++ b/pkg/tool/rules/compare_test.go
@@ -6,7 +6,7 @@ import (
"github.com/prometheus/prometheus/model/rulefmt"
yaml "gopkg.in/yaml.v3"
- "github.com/grafana/loki/pkg/tool/rules/rwrulefmt"
+ "github.com/grafana/loki/v3/pkg/tool/rules/rwrulefmt"
)
func Test_rulesEqual(t *testing.T) {
diff --git a/pkg/tool/rules/parser.go b/pkg/tool/rules/parser.go
index f4017c049f6ef..aa8f833630091 100644
--- a/pkg/tool/rules/parser.go
+++ b/pkg/tool/rules/parser.go
@@ -12,7 +12,7 @@ import (
log "github.com/sirupsen/logrus"
yaml "gopkg.in/yaml.v3"
- "github.com/grafana/loki/pkg/ruler"
+ "github.com/grafana/loki/v3/pkg/ruler"
)
const (
diff --git a/pkg/tool/rules/parser_test.go b/pkg/tool/rules/parser_test.go
index 68f9ff6d70f80..35db097486a81 100644
--- a/pkg/tool/rules/parser_test.go
+++ b/pkg/tool/rules/parser_test.go
@@ -6,7 +6,7 @@ import (
"github.com/prometheus/prometheus/model/rulefmt"
- "github.com/grafana/loki/pkg/tool/rules/rwrulefmt"
+ "github.com/grafana/loki/v3/pkg/tool/rules/rwrulefmt"
)
func TestParseFiles(t *testing.T) {
diff --git a/pkg/tool/rules/rules.go b/pkg/tool/rules/rules.go
index e2c216317c546..eccfbdabe45a4 100644
--- a/pkg/tool/rules/rules.go
+++ b/pkg/tool/rules/rules.go
@@ -8,9 +8,9 @@ import (
"github.com/prometheus/prometheus/promql/parser"
log "github.com/sirupsen/logrus"
- logql "github.com/grafana/loki/pkg/logql/syntax"
+ logql "github.com/grafana/loki/v3/pkg/logql/syntax"
- "github.com/grafana/loki/pkg/tool/rules/rwrulefmt"
+ "github.com/grafana/loki/v3/pkg/tool/rules/rwrulefmt"
)
// RuleNamespace is used to parse a slightly modified prometheus
diff --git a/pkg/tool/rules/rules_test.go b/pkg/tool/rules/rules_test.go
index 690316db2d182..fba13040d49b8 100644
--- a/pkg/tool/rules/rules_test.go
+++ b/pkg/tool/rules/rules_test.go
@@ -8,7 +8,7 @@ import (
"gopkg.in/yaml.v3"
"gotest.tools/assert"
- "github.com/grafana/loki/pkg/tool/rules/rwrulefmt"
+ "github.com/grafana/loki/v3/pkg/tool/rules/rwrulefmt"
)
func TestAggregateBy(t *testing.T) {
|
fix
|
Fix the lokitool imports (#12673)
|
e4a5733ada7fd23d7aba94bc1f2a82db109d27c1
|
2024-03-25 16:49:58
|
Salva Corts
|
fix(blooms): Extract only line filters before line format expressions (#12334)
| false
|
diff --git a/pkg/bloomgateway/bloomgateway.go b/pkg/bloomgateway/bloomgateway.go
index f76d6a55d2a09..97f555cf43073 100644
--- a/pkg/bloomgateway/bloomgateway.go
+++ b/pkg/bloomgateway/bloomgateway.go
@@ -218,7 +218,7 @@ func (g *Gateway) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunk
return nil, errors.New("from time must not be after through time")
}
- filters := syntax.ExtractLineFilters(req.Plan.AST)
+ filters := v1.ExtractTestableLineFilters(req.Plan.AST)
g.metrics.receivedFilters.Observe(float64(len(filters)))
// Shortcut if request does not contain filters
diff --git a/pkg/bloomgateway/querier.go b/pkg/bloomgateway/querier.go
index 171936d9e39c5..32f14b7e668c9 100644
--- a/pkg/bloomgateway/querier.go
+++ b/pkg/bloomgateway/querier.go
@@ -10,8 +10,8 @@ import (
"github.com/prometheus/common/model"
"github.com/grafana/loki/pkg/logproto"
- "github.com/grafana/loki/pkg/logql/syntax"
"github.com/grafana/loki/pkg/querier/plan"
+ v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
"github.com/grafana/loki/pkg/util/constants"
)
@@ -73,7 +73,7 @@ func convertToShortRef(ref *logproto.ChunkRef) *logproto.ShortRef {
func (bq *BloomQuerier) FilterChunkRefs(ctx context.Context, tenant string, from, through model.Time, chunkRefs []*logproto.ChunkRef, queryPlan plan.QueryPlan) ([]*logproto.ChunkRef, error) {
// Shortcut that does not require any filtering
- if len(chunkRefs) == 0 || len(syntax.ExtractLineFilters(queryPlan.AST)) == 0 {
+ if len(chunkRefs) == 0 || len(v1.ExtractTestableLineFilters(queryPlan.AST)) == 0 {
return chunkRefs, nil
}
diff --git a/pkg/storage/bloom/v1/bloom_tester.go b/pkg/storage/bloom/v1/bloom_tester.go
index 956e3af4cfe32..ab9cbcc64a653 100644
--- a/pkg/storage/bloom/v1/bloom_tester.go
+++ b/pkg/storage/bloom/v1/bloom_tester.go
@@ -35,6 +35,39 @@ func (b BloomTests) MatchesWithPrefixBuf(bloom filter.Checker, buf []byte, prefi
return true
}
+// ExtractTestableLineFilters extracts all line filters from an expression
+// that can be tested against a bloom filter. This will skip any line filters
+// after a line format expression. A line format expression might add content
+// that the query later matches against, which can't be tested with a bloom filter.
+// E.g. For {app="fake"} |= "foo" | line_format "thisNewTextShouldMatch" |= "thisNewTextShouldMatch"
+// this function will return only the line filter for "foo" since the line filter for "thisNewTextShouldMatch"
+// wouldn't match against the bloom filter but should match against the query.
+func ExtractTestableLineFilters(expr syntax.Expr) []syntax.LineFilterExpr {
+ if expr == nil {
+ return nil
+ }
+
+ var filters []syntax.LineFilterExpr
+ var lineFmtFound bool
+ visitor := &syntax.DepthFirstTraversal{
+ VisitLineFilterFn: func(v syntax.RootVisitor, e *syntax.LineFilterExpr) {
+ if e != nil && !lineFmtFound {
+ filters = append(filters, *e)
+ }
+ },
+ VisitLineFmtFn: func(v syntax.RootVisitor, e *syntax.LineFmtExpr) {
+ if e != nil {
+ lineFmtFound = true
+ }
+ },
+ }
+ expr.Accept(visitor)
+ return filters
+}
+
+// FiltersToBloomTest converts a list of line filters to a BloomTest.
+// Note that all the line filters should be testable against a bloom filter.
+// Use ExtractTestableLineFilters to extract testable line filters from an expression.
// TODO(owen-d): limits the number of bloom lookups run.
// An arbitrarily high number can overconsume cpu and is a DoS vector.
func FiltersToBloomTest(b NGramBuilder, filters ...syntax.LineFilterExpr) BloomTest {
diff --git a/pkg/storage/bloom/v1/bloom_tester_test.go b/pkg/storage/bloom/v1/bloom_tester_test.go
index c873887acac38..46884140ad59e 100644
--- a/pkg/storage/bloom/v1/bloom_tester_test.go
+++ b/pkg/storage/bloom/v1/bloom_tester_test.go
@@ -154,14 +154,19 @@ func TestFiltersToBloomTests(t *testing.T) {
bloom: fakeBloom{"foo", "bar", "baz", "fuzz", "noz"},
expectMatch: false,
},
+ {
+ name: "line filter after line format",
+ query: `{app="fake"} |= "foo" | line_format "thisNewTextShouldMatch" |= "thisNewTextShouldMatch"`,
+ bloom: fakeBloom{"foo"},
+ expectMatch: true,
+ },
} {
t.Run(tc.name, func(t *testing.T) {
expr, err := syntax.ParseExpr(tc.query)
assert.NoError(t, err)
- filters := syntax.ExtractLineFilters(expr)
+ filters := ExtractTestableLineFilters(expr)
bloomTests := FiltersToBloomTest(fakeNgramBuilder{}, filters...)
-
assert.Equal(t, tc.expectMatch, bloomTests.Matches(tc.bloom))
})
}
diff --git a/pkg/storage/stores/shipper/indexshipper/indexgateway/gateway.go b/pkg/storage/stores/shipper/indexshipper/indexgateway/gateway.go
index a2325bd5c51bb..161323defd9a1 100644
--- a/pkg/storage/stores/shipper/indexshipper/indexgateway/gateway.go
+++ b/pkg/storage/stores/shipper/indexshipper/indexgateway/gateway.go
@@ -18,6 +18,7 @@ import (
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/logql/syntax"
"github.com/grafana/loki/pkg/querier/plan"
+ v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
"github.com/grafana/loki/pkg/storage/chunk"
"github.com/grafana/loki/pkg/storage/config"
"github.com/grafana/loki/pkg/storage/stores"
@@ -229,7 +230,7 @@ func (g *Gateway) GetChunkRef(ctx context.Context, req *logproto.GetChunkRefRequ
// Extract LineFiltersExpr from the plan. If there is none, we can short-circuit and return before making a req
// to the bloom-gateway (through the g.bloomQuerier)
- if len(syntax.ExtractLineFilters(req.Plan.AST)) == 0 {
+ if len(v1.ExtractTestableLineFilters(req.Plan.AST)) == 0 {
return result, nil
}
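
A minimal usage sketch of the helper introduced above, assuming the pre-`/v3` import paths exactly as they appear in this diff; the query string is purely illustrative and mirrors the pattern described in the doc comment (only filters before the line_format stage are kept):

package main

import (
	"fmt"

	"github.com/grafana/loki/pkg/logql/syntax"
	v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
)

func main() {
	// Filters after line_format may match content that line_format synthesizes at
	// query time, so they cannot be tested against a bloom filter.
	expr, err := syntax.ParseExpr(`{app="fake"} |= "foo" | line_format "bar" |= "bar"`)
	if err != nil {
		panic(err)
	}
	filters := v1.ExtractTestableLineFilters(expr)
	fmt.Println(len(filters)) // per the doc comment above, only the |= "foo" filter should remain
}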
|
fix
|
Extract only line filters before line format expressions (#12334)
|
d4cdc376820867cd88248a9b6874e18b1cdac6e6
|
2022-07-18 20:10:01
|
Pablo
|
promtail: Inject tenant ID when receiving X-Scope-OrgID in heroku target (#6695)
| false
|
diff --git a/clients/pkg/promtail/targets/heroku/target.go b/clients/pkg/promtail/targets/heroku/target.go
index d794e6c488288..0b64efbafa402 100644
--- a/clients/pkg/promtail/targets/heroku/target.go
+++ b/clients/pkg/promtail/targets/heroku/target.go
@@ -19,6 +19,7 @@ import (
"github.com/weaveworks/common/server"
"github.com/grafana/loki/clients/pkg/promtail/api"
+ lokiClient "github.com/grafana/loki/clients/pkg/promtail/client"
"github.com/grafana/loki/clients/pkg/promtail/scrapeconfig"
"github.com/grafana/loki/clients/pkg/promtail/targets/target"
@@ -130,12 +131,19 @@ func (h *Target) drain(w http.ResponseWriter, r *http.Request) {
ts = message.Timestamp
}
+ // If the incoming request carries the tenant id, inject it as the reserved label so it's used by the
+ // remote write client.
+ tenantIDHeaderValue := r.Header.Get("X-Scope-OrgID")
+ if tenantIDHeaderValue != "" {
+ lb.Set(lokiClient.ReservedLabelTenantID, tenantIDHeaderValue)
+ }
+
processed := relabel.Process(lb.Labels(), h.relabelConfigs...)
// Start with the set of labels fixed in the configuration
filtered := h.Labels().Clone()
for _, lbl := range processed {
- if strings.HasPrefix(lbl.Name, "__") {
+ if strings.HasPrefix(lbl.Name, "__") && lbl.Name != lokiClient.ReservedLabelTenantID {
continue
}
filtered[model.LabelName(lbl.Name)] = model.LabelValue(lbl.Value)
diff --git a/clients/pkg/promtail/targets/heroku/target_test.go b/clients/pkg/promtail/targets/heroku/target_test.go
index c3c308afeb72e..20bf4a67f87b1 100644
--- a/clients/pkg/promtail/targets/heroku/target_test.go
+++ b/clients/pkg/promtail/targets/heroku/target_test.go
@@ -18,6 +18,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/weaveworks/common/server"
+ lokiClient "github.com/grafana/loki/clients/pkg/promtail/client"
"github.com/grafana/loki/clients/pkg/promtail/client/fake"
"github.com/grafana/loki/clients/pkg/promtail/scrapeconfig"
)
@@ -184,12 +185,7 @@ func TestHerokuDrainTarget(t *testing.T) {
require.NoError(t, err)
require.Equal(t, http.StatusNoContent, res.StatusCode, "expected no-content status code")
- // Wait for them to appear in the test handler
- countdown := 1000
- for len(eh.Received()) != 1 && countdown > 0 {
- time.Sleep(1 * time.Millisecond)
- countdown--
- }
+ waitForMessages(eh)
// Make sure we didn't timeout
require.Equal(t, len(tc.args.RequestBodies), len(eh.Received()))
@@ -247,12 +243,7 @@ func TestHerokuDrainTarget_UseIncomingTimestamp(t *testing.T) {
require.NoError(t, err)
require.Equal(t, http.StatusNoContent, res.StatusCode, "expected no-content status code")
- // Wait for them to appear in the test handler
- countdown := 1000
- for len(eh.Received()) != 1 && countdown > 0 {
- time.Sleep(1 * time.Millisecond)
- countdown--
- }
+ waitForMessages(eh)
// Make sure we didn't timeout
require.Equal(t, 1, len(eh.Received()))
@@ -288,6 +279,56 @@ func TestHerokuDrainTarget_ErrorOnNotPrometheusCompatibleJobName(t *testing.T) {
}
}
+func TestHerokuDrainTarget_UseTenantIDHeaderIfPresent(t *testing.T) {
+ w := log.NewSyncWriter(os.Stderr)
+ logger := log.NewLogfmtLogger(w)
+
+ // Create fake promtail client
+ eh := fake.New(func() {})
+ defer eh.Stop()
+
+ serverConfig, port, err := getServerConfigWithAvailablePort()
+ require.NoError(t, err, "error generating server config or finding open port")
+ config := &scrapeconfig.HerokuDrainTargetConfig{
+ Server: serverConfig,
+ Labels: nil,
+ UseIncomingTimestamp: true,
+ }
+
+ prometheus.DefaultRegisterer = prometheus.NewRegistry()
+ metrics := NewMetrics(prometheus.DefaultRegisterer)
+ pt, err := NewTarget(metrics, logger, eh, "test_job", config, nil)
+ require.NoError(t, err)
+ defer func() {
+ _ = pt.Stop()
+ }()
+
+ // Clear received lines after test case is ran
+ defer eh.Clear()
+
+ req, err := makeDrainRequest(fmt.Sprintf("http://%s:%d", localhost, port), testLogLine1)
+ require.NoError(t, err, "expected test drain request to be successfully created")
+ req.Header.Set("X-Scope-OrgID", "42")
+ res, err := http.DefaultClient.Do(req)
+ require.NoError(t, err)
+ require.Equal(t, http.StatusNoContent, res.StatusCode, "expected no-content status code")
+
+ waitForMessages(eh)
+
+ // Make sure we didn't timeout
+ require.Equal(t, 1, len(eh.Received()))
+
+ require.Equal(t, model.LabelValue("42"), eh.Received()[0].Labels[lokiClient.ReservedLabelTenantID])
+}
+
+func waitForMessages(eh *fake.Client) {
+ countdown := 1000
+ for len(eh.Received()) != 1 && countdown > 0 {
+ time.Sleep(1 * time.Millisecond)
+ countdown--
+ }
+}
+
func getServerConfigWithAvailablePort() (cfg server.Config, port int, err error) {
// Get a randomly available port by open and closing a TCP socket
addr, err := net.ResolveTCPAddr("tcp", localhost+":0")
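
A minimal sketch of a drain request that exercises this change; the URL path and the syslog framing of the body are assumptions here (the real path depends on the heroku_drain target's server configuration, and the tests use a makeDrainRequest helper that is not shown in this diff):

package main

import (
	"net/http"
	"strings"
)

func main() {
	// The octet-counted syslog framing below is only an approximation for illustration.
	body := strings.NewReader(`83 <40>1 2022-07-18T12:00:00+00:00 host app web.1 - State changed from up to down`)
	// Hypothetical drain URL.
	req, err := http.NewRequest(http.MethodPost, "http://localhost:8080/heroku/api/v1/drain", body)
	if err != nil {
		panic(err)
	}
	// With this change, the header value is copied into the reserved tenant label,
	// so the Promtail client pushes the resulting entry under tenant "42".
	req.Header.Set("X-Scope-OrgID", "42")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
}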
|
promtail
|
Inject tenant ID when receiving X-Scope-OrgID in heroku target (#6695)
|
c300086a2be20610ff38f96e298a0d2f330ddbd5
|
2024-11-19 15:32:47
|
Salva Corts
|
docs: Add upgrade guide for new bloom blocks (#15012)
| false
|
diff --git a/docs/sources/setup/upgrade/_index.md b/docs/sources/setup/upgrade/_index.md
index ae4a80e8909ac..dbfb98dad6244 100644
--- a/docs/sources/setup/upgrade/_index.md
+++ b/docs/sources/setup/upgrade/_index.md
@@ -76,6 +76,18 @@ Their YAML counterparts in the `limits_config` block are kept identical.
All other CLI arguments (and their YAML counterparts) prefixed with `-bloom-compactor.` have been removed.
+
+## 3.3.0
+
+### Loki
+
+#### Experimental Bloom Filters
+
+With Loki 3.3.0, the bloom block format changed and any previously created block is incompatible with the new format.
+Before upgrading, we recommend deleting all the existing bloom blocks in the object store. We store bloom blocks and
+metas inside the `bloom` path in the configured object store. To get rid of all the bloom blocks, delete all the objects
+inside the `bloom` path in the object store.
+
## 3.0.0
{{< admonition type="note" >}}
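
One possible way to carry out the deletion described in this upgrade note for an S3-compatible object store, sketched with the minio-go client; the endpoint, bucket name, and credentials are placeholders, and any object-store tooling that can recursively delete a prefix works equally well:

package main

import (
	"context"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	ctx := context.Background()
	// Placeholder endpoint, bucket, and credentials.
	client, err := minio.New("s3.example.com", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS_KEY", "SECRET_KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	// Delete every object under the `bloom` path (blocks and metas).
	for obj := range client.ListObjects(ctx, "loki-data", minio.ListObjectsOptions{Prefix: "bloom/", Recursive: true}) {
		if obj.Err != nil {
			log.Fatal(obj.Err)
		}
		if err := client.RemoveObject(ctx, "loki-data", obj.Key, minio.RemoveObjectOptions{}); err != nil {
			log.Fatal(err)
		}
	}
}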
|
docs
|
Add upgrade guide for new bloom blocks (#15012)
|
3a02d64a04ffe53f11008b66919e62644401240b
|
2025-02-04 12:21:13
|
renovate[bot]
|
chore(deps): update dependency @types/node to v22.13.1 (main) (#16077)
| false
|
diff --git a/pkg/dataobj/explorer/ui/package-lock.json b/pkg/dataobj/explorer/ui/package-lock.json
index ef3e50768ae31..37f8b3267a699 100644
--- a/pkg/dataobj/explorer/ui/package-lock.json
+++ b/pkg/dataobj/explorer/ui/package-lock.json
@@ -1194,9 +1194,9 @@
"license": "MIT"
},
"node_modules/@types/node": {
- "version": "22.13.0",
- "resolved": "https://registry.npmjs.org/@types/node/-/node-22.13.0.tgz",
- "integrity": "sha512-ClIbNe36lawluuvq3+YYhnIN2CELi+6q8NpnM7PYp4hBn/TatfboPgVSm2rwKRfnV2M+Ty9GWDFI64KEe+kysA==",
+ "version": "22.13.1",
+ "resolved": "https://registry.npmjs.org/@types/node/-/node-22.13.1.tgz",
+ "integrity": "sha512-jK8uzQlrvXqEU91UxiK5J7pKHyzgnI1Qnl0QDHIgVGuolJhRb9EEl28Cj9b3rGR8B2lhFCtvIm5os8lFnO/1Ew==",
"dev": true,
"license": "MIT",
"dependencies": {
|
chore
|
update dependency @types/node to v22.13.1 (main) (#16077)
|
cdf084fdaeaf632e7c078022c6ad4322bfef2989
|
2024-09-20 19:25:56
|
Christian Haudum
|
perf(blooms): Remove compression of `.tar` archived bloom blocks (#14159)
| false
|
diff --git a/pkg/bloombuild/builder/builder.go b/pkg/bloombuild/builder/builder.go
index 78932e3f3e9ec..fdeab9cf92c75 100644
--- a/pkg/bloombuild/builder/builder.go
+++ b/pkg/bloombuild/builder/builder.go
@@ -33,6 +33,9 @@ import (
"github.com/grafana/loki/v3/pkg/util/ring"
)
+// TODO(chaudum): Make configurable via (per-tenant?) setting.
+var blockCompressionAlgo = compression.EncNone
+
type Builder struct {
services.Service
@@ -404,7 +407,7 @@ func (b *Builder) processTask(
blockCt++
blk := newBlocks.At()
- built, err := bloomshipper.BlockFrom(tenant, task.Table.Addr(), blk)
+ built, err := bloomshipper.BlockFrom(blockCompressionAlgo, tenant, task.Table.Addr(), blk)
if err != nil {
level.Error(logger).Log("msg", "failed to build block", "err", err)
if err = blk.Reader().Cleanup(); err != nil {
diff --git a/pkg/bloombuild/planner/planner_test.go b/pkg/bloombuild/planner/planner_test.go
index 482d277589c37..ea780c98e8eed 100644
--- a/pkg/bloombuild/planner/planner_test.go
+++ b/pkg/bloombuild/planner/planner_test.go
@@ -202,7 +202,7 @@ func genBlock(ref bloomshipper.BlockRef) (bloomshipper.Block, error) {
block := v1.NewBlock(reader, v1.NewMetrics(nil))
buf := bytes.NewBuffer(nil)
- if err := v1.TarGz(buf, block.Reader()); err != nil {
+ if err := v1.TarCompress(ref.Encoding, buf, block.Reader()); err != nil {
return bloomshipper.Block{}, err
}
@@ -1019,7 +1019,7 @@ func Test_deleteOutdatedMetas(t *testing.T) {
} {
t.Run(tc.name, func(t *testing.T) {
logger := log.NewNopLogger()
- //logger := log.NewLogfmtLogger(os.Stdout)
+ // logger := log.NewLogfmtLogger(os.Stdout)
cfg := Config{
PlanningInterval: 1 * time.Hour,
diff --git a/pkg/bloomgateway/bloomgateway_test.go b/pkg/bloomgateway/bloomgateway_test.go
index 8fdc3989510a3..698b1ecf40e47 100644
--- a/pkg/bloomgateway/bloomgateway_test.go
+++ b/pkg/bloomgateway/bloomgateway_test.go
@@ -165,7 +165,7 @@ func TestBloomGateway_FilterChunkRefs(t *testing.T) {
Through: now,
Refs: groupRefs(t, chunkRefs),
Plan: plan.QueryPlan{AST: expr},
- Blocks: []string{"bloom/invalid/block.tar.gz"},
+ Blocks: []string{"bloom/invalid/block.tar"},
}
ctx := user.InjectOrgID(context.Background(), tenantID)
diff --git a/pkg/compression/encoding.go b/pkg/compression/encoding.go
index 6b421ed976441..ecef31f09325b 100644
--- a/pkg/compression/encoding.go
+++ b/pkg/compression/encoding.go
@@ -13,7 +13,7 @@ type Encoding byte
const (
EncNone Encoding = iota
EncGZIP
- EncDumb
+ EncDumb // not supported
EncLZ4_64k
EncSnappy
EncLZ4_256k
@@ -41,8 +41,6 @@ func (e Encoding) String() string {
return "gzip"
case EncNone:
return "none"
- case EncDumb:
- return "dumb"
case EncLZ4_64k:
return "lz4-64k"
case EncLZ4_256k:
@@ -70,7 +68,6 @@ func ParseEncoding(enc string) (Encoding, error) {
}
}
return 0, fmt.Errorf("invalid encoding: %s, supported: %s", enc, SupportedEncoding())
-
}
// SupportedEncoding returns the list of supported Encoding.
diff --git a/pkg/compression/fileext.go b/pkg/compression/fileext.go
new file mode 100644
index 0000000000000..8cd09c392d082
--- /dev/null
+++ b/pkg/compression/fileext.go
@@ -0,0 +1,50 @@
+package compression
+
+import "fmt"
+
+const (
+ ExtNone = ""
+ ExtGZIP = ".gz"
+ ExtSnappy = ".sz"
+ ExtLZ4 = ".lz4"
+ ExtFlate = ".zz"
+ ExtZstd = ".zst"
+)
+
+func ToFileExtension(e Encoding) string {
+ switch e {
+ case EncNone:
+ return ExtNone
+ case EncGZIP:
+ return ExtGZIP
+ case EncLZ4_64k, EncLZ4_256k, EncLZ4_1M, EncLZ4_4M:
+ return ExtLZ4
+ case EncSnappy:
+ return ExtSnappy
+ case EncFlate:
+ return ExtFlate
+ case EncZstd:
+ return ExtZstd
+ default:
+ panic(fmt.Sprintf("invalid encoding: %d, supported: %s", e, SupportedEncoding()))
+ }
+}
+
+func FromFileExtension(ext string) Encoding {
+ switch ext {
+ case ExtNone:
+ return EncNone
+ case ExtGZIP:
+ return EncGZIP
+ case ExtLZ4:
+ return EncLZ4_4M
+ case ExtSnappy:
+ return EncSnappy
+ case ExtFlate:
+ return EncFlate
+ case ExtZstd:
+ return EncZstd
+ default:
+ panic(fmt.Sprintf("invalid file extension: %s", ext))
+ }
+}
diff --git a/pkg/storage/bloom/v1/archive.go b/pkg/storage/bloom/v1/archive.go
index 201b071b25000..fce83d69e41d9 100644
--- a/pkg/storage/bloom/v1/archive.go
+++ b/pkg/storage/bloom/v1/archive.go
@@ -11,22 +11,34 @@ import (
"github.com/grafana/loki/v3/pkg/compression"
)
+const (
+ ExtTar = ".tar"
+)
+
type TarEntry struct {
Name string
Size int64
Body io.ReadSeeker
}
-func TarGz(dst io.Writer, reader BlockReader) error {
+func TarCompress(enc compression.Encoding, dst io.Writer, reader BlockReader) error {
+ comprPool := compression.GetWriterPool(enc)
+ comprWriter := comprPool.GetWriter(dst)
+ defer func() {
+ comprWriter.Close()
+ comprPool.PutWriter(comprWriter)
+ }()
+
+ return Tar(comprWriter, reader)
+}
+
+func Tar(dst io.Writer, reader BlockReader) error {
itr, err := reader.TarEntries()
if err != nil {
return errors.Wrap(err, "error getting tar entries")
}
- gzipper := compression.GetWriterPool(compression.EncGZIP).GetWriter(dst)
- defer gzipper.Close()
-
- tarballer := tar.NewWriter(gzipper)
+ tarballer := tar.NewWriter(dst)
defer tarballer.Close()
for itr.Next() {
@@ -49,13 +61,19 @@ func TarGz(dst io.Writer, reader BlockReader) error {
return itr.Err()
}
-func UnTarGz(dst string, r io.Reader) error {
- gzipper, err := compression.GetReaderPool(compression.EncGZIP).GetReader(r)
+func UnTarCompress(enc compression.Encoding, dst string, r io.Reader) error {
+ comprPool := compression.GetReaderPool(enc)
+ comprReader, err := comprPool.GetReader(r)
if err != nil {
- return errors.Wrap(err, "error getting gzip reader")
+ return errors.Wrapf(err, "error getting %s reader", enc.String())
}
+ defer comprPool.PutReader(comprReader)
+
+ return UnTar(dst, comprReader)
+}
- tarballer := tar.NewReader(gzipper)
+func UnTar(dst string, r io.Reader) error {
+ tarballer := tar.NewReader(r)
for {
header, err := tarballer.Next()
diff --git a/pkg/storage/bloom/v1/archive_test.go b/pkg/storage/bloom/v1/archive_test.go
index e0d2f69a1c841..b7857a4b5ed11 100644
--- a/pkg/storage/bloom/v1/archive_test.go
+++ b/pkg/storage/bloom/v1/archive_test.go
@@ -24,7 +24,7 @@ func TestArchive(t *testing.T) {
BlockOptions{
Schema: Schema{
version: CurrentSchemaVersion,
- encoding: compression.EncSnappy,
+ encoding: compression.EncNone,
},
SeriesPageSize: 100,
BloomPageSize: 10 << 10,
@@ -40,9 +40,9 @@ func TestArchive(t *testing.T) {
reader := NewDirectoryBlockReader(dir1)
w := bytes.NewBuffer(nil)
- require.Nil(t, TarGz(w, reader))
+ require.Nil(t, Tar(w, reader))
- require.Nil(t, UnTarGz(dir2, w))
+ require.Nil(t, UnTar(dir2, w))
reader2 := NewDirectoryBlockReader(dir2)
@@ -78,3 +78,88 @@ func TestArchive(t *testing.T) {
require.Nil(t, err)
require.Equal(t, srcBloomsBytes, dstBloomsBytes)
}
+
+func TestArchiveCompression(t *testing.T) {
+ t.Parallel()
+ for _, tc := range []struct {
+ enc compression.Encoding
+ }{
+ {compression.EncNone},
+ {compression.EncGZIP},
+ {compression.EncSnappy},
+ {compression.EncLZ4_64k},
+ {compression.EncLZ4_256k},
+ {compression.EncLZ4_1M},
+ {compression.EncLZ4_4M},
+ {compression.EncFlate},
+ {compression.EncZstd},
+ } {
+ t.Run(tc.enc.String(), func(t *testing.T) {
+ // for writing files to two dirs for comparison and ensuring they're equal
+ dir1 := t.TempDir()
+ dir2 := t.TempDir()
+
+ numSeries := 100
+ data, _ := MkBasicSeriesWithBlooms(numSeries, 0x0000, 0xffff, 0, 10000)
+
+ builder, err := NewBlockBuilder(
+ BlockOptions{
+ Schema: Schema{
+ version: CurrentSchemaVersion,
+ encoding: compression.EncNone,
+ },
+ SeriesPageSize: 100,
+ BloomPageSize: 10 << 10,
+ },
+ NewDirectoryBlockWriter(dir1),
+ )
+
+ require.Nil(t, err)
+ itr := v2.NewSliceIter[SeriesWithBlooms](data)
+ _, err = builder.BuildFrom(itr)
+ require.Nil(t, err)
+
+ reader := NewDirectoryBlockReader(dir1)
+
+ w := bytes.NewBuffer(nil)
+ require.Nil(t, TarCompress(tc.enc, w, reader))
+
+ require.Nil(t, UnTarCompress(tc.enc, dir2, w))
+
+ reader2 := NewDirectoryBlockReader(dir2)
+
+ // Check Index is byte for byte equivalent
+ srcIndex, err := reader.Index()
+ require.Nil(t, err)
+ _, err = srcIndex.Seek(0, io.SeekStart)
+ require.Nil(t, err)
+ dstIndex, err := reader2.Index()
+ require.Nil(t, err)
+ _, err = dstIndex.Seek(0, io.SeekStart)
+ require.Nil(t, err)
+
+ srcIndexBytes, err := io.ReadAll(srcIndex)
+ require.Nil(t, err)
+ dstIndexBytes, err := io.ReadAll(dstIndex)
+ require.Nil(t, err)
+ require.Equal(t, srcIndexBytes, dstIndexBytes)
+
+ // Check Blooms is byte for byte equivalent
+ srcBlooms, err := reader.Blooms()
+ require.Nil(t, err)
+ _, err = srcBlooms.Seek(0, io.SeekStart)
+ require.Nil(t, err)
+ dstBlooms, err := reader2.Blooms()
+ require.Nil(t, err)
+ _, err = dstBlooms.Seek(0, io.SeekStart)
+ require.Nil(t, err)
+
+ srcBloomsBytes, err := io.ReadAll(srcBlooms)
+ require.Nil(t, err)
+ dstBloomsBytes, err := io.ReadAll(dstBlooms)
+ require.Nil(t, err)
+ require.Equal(t, srcBloomsBytes, dstBloomsBytes)
+
+ })
+ }
+}
diff --git a/pkg/storage/stores/shipper/bloomshipper/cache.go b/pkg/storage/stores/shipper/bloomshipper/cache.go
index 203d15684502e..838866e1dee81 100644
--- a/pkg/storage/stores/shipper/bloomshipper/cache.go
+++ b/pkg/storage/stores/shipper/bloomshipper/cache.go
@@ -94,15 +94,15 @@ func loadBlockDirectories(root string, logger log.Logger) (keys []string, values
return nil
}
- ref, err := resolver.ParseBlockKey(key(path))
+ // The block file extension (.tar) needs to be added so the key can be parsed.
+ // This is because the extension is stripped off when the tar archive is extracted.
+ ref, err := resolver.ParseBlockKey(key(path + blockExtension))
if err != nil {
return nil
}
if ok, clean := isBlockDir(path, logger); ok {
- // the cache key must not contain the directory prefix
- // therefore we use the defaultKeyResolver to resolve the block's address
- key := defaultKeyResolver{}.Block(ref).Addr()
+ key := cacheKey(ref)
keys = append(keys, key)
values = append(values, NewBlockDirectory(ref, path))
level.Debug(logger).Log("msg", "found block directory", "path", path, "key", key)
diff --git a/pkg/storage/stores/shipper/bloomshipper/cache_test.go b/pkg/storage/stores/shipper/bloomshipper/cache_test.go
index 941b7fa29e99a..763036e56ac76 100644
--- a/pkg/storage/stores/shipper/bloomshipper/cache_test.go
+++ b/pkg/storage/stores/shipper/bloomshipper/cache_test.go
@@ -12,6 +12,7 @@ import (
"github.com/go-kit/log"
"github.com/stretchr/testify/require"
+ "github.com/grafana/loki/v3/pkg/compression"
"github.com/grafana/loki/v3/pkg/logqlmodel/stats"
"github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper/config"
)
@@ -63,7 +64,8 @@ func Test_LoadBlocksDirIntoCache(t *testing.T) {
wd := t.TempDir()
// plain file
- fp, _ := os.Create(filepath.Join(wd, "regular-file.tar.gz"))
+ ext := blockExtension + compression.ExtGZIP
+ fp, _ := os.Create(filepath.Join(wd, "regular-file"+ext))
fp.Close()
// invalid directory
@@ -99,8 +101,8 @@ func Test_LoadBlocksDirIntoCache(t *testing.T) {
require.Equal(t, 1, len(c.entries))
- key := validDir + ".tar.gz" // cache key must not contain directory prefix
- elem, found := c.entries[key]
+ // cache key does neither contain directory prefix nor file extension suffix
+ elem, found := c.entries[validDir]
require.True(t, found)
blockDir := elem.Value.(*Entry).Value
require.Equal(t, filepath.Join(wd, validDir), blockDir.Path)
diff --git a/pkg/storage/stores/shipper/bloomshipper/client.go b/pkg/storage/stores/shipper/bloomshipper/client.go
index 2ce0e0a149ee3..1390b0d9c52e8 100644
--- a/pkg/storage/stores/shipper/bloomshipper/client.go
+++ b/pkg/storage/stores/shipper/bloomshipper/client.go
@@ -7,7 +7,6 @@ import (
"fmt"
"hash"
"io"
- "strings"
"sync"
"time"
@@ -18,6 +17,7 @@ import (
"github.com/pkg/errors"
"github.com/prometheus/common/model"
+ "github.com/grafana/loki/v3/pkg/compression"
v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1"
"github.com/grafana/loki/v3/pkg/storage/chunk/client"
"github.com/grafana/loki/v3/pkg/storage/chunk/client/util"
@@ -73,6 +73,7 @@ func (r Ref) Interval() Interval {
type BlockRef struct {
Ref
+ compression.Encoding
}
func (r BlockRef) String() string {
@@ -208,29 +209,31 @@ func (c ClosableReadSeekerAdapter) Close() error {
return nil
}
-func BlockRefFrom(tenant, table string, md v1.BlockMetadata) BlockRef {
- return BlockRef{
- Ref: Ref{
- TenantID: tenant,
- TableName: table,
- Bounds: md.Series.Bounds,
- StartTimestamp: md.Series.FromTs,
- EndTimestamp: md.Series.ThroughTs,
- Checksum: md.Checksum,
- },
+func newRefFrom(tenant, table string, md v1.BlockMetadata) Ref {
+ return Ref{
+ TenantID: tenant,
+ TableName: table,
+ Bounds: md.Series.Bounds,
+ StartTimestamp: md.Series.FromTs,
+ EndTimestamp: md.Series.ThroughTs,
+ Checksum: md.Checksum,
}
}
-func BlockFrom(tenant, table string, blk *v1.Block) (Block, error) {
+func newBlockRefWithEncoding(ref Ref, enc compression.Encoding) BlockRef {
+ return BlockRef{Ref: ref, Encoding: enc}
+}
+
+func BlockFrom(enc compression.Encoding, tenant, table string, blk *v1.Block) (Block, error) {
md, _ := blk.Metadata()
- ref := BlockRefFrom(tenant, table, md)
+ ref := newBlockRefWithEncoding(newRefFrom(tenant, table, md), enc)
// TODO(owen-d): pool
buf := bytes.NewBuffer(nil)
- err := v1.TarGz(buf, blk.Reader())
+ err := v1.TarCompress(ref.Encoding, buf, blk.Reader())
if err != nil {
- return Block{}, errors.Wrap(err, "archiving+compressing block")
+ return Block{}, err
}
reader := bytes.NewReader(buf.Bytes())
@@ -320,15 +323,14 @@ func (b *BloomClient) GetBlock(ctx context.Context, ref BlockRef) (BlockDirector
}
defer rc.Close()
- path := b.fsResolver.Block(ref).LocalPath()
- // the block directory should not contain the .tar.gz extension
- path = strings.TrimSuffix(path, ".tar.gz")
+ // the block directory must not contain the .tar(.compression) extension
+ path := localFilePathWithoutExtension(ref, b.fsResolver)
err = util.EnsureDirectory(path)
if err != nil {
return BlockDirectory{}, fmt.Errorf("failed to create block directory %s: %w", path, err)
}
- err = v1.UnTarGz(path, rc)
+ err = v1.UnTarCompress(ref.Encoding, path, rc)
if err != nil {
return BlockDirectory{}, fmt.Errorf("failed to extract block file %s: %w", key, err)
}
diff --git a/pkg/storage/stores/shipper/bloomshipper/client_test.go b/pkg/storage/stores/shipper/bloomshipper/client_test.go
index dff01dcae50a3..13ce7a7c97ae6 100644
--- a/pkg/storage/stores/shipper/bloomshipper/client_test.go
+++ b/pkg/storage/stores/shipper/bloomshipper/client_test.go
@@ -14,12 +14,25 @@ import (
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
+ "github.com/grafana/loki/v3/pkg/compression"
v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1"
"github.com/grafana/loki/v3/pkg/storage/chunk/client"
"github.com/grafana/loki/v3/pkg/storage/chunk/client/testutils"
"github.com/grafana/loki/v3/pkg/storage/config"
)
+var supportedCompressions = []compression.Encoding{
+ compression.EncNone,
+ compression.EncGZIP,
+ compression.EncSnappy,
+ compression.EncLZ4_64k,
+ compression.EncLZ4_256k,
+ compression.EncLZ4_1M,
+ compression.EncLZ4_4M,
+ compression.EncFlate,
+ compression.EncZstd,
+}
+
func parseTime(s string) model.Time {
t, err := time.Parse("2006-01-02 15:04", s)
if err != nil {
@@ -196,18 +209,18 @@ func TestBloomClient_DeleteMetas(t *testing.T) {
})
}
-func putBlock(t *testing.T, c *BloomClient, tenant string, start model.Time, minFp, maxFp model.Fingerprint) (Block, error) {
+func putBlock(t *testing.T, c *BloomClient, tenant string, start model.Time, minFp, maxFp model.Fingerprint, enc compression.Encoding) (Block, error) {
step := int64((24 * time.Hour).Seconds())
day := start.Unix() / step
tmpDir := t.TempDir()
- fp, _ := os.CreateTemp(t.TempDir(), "*.tar.gz")
+ fp, _ := os.CreateTemp(t.TempDir(), "*"+blockExtension+compression.ToFileExtension(enc))
blockWriter := v1.NewDirectoryBlockWriter(tmpDir)
err := blockWriter.Init()
require.NoError(t, err)
- err = v1.TarGz(fp, v1.NewDirectoryBlockReader(tmpDir))
+ err = v1.TarCompress(enc, fp, v1.NewDirectoryBlockReader(tmpDir))
require.NoError(t, err)
_, _ = fp.Seek(0, 0)
@@ -221,40 +234,48 @@ func putBlock(t *testing.T, c *BloomClient, tenant string, start model.Time, min
StartTimestamp: start,
EndTimestamp: start.Add(12 * time.Hour),
},
+ Encoding: enc,
},
Data: fp,
}
- return block, c.client.PutObject(context.Background(), c.Block(block.BlockRef).Addr(), block.Data)
+ key := c.Block(block.BlockRef).Addr()
+ t.Logf("PUT block to storage: %s", key)
+ return block, c.client.PutObject(context.Background(), key, block.Data)
}
func TestBloomClient_GetBlock(t *testing.T) {
- c, _ := newMockBloomClient(t)
- ctx := context.Background()
-
- b, err := putBlock(t, c, "tenant", parseTime("2024-02-05 00:00"), 0x0000, 0xffff)
- require.NoError(t, err)
+ for _, enc := range supportedCompressions {
+ c, _ := newMockBloomClient(t)
+ ctx := context.Background()
- t.Run("exists", func(t *testing.T) {
- blockDir, err := c.GetBlock(ctx, b.BlockRef)
+ b, err := putBlock(t, c, "tenant", parseTime("2024-02-05 00:00"), 0x0000, 0xffff, enc)
require.NoError(t, err)
- require.Equal(t, b.BlockRef, blockDir.BlockRef)
- })
- t.Run("does not exist", func(t *testing.T) {
- blockDir, err := c.GetBlock(ctx, BlockRef{})
- require.Error(t, err)
- require.True(t, c.client.IsObjectNotFoundErr(err))
- require.Equal(t, blockDir, BlockDirectory{})
- })
+ t.Run(enc.String(), func(t *testing.T) {
+
+ t.Run("exists", func(t *testing.T) {
+ blockDir, err := c.GetBlock(ctx, b.BlockRef)
+ require.NoError(t, err)
+ require.Equal(t, b.BlockRef, blockDir.BlockRef)
+ })
+
+ t.Run("does not exist", func(t *testing.T) {
+ blockDir, err := c.GetBlock(ctx, BlockRef{})
+ require.Error(t, err)
+ require.True(t, c.client.IsObjectNotFoundErr(err))
+ require.Equal(t, blockDir, BlockDirectory{})
+ })
+ })
+ }
}
func TestBloomClient_GetBlocks(t *testing.T) {
c, _ := newMockBloomClient(t)
ctx := context.Background()
- b1, err := putBlock(t, c, "tenant", parseTime("2024-02-05 00:00"), 0x0000, 0x0fff)
+ b1, err := putBlock(t, c, "tenant", parseTime("2024-02-05 00:00"), 0x0000, 0x0fff, compression.EncGZIP)
require.NoError(t, err)
- b2, err := putBlock(t, c, "tenant", parseTime("2024-02-05 00:00"), 0x1000, 0xffff)
+ b2, err := putBlock(t, c, "tenant", parseTime("2024-02-05 00:00"), 0x1000, 0xffff, compression.EncNone)
require.NoError(t, err)
t.Run("exists", func(t *testing.T) {
@@ -271,57 +292,62 @@ func TestBloomClient_GetBlocks(t *testing.T) {
}
func TestBloomClient_PutBlock(t *testing.T) {
- c, _ := newMockBloomClient(t)
- ctx := context.Background()
-
- start := parseTime("2024-02-05 12:00")
-
- tmpDir := t.TempDir()
- fp, _ := os.CreateTemp(t.TempDir(), "*.tar.gz")
-
- blockWriter := v1.NewDirectoryBlockWriter(tmpDir)
- err := blockWriter.Init()
- require.NoError(t, err)
-
- err = v1.TarGz(fp, v1.NewDirectoryBlockReader(tmpDir))
- require.NoError(t, err)
-
- block := Block{
- BlockRef: BlockRef{
- Ref: Ref{
- TenantID: "tenant",
- Bounds: v1.NewBounds(0x0000, 0xffff),
- TableName: "table_1234",
- StartTimestamp: start,
- EndTimestamp: start.Add(12 * time.Hour),
- },
- },
- Data: fp,
+ for _, enc := range supportedCompressions {
+ t.Run(enc.String(), func(t *testing.T) {
+ c, _ := newMockBloomClient(t)
+ ctx := context.Background()
+
+ start := parseTime("2024-02-05 12:00")
+
+ tmpDir := t.TempDir()
+ fp, _ := os.CreateTemp(t.TempDir(), "*"+blockExtension+compression.ToFileExtension(enc))
+
+ blockWriter := v1.NewDirectoryBlockWriter(tmpDir)
+ err := blockWriter.Init()
+ require.NoError(t, err)
+
+ err = v1.TarCompress(enc, fp, v1.NewDirectoryBlockReader(tmpDir))
+ require.NoError(t, err)
+
+ block := Block{
+ BlockRef: BlockRef{
+ Ref: Ref{
+ TenantID: "tenant",
+ Bounds: v1.NewBounds(0x0000, 0xffff),
+ TableName: "table_1234",
+ StartTimestamp: start,
+ EndTimestamp: start.Add(12 * time.Hour),
+ },
+ Encoding: enc,
+ },
+ Data: fp,
+ }
+
+ err = c.PutBlock(ctx, block)
+ require.NoError(t, err)
+
+ oc := c.client.(*testutils.InMemoryObjectClient)
+ stored := oc.Internals()
+ _, found := stored[c.Block(block.BlockRef).Addr()]
+ require.True(t, found)
+
+ blockDir, err := c.GetBlock(ctx, block.BlockRef)
+ require.NoError(t, err)
+
+ require.Equal(t, block.BlockRef, blockDir.BlockRef)
+ })
}
-
- err = c.PutBlock(ctx, block)
- require.NoError(t, err)
-
- oc := c.client.(*testutils.InMemoryObjectClient)
- stored := oc.Internals()
- _, found := stored[c.Block(block.BlockRef).Addr()]
- require.True(t, found)
-
- blockDir, err := c.GetBlock(ctx, block.BlockRef)
- require.NoError(t, err)
-
- require.Equal(t, block.BlockRef, blockDir.BlockRef)
}
func TestBloomClient_DeleteBlocks(t *testing.T) {
c, _ := newMockBloomClient(t)
ctx := context.Background()
- b1, err := putBlock(t, c, "tenant", parseTime("2024-02-05 00:00"), 0x0000, 0xffff)
+ b1, err := putBlock(t, c, "tenant", parseTime("2024-02-05 00:00"), 0x0000, 0xffff, compression.EncNone)
require.NoError(t, err)
- b2, err := putBlock(t, c, "tenant", parseTime("2024-02-06 00:00"), 0x0000, 0xffff)
+ b2, err := putBlock(t, c, "tenant", parseTime("2024-02-06 00:00"), 0x0000, 0xffff, compression.EncGZIP)
require.NoError(t, err)
- b3, err := putBlock(t, c, "tenant", parseTime("2024-02-07 00:00"), 0x0000, 0xffff)
+ b3, err := putBlock(t, c, "tenant", parseTime("2024-02-07 00:00"), 0x0000, 0xffff, compression.EncSnappy)
require.NoError(t, err)
oc := c.client.(*testutils.InMemoryObjectClient)
diff --git a/pkg/storage/stores/shipper/bloomshipper/compress_utils.go b/pkg/storage/stores/shipper/bloomshipper/compress_utils.go
deleted file mode 100644
index 52de4a4da5820..0000000000000
--- a/pkg/storage/stores/shipper/bloomshipper/compress_utils.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package bloomshipper
-
-import (
- "os"
-
- "github.com/go-kit/log"
- "github.com/go-kit/log/level"
-
- v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1"
-)
-
-func CompressBloomBlock(ref BlockRef, archivePath, localDst string, logger log.Logger) (Block, error) {
- blockToUpload := Block{}
- archiveFile, err := os.Create(archivePath)
- if err != nil {
- return blockToUpload, err
- }
-
- err = v1.TarGz(archiveFile, v1.NewDirectoryBlockReader(localDst))
- if err != nil {
- level.Error(logger).Log("msg", "creating bloom block archive file", "err", err)
- return blockToUpload, err
- }
-
- blockToUpload.BlockRef = ref
- blockToUpload.Data = archiveFile
-
- return blockToUpload, nil
-}
diff --git a/pkg/storage/stores/shipper/bloomshipper/compress_utils_test.go b/pkg/storage/stores/shipper/bloomshipper/compress_utils_test.go
deleted file mode 100644
index f0b1598dadf9e..0000000000000
--- a/pkg/storage/stores/shipper/bloomshipper/compress_utils_test.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package bloomshipper
-
-import (
- "bytes"
- "io"
- "os"
- "path/filepath"
- "testing"
-
- "github.com/google/uuid"
- "github.com/stretchr/testify/require"
-
- v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1"
-)
-
-func directoryDoesNotExist(path string) bool {
- _, err := os.Lstat(path)
- return err != nil
-}
-
-const testArchiveFileName = "test-block-archive"
-
-func createBlockArchive(t *testing.T) (string, io.Reader, string, string) {
- dir := t.TempDir()
- mockBlockDir := filepath.Join(dir, "mock-block-dir")
- err := os.MkdirAll(mockBlockDir, 0777)
- require.NoError(t, err)
- bloomFile, err := os.Create(filepath.Join(mockBlockDir, v1.BloomFileName))
- require.NoError(t, err)
- bloomFileContent := uuid.NewString()
- _, err = io.Copy(bloomFile, bytes.NewReader([]byte(bloomFileContent)))
- require.NoError(t, err)
-
- seriesFile, err := os.Create(filepath.Join(mockBlockDir, v1.SeriesFileName))
- require.NoError(t, err)
- seriesFileContent := uuid.NewString()
- _, err = io.Copy(seriesFile, bytes.NewReader([]byte(seriesFileContent)))
- require.NoError(t, err)
-
- blockFilePath := filepath.Join(dir, testArchiveFileName)
- file, err := os.OpenFile(blockFilePath, os.O_CREATE|os.O_RDWR, 0700)
- require.NoError(t, err)
- err = v1.TarGz(file, v1.NewDirectoryBlockReader(mockBlockDir))
- require.NoError(t, err)
-
- _, _ = file.Seek(0, 0)
-
- return blockFilePath, file, bloomFileContent, seriesFileContent
-}
diff --git a/pkg/storage/stores/shipper/bloomshipper/fetcher.go b/pkg/storage/stores/shipper/bloomshipper/fetcher.go
index 42d8d116b64a8..053078180547a 100644
--- a/pkg/storage/stores/shipper/bloomshipper/fetcher.go
+++ b/pkg/storage/stores/shipper/bloomshipper/fetcher.go
@@ -5,7 +5,6 @@ import (
"encoding/json"
"os"
"path/filepath"
- "strings"
"sync"
"time"
@@ -240,7 +239,7 @@ func (f *Fetcher) FetchBlocks(ctx context.Context, refs []BlockRef, opts ...Fetc
var enqueueTime time.Duration
for i := 0; i < n; i++ {
- key := f.client.Block(refs[i]).Addr()
+ key := cacheKey(refs[i])
dir, isFound, err := f.fromCache(ctx, key)
if err != nil {
return results, err
@@ -346,7 +345,7 @@ func (f *Fetcher) processTask(ctx context.Context, task downloadRequest[BlockRef
return
}
- key := f.client.Block(result.BlockRef).Addr()
+ key := cacheKey(result.BlockRef)
if task.async {
// put item into cache
err = f.blocksCache.Put(ctx, key, result)
@@ -407,10 +406,9 @@ func (f *Fetcher) loadBlocksFromFS(_ context.Context, refs []BlockRef) ([]BlockD
missing := make([]BlockRef, 0, len(refs))
for _, ref := range refs {
- path := f.localFSResolver.Block(ref).LocalPath()
- // the block directory does not contain the .tar.gz extension
+ // the block directory does not contain the .tar(.compression) extension
// since it is stripped when the archive is extracted into a folder
- path = strings.TrimSuffix(path, ".tar.gz")
+ path := localFilePathWithoutExtension(ref, f.localFSResolver)
if ok, clean := f.isBlockDir(path); ok {
blockDirs = append(blockDirs, NewBlockDirectory(ref, path))
} else {
diff --git a/pkg/storage/stores/shipper/bloomshipper/fetcher_test.go b/pkg/storage/stores/shipper/bloomshipper/fetcher_test.go
index e7723b6d26536..9361c35e90ebd 100644
--- a/pkg/storage/stores/shipper/bloomshipper/fetcher_test.go
+++ b/pkg/storage/stores/shipper/bloomshipper/fetcher_test.go
@@ -6,7 +6,6 @@ import (
"fmt"
"os"
"path/filepath"
- "strings"
"testing"
"time"
@@ -15,6 +14,7 @@ import (
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
+ "github.com/grafana/loki/v3/pkg/compression"
v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1"
"github.com/grafana/loki/v3/pkg/storage/chunk/cache"
"github.com/grafana/loki/v3/pkg/storage/chunk/client/local"
@@ -329,16 +329,16 @@ func TestFetcher_LoadBlocksFromFS(t *testing.T) {
refs := []BlockRef{
// no directory for block
- {Ref: Ref{TenantID: "tenant", TableName: "12345", Bounds: v1.NewBounds(0x0000, 0x0fff)}},
+ {Ref: Ref{TenantID: "tenant", TableName: "12345", Bounds: v1.NewBounds(0x0000, 0x0fff)}, Encoding: compression.EncNone},
// invalid directory for block
- {Ref: Ref{TenantID: "tenant", TableName: "12345", Bounds: v1.NewBounds(0x1000, 0x1fff)}},
+ {Ref: Ref{TenantID: "tenant", TableName: "12345", Bounds: v1.NewBounds(0x1000, 0x1fff)}, Encoding: compression.EncSnappy},
// valid directory for block
- {Ref: Ref{TenantID: "tenant", TableName: "12345", Bounds: v1.NewBounds(0x2000, 0x2fff)}},
+ {Ref: Ref{TenantID: "tenant", TableName: "12345", Bounds: v1.NewBounds(0x2000, 0x2fff)}, Encoding: compression.EncGZIP},
}
dirs := []string{
- strings.TrimSuffix(resolver.Block(refs[0]).LocalPath(), ".tar.gz"),
- strings.TrimSuffix(resolver.Block(refs[1]).LocalPath(), ".tar.gz"),
- strings.TrimSuffix(resolver.Block(refs[2]).LocalPath(), ".tar.gz"),
+ localFilePathWithoutExtension(refs[0], resolver),
+ localFilePathWithoutExtension(refs[1], resolver),
+ localFilePathWithoutExtension(refs[2], resolver),
}
createBlockDir(t, dirs[1])
@@ -360,7 +360,7 @@ func TestFetcher_LoadBlocksFromFS(t *testing.T) {
require.Len(t, found, 1)
require.Len(t, missing, 2)
- require.Equal(t, refs[2], found[0].BlockRef)
+ require.Equal(t, refs[2].Ref, found[0].Ref)
require.ElementsMatch(t, refs[0:2], missing)
}
diff --git a/pkg/storage/stores/shipper/bloomshipper/resolver.go b/pkg/storage/stores/shipper/bloomshipper/resolver.go
index 8f86ce7cb09ee..3115f731fe13f 100644
--- a/pkg/storage/stores/shipper/bloomshipper/resolver.go
+++ b/pkg/storage/stores/shipper/bloomshipper/resolver.go
@@ -9,6 +9,7 @@ import (
"strconv"
"strings"
+ "github.com/grafana/loki/v3/pkg/compression"
v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1"
)
@@ -17,8 +18,8 @@ const (
MetasPrefix = "metas"
BlocksPrefix = "blocks"
- extTarGz = ".tar.gz"
- extJSON = ".json"
+ metaExtension = ".json"
+ blockExtension = v1.ExtTar
)
// KeyResolver is an interface for resolving keys to locations.
@@ -44,7 +45,7 @@ func (defaultKeyResolver) Meta(ref MetaRef) Location {
fmt.Sprintf("%v", ref.TableName),
ref.TenantID,
MetasPrefix,
- fmt.Sprintf("%v-%x%s", ref.Bounds, ref.Checksum, extJSON),
+ fmt.Sprintf("%v-%x%s", ref.Bounds, ref.Checksum, metaExtension),
}
}
@@ -58,7 +59,7 @@ func (defaultKeyResolver) ParseMetaKey(loc Location) (MetaRef, error) {
if err != nil {
return MetaRef{}, fmt.Errorf("failed to parse bounds of meta key %s : %w", loc, err)
}
- withoutExt := strings.TrimSuffix(fnParts[2], extJSON)
+ withoutExt := strings.TrimSuffix(fnParts[2], metaExtension)
checksum, err := strconv.ParseUint(withoutExt, 16, 64)
if err != nil {
return MetaRef{}, fmt.Errorf("failed to parse checksum of meta key %s : %w", loc, err)
@@ -80,28 +81,44 @@ func (defaultKeyResolver) ParseMetaKey(loc Location) (MetaRef, error) {
}
func (defaultKeyResolver) Block(ref BlockRef) Location {
+ ext := blockExtension + compression.ToFileExtension(ref.Encoding)
return simpleLocation{
BloomPrefix,
fmt.Sprintf("%v", ref.TableName),
ref.TenantID,
BlocksPrefix,
ref.Bounds.String(),
- fmt.Sprintf("%d-%d-%x%s", ref.StartTimestamp, ref.EndTimestamp, ref.Checksum, extTarGz),
+ fmt.Sprintf("%d-%d-%x%s", ref.StartTimestamp, ref.EndTimestamp, ref.Checksum, ext),
}
}
func (defaultKeyResolver) ParseBlockKey(loc Location) (BlockRef, error) {
dir, fn := path.Split(loc.Addr())
+
+ ext, enc := path.Ext(fn), compression.EncNone
+ if ext != "" && ext != blockExtension {
+ // trim compression extension
+ fn = strings.TrimSuffix(fn, ext)
+ enc = compression.FromFileExtension(ext)
+ ext = path.Ext(fn)
+ if ext != "" && ext != blockExtension {
+ return BlockRef{}, fmt.Errorf("failed to parse block. invalid block extension: %s, expected %s", ext, blockExtension)
+ }
+ }
+ // trim tar extension
+ fn = strings.TrimSuffix(fn, ext)
+
fnParts := strings.Split(fn, "-")
if len(fnParts) != 3 {
return BlockRef{}, fmt.Errorf("failed to split filename parts of block key %s : len must be 3, but was %d", loc, len(fnParts))
}
+
interval, err := ParseIntervalFromParts(fnParts[0], fnParts[1])
if err != nil {
return BlockRef{}, fmt.Errorf("failed to parse bounds of meta key %s : %w", loc, err)
}
- withoutExt := strings.TrimSuffix(fnParts[2], extTarGz)
- checksum, err := strconv.ParseUint(withoutExt, 16, 64)
+
+ checksum, err := strconv.ParseUint(fnParts[2], 16, 64)
if err != nil {
return BlockRef{}, fmt.Errorf("failed to parse checksum of meta key %s : %w", loc, err)
}
@@ -125,6 +142,7 @@ func (defaultKeyResolver) ParseBlockKey(loc Location) (BlockRef, error) {
EndTimestamp: interval.End,
Checksum: uint32(checksum),
},
+ Encoding: enc,
}, nil
}
@@ -266,3 +284,11 @@ func (ls locations) LocalPath() string {
return filepath.Join(xs...)
}
+
+func cacheKey(ref BlockRef) string {
+ return strings.TrimSuffix(defaultKeyResolver{}.Block(ref).Addr(), blockExtension+compression.ToFileExtension(ref.Encoding))
+}
+
+func localFilePathWithoutExtension(ref BlockRef, res KeyResolver) string {
+ return strings.TrimSuffix(res.Block(ref).LocalPath(), blockExtension+compression.ToFileExtension(ref.Encoding))
+}
diff --git a/pkg/storage/stores/shipper/bloomshipper/resolver_test.go b/pkg/storage/stores/shipper/bloomshipper/resolver_test.go
index ba45845ea9ba5..259bf7b2db3a3 100644
--- a/pkg/storage/stores/shipper/bloomshipper/resolver_test.go
+++ b/pkg/storage/stores/shipper/bloomshipper/resolver_test.go
@@ -5,6 +5,7 @@ import (
"github.com/stretchr/testify/require"
+ "github.com/grafana/loki/v3/pkg/compression"
v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1"
)
@@ -31,27 +32,50 @@ func TestResolver_ParseMetaKey(t *testing.T) {
}
func TestResolver_ParseBlockKey(t *testing.T) {
- r := defaultKeyResolver{}
- ref := BlockRef{
- Ref: Ref{
- TenantID: "tenant",
- TableName: "table_1",
- Bounds: v1.NewBounds(0x0000, 0xffff),
- StartTimestamp: 0,
- EndTimestamp: 3600000,
- Checksum: 43981,
- },
- }
+ for _, tc := range []struct {
+ srcEnc, dstEnc compression.Encoding
+ }{
+ {compression.EncNone, compression.EncNone},
+ {compression.EncGZIP, compression.EncGZIP},
+ {compression.EncSnappy, compression.EncSnappy},
+ {compression.EncLZ4_64k, compression.EncLZ4_4M},
+ {compression.EncLZ4_256k, compression.EncLZ4_4M},
+ {compression.EncLZ4_1M, compression.EncLZ4_4M},
+ {compression.EncLZ4_4M, compression.EncLZ4_4M},
+ {compression.EncFlate, compression.EncFlate},
+ {compression.EncZstd, compression.EncZstd},
+ } {
+ t.Run(tc.srcEnc.String(), func(t *testing.T) {
+ r := defaultKeyResolver{}
+ ref := BlockRef{
+ Ref: Ref{
+ TenantID: "tenant",
+ TableName: "table_1",
+ Bounds: v1.NewBounds(0x0000, 0xffff),
+ StartTimestamp: 0,
+ EndTimestamp: 3600000,
+ Checksum: 43981,
+ },
+ Encoding: tc.srcEnc,
+ }
- // encode block ref as string
- loc := r.Block(ref)
- path := loc.LocalPath()
- require.Equal(t, "bloom/table_1/tenant/blocks/0000000000000000-000000000000ffff/0-3600000-abcd.tar.gz", path)
+ // encode block ref as string
+ loc := r.Block(ref)
+ path := loc.LocalPath()
+ fn := "bloom/table_1/tenant/blocks/0000000000000000-000000000000ffff/0-3600000-abcd"
+ require.Equal(t, fn+blockExtension+compression.ToFileExtension(tc.srcEnc), path)
+
+ // parse encoded string into block ref
+ parsed, err := r.ParseBlockKey(key(path))
+ require.NoError(t, err)
+ expected := BlockRef{
+ Ref: ref.Ref,
+ Encoding: tc.dstEnc,
+ }
+ require.Equal(t, expected, parsed)
+ })
+ }
- // parse encoded string into block ref
- parsed, err := r.ParseBlockKey(key(path))
- require.NoError(t, err)
- require.Equal(t, ref, parsed)
}
func TestResolver_ShardedPrefixedResolver(t *testing.T) {
@@ -87,7 +111,7 @@ func TestResolver_ShardedPrefixedResolver(t *testing.T) {
loc := r.Meta(metaRef)
require.Equal(t, "prefix/bloom/table_1/tenant/metas/0000000000000000-000000000000ffff-abcd.json", loc.LocalPath())
loc = r.Block(blockRef)
- require.Equal(t, "prefix/bloom/table_1/tenant/blocks/0000000000000000-000000000000ffff/0-3600000-bcde.tar.gz", loc.LocalPath())
+ require.Equal(t, "prefix/bloom/table_1/tenant/blocks/0000000000000000-000000000000ffff/0-3600000-bcde.tar", loc.LocalPath())
})
t.Run("multiple prefixes", func(t *testing.T) {
@@ -96,6 +120,6 @@ func TestResolver_ShardedPrefixedResolver(t *testing.T) {
loc := r.Meta(metaRef)
require.Equal(t, "b/bloom/table_1/tenant/metas/0000000000000000-000000000000ffff-abcd.json", loc.LocalPath())
loc = r.Block(blockRef)
- require.Equal(t, "d/bloom/table_1/tenant/blocks/0000000000000000-000000000000ffff/0-3600000-bcde.tar.gz", loc.LocalPath())
+ require.Equal(t, "d/bloom/table_1/tenant/blocks/0000000000000000-000000000000ffff/0-3600000-bcde.tar", loc.LocalPath())
})
}
diff --git a/pkg/storage/stores/shipper/bloomshipper/store_test.go b/pkg/storage/stores/shipper/bloomshipper/store_test.go
index 6a6705f8f0be0..674e0c02a506b 100644
--- a/pkg/storage/stores/shipper/bloomshipper/store_test.go
+++ b/pkg/storage/stores/shipper/bloomshipper/store_test.go
@@ -15,6 +15,7 @@ import (
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
+ "github.com/grafana/loki/v3/pkg/compression"
"github.com/grafana/loki/v3/pkg/storage"
v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1"
"github.com/grafana/loki/v3/pkg/storage/chunk/cache"
@@ -109,13 +110,14 @@ func createMetaInStorage(store *BloomStore, tenant string, start model.Time, min
func createBlockInStorage(t *testing.T, store *BloomStore, tenant string, start model.Time, minFp, maxFp model.Fingerprint) (Block, error) {
tmpDir := t.TempDir()
- fp, _ := os.CreateTemp(t.TempDir(), "*.tar.gz")
+ fp, _ := os.CreateTemp(t.TempDir(), "*.tar")
blockWriter := v1.NewDirectoryBlockWriter(tmpDir)
err := blockWriter.Init()
require.NoError(t, err)
- err = v1.TarGz(fp, v1.NewDirectoryBlockReader(tmpDir))
+ enc := compression.EncGZIP
+ err = v1.TarCompress(enc, fp, v1.NewDirectoryBlockReader(tmpDir))
require.NoError(t, err)
_, _ = fp.Seek(0, 0)
@@ -128,6 +130,7 @@ func createBlockInStorage(t *testing.T, store *BloomStore, tenant string, start
StartTimestamp: start,
EndTimestamp: start.Add(12 * time.Hour),
},
+ Encoding: enc,
},
Data: fp,
}
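
A short sketch of the new extension mapping in pkg/compression, using only the functions added in this diff (module path as shown here); note the mapping is deliberately lossy for LZ4, since every window size shares the `.lz4` extension and parses back as `EncLZ4_4M`, which is exactly what the resolver test table above expects:

package main

import (
	"fmt"

	"github.com/grafana/loki/v3/pkg/compression"
)

func main() {
	// Symmetric case: gzip maps to ".gz" and back to the same encoding.
	ext := compression.ToFileExtension(compression.EncGZIP)
	fmt.Println(ext, compression.FromFileExtension(ext) == compression.EncGZIP) // .gz true

	// Lossy case: all LZ4 variants serialize to ".lz4" and parse back as EncLZ4_4M.
	ext = compression.ToFileExtension(compression.EncLZ4_64k)
	fmt.Println(ext, compression.FromFileExtension(ext) == compression.EncLZ4_4M) // .lz4 true
}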
|
perf
|
Remove compression of `.tar` archived bloom blocks (#14159)
|
f98ff7f58400b5f5a425fae003fb959bfb8c6454
|
2024-06-10 19:55:04
|
洪阿南
|
fix: Fix duplicate enqueue item problem in bloom download queue when do sync download (#13114)
| false
|
diff --git a/pkg/storage/stores/shipper/bloomshipper/fetcher.go b/pkg/storage/stores/shipper/bloomshipper/fetcher.go
index 69715158950e0..c2a2939a805b3 100644
--- a/pkg/storage/stores/shipper/bloomshipper/fetcher.go
+++ b/pkg/storage/stores/shipper/bloomshipper/fetcher.go
@@ -502,6 +502,7 @@ func newDownloadQueue[T any, R any](size, workers int, process processFunc[T, R]
func (q *downloadQueue[T, R]) enqueue(t downloadRequest[T, R]) {
if !t.async {
q.queue <- t
+ return
}
// for async task we attempt to dedupe task already in progress.
q.enqueuedMutex.Lock()
diff --git a/pkg/storage/stores/shipper/bloomshipper/fetcher_test.go b/pkg/storage/stores/shipper/bloomshipper/fetcher_test.go
index 847c5f69b6a29..fb802fd63b9a5 100644
--- a/pkg/storage/stores/shipper/bloomshipper/fetcher_test.go
+++ b/pkg/storage/stores/shipper/bloomshipper/fetcher_test.go
@@ -267,6 +267,59 @@ func TestFetcher_DownloadQueue(t *testing.T) {
}
})
+
+ t.Run("download multiple items and return in order", func(t *testing.T) {
+ ctx := context.Background()
+
+ q, err := newDownloadQueue[bool, bool](
+ 100,
+ 1,
+ func(_ context.Context, r downloadRequest[bool, bool]) {
+ r.results <- downloadResponse[bool]{
+ key: r.key,
+ idx: r.idx,
+ item: true,
+ }
+ },
+ log.NewNopLogger(),
+ )
+ require.NoError(t, err)
+
+ count := 10
+ resultsCh := make(chan downloadResponse[bool], count)
+ errorsCh := make(chan error, count)
+
+ reqs := buildDownloadRequest(ctx, count, resultsCh, errorsCh)
+ for _, r := range reqs {
+ q.enqueue(r)
+ }
+
+ for i := 0; i < count; i++ {
+ select {
+ case err := <-errorsCh:
+ require.False(t, true, "got %+v should have received a response instead", err)
+ case res := <-resultsCh:
+ require.True(t, res.item)
+ require.Equal(t, reqs[i].key, res.key)
+ require.Equal(t, reqs[i].idx, res.idx)
+ }
+ }
+ })
+}
+
+func buildDownloadRequest(ctx context.Context, count int, resCh chan downloadResponse[bool], errCh chan error) []downloadRequest[bool, bool] {
+ requests := make([]downloadRequest[bool, bool], count)
+ for i := 0; i < count; i++ {
+ requests[i] = downloadRequest[bool, bool]{
+ ctx: ctx,
+ item: false,
+ key: "test",
+ idx: i,
+ results: resCh,
+ errors: errCh,
+ }
+ }
+ return requests
}
func TestFetcher_LoadBlocksFromFS(t *testing.T) {
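
To make the one-line fix concrete, here is a self-contained toy version of the enqueue path (illustrative names only, not the real bloomshipper types): without the early return, a synchronous request falls through into the async dedupe branch and is pushed onto the queue a second time.

package main

import "fmt"

type request struct {
	key   string
	async bool
}

type queue struct {
	ch       chan request
	enqueued map[string]struct{} // dedupe set, used for async requests only
}

func (q *queue) enqueue(r request) {
	if !r.async {
		q.ch <- r
		return // the fix: stop here so the sync request is not enqueued again below
	}
	if _, ok := q.enqueued[r.key]; ok {
		return // async duplicate already in progress, skip it
	}
	q.enqueued[r.key] = struct{}{}
	q.ch <- r
}

func main() {
	q := &queue{ch: make(chan request, 10), enqueued: map[string]struct{}{}}
	q.enqueue(request{key: "block-1", async: false})
	fmt.Println("queued:", len(q.ch)) // 1 with the early return; 2 without it
}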
|
fix
|
Fix duplicate enqueue item problem in bloom download queue when do sync download (#13114)
|
abf08e2411c752caca81beb94bd0e65c4b9bac50
|
2022-11-04 00:42:07
|
Periklis Tsirakidis
|
operator: Add support for built-in-cert-rotation for all internal lokistack encryption (#7064)
| false
|
diff --git a/operator/apis/config/v1/projectconfig_types.go b/operator/apis/config/v1/projectconfig_types.go
index 8b553b6f20bbc..cc13f80fc448d 100644
--- a/operator/apis/config/v1/projectconfig_types.go
+++ b/operator/apis/config/v1/projectconfig_types.go
@@ -5,9 +5,30 @@ import (
cfg "sigs.k8s.io/controller-runtime/pkg/config/v1alpha1"
)
+// BuiltInCertManagement is the configuration for the built-in facility to generate and rotate
+// TLS client and serving certificates for all LokiStack services and internal clients except
+// for the lokistack-gateway.
+type BuiltInCertManagement struct {
+ // Enabled defines the flag to enable/disable the built-in certificate management feature gate.
+ Enabled bool `json:"enabled,omitempty"`
+ // CACertValidity defines the total duration of the CA certificate validity.
+ CACertValidity string `json:"caValidity,omitempty"`
+ // CACertRefresh defines the duration of the CA certificate validity until a rotation
+ // should happen. It can be set up to 80% of CA certificate validity or equal to the
+ // CA certificate validity. Latter should be used only for rotating only when expired.
+ CACertRefresh string `json:"caRefresh,omitempty"`
+ // CertValidity defines the total duration of the validity for all LokiStack certificates.
+ CertValidity string `json:"certValidity,omitempty"`
+ // CertRefresh defines the duration of the certificate validity until a rotation
+ // should happen. It can be set up to 80% of certificate validity or equal to the
+ // certificate validity. Latter should be used only for rotating only when expired.
+ // The refresh is applied to all LokiStack certificates at once.
+ CertRefresh string `json:"certRefresh,omitempty"`
+}
+
// OpenShiftFeatureGates is the supported set of all operator features gates on OpenShift.
type OpenShiftFeatureGates struct {
- // ServingCertsService enables OpenShift service-ca annotations on Services
+ // ServingCertsService enables OpenShift service-ca annotations on the lokistack-gateway service only
// to use the in-platform CA and generate a TLS cert/key pair per service for
// in-cluster data-in-transit encryption.
// More details: https://docs.openshift.com/container-platform/latest/security/certificate_types_descriptions/service-ca-certificates.html
@@ -54,6 +75,17 @@ type FeatureGates struct {
// suffix `-ca-bundle`, e.g. `lokistack-dev-ca-bundle` and the following data:
// - `service-ca.crt`: The CA signing the service certificate in `tls.crt`.
GRPCEncryption bool `json:"grpcEncryption,omitempty"`
+ // BuiltInCertManagement enables the built-in facility for generating and rotating
+ // TLS client and serving certificates for all LokiStack services and internal clients except
+ // for the lokistack-gateway. In detail, all internal Loki HTTP and GRPC communication is lifted
+ // to require mTLS. For the lokistack-gateway you need to provide a secret with the following data, or use the `ServingCertsService`
+ // on OpenShift:
+ // - `tls.crt`: The TLS server side certificate.
+ // - `tls.key`: The TLS key for server-side encryption.
+ // In addition each service requires a configmap named as the LokiStack CR with the
+ // suffix `-ca-bundle`, e.g. `lokistack-dev-ca-bundle` and the following data:
+ // - `service-ca.crt`: The CA signing the service certificate in `tls.crt`.
+ BuiltInCertManagement BuiltInCertManagement `json:"builtInCertManagement,omitempty"`
// LokiStackGateway enables reconciling the reverse-proxy lokistack-gateway
// component for multi-tenant authentication/authorization traffic control
diff --git a/operator/apis/config/v1/zz_generated.deepcopy.go b/operator/apis/config/v1/zz_generated.deepcopy.go
index 3ff10850cd1ad..c85446c21e0b4 100644
--- a/operator/apis/config/v1/zz_generated.deepcopy.go
+++ b/operator/apis/config/v1/zz_generated.deepcopy.go
@@ -9,9 +9,25 @@ import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BuiltInCertManagement) DeepCopyInto(out *BuiltInCertManagement) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuiltInCertManagement.
+func (in *BuiltInCertManagement) DeepCopy() *BuiltInCertManagement {
+ if in == nil {
+ return nil
+ }
+ out := new(BuiltInCertManagement)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FeatureGates) DeepCopyInto(out *FeatureGates) {
*out = *in
+ out.BuiltInCertManagement = in.BuiltInCertManagement
out.OpenShift = in.OpenShift
}
diff --git a/operator/apis/loki/v1/lokistack_types.go b/operator/apis/loki/v1/lokistack_types.go
index da24185c4c0a3..901331dc8eb56 100644
--- a/operator/apis/loki/v1/lokistack_types.go
+++ b/operator/apis/loki/v1/lokistack_types.go
@@ -761,6 +761,8 @@ const (
ReasonInvalidTenantsConfiguration LokiStackConditionReason = "InvalidTenantsConfiguration"
// ReasonMissingGatewayOpenShiftBaseDomain when the reconciler cannot lookup the OpenShift DNS base domain.
ReasonMissingGatewayOpenShiftBaseDomain LokiStackConditionReason = "MissingGatewayOpenShiftBaseDomain"
+ // ReasonFailedCertificateRotation when the reconciler cannot rotate any of the required TLS certificates.
+ ReasonFailedCertificateRotation LokiStackConditionReason = "FailedCertificateRotation"
)
// PodStatusMap defines the type for mapping pod status to pod name.
diff --git a/operator/bundle/manifests/loki-operator-manager-config_v1_configmap.yaml b/operator/bundle/manifests/loki-operator-manager-config_v1_configmap.yaml
index 7b03313b8e93f..a5f6abe18bf5a 100644
--- a/operator/bundle/manifests/loki-operator-manager-config_v1_configmap.yaml
+++ b/operator/bundle/manifests/loki-operator-manager-config_v1_configmap.yaml
@@ -26,6 +26,16 @@ data:
#
httpEncryption: true
grpcEncryption: true
+ builtInCertManagement:
+ enabled: true
+ # CA certificate validity: 5 years
+ caValidity: 43830h
+ # CA certificate refresh at 80% of validity
+ caRefresh: 35064h
+ # Target certificate validity: 90d
+ certValidity: 2160h
+ # Target certificate refresh at 80% of validity
+ certRefresh: 1728h
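+      # (each refresh value is 80% of its validity: 0.8 * 43830h = 35064h, 0.8 * 2160h = 1728h)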
#
# Component feature gates
#
diff --git a/operator/bundle/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/manifests/loki-operator.clusterserviceversion.yaml
index ba635e196b66a..139861adb4c53 100644
--- a/operator/bundle/manifests/loki-operator.clusterserviceversion.yaml
+++ b/operator/bundle/manifests/loki-operator.clusterserviceversion.yaml
@@ -984,6 +984,7 @@ spec:
- endpoints
- nodes
- pods
+ - secrets
- serviceaccounts
- services
verbs:
diff --git a/operator/cmd/loki-broker/main.go b/operator/cmd/loki-broker/main.go
index 0677b160d8527..091221ce6655d 100644
--- a/operator/cmd/loki-broker/main.go
+++ b/operator/cmd/loki-broker/main.go
@@ -37,10 +37,13 @@ func (c *config) registerFlags(f *flag.FlagSet) {
f.StringVar(&c.Namespace, "namespace", "", "Namespace to deploy to")
 	f.StringVar(&c.Image, "image", manifests.DefaultContainerImage, "The Loki image pull spec location.")
// Feature flags
- c.featureFlags = configv1.FeatureGates{}
- c.featureFlags.OpenShift = configv1.OpenShiftFeatureGates{}
- f.BoolVar(&c.featureFlags.OpenShift.ServingCertsService, "with-serving-certs-service", false, "Enable usage of serving certs service on OpenShift.")
f.BoolVar(&c.featureFlags.ServiceMonitors, "with-service-monitors", false, "Enable service monitors for all LokiStack components.")
+ f.BoolVar(&c.featureFlags.OpenShift.ServingCertsService, "with-serving-certs-service", false, "Enable usage of serving certs service on OpenShift.")
+	f.BoolVar(&c.featureFlags.BuiltInCertManagement.Enabled, "with-builtin-cert-management", false, "Enable usage of built-in cert generation and rotation.")
+ f.StringVar(&c.featureFlags.BuiltInCertManagement.CACertValidity, "ca-cert-validity", "8760h", "CA Certificate validity duration.")
+ f.StringVar(&c.featureFlags.BuiltInCertManagement.CACertRefresh, "ca-cert-refresh", "7008h", "CA Certificate refresh time.")
+ f.StringVar(&c.featureFlags.BuiltInCertManagement.CertValidity, "target-cert-validity", "2160h", "Target Certificate validity duration.")
+ f.StringVar(&c.featureFlags.BuiltInCertManagement.CertRefresh, "target-cert-refresh", "1728h", "Target Certificate refresh time.")
 	f.BoolVar(&c.featureFlags.HTTPEncryption, "with-http-tls-services", false, "Enables TLS for all LokiStack HTTP services.")
 	f.BoolVar(&c.featureFlags.GRPCEncryption, "with-grpc-tls-services", false, "Enables TLS for all LokiStack GRPC services.")
f.BoolVar(&c.featureFlags.ServiceMonitorTLSEndpoints, "with-service-monitor-tls-endpoints", false, "Enable TLS endpoint for service monitors.")
diff --git a/operator/config/overlays/development/manager_related_image_patch.yaml b/operator/config/overlays/development/manager_related_image_patch.yaml
index f26143a21cc18..e4c6fd9db5eb5 100644
--- a/operator/config/overlays/development/manager_related_image_patch.yaml
+++ b/operator/config/overlays/development/manager_related_image_patch.yaml
@@ -9,6 +9,6 @@ spec:
- name: manager
env:
- name: RELATED_IMAGE_LOKI
- value: docker.io/grafana/loki:main-ec0bf70
+ value: docker.io/grafana/loki:k120-26d2989
- name: RELATED_IMAGE_GATEWAY
value: quay.io/observatorium/api:latest
diff --git a/operator/config/overlays/openshift/controller_manager_config.yaml b/operator/config/overlays/openshift/controller_manager_config.yaml
index c69ada2e8edae..d1e0d52a4242d 100644
--- a/operator/config/overlays/openshift/controller_manager_config.yaml
+++ b/operator/config/overlays/openshift/controller_manager_config.yaml
@@ -23,6 +23,16 @@ featureGates:
#
httpEncryption: true
grpcEncryption: true
+ builtInCertManagement:
+ enabled: true
+ # CA certificate validity: 5 years
+ caValidity: 43830h
+ # CA certificate refresh at 80% of validity
+ caRefresh: 35064h
+ # Target certificate validity: 90d
+ certValidity: 2160h
+ # Target certificate refresh at 80% of validity
+ certRefresh: 1728h
#
# Component feature gates
#
diff --git a/operator/config/rbac/role.yaml b/operator/config/rbac/role.yaml
index 38c74b296090b..806d79f83850f 100644
--- a/operator/config/rbac/role.yaml
+++ b/operator/config/rbac/role.yaml
@@ -12,6 +12,7 @@ rules:
- endpoints
- nodes
- pods
+ - secrets
- serviceaccounts
- services
verbs:
diff --git a/operator/controllers/loki/certrotation_controller.go b/operator/controllers/loki/certrotation_controller.go
new file mode 100644
index 0000000000000..382498cc40886
--- /dev/null
+++ b/operator/controllers/loki/certrotation_controller.go
@@ -0,0 +1,113 @@
+package controllers
+
+import (
+ "context"
+ "errors"
+ "time"
+
+ "github.com/go-logr/logr"
+ configv1 "github.com/grafana/loki/operator/apis/config/v1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ "github.com/grafana/loki/operator/controllers/loki/internal/lokistack"
+ "github.com/grafana/loki/operator/controllers/loki/internal/management/state"
+ "github.com/grafana/loki/operator/internal/certrotation"
+ "github.com/grafana/loki/operator/internal/external/k8s"
+ "github.com/grafana/loki/operator/internal/handlers"
+
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// CertRotationReconciler reconciles the `loki.grafana.com/certRotationRequiredAt` annotation on
+// any LokiStack object associated with any of the owned signer/client/serving certificate secrets
+// and CA bundle configmap.
+type CertRotationReconciler struct {
+ client.Client
+ Log logr.Logger
+ Scheme *runtime.Scheme
+ FeatureGates configv1.FeatureGates
+}
+
+// Reconcile is part of the main kubernetes reconciliation loop which aims to
+// move the current state of the cluster closer to the desired state.
+// Compare the state specified by the LokiStack object against the actual cluster state,
+// and then perform operations to make the cluster state reflect the state specified by
+// the user.
+//
+// For more details, check Reconcile and its Result here:
+// - https://pkg.go.dev/sigs.k8s.io/[email protected]/pkg/reconcile
+func (r *CertRotationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+ managed, err := state.IsManaged(ctx, req, r.Client)
+ if err != nil {
+ return ctrl.Result{
+ Requeue: true,
+ }, err
+ }
+ if !managed {
+ r.Log.Info("Skipping reconciliation for unmanaged LokiStack resource", "name", req.String())
+ // Stop requeueing for unmanaged LokiStack custom resources
+ return ctrl.Result{}, nil
+ }
+
+ rt, err := certrotation.ParseRotation(r.FeatureGates.BuiltInCertManagement)
+ if err != nil {
+ return ctrl.Result{Requeue: false}, err
+ }
+
+ checkExpiryAfter := expiryRetryAfter(rt.TargetCertRefresh)
+ r.Log.Info("Checking if LokiStack certificates expired", "name", req.String(), "interval", checkExpiryAfter.String())
+
+ var expired *certrotation.CertExpiredError
+
+ err = handlers.CheckCertExpiry(ctx, r.Log, req, r.Client, r.FeatureGates)
+ switch {
+ case errors.As(err, &expired):
+ r.Log.Info("Certificate expired", "msg", expired.Error())
+ case err != nil:
+ return ctrl.Result{
+ Requeue: true,
+ }, err
+ default:
+ r.Log.Info("Skipping cert rotation, all LokiStack certificates still valid", "name", req.String())
+ return ctrl.Result{
+ RequeueAfter: checkExpiryAfter,
+ }, nil
+ }
+
+ r.Log.Error(err, "LokiStack certificates expired", "name", req.String())
+ err = lokistack.AnnotateForRequiredCertRotation(ctx, r.Client, req.Name, req.Namespace)
+ if err != nil {
+ r.Log.Error(err, "failed to annotate required cert rotation", "name", req.String())
+ return ctrl.Result{
+ Requeue: true,
+ }, err
+ }
+
+ return ctrl.Result{
+ RequeueAfter: checkExpiryAfter,
+ }, nil
+}
+
+// SetupWithManager sets up the controller with the Manager.
+func (r *CertRotationReconciler) SetupWithManager(mgr ctrl.Manager) error {
+ b := ctrl.NewControllerManagedBy(mgr)
+ return r.buildController(k8s.NewCtrlBuilder(b))
+}
+
+func (r *CertRotationReconciler) buildController(bld k8s.Builder) error {
+ return bld.
+ For(&lokiv1.LokiStack{}).
+ Owns(&corev1.Secret{}).
+ Complete(r)
+}
+
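+// expiryRetryAfter returns how often the reconciler re-checks certificate expiry: every
+// 12 hours when the refresh period spans more than a day, otherwise a quarter of the
+// refresh period.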
+func expiryRetryAfter(certRefresh time.Duration) time.Duration {
+ day := 24 * time.Hour
+ if certRefresh > day {
+ return 12 * time.Hour
+ }
+
+ return certRefresh / 4
+}
diff --git a/operator/controllers/loki/certrotation_controller_test.go b/operator/controllers/loki/certrotation_controller_test.go
new file mode 100644
index 0000000000000..4f3dd0ae2890c
--- /dev/null
+++ b/operator/controllers/loki/certrotation_controller_test.go
@@ -0,0 +1,74 @@
+package controllers
+
+import (
+ "testing"
+ "time"
+
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ "github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
+ "github.com/stretchr/testify/require"
+ corev1 "k8s.io/api/core/v1"
+)
+
+func TestCertRotationController_RegistersCustomResource_WithDefaultPredicates(t *testing.T) {
+ b := &k8sfakes.FakeBuilder{}
+ k := &k8sfakes.FakeClient{}
+ c := &CertRotationReconciler{Client: k, Scheme: scheme}
+
+ b.ForReturns(b)
+ b.OwnsReturns(b)
+
+ err := c.buildController(b)
+ require.NoError(t, err)
+
+ // Require only one For-Call for the custom resource
+ require.Equal(t, 1, b.ForCallCount())
+
+ // Require For-call with LokiStack resource
+ obj, _ := b.ForArgsForCall(0)
+ require.Equal(t, &lokiv1.LokiStack{}, obj)
+}
+
+func TestCertRotationController_RegisterOwnedResources_WithDefaultPredicates(t *testing.T) {
+ b := &k8sfakes.FakeBuilder{}
+ k := &k8sfakes.FakeClient{}
+ c := &CertRotationReconciler{Client: k, Scheme: scheme}
+
+ b.ForReturns(b)
+ b.OwnsReturns(b)
+
+ err := c.buildController(b)
+ require.NoError(t, err)
+
+ require.Equal(t, 1, b.OwnsCallCount())
+
+ obj, _ := b.OwnsArgsForCall(0)
+ require.Equal(t, &corev1.Secret{}, obj)
+}
+
+func TestCertRotationController_ExpiryRetryAfter(t *testing.T) {
+ tt := []struct {
+ desc string
+ refresh time.Duration
+ wantDuration time.Duration
+ wantError bool
+ }{
+ {
+			desc:         "multi-day refresh duration",
+ refresh: 120 * time.Hour,
+ wantDuration: 12 * time.Hour,
+ },
+ {
+ desc: "less than a day refresh duration",
+ refresh: 10 * time.Hour,
+ wantDuration: 2*time.Hour + 30*time.Minute,
+ },
+ }
+ for _, tc := range tt {
+ tc := tc
+ t.Run(tc.desc, func(t *testing.T) {
+ t.Parallel()
+ require.Equal(t, tc.wantDuration, expiryRetryAfter(tc.refresh))
+ })
+ }
+}
diff --git a/operator/controllers/loki/internal/lokistack/certrotation_discovery.go b/operator/controllers/loki/internal/lokistack/certrotation_discovery.go
new file mode 100644
index 0000000000000..c92e0115016c8
--- /dev/null
+++ b/operator/controllers/loki/internal/lokistack/certrotation_discovery.go
@@ -0,0 +1,45 @@
+package lokistack
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/ViaQ/logerr/v2/kverrors"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ "github.com/grafana/loki/operator/internal/external/k8s"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+const certRotationRequiredAtKey = "loki.grafana.com/certRotationRequiredAt"
+
+// AnnotateForRequiredCertRotation adds/updates the `loki.grafana.com/certRotationRequiredAt` annotation
+// to the named LokiStack if any of the managed client/serving/CA certificates expired. If no LokiStack
+// is found, then skip reconciliation.
+func AnnotateForRequiredCertRotation(ctx context.Context, k k8s.Client, name, namespace string) error {
+ var s lokiv1.LokiStack
+ key := client.ObjectKey{Name: name, Namespace: namespace}
+
+ if err := k.Get(ctx, key, &s); err != nil {
+ if apierrors.IsNotFound(err) {
+ // Do nothing
+ return nil
+ }
+
+ return kverrors.Wrap(err, "failed to get lokistack", "key", key)
+ }
+
+ ss := s.DeepCopy()
+ if ss.Annotations == nil {
+ ss.Annotations = make(map[string]string)
+ }
+
+ ss.Annotations[certRotationRequiredAtKey] = time.Now().UTC().Format(time.RFC3339)
+
+ if err := k.Update(ctx, ss); err != nil {
+ return kverrors.Wrap(err, fmt.Sprintf("failed to update lokistack `%s` annotation", certRotationRequiredAtKey), "key", key)
+ }
+
+ return nil
+}
diff --git a/operator/controllers/loki/lokistack_controller.go b/operator/controllers/loki/lokistack_controller.go
index d1afaa31a7901..ecc98c2acc8d3 100644
--- a/operator/controllers/loki/lokistack_controller.go
+++ b/operator/controllers/loki/lokistack_controller.go
@@ -77,7 +77,7 @@ type LokiStackReconciler struct {
// +kubebuilder:rbac:groups=loki.grafana.com,resources=lokistacks,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=loki.grafana.com,resources=lokistacks/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=loki.grafana.com,resources=lokistacks/finalizers,verbs=update
-// +kubebuilder:rbac:groups="",resources=pods;nodes;services;endpoints;configmaps;serviceaccounts,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups="",resources=pods;nodes;services;endpoints;configmaps;secrets;serviceaccounts,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch
// +kubebuilder:rbac:groups=apps,resources=deployments;statefulsets,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=clusterrolebindings;clusterroles;roles;rolebindings,verbs=get;list;watch;create;update;patch;delete
@@ -110,11 +110,33 @@ func (r *LokiStackReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
return ctrl.Result{}, nil
}
+ if r.FeatureGates.BuiltInCertManagement.Enabled {
+ err = handlers.CreateOrRotateCertificates(ctx, r.Log, req, r.Client, r.Scheme, r.FeatureGates)
+ if res, derr := handleDegradedError(ctx, r.Client, req, err); derr != nil {
+ return res, derr
+ }
+ }
+
err = handlers.CreateOrUpdateLokiStack(ctx, r.Log, req, r.Client, r.Scheme, r.FeatureGates)
+ if res, derr := handleDegradedError(ctx, r.Client, req, err); derr != nil {
+ return res, derr
+ }
+
+ err = status.Refresh(ctx, r.Client, req)
+ if err != nil {
+ return ctrl.Result{
+ Requeue: true,
+ RequeueAfter: time.Second,
+ }, err
+ }
+ return ctrl.Result{}, nil
+}
+
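+// handleDegradedError sets the Degraded condition on the LokiStack when err is a
+// status.DegradedError; any other non-nil error results in a requeue.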
+func handleDegradedError(ctx context.Context, c client.Client, req ctrl.Request, err error) (ctrl.Result, error) {
var degraded *status.DegradedError
if errors.As(err, °raded) {
- err = status.SetDegradedCondition(ctx, r.Client, req, degraded.Message, degraded.Reason)
+ err = status.SetDegradedCondition(ctx, c, req, degraded.Message, degraded.Reason)
if err != nil {
return ctrl.Result{
Requeue: true,
@@ -135,14 +157,6 @@ func (r *LokiStackReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
}, err
}
- err = status.Refresh(ctx, r.Client, req)
- if err != nil {
- return ctrl.Result{
- Requeue: true,
- RequeueAfter: time.Second,
- }, err
- }
-
return ctrl.Result{}, nil
}
@@ -156,6 +170,7 @@ func (r *LokiStackReconciler) buildController(bld k8s.Builder) error {
bld = bld.
For(&lokiv1.LokiStack{}, createOrUpdateOnlyPred).
Owns(&corev1.ConfigMap{}, updateOrDeleteOnlyPred).
+ Owns(&corev1.Secret{}, updateOrDeleteOnlyPred).
Owns(&corev1.ServiceAccount{}, updateOrDeleteOnlyPred).
Owns(&corev1.Service{}, updateOrDeleteOnlyPred).
Owns(&appsv1.Deployment{}, updateOrDeleteOnlyPred).
diff --git a/operator/controllers/loki/lokistack_controller_test.go b/operator/controllers/loki/lokistack_controller_test.go
index 91f2c8bb18ee2..0ff57aa4e1e53 100644
--- a/operator/controllers/loki/lokistack_controller_test.go
+++ b/operator/controllers/loki/lokistack_controller_test.go
@@ -85,55 +85,61 @@ func TestLokiStackController_RegisterOwnedResourcesForUpdateOrDeleteOnly(t *test
{
obj: &corev1.ConfigMap{},
index: 0,
- ownCallsCount: 10,
+ ownCallsCount: 11,
pred: updateOrDeleteOnlyPred,
},
{
- obj: &corev1.ServiceAccount{},
+ obj: &corev1.Secret{},
index: 1,
- ownCallsCount: 10,
+ ownCallsCount: 11,
pred: updateOrDeleteOnlyPred,
},
{
- obj: &corev1.Service{},
+ obj: &corev1.ServiceAccount{},
index: 2,
- ownCallsCount: 10,
+ ownCallsCount: 11,
pred: updateOrDeleteOnlyPred,
},
{
- obj: &appsv1.Deployment{},
+ obj: &corev1.Service{},
index: 3,
- ownCallsCount: 10,
+ ownCallsCount: 11,
pred: updateOrDeleteOnlyPred,
},
{
- obj: &appsv1.StatefulSet{},
+ obj: &appsv1.Deployment{},
index: 4,
- ownCallsCount: 10,
+ ownCallsCount: 11,
pred: updateOrDeleteOnlyPred,
},
{
- obj: &rbacv1.ClusterRole{},
+ obj: &appsv1.StatefulSet{},
index: 5,
- ownCallsCount: 10,
+ ownCallsCount: 11,
pred: updateOrDeleteOnlyPred,
},
{
- obj: &rbacv1.ClusterRoleBinding{},
+ obj: &rbacv1.ClusterRole{},
index: 6,
- ownCallsCount: 10,
+ ownCallsCount: 11,
pred: updateOrDeleteOnlyPred,
},
{
- obj: &rbacv1.Role{},
+ obj: &rbacv1.ClusterRoleBinding{},
index: 7,
- ownCallsCount: 10,
+ ownCallsCount: 11,
pred: updateOrDeleteOnlyPred,
},
{
- obj: &rbacv1.RoleBinding{},
+ obj: &rbacv1.Role{},
index: 8,
- ownCallsCount: 10,
+ ownCallsCount: 11,
+ pred: updateOrDeleteOnlyPred,
+ },
+ {
+ obj: &rbacv1.RoleBinding{},
+ index: 9,
+ ownCallsCount: 11,
pred: updateOrDeleteOnlyPred,
},
// The next two share the same index, because the
@@ -141,8 +147,8 @@ func TestLokiStackController_RegisterOwnedResourcesForUpdateOrDeleteOnly(t *test
// or a Route (i.e. OpenShift).
{
obj: &networkingv1.Ingress{},
- index: 9,
- ownCallsCount: 10,
+ index: 10,
+ ownCallsCount: 11,
featureGates: configv1.FeatureGates{
OpenShift: configv1.OpenShiftFeatureGates{
GatewayRoute: false,
@@ -152,8 +158,8 @@ func TestLokiStackController_RegisterOwnedResourcesForUpdateOrDeleteOnly(t *test
},
{
obj: &routev1.Route{},
- index: 9,
- ownCallsCount: 10,
+ index: 10,
+ ownCallsCount: 11,
featureGates: configv1.FeatureGates{
OpenShift: configv1.OpenShiftFeatureGates{
GatewayRoute: true,
@@ -163,8 +169,8 @@ func TestLokiStackController_RegisterOwnedResourcesForUpdateOrDeleteOnly(t *test
},
{
obj: &openshiftconfigv1.APIServer{},
- index: 10,
- ownCallsCount: 11,
+ index: 11,
+ ownCallsCount: 12,
featureGates: configv1.FeatureGates{
OpenShift: configv1.OpenShiftFeatureGates{
ClusterTLSPolicy: true,
diff --git a/operator/go.mod b/operator/go.mod
index 20644e2f0e616..d9250c1988c48 100644
--- a/operator/go.mod
+++ b/operator/go.mod
@@ -28,6 +28,7 @@ require (
github.com/openshift/library-go v0.0.0-20220622115547-84d884f4c9f6
github.com/prometheus/prometheus v1.8.2-0.20220303173753-edfe657b5405
gopkg.in/yaml.v2 v2.4.0
+ k8s.io/apiserver v0.25.0
)
require (
@@ -165,7 +166,6 @@ require (
gopkg.in/yaml.v3 v3.0.1 // indirect
inet.af/netaddr v0.0.0-20211027220019-c74959edd3b6 // indirect
k8s.io/apiextensions-apiserver v0.25.0 // indirect
- k8s.io/apiserver v0.25.0 // indirect
k8s.io/component-base v0.25.0 // indirect
k8s.io/klog/v2 v2.70.1 // indirect
k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 // indirect
diff --git a/operator/hack/addons_kind_certs.yaml b/operator/hack/addons_kind_certs.yaml
index 45994b3f524e5..5b5d4f23e983b 100644
--- a/operator/hack/addons_kind_certs.yaml
+++ b/operator/hack/addons_kind_certs.yaml
@@ -8,14 +8,14 @@ spec:
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
- name: lokistack-dev-ca-bundle
+ name: lokistack-dev-signing-ca
spec:
isCA: true
commonName: lokistack-dev-gateway-http.default.svc
dnsNames:
- "*.default.svc"
- "*.default.svc.cluster.local"
- secretName: lokistack-dev-ca-bundle
+ secretName: lokistack-dev-signing-ca
privateKey:
algorithm: ECDSA
size: 256
@@ -30,7 +30,7 @@ metadata:
name: kind-issuer
spec:
ca:
- secretName: lokistack-dev-ca-bundle
+ secretName: lokistack-dev-signing-ca
---
apiVersion: cert-manager.io/v1
kind: Certificate
@@ -48,6 +48,7 @@ spec:
encoding: PKCS8
size: 256
usages:
+ - client auth
- server auth
dnsNames:
- "lokistack-dev-gateway-http.default.svc"
@@ -58,6 +59,27 @@ spec:
---
apiVersion: cert-manager.io/v1
kind: Certificate
+metadata:
+ name: lokistack-dev-gateway-client-http
+ namespace: default
+spec:
+ secretName: lokistack-dev-gateway-client-http
+ duration: 2160h # 90d
+ renewBefore: 360h # 15d
+ commonName: lokistack-dev-gateway-http.default.svc
+ privateKey:
+ rotationPolicy: Never
+ algorithm: ECDSA
+ encoding: PKCS8
+ size: 256
+ usages:
+ - client auth
+ issuerRef:
+ name: kind-issuer
+ kind: Issuer
+---
+apiVersion: cert-manager.io/v1
+kind: Certificate
metadata:
name: lokistack-dev-distributor-grpc
namespace: default
@@ -72,6 +94,7 @@ spec:
encoding: PKCS8
size: 256
usages:
+ - client auth
- server auth
dnsNames:
- "lokistack-dev-distributor-grpc.default.svc"
@@ -96,6 +119,7 @@ spec:
encoding: PKCS8
size: 256
usages:
+ - client auth
- server auth
dnsNames:
- "lokistack-dev-distributor-http.default.svc"
@@ -120,6 +144,7 @@ spec:
encoding: PKCS8
size: 256
usages:
+ - client auth
- server auth
dnsNames:
- "lokistack-dev-ingester-grpc.default.svc"
@@ -144,6 +169,7 @@ spec:
encoding: PKCS8
size: 256
usages:
+ - client auth
- server auth
dnsNames:
- "lokistack-dev-ingester-http.default.svc"
@@ -168,6 +194,7 @@ spec:
encoding: PKCS8
size: 256
usages:
+ - client auth
- server auth
dnsNames:
- "lokistack-dev-query-frontend-grpc.default.svc"
@@ -192,6 +219,7 @@ spec:
encoding: PKCS8
size: 256
usages:
+ - client auth
- server auth
dnsNames:
- "lokistack-dev-query-frontend-http.default.svc"
@@ -216,6 +244,7 @@ spec:
encoding: PKCS8
size: 256
usages:
+ - client auth
- server auth
dnsNames:
- "lokistack-dev-querier-grpc.default.svc"
@@ -240,6 +269,7 @@ spec:
encoding: PKCS8
size: 256
usages:
+ - client auth
- server auth
dnsNames:
- "lokistack-dev-querier-http.default.svc"
@@ -264,6 +294,7 @@ spec:
encoding: PKCS8
size: 256
usages:
+ - client auth
- server auth
dnsNames:
- "lokistack-dev-index-gateway-grpc.default.svc"
@@ -288,6 +319,7 @@ spec:
encoding: PKCS8
size: 256
usages:
+ - client auth
- server auth
dnsNames:
- "lokistack-dev-index-gateway-http.default.svc"
@@ -312,6 +344,7 @@ spec:
encoding: PKCS8
size: 256
usages:
+ - client auth
- server auth
dnsNames:
- "lokistack-dev-compactor-grpc.default.svc"
@@ -336,6 +369,7 @@ spec:
encoding: PKCS8
size: 256
usages:
+ - client auth
- server auth
dnsNames:
- "lokistack-dev-compactor-http.default.svc"
diff --git a/operator/internal/certrotation/build.go b/operator/internal/certrotation/build.go
new file mode 100644
index 0000000000000..4f85d3935e448
--- /dev/null
+++ b/operator/internal/certrotation/build.go
@@ -0,0 +1,76 @@
+package certrotation
+
+import (
+ "fmt"
+ "time"
+
+ configv1 "github.com/grafana/loki/operator/apis/config/v1"
+ "k8s.io/apiserver/pkg/authentication/user"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
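+// defaultUserInfo is the identity written into the subject of every client certificate
+// issued for LokiStack components.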
+var defaultUserInfo = &user.DefaultInfo{Name: "system:lokistacks", Groups: []string{"system:logging"}}
+
+// BuildAll builds all secrets and configmaps containing
+// CA certificates, CA bundles and client certificates for
+// a LokiStack.
+func BuildAll(opts Options) ([]client.Object, error) {
+ res := make([]client.Object, 0)
+
+ obj, err := buildSigningCASecret(&opts)
+ if err != nil {
+ return nil, err
+ }
+ res = append(res, obj)
+
+ obj, err = buildCABundle(&opts)
+ if err != nil {
+ return nil, err
+ }
+ res = append(res, obj)
+
+ objs, err := buildTargetCertKeyPairSecrets(opts)
+ if err != nil {
+ return nil, err
+ }
+ res = append(res, objs...)
+
+ return res, nil
+}
+
+// ApplyDefaultSettings merges the default rotation settings from the feature gate configuration into the given options.
+func ApplyDefaultSettings(opts *Options, cfg configv1.BuiltInCertManagement) error {
+ rotation, err := ParseRotation(cfg)
+ if err != nil {
+ return err
+ }
+ opts.Rotation = rotation
+
+ clock := time.Now
+ opts.Signer.Rotation = signerRotation{
+ Clock: clock,
+ }
+
+ if opts.Certificates == nil {
+ opts.Certificates = make(map[string]SelfSignedCertKey)
+ }
+ for _, name := range ComponentCertSecretNames(opts.StackName) {
+ r := certificateRotation{
+ Clock: clock,
+ UserInfo: defaultUserInfo,
+ Hostnames: []string{
+ fmt.Sprintf("%s.%s.svc", name, opts.StackNamespace),
+ fmt.Sprintf("%s.%s.svc.cluster.local", name, opts.StackNamespace),
+ },
+ }
+
+ cert, ok := opts.Certificates[name]
+ if !ok {
+ cert = SelfSignedCertKey{}
+ }
+ cert.Rotation = r
+ opts.Certificates[name] = cert
+ }
+
+ return nil
+}
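+
+// A minimal usage sketch (mirroring the package tests); cfg is assumed to be the
+// operator's configv1.BuiltInCertManagement feature gate value:
+//
+//	opts := Options{StackName: "lokistack-dev", StackNamespace: "default"}
+//	if err := ApplyDefaultSettings(&opts, cfg); err != nil {
+//		return err
+//	}
+//	objs, err := BuildAll(opts)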
diff --git a/operator/internal/certrotation/build_test.go b/operator/internal/certrotation/build_test.go
new file mode 100644
index 0000000000000..dd57ad183e62e
--- /dev/null
+++ b/operator/internal/certrotation/build_test.go
@@ -0,0 +1,134 @@
+package certrotation
+
+import (
+ "fmt"
+ "strings"
+ "testing"
+
+ configv1 "github.com/grafana/loki/operator/apis/config/v1"
+ "github.com/stretchr/testify/require"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func TestBuildAll(t *testing.T) {
+ cfg := configv1.BuiltInCertManagement{
+ CACertValidity: "10m",
+ CACertRefresh: "5m",
+ CertValidity: "2m",
+ CertRefresh: "1m",
+ }
+
+ opts := Options{
+ StackName: "dev",
+ StackNamespace: "ns",
+ }
+ err := ApplyDefaultSettings(&opts, cfg)
+ require.NoError(t, err)
+
+ objs, err := BuildAll(opts)
+ require.NoError(t, err)
+ require.Len(t, objs, 17)
+
+ for _, obj := range objs {
+ require.True(t, strings.HasPrefix(obj.GetName(), opts.StackName))
+ require.Equal(t, obj.GetNamespace(), opts.StackNamespace)
+
+ switch o := obj.(type) {
+ case *corev1.Secret:
+ require.Contains(t, o.Annotations, CertificateIssuer)
+ require.Contains(t, o.Annotations, CertificateNotAfterAnnotation)
+ require.Contains(t, o.Annotations, CertificateNotBeforeAnnotation)
+ }
+ }
+}
+
+func TestApplyDefaultSettings_EmptySecrets(t *testing.T) {
+ cfg := configv1.BuiltInCertManagement{
+ CACertValidity: "10m",
+ CACertRefresh: "5m",
+ CertValidity: "2m",
+ CertRefresh: "1m",
+ }
+
+ opts := Options{
+ StackName: "lokistack-dev",
+ StackNamespace: "ns",
+ }
+
+ err := ApplyDefaultSettings(&opts, cfg)
+ require.NoError(t, err)
+
+ cs := ComponentCertSecretNames(opts.StackName)
+
+ for _, name := range cs {
+ cert, ok := opts.Certificates[name]
+ require.True(t, ok)
+ require.NotEmpty(t, cert.Rotation)
+
+ hostnames := []string{
+ fmt.Sprintf("%s.%s.svc", name, opts.StackNamespace),
+ fmt.Sprintf("%s.%s.svc.cluster.local", name, opts.StackNamespace),
+ }
+
+ require.ElementsMatch(t, hostnames, cert.Rotation.Hostnames)
+ require.Equal(t, defaultUserInfo, cert.Rotation.UserInfo)
+ require.Nil(t, cert.Secret)
+ }
+}
+
+func TestApplyDefaultSettings_ExistingSecrets(t *testing.T) {
+ const (
+ stackName = "dev"
+ stackNamespace = "ns"
+ )
+
+ cfg := configv1.BuiltInCertManagement{
+ CACertValidity: "10m",
+ CACertRefresh: "5m",
+ CertValidity: "2m",
+ CertRefresh: "1m",
+ }
+
+ opts := Options{
+ StackName: stackName,
+ StackNamespace: stackNamespace,
+ Certificates: ComponentCertificates{},
+ }
+
+ cs := ComponentCertSecretNames(opts.StackName)
+
+ for _, name := range cs {
+ opts.Certificates[name] = SelfSignedCertKey{
+ Secret: &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: stackNamespace,
+ Annotations: map[string]string{
+ CertificateNotBeforeAnnotation: "not-before",
+ CertificateNotAfterAnnotation: "not-after",
+ },
+ },
+ },
+ }
+ }
+
+ err := ApplyDefaultSettings(&opts, cfg)
+ require.NoError(t, err)
+
+ for _, name := range cs {
+ cert, ok := opts.Certificates[name]
+ require.True(t, ok)
+ require.NotEmpty(t, cert.Rotation)
+
+ hostnames := []string{
+ fmt.Sprintf("%s.%s.svc", name, opts.StackNamespace),
+ fmt.Sprintf("%s.%s.svc.cluster.local", name, opts.StackNamespace),
+ }
+
+ require.ElementsMatch(t, hostnames, cert.Rotation.Hostnames)
+ require.Equal(t, defaultUserInfo, cert.Rotation.UserInfo)
+
+ require.NotNil(t, cert.Secret)
+ }
+}
diff --git a/operator/internal/certrotation/cabundle.go b/operator/internal/certrotation/cabundle.go
new file mode 100644
index 0000000000000..319bfd8d5cce0
--- /dev/null
+++ b/operator/internal/certrotation/cabundle.go
@@ -0,0 +1,86 @@
+package certrotation
+
+import (
+ "bytes"
+ "crypto/x509"
+
+ "github.com/openshift/library-go/pkg/crypto"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/client-go/util/cert"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// buildCABundle returns a ConfigMap including all known non-expired signing CAs across rotations.
+func buildCABundle(opts *Options) (client.Object, error) {
+ cm := newConfigMap(*opts)
+
+ certs, err := manageCABundleConfigMap(cm, opts.Signer.RawCA.Config.Certs[0])
+ if err != nil {
+ return nil, err
+ }
+
+ opts.RawCACerts = certs
+
+ caBytes, err := crypto.EncodeCertificates(certs...)
+ if err != nil {
+ return nil, err
+ }
+
+ cm.Data[CAFile] = string(caBytes)
+
+ return cm, nil
+}
+
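+// newConfigMap returns the CA bundle ConfigMap for the stack, carrying over annotations,
+// labels, and data from an existing bundle when present.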
+func newConfigMap(opts Options) *corev1.ConfigMap {
+ current := opts.CABundle.DeepCopy()
+
+ cm := &corev1.ConfigMap{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: CABundleName(opts.StackName),
+ Namespace: opts.StackNamespace,
+ },
+ }
+
+ if current != nil {
+ cm.Annotations = current.Annotations
+ cm.Labels = current.Labels
+ cm.Data = current.Data
+ }
+
+ return cm
+}
+
+// manageCABundleConfigMap adds the new certificate to the CA bundle, eliminates duplicates, and prunes
+// expired certificates from the list of signers to trust.
+func manageCABundleConfigMap(caBundleConfigMap *corev1.ConfigMap, currentSigner *x509.Certificate) ([]*x509.Certificate, error) {
+ if caBundleConfigMap.Data == nil {
+ caBundleConfigMap.Data = map[string]string{}
+ }
+
+ certificates := []*x509.Certificate{}
+ caBundle := caBundleConfigMap.Data[CAFile]
+ if len(caBundle) > 0 {
+ var err error
+ certificates, err = cert.ParseCertsPEM([]byte(caBundle))
+ if err != nil {
+ return nil, err
+ }
+ }
+ certificates = append([]*x509.Certificate{currentSigner}, certificates...)
+ certificates = crypto.FilterExpiredCerts(certificates...)
+
+ finalCertificates := []*x509.Certificate{}
+ // now check for duplicates. n^2, but super simple
+nextCertificate:
+ for i := range certificates {
+ for j := range finalCertificates {
+ if bytes.Equal(certificates[i].Raw, finalCertificates[j].Raw) {
+ continue nextCertificate
+ }
+ }
+ finalCertificates = append(finalCertificates, certificates[i])
+ }
+
+ return finalCertificates, nil
+}
diff --git a/operator/internal/certrotation/cabundle_test.go b/operator/internal/certrotation/cabundle_test.go
new file mode 100644
index 0000000000000..2e8ecbc1324e2
--- /dev/null
+++ b/operator/internal/certrotation/cabundle_test.go
@@ -0,0 +1,74 @@
+package certrotation
+
+import (
+ "bytes"
+ "testing"
+ "time"
+
+ "github.com/openshift/library-go/pkg/crypto"
+ "github.com/stretchr/testify/require"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func TestBuildCABundle_Create(t *testing.T) {
+ rawCA, _ := newTestCABundle(t, "test-ca")
+
+ opts := &Options{
+ StackName: "dev",
+ StackNamespace: "ns",
+ Signer: SigningCA{
+ RawCA: rawCA,
+ },
+ }
+
+ obj, err := buildCABundle(opts)
+ require.NoError(t, err)
+ require.NotNil(t, obj)
+ require.Len(t, opts.RawCACerts, 1)
+}
+
+func TestBuildCABundle_Append(t *testing.T) {
+ _, rawCABytes := newTestCABundle(t, "test-ca")
+ newRawCA, _ := newTestCABundle(t, "test-ca-other")
+
+ opts := &Options{
+ StackName: "dev",
+ StackNamespace: "ns",
+ Signer: SigningCA{
+ RawCA: newRawCA,
+ },
+ CABundle: &corev1.ConfigMap{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "dev-ca-bundle",
+ Namespace: "ns",
+ },
+ Data: map[string]string{
+ CAFile: string(rawCABytes),
+ },
+ },
+ }
+
+ obj, err := buildCABundle(opts)
+ require.NoError(t, err)
+ require.NotNil(t, obj)
+ require.Len(t, opts.RawCACerts, 2)
+}
+
+func newTestCABundle(t *testing.T, name string) (*crypto.CA, []byte) {
+ testCA, err := crypto.MakeSelfSignedCAConfigForDuration(name, 1*time.Hour)
+ require.NoError(t, err)
+
+ certBytes := &bytes.Buffer{}
+ keyBytes := &bytes.Buffer{}
+ err = testCA.WriteCertConfig(certBytes, keyBytes)
+ require.NoError(t, err)
+
+ rawCA, err := crypto.GetCAFromBytes(certBytes.Bytes(), keyBytes.Bytes())
+ require.NoError(t, err)
+
+ rawCABytes, err := crypto.EncodeCertificates(rawCA.Config.Certs...)
+ require.NoError(t, err)
+
+ return rawCA, rawCABytes
+}
diff --git a/operator/internal/certrotation/expiry.go b/operator/internal/certrotation/expiry.go
new file mode 100644
index 0000000000000..aaa2bb6a75a7f
--- /dev/null
+++ b/operator/internal/certrotation/expiry.go
@@ -0,0 +1,17 @@
+package certrotation
+
+import (
+ "fmt"
+ "strings"
+)
+
+// CertExpiredError indicates that a certificate expired and
+// records the reasons of expiry.
+type CertExpiredError struct {
+ Message string
+ Reasons []string
+}
+
+func (e *CertExpiredError) Error() string {
+ return fmt.Sprintf("%s for reasons: %s", e.Message, strings.Join(e.Reasons, ", "))
+}
diff --git a/operator/internal/certrotation/options.go b/operator/internal/certrotation/options.go
new file mode 100644
index 0000000000000..4b30ae7889ff2
--- /dev/null
+++ b/operator/internal/certrotation/options.go
@@ -0,0 +1,86 @@
+package certrotation
+
+import (
+ "crypto/x509"
+ "time"
+
+ "github.com/ViaQ/logerr/v2/kverrors"
+ configv1 "github.com/grafana/loki/operator/apis/config/v1"
+ "github.com/openshift/library-go/pkg/crypto"
+ corev1 "k8s.io/api/core/v1"
+)
+
+// ComponentCertificates is a map of lokistack component names to TLS certificates
+type ComponentCertificates map[string]SelfSignedCertKey
+
+// Options is a set of configuration values to use when
+// building manifests for LokiStack certificates.
+type Options struct {
+ StackName string
+ StackNamespace string
+ Rotation Rotation
+ Signer SigningCA
+ CABundle *corev1.ConfigMap
+ RawCACerts []*x509.Certificate
+ Certificates ComponentCertificates
+}
+
+// SigningCA rotates a self-signed signing CA stored in a secret. It creates a new one when
+// - refresh duration is over
+// - or 80% of validity is over
+// - or the CA is expired.
+type SigningCA struct {
+ RawCA *crypto.CA
+ Secret *corev1.Secret
+ Rotation signerRotation
+}
+
+// SelfSignedCertKey rotates a key and cert signed by a signing CA and stores it in a secret.
+//
+// It creates a new one when
+// - refresh duration is over
+// - or 80% of validity is over
+// - or the cert is expired.
+// - or the signing CA changes.
+type SelfSignedCertKey struct {
+ Secret *corev1.Secret
+ Rotation certificateRotation
+}
+
+// Rotation defines the validity/refresh pairs for certificates.
+type Rotation struct {
+ CACertValidity time.Duration
+ CACertRefresh time.Duration
+ TargetCertValidity time.Duration
+ TargetCertRefresh time.Duration
+}
+
+// ParseRotation builds a new Rotation struct from the feature gate string values.
+func ParseRotation(cfg configv1.BuiltInCertManagement) (Rotation, error) {
+ caValidity, err := time.ParseDuration(cfg.CACertValidity)
+ if err != nil {
+ return Rotation{}, kverrors.Wrap(err, "failed to parse CA validity duration", "value", cfg.CACertValidity)
+ }
+
+ caRefresh, err := time.ParseDuration(cfg.CACertRefresh)
+ if err != nil {
+ return Rotation{}, kverrors.Wrap(err, "failed to parse CA refresh duration", "value", cfg.CACertRefresh)
+ }
+
+ certValidity, err := time.ParseDuration(cfg.CertValidity)
+ if err != nil {
+ return Rotation{}, kverrors.Wrap(err, "failed to parse target certificate validity duration", "value", cfg.CertValidity)
+ }
+
+ certRefresh, err := time.ParseDuration(cfg.CertRefresh)
+ if err != nil {
+ return Rotation{}, kverrors.Wrap(err, "failed to parse target certificate refresh duration", "value", cfg.CertRefresh)
+ }
+
+ return Rotation{
+ CACertValidity: caValidity,
+ CACertRefresh: caRefresh,
+ TargetCertValidity: certValidity,
+ TargetCertRefresh: certRefresh,
+ }, nil
+}
diff --git a/operator/internal/certrotation/rotation.go b/operator/internal/certrotation/rotation.go
new file mode 100644
index 0000000000000..e2913cec4e889
--- /dev/null
+++ b/operator/internal/certrotation/rotation.go
@@ -0,0 +1,184 @@
+package certrotation
+
+import (
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "errors"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/openshift/library-go/pkg/certs"
+ "github.com/openshift/library-go/pkg/crypto"
+ "k8s.io/apimachinery/pkg/util/sets"
+ "k8s.io/apiserver/pkg/authentication/user"
+)
+
+var (
+ errMissingIssuer = errors.New("no issuer set")
+ errMissingHostnames = errors.New("no hostnames set")
+ errMissingUserInfo = errors.New("no user info")
+)
+
+type clockFunc func() time.Time
+
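+// signerRotation creates and annotates self-signed signing CA certificates and decides
+// when a new one is needed.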
+type signerRotation struct {
+ Issuer string
+ Clock clockFunc
+}
+
+func (r *signerRotation) NewCertificate(validity time.Duration) (*crypto.TLSCertificateConfig, error) {
+ if r.Issuer == "" {
+ return nil, errMissingIssuer
+ }
+
+ signerName := fmt.Sprintf("%s@%d", r.Issuer, time.Now().Unix())
+ return crypto.MakeSelfSignedCAConfigForDuration(signerName, validity)
+}
+
+func (r *signerRotation) NeedNewCertificate(annotations map[string]string, refresh time.Duration) string {
+ return needNewCertificate(annotations, r.Clock, refresh, nil)
+}
+
+func (r *signerRotation) SetAnnotations(ca *crypto.TLSCertificateConfig, annotations map[string]string) {
+ annotations[CertificateNotAfterAnnotation] = ca.Certs[0].NotAfter.Format(time.RFC3339)
+ annotations[CertificateNotBeforeAnnotation] = ca.Certs[0].NotBefore.Format(time.RFC3339)
+ annotations[CertificateIssuer] = ca.Certs[0].Issuer.CommonName
+}
+
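+// certificateRotation creates and annotates serving/client certificates issued by the
+// signing CA for a fixed set of hostnames, and decides when a new one is needed.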
+type certificateRotation struct {
+ UserInfo user.Info
+ Hostnames []string
+ Clock clockFunc
+}
+
+func (r *certificateRotation) NewCertificate(signer *crypto.CA, validity time.Duration) (*crypto.TLSCertificateConfig, error) {
+ if r.UserInfo == nil {
+ return nil, errMissingUserInfo
+ }
+ if len(r.Hostnames) == 0 {
+ return nil, errMissingHostnames
+ }
+
+ addClientAuthUsage := func(cert *x509.Certificate) error {
+ cert.ExtKeyUsage = append(cert.ExtKeyUsage, x509.ExtKeyUsageClientAuth)
+ return nil
+ }
+
+ addSubject := func(cert *x509.Certificate) error {
+ cert.Subject = pkix.Name{
+ CommonName: r.UserInfo.GetName(),
+ SerialNumber: r.UserInfo.GetUID(),
+ Organization: r.UserInfo.GetGroups(),
+ }
+ return nil
+ }
+
+ return signer.MakeServerCertForDuration(sets.NewString(r.Hostnames...), validity, addClientAuthUsage, addSubject)
+}
+
+func (r *certificateRotation) NeedNewCertificate(annotations map[string]string, signer *crypto.CA, caBundleCerts []*x509.Certificate, refresh time.Duration) string {
+ reason := needNewCertificate(annotations, r.Clock, refresh, signer)
+ if len(reason) > 0 {
+ return reason
+ }
+
+ // check the signer common name against all the common names in our ca bundle so we don't refresh early
+ signerCommonName := annotations[CertificateIssuer]
+ if signerCommonName == "" {
+ return "missing issuer name"
+ }
+
+ var found bool
+ for _, caCert := range caBundleCerts {
+ if signerCommonName == caCert.Subject.CommonName {
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ return fmt.Sprintf("issuer %q, not in ca bundle:\n%s", signerCommonName, certs.CertificateBundleToString(caBundleCerts))
+ }
+
+ existingHostnames := sets.NewString(strings.Split(annotations[CertificateHostnames], ",")...)
+ requiredHostnames := sets.NewString(r.Hostnames...)
+ if !existingHostnames.Equal(requiredHostnames) {
+ existingNotRequired := existingHostnames.Difference(requiredHostnames)
+ requiredNotExisting := requiredHostnames.Difference(existingHostnames)
+ return fmt.Sprintf("hostnames %q are existing and not required, %q are required and not existing", strings.Join(existingNotRequired.List(), ","), strings.Join(requiredNotExisting.List(), ","))
+ }
+
+ return ""
+}
+
+func (r *certificateRotation) SetAnnotations(cert *crypto.TLSCertificateConfig, annotations map[string]string) {
+ hostnames := sets.String{}
+ for _, ip := range cert.Certs[0].IPAddresses {
+ hostnames.Insert(ip.String())
+ }
+ for _, dnsName := range cert.Certs[0].DNSNames {
+ hostnames.Insert(dnsName)
+ }
+
+ annotations[CertificateNotAfterAnnotation] = cert.Certs[0].NotAfter.Format(time.RFC3339)
+ annotations[CertificateNotBeforeAnnotation] = cert.Certs[0].NotBefore.Format(time.RFC3339)
+ annotations[CertificateIssuer] = cert.Certs[0].Issuer.CommonName
+ // List does a sort so that we have a consistent representation
+ annotations[CertificateHostnames] = strings.Join(hostnames.List(), ",")
+}
+
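+// needNewCertificate returns a non-empty reason when the certificate described by the
+// annotations must be reissued: missing or unparseable validity annotations, already
+// expired, past 80% of its validity, or past the configured refresh time (waiting for a
+// recently rotated signer to settle when a signer is given).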
+func needNewCertificate(annotations map[string]string, clock clockFunc, refresh time.Duration, signer *crypto.CA) string {
+ notAfterString := annotations[CertificateNotAfterAnnotation]
+ if len(notAfterString) == 0 {
+ return "missing notAfter"
+ }
+ notAfter, err := time.Parse(time.RFC3339, notAfterString)
+ if err != nil {
+ return fmt.Sprintf("bad expiry: %q", notAfterString)
+ }
+
+ notBeforeString := annotations[CertificateNotBeforeAnnotation]
+	if len(notBeforeString) == 0 {
+ return "missing notBefore"
+ }
+ notBefore, err := time.Parse(time.RFC3339, notBeforeString)
+ if err != nil {
+ return fmt.Sprintf("bad expiry: %q", notBeforeString)
+ }
+
+ now := clock()
+
+ // Is cert expired?
+ if now.After(notAfter) {
+ return "already expired"
+ }
+
+ // Refresh only when expired
+ validity := notAfter.Sub(notBefore)
+ if validity == refresh {
+ return ""
+ }
+
+ // Are we at 80% of validity?
+ at80Percent := notAfter.Add(-validity / 5)
+ if now.After(at80Percent) {
+ return fmt.Sprintf("past its latest possible time %v", at80Percent)
+ }
+
+ // If Certificate is past its refresh time, we may have action to take. We only do this if the signer is old enough.
+ developerSpecifiedRefresh := notBefore.Add(refresh)
+ if now.After(developerSpecifiedRefresh) {
+ if signer == nil {
+ return fmt.Sprintf("past its refresh time %v", developerSpecifiedRefresh)
+ }
+
+ // make sure the signer has been valid for more than 10% of the target's refresh time.
+ timeToWaitForTrustRotation := refresh / 10
+ if now.After(signer.Config.Certs[0].NotBefore.Add(timeToWaitForTrustRotation)) {
+ return fmt.Sprintf("past its refresh time %v", developerSpecifiedRefresh)
+ }
+ }
+
+ return ""
+}
diff --git a/operator/internal/certrotation/rotation_test.go b/operator/internal/certrotation/rotation_test.go
new file mode 100644
index 0000000000000..89f17225ced89
--- /dev/null
+++ b/operator/internal/certrotation/rotation_test.go
@@ -0,0 +1,335 @@
+package certrotation
+
+import (
+ stdcrypto "crypto"
+ "crypto/rand"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "errors"
+ "math/big"
+ "testing"
+ "time"
+
+ "github.com/openshift/library-go/pkg/crypto"
+ "github.com/stretchr/testify/require"
+)
+
+func TestSignerRotation_ReturnErrorOnMissingIssuer(t *testing.T) {
+ c := signerRotation{}
+ _, err := c.NewCertificate(1 * time.Hour)
+ require.ErrorIs(t, err, errMissingIssuer)
+}
+
+func TestSignerRotation_SetAnnotations(t *testing.T) {
+ var (
+ now = time.Now()
+ nowFn = func() time.Time { return now }
+ nowCA, err = newTestCACertificate(pkix.Name{CommonName: "creator-tests"}, int64(1), 200*time.Minute, nowFn)
+ )
+ require.NoError(t, err)
+
+ c := signerRotation{}
+
+ annotations := map[string]string{}
+ c.SetAnnotations(nowCA.Config, annotations)
+
+ require.Len(t, annotations, 3)
+ require.Contains(t, annotations, CertificateIssuer)
+ require.Contains(t, annotations, CertificateNotBeforeAnnotation)
+ require.Contains(t, annotations, CertificateNotAfterAnnotation)
+}
+
+func TestSignerRotation_NeedNewCertificate(t *testing.T) {
+ var (
+ now = time.Now()
+ nowFn = func() time.Time { return now }
+ invalidNotAfter, _ = time.Parse(time.RFC3339, "")
+ invalidNotBefore, _ = time.Parse(time.RFC3339, "")
+ )
+
+ tt := []struct {
+ desc string
+ annotations map[string]string
+ refresh time.Duration
+ wantReason string
+ }{
+ {
+ desc: "already expired",
+ annotations: map[string]string{
+ CertificateIssuer: "creator-tests",
+ CertificateNotAfterAnnotation: invalidNotAfter.Format(time.RFC3339),
+ CertificateNotBeforeAnnotation: invalidNotBefore.Format(time.RFC3339),
+ },
+ refresh: 2 * time.Minute,
+ wantReason: "already expired",
+ },
+ {
+ desc: "refresh only when expired",
+ annotations: map[string]string{
+ CertificateIssuer: "creator-tests",
+ CertificateNotAfterAnnotation: now.Add(45 * time.Minute).Format(time.RFC3339),
+ CertificateNotBeforeAnnotation: now.Add(-45 * time.Minute).Format(time.RFC3339),
+ },
+ refresh: 90 * time.Minute,
+ },
+ {
+ desc: "at 80 percent validity",
+ annotations: map[string]string{
+ CertificateIssuer: "creator-tests",
+ CertificateNotAfterAnnotation: now.Add(18 * time.Minute).Format(time.RFC3339),
+ CertificateNotBeforeAnnotation: now.Add(-72 * time.Minute).Format(time.RFC3339),
+ },
+ refresh: 40 * time.Minute,
+ wantReason: "past its latest possible time",
+ },
+ {
+ desc: "past its refresh time",
+ annotations: map[string]string{
+ CertificateIssuer: "creator-tests",
+ CertificateNotAfterAnnotation: now.Add(45 * time.Minute).Format(time.RFC3339),
+ CertificateNotBeforeAnnotation: now.Add(-45 * time.Minute).Format(time.RFC3339),
+ },
+ refresh: 40 * time.Minute,
+ wantReason: "past its refresh time",
+ },
+ }
+ for _, tc := range tt {
+ tc := tc
+ t.Run(tc.desc, func(t *testing.T) {
+ t.Parallel()
+ c := signerRotation{Clock: nowFn}
+ reason := c.NeedNewCertificate(tc.annotations, tc.refresh)
+ require.Contains(t, reason, tc.wantReason)
+ })
+ }
+}
+
+func TestCertificateRotation_ReturnErrorOnMissingUserInfo(t *testing.T) {
+ c := certificateRotation{}
+ _, err := c.NewCertificate(nil, 1*time.Hour)
+ require.ErrorIs(t, err, errMissingUserInfo)
+}
+
+func TestCertificateRotation_ReturnErrorOnMissingHostnames(t *testing.T) {
+ c := certificateRotation{UserInfo: defaultUserInfo}
+ _, err := c.NewCertificate(nil, 1*time.Hour)
+ require.ErrorIs(t, err, errMissingHostnames)
+}
+
+func TestCertificateRotation_CertHasRequiredExtensions(t *testing.T) {
+ var (
+ now = time.Now()
+ nowFn = func() time.Time { return now }
+ nowCA, err = newTestCACertificate(pkix.Name{CommonName: "creator-tests"}, int64(1), 200*time.Minute, nowFn)
+ )
+ require.NoError(t, err)
+
+ c := certificateRotation{
+ UserInfo: defaultUserInfo,
+ Hostnames: []string{"example.org"},
+ }
+ cert, err := c.NewCertificate(nowCA, 1*time.Hour)
+ require.NoError(t, err)
+
+ require.Contains(t, cert.Certs[0].ExtKeyUsage, x509.ExtKeyUsageServerAuth)
+ require.Contains(t, cert.Certs[0].ExtKeyUsage, x509.ExtKeyUsageClientAuth)
+ require.Equal(t, defaultUserInfo.GetName(), cert.Certs[0].Subject.CommonName)
+ require.Equal(t, defaultUserInfo.GetUID(), cert.Certs[0].Subject.SerialNumber)
+ require.Equal(t, defaultUserInfo.GetGroups(), cert.Certs[0].Subject.Organization)
+}
+
+func TestCertificateRotation_SetAnnotations(t *testing.T) {
+ var (
+ now = time.Now()
+ nowFn = func() time.Time { return now }
+ nowCA, err = newTestCACertificate(pkix.Name{CommonName: "creator-tests"}, int64(1), 200*time.Minute, nowFn)
+ )
+ require.NoError(t, err)
+
+ c := certificateRotation{Hostnames: []string{"example.org"}}
+
+ annotations := map[string]string{}
+ c.SetAnnotations(nowCA.Config, annotations)
+
+ require.Len(t, annotations, 4)
+ require.Contains(t, annotations, CertificateIssuer)
+ require.Contains(t, annotations, CertificateNotBeforeAnnotation)
+ require.Contains(t, annotations, CertificateNotAfterAnnotation)
+ require.Contains(t, annotations, CertificateHostnames)
+}
+
+func TestCertificateRotation_NeedNewCertificate(t *testing.T) {
+ var (
+ now = time.Now()
+ nowFn = func() time.Time { return now }
+ invalidNotAfter, _ = time.Parse(time.RFC3339, "")
+ invalidNotBefore, _ = time.Parse(time.RFC3339, "")
+ nowCA, _ = newTestCACertificate(pkix.Name{CommonName: "creator-tests"}, int64(1), 200*time.Minute, nowFn)
+
+ twentyMinutesBeforeNow = time.Now().Add(-20 * time.Minute)
+ twentyMinutesBeforeNowFn = func() time.Time { return twentyMinutesBeforeNow }
+ twentyMinutesBeforeCA, _ = newTestCACertificate(pkix.Name{CommonName: "creator-tests"}, int64(1), 200*time.Minute, twentyMinutesBeforeNowFn)
+ )
+
+ tt := []struct {
+ desc string
+ annotations map[string]string
+ signerFn func() (*crypto.CA, error)
+ refresh time.Duration
+ wantReason string
+ }{
+ {
+ desc: "already expired",
+ annotations: map[string]string{
+ CertificateIssuer: "creator-tests",
+ CertificateNotAfterAnnotation: invalidNotAfter.Format(time.RFC3339),
+ CertificateNotBeforeAnnotation: invalidNotBefore.Format(time.RFC3339),
+ },
+ signerFn: func() (*crypto.CA, error) {
+ return nowCA, nil
+ },
+ refresh: 2 * time.Minute,
+ wantReason: "already expired",
+ },
+ {
+ desc: "refresh only when expired",
+ annotations: map[string]string{
+ CertificateIssuer: "creator-tests",
+ CertificateNotAfterAnnotation: now.Add(45 * time.Minute).Format(time.RFC3339),
+ CertificateNotBeforeAnnotation: now.Add(-45 * time.Minute).Format(time.RFC3339),
+ },
+ signerFn: func() (*crypto.CA, error) {
+ return nowCA, nil
+ },
+ refresh: 90 * time.Minute,
+ },
+ {
+ desc: "at 80 percent validity",
+ annotations: map[string]string{
+ CertificateIssuer: "creator-tests",
+ CertificateNotAfterAnnotation: now.Add(18 * time.Minute).Format(time.RFC3339),
+ CertificateNotBeforeAnnotation: now.Add(-72 * time.Minute).Format(time.RFC3339),
+ },
+ signerFn: func() (*crypto.CA, error) {
+ return nowCA, nil
+ },
+ refresh: 40 * time.Minute,
+ wantReason: "past its latest possible time",
+ },
+ {
+ desc: "past its refresh time",
+ annotations: map[string]string{
+ CertificateIssuer: "creator-tests",
+ CertificateNotAfterAnnotation: now.Add(45 * time.Minute).Format(time.RFC3339),
+ CertificateNotBeforeAnnotation: now.Add(-45 * time.Minute).Format(time.RFC3339),
+ },
+ signerFn: func() (*crypto.CA, error) {
+ return twentyMinutesBeforeCA, nil
+ },
+ refresh: 40 * time.Minute,
+ wantReason: "past its refresh time",
+ },
+ {
+ desc: "missing issuer name",
+ annotations: map[string]string{
+ CertificateNotAfterAnnotation: now.Add(45 * time.Minute).Format(time.RFC3339),
+ CertificateNotBeforeAnnotation: now.Add(-45 * time.Minute).Format(time.RFC3339),
+ },
+ signerFn: func() (*crypto.CA, error) {
+ return nowCA, nil
+ },
+ refresh: 70 * time.Minute,
+ wantReason: "missing issuer name",
+ },
+ {
+ desc: "issuer not in ca bundle",
+ annotations: map[string]string{
+ CertificateIssuer: "issuer-not-in-any-ca",
+ CertificateNotAfterAnnotation: now.Add(45 * time.Minute).Format(time.RFC3339),
+ CertificateNotBeforeAnnotation: now.Add(-45 * time.Minute).Format(time.RFC3339),
+ },
+ signerFn: func() (*crypto.CA, error) {
+ return nowCA, nil
+ },
+ refresh: 70 * time.Minute,
+ wantReason: `issuer "issuer-not-in-any-ca", not in ca bundle`,
+ },
+ {
+ desc: "missing hostnames",
+ annotations: map[string]string{
+ CertificateIssuer: "creator-tests",
+ CertificateNotAfterAnnotation: now.Add(45 * time.Minute).Format(time.RFC3339),
+ CertificateNotBeforeAnnotation: now.Add(-45 * time.Minute).Format(time.RFC3339),
+ },
+ signerFn: func() (*crypto.CA, error) {
+ return nowCA, nil
+ },
+ refresh: 70 * time.Minute,
+ wantReason: "are required and not existing",
+ },
+ }
+ for _, tc := range tt {
+ tc := tc
+ t.Run(tc.desc, func(t *testing.T) {
+ t.Parallel()
+ rawCA, err := tc.signerFn()
+ require.NoError(t, err)
+
+ c := certificateRotation{
+ Clock: nowFn,
+ Hostnames: []string{"a.b.c.d", "e.d.f.g"},
+ }
+ reason := c.NeedNewCertificate(tc.annotations, rawCA, rawCA.Config.Certs, tc.refresh)
+ require.Contains(t, reason, tc.wantReason)
+ })
+ }
+}
+
+func newTestCACertificate(subject pkix.Name, serialNumber int64, validity time.Duration, currentTime func() time.Time) (*crypto.CA, error) {
+ caPublicKey, caPrivateKey, err := crypto.NewKeyPair()
+ if err != nil {
+ return nil, err
+ }
+
+ caCert := &x509.Certificate{
+ Subject: subject,
+
+ SignatureAlgorithm: x509.SHA256WithRSA,
+
+ NotBefore: currentTime().Add(-1 * time.Second),
+ NotAfter: currentTime().Add(validity),
+ SerialNumber: big.NewInt(serialNumber),
+
+ KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
+ BasicConstraintsValid: true,
+ IsCA: true,
+ }
+
+ cert, err := signCertificate(caCert, caPublicKey, caCert, caPrivateKey)
+ if err != nil {
+ return nil, err
+ }
+
+ return &crypto.CA{
+ Config: &crypto.TLSCertificateConfig{
+ Certs: []*x509.Certificate{cert},
+ Key: caPrivateKey,
+ },
+ SerialGenerator: &crypto.RandomSerialGenerator{},
+ }, nil
+}
+
+func signCertificate(template *x509.Certificate, requestKey stdcrypto.PublicKey, issuer *x509.Certificate, issuerKey stdcrypto.PrivateKey) (*x509.Certificate, error) {
+ derBytes, err := x509.CreateCertificate(rand.Reader, template, issuer, requestKey, issuerKey)
+ if err != nil {
+ return nil, err
+ }
+ certs, err := x509.ParseCertificates(derBytes)
+ if err != nil {
+ return nil, err
+ }
+ if len(certs) != 1 {
+ return nil, errors.New("Expected a single certificate")
+ }
+ return certs[0], nil
+}
diff --git a/operator/internal/certrotation/signer.go b/operator/internal/certrotation/signer.go
new file mode 100644
index 0000000000000..244c18b94a30d
--- /dev/null
+++ b/operator/internal/certrotation/signer.go
@@ -0,0 +1,100 @@
+package certrotation
+
+import (
+ "bytes"
+ "fmt"
+ "time"
+
+ "github.com/openshift/library-go/pkg/crypto"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// SigningCAExpired returns a CertExpiredError when the signing CA certificate expired,
+// including the reason of expiry. It returns nil if the secret is not loaded or still valid.
+func SigningCAExpired(opts Options) error {
+ // Skip as secret not created or loaded
+ if opts.Signer.Secret == nil {
+ return nil
+ }
+
+ reason := opts.Signer.Rotation.NeedNewCertificate(opts.Signer.Secret.Annotations, opts.Rotation.CACertRefresh)
+ if reason != "" {
+ return &CertExpiredError{Message: "signing CA certificate expired", Reasons: []string{reason}}
+ }
+
+ return nil
+}
+
+// buildSigningCASecret returns a k8s Secret holding the signing CA certificate
+func buildSigningCASecret(opts *Options) (client.Object, error) {
+ signingCertKeyPairSecret := newSigningCASecret(*opts)
+ opts.Signer.Rotation.Issuer = fmt.Sprintf("%s_%s", signingCertKeyPairSecret.Namespace, signingCertKeyPairSecret.Name)
+
+ if reason := opts.Signer.Rotation.NeedNewCertificate(signingCertKeyPairSecret.Annotations, opts.Rotation.CACertRefresh); reason != "" {
+ if err := setSigningCertKeyPairSecret(signingCertKeyPairSecret, opts.Rotation.CACertValidity, opts.Signer.Rotation); err != nil {
+ return nil, err
+ }
+ }
+
+ var (
+ cert = signingCertKeyPairSecret.Data[corev1.TLSCertKey]
+ key = signingCertKeyPairSecret.Data[corev1.TLSPrivateKeyKey]
+ )
+
+ rawCA, err := crypto.GetCAFromBytes(cert, key)
+ if err != nil {
+ return nil, err
+ }
+
+ opts.Signer.RawCA = rawCA
+
+ return signingCertKeyPairSecret, nil
+}
+
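+// newSigningCASecret returns the TLS secret that holds the signing CA, carrying over
+// annotations, labels, and data from an existing secret when present.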
+func newSigningCASecret(opts Options) *corev1.Secret {
+ current := opts.Signer.Secret.DeepCopy()
+
+ s := &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: SigningCASecretName(opts.StackName),
+ Namespace: opts.StackNamespace,
+ },
+ Type: corev1.SecretTypeTLS,
+ }
+
+ if current != nil {
+ s.Annotations = current.Annotations
+ s.Labels = current.Labels
+ s.Data = current.Data
+ }
+
+ return s
+}
+
+// setSigningCertKeyPairSecret creates a new signing cert/key pair and sets them in the secret
+func setSigningCertKeyPairSecret(s *corev1.Secret, validity time.Duration, caCreator signerRotation) error {
+ if s.Annotations == nil {
+ s.Annotations = map[string]string{}
+ }
+ if s.Data == nil {
+ s.Data = map[string][]byte{}
+ }
+
+ ca, err := caCreator.NewCertificate(validity)
+ if err != nil {
+ return err
+ }
+
+ certBytes := &bytes.Buffer{}
+ keyBytes := &bytes.Buffer{}
+ if err := ca.WriteCertConfig(certBytes, keyBytes); err != nil {
+ return err
+ }
+ s.Data[corev1.TLSCertKey] = certBytes.Bytes()
+ s.Data[corev1.TLSPrivateKeyKey] = keyBytes.Bytes()
+ caCreator.SetAnnotations(ca, s.Annotations)
+
+ return nil
+}
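The rotation decision consumed above (`signerRotation.NeedNewCertificate`, defined elsewhere in this PR) is driven by the `loki.grafana.com/certificate-not-before`/`-not-after` annotations written into the secret. As a rough, standalone illustration of that kind of annotation-based check — a sketch with hypothetical names, not the actual implementation:

```go
package main

import (
	"fmt"
	"time"
)

// needNewCertificate is a simplified, annotation-driven rotation check:
// rotate when the annotations are missing or malformed, when the certificate
// already expired, or when the refresh window has elapsed since NotBefore.
// It returns the reason as a string, or "" if no rotation is needed.
func needNewCertificate(annotations map[string]string, refresh time.Duration, now time.Time) string {
	notBefore, errBefore := time.Parse(time.RFC3339, annotations["loki.grafana.com/certificate-not-before"])
	notAfter, errAfter := time.Parse(time.RFC3339, annotations["loki.grafana.com/certificate-not-after"])
	if errBefore != nil || errAfter != nil {
		return "missing or malformed validity annotations"
	}
	if now.After(notAfter) {
		return "already expired"
	}
	if now.After(notBefore.Add(refresh)) {
		return "past its refresh time"
	}
	return ""
}

func main() {
	now := time.Now()
	annotations := map[string]string{
		"loki.grafana.com/certificate-not-before": now.Add(-6 * time.Minute).Format(time.RFC3339),
		"loki.grafana.com/certificate-not-after":  now.Add(4 * time.Minute).Format(time.RFC3339),
	}
	fmt.Println(needNewCertificate(annotations, 5*time.Minute, now)) // past its refresh time
}
```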
diff --git a/operator/internal/certrotation/signer_test.go b/operator/internal/certrotation/signer_test.go
new file mode 100644
index 0000000000000..8b4778053ee05
--- /dev/null
+++ b/operator/internal/certrotation/signer_test.go
@@ -0,0 +1,131 @@
+package certrotation
+
+import (
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func TestSigningCAExpired_EmptySecret(t *testing.T) {
+ opts := Options{
+ StackName: "dev",
+ StackNamespace: "ns",
+ }
+
+ err := SigningCAExpired(opts)
+ require.NoError(t, err)
+}
+
+func TestSigningCAExpired_ExpiredSecret(t *testing.T) {
+ var (
+ stackName = "dev"
+ stackNamespace = "ns"
+ clock = time.Now
+ invalidNotAfter, _ = time.Parse(time.RFC3339, "")
+ invalidNotBefore, _ = time.Parse(time.RFC3339, "")
+ )
+
+ opts := Options{
+ StackName: stackName,
+ StackNamespace: stackNamespace,
+ Signer: SigningCA{
+ Rotation: signerRotation{
+ Clock: clock,
+ },
+ Secret: &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: SigningCASecretName(stackName),
+ Namespace: stackNamespace,
+ Annotations: map[string]string{
+ CertificateIssuer: "dev_ns@signing-ca@10000",
+ CertificateNotAfterAnnotation: invalidNotAfter.Format(time.RFC3339),
+ CertificateNotBeforeAnnotation: invalidNotBefore.Format(time.RFC3339),
+ },
+ },
+ },
+ },
+ }
+
+ err := SigningCAExpired(opts)
+
+ e := &CertExpiredError{}
+ require.Error(t, err)
+ require.ErrorAs(t, err, &e)
+ require.Contains(t, err.(*CertExpiredError).Reasons, "already expired")
+}
+
+func TestBuildSigningCASecret_Create(t *testing.T) {
+ opts := &Options{
+ StackName: "dev",
+ StackNamespace: "ns",
+ }
+
+ obj, err := buildSigningCASecret(opts)
+ require.NoError(t, err)
+ require.NotNil(t, obj)
+ require.NotNil(t, opts.Signer.RawCA)
+
+ s := obj.(*corev1.Secret)
+ // Require mandatory annotations for rotation
+ require.Contains(t, s.Annotations, CertificateIssuer)
+ require.Contains(t, s.Annotations, CertificateNotAfterAnnotation)
+ require.Contains(t, s.Annotations, CertificateNotBeforeAnnotation)
+
+ // Require cert-key-pair in data section
+ require.NotEmpty(t, s.Data[corev1.TLSCertKey])
+ require.NotEmpty(t, s.Data[corev1.TLSPrivateKeyKey])
+}
+
+func TestBuildSigningCASecret_Rotate(t *testing.T) {
+ var (
+ clock = time.Now
+ invalidNotAfter, _ = time.Parse(time.RFC3339, "")
+ invalidNotBefore, _ = time.Parse(time.RFC3339, "")
+ )
+
+ opts := &Options{
+ StackName: "dev",
+ StackNamespace: "ns",
+ Signer: SigningCA{
+ Rotation: signerRotation{
+ Clock: clock,
+ },
+ Secret: &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "dev-signing-ca",
+ Namespace: "ns",
+ Annotations: map[string]string{
+ CertificateIssuer: "dev_ns@signing-ca@10000",
+ CertificateNotAfterAnnotation: invalidNotAfter.Format(time.RFC3339),
+ CertificateNotBeforeAnnotation: invalidNotBefore.Format(time.RFC3339),
+ },
+ },
+ },
+ },
+ }
+
+ obj, err := buildSigningCASecret(opts)
+ require.NoError(t, err)
+ require.NotNil(t, obj)
+ require.NotNil(t, opts.Signer.RawCA)
+
+ s := obj.(*corev1.Secret)
+ // Require mandatory annotations for rotation
+ require.Contains(t, s.Annotations, CertificateIssuer)
+ require.Contains(t, s.Annotations, CertificateNotAfterAnnotation)
+ require.Contains(t, s.Annotations, CertificateNotBeforeAnnotation)
+
+ // Require cert-key-pair in data section
+ require.NotEmpty(t, s.Data[corev1.TLSCertKey])
+ require.NotEmpty(t, s.Data[corev1.TLSPrivateKeyKey])
+
+ // Require rotation
+ require.NotEqual(t, s.Annotations[CertificateIssuer], opts.Signer.Secret.Annotations[CertificateIssuer])
+ require.NotEqual(t, s.Annotations[CertificateNotAfterAnnotation], opts.Signer.Secret.Annotations[CertificateNotAfterAnnotation])
+ require.NotEqual(t, s.Annotations[CertificateNotBeforeAnnotation], opts.Signer.Secret.Annotations[CertificateNotBeforeAnnotation])
+ require.NotEqual(t, string(s.Data[corev1.TLSCertKey]), string(opts.Signer.Secret.Data[corev1.TLSCertKey]))
+ require.NotEqual(t, string(s.Data[corev1.TLSPrivateKeyKey]), string(opts.Signer.Secret.Data[corev1.TLSPrivateKeyKey]))
+}
diff --git a/operator/internal/certrotation/target.go b/operator/internal/certrotation/target.go
new file mode 100644
index 0000000000000..db49a1edd706c
--- /dev/null
+++ b/operator/internal/certrotation/target.go
@@ -0,0 +1,126 @@
+package certrotation
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/ViaQ/logerr/v2/kverrors"
+ "github.com/openshift/library-go/pkg/crypto"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// CertificatesExpired returns an error, carrying the list of expiry reasons, if any certificates expired; otherwise it returns nil.
+func CertificatesExpired(opts Options) error {
+ if opts.Signer.Secret == nil || opts.CABundle == nil {
+ return nil
+ }
+
+ for _, cert := range opts.Certificates {
+ if cert.Secret == nil {
+ return nil
+ }
+ }
+
+ rawCA, err := crypto.GetCAFromBytes(opts.Signer.Secret.Data[corev1.TLSCertKey], opts.Signer.Secret.Data[corev1.TLSPrivateKeyKey])
+ if err != nil {
+ return kverrors.Wrap(err, "failed to get signing CA from secret")
+ }
+
+ caBundle := opts.CABundle.Data[CAFile]
+ caCerts, err := crypto.CertsFromPEM([]byte(caBundle))
+ if err != nil {
+ return kverrors.Wrap(err, "failed to get ca bundle certificates from configmap")
+ }
+
+ var reasons []string
+ for name, cert := range opts.Certificates {
+ reason := cert.Rotation.NeedNewCertificate(cert.Secret.Annotations, rawCA, caCerts, opts.Rotation.TargetCertRefresh)
+ if reason != "" {
+ reasons = append(reasons, fmt.Sprintf("%s: %s", name, reason))
+ }
+ }
+
+ if len(reasons) == 0 {
+ return nil
+ }
+
+ return &CertExpiredError{Message: "certificates expired", Reasons: reasons}
+}
+
+// buildTargetCertKeyPairSecrets returns a slice of all rotated client and serving lokistack certificates.
+func buildTargetCertKeyPairSecrets(opts Options) ([]client.Object, error) {
+ var (
+ res = make([]client.Object, 0)
+ ns = opts.StackNamespace
+ rawCA = opts.Signer.RawCA
+ caBundle = opts.RawCACerts
+ validity = opts.Rotation.TargetCertValidity
+ refresh = opts.Rotation.TargetCertRefresh
+ )
+
+ for name, cert := range opts.Certificates {
+ secret := newTargetCertificateSecret(name, ns, cert.Secret)
+ reason := cert.Rotation.NeedNewCertificate(secret.Annotations, rawCA, caBundle, refresh)
+ if len(reason) > 0 {
+ if err := setTargetCertKeyPairSecret(secret, validity, rawCA, cert.Rotation); err != nil {
+ return nil, err
+ }
+ }
+
+ res = append(res, secret)
+ }
+
+ return res, nil
+}
+
+func newTargetCertificateSecret(name, ns string, s *corev1.Secret) *corev1.Secret {
+ current := s.DeepCopy()
+
+ ss := &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: ns,
+ },
+ Type: corev1.SecretTypeTLS,
+ }
+
+ if current != nil {
+ ss.Annotations = current.Annotations
+ ss.Labels = current.Labels
+ ss.Data = current.Data
+ }
+
+ return ss
+}
+
+// setTargetCertKeyPairSecret creates a new cert/key pair and sets them in the secret. Only one of client, serving, or signer rotation may be specified.
+func setTargetCertKeyPairSecret(s *corev1.Secret, validity time.Duration, signer *crypto.CA, certCreator certificateRotation) error {
+ if s.Annotations == nil {
+ s.Annotations = map[string]string{}
+ }
+ if s.Data == nil {
+ s.Data = map[string][]byte{}
+ }
+
+ // our annotation is based on our cert validity, so we want to make sure that we don't specify something past our signer
+ targetValidity := validity
+ remainingSignerValidity := time.Until(signer.Config.Certs[0].NotAfter)
+ if remainingSignerValidity < validity {
+ targetValidity = remainingSignerValidity
+ }
+
+ certKeyPair, err := certCreator.NewCertificate(signer, targetValidity)
+ if err != nil {
+ return err
+ }
+
+ s.Data[corev1.TLSCertKey], s.Data[corev1.TLSPrivateKeyKey], err = certKeyPair.GetPEMBytes()
+ if err != nil {
+ return err
+ }
+ certCreator.SetAnnotations(certKeyPair, s.Annotations)
+
+ return nil
+}
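The validity clamp in `setTargetCertKeyPairSecret` (a target certificate must never outlive its signer) can be shown in isolation; this is a sketch using only the standard library:

```go
package main

import (
	"fmt"
	"time"
)

// cappedValidity mirrors the clamp in setTargetCertKeyPairSecret: the
// requested validity is reduced to the signer's remaining lifetime when the
// signer would expire first.
func cappedValidity(requested time.Duration, signerNotAfter time.Time) time.Duration {
	remaining := time.Until(signerNotAfter)
	if remaining < requested {
		return remaining
	}
	return requested
}

func main() {
	signerNotAfter := time.Now().Add(90 * 24 * time.Hour)
	fmt.Println(cappedValidity(180*24*time.Hour, signerNotAfter)) // roughly 2160h, not 4320h
}
```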
diff --git a/operator/internal/certrotation/target_test.go b/operator/internal/certrotation/target_test.go
new file mode 100644
index 0000000000000..c705e9c50816f
--- /dev/null
+++ b/operator/internal/certrotation/target_test.go
@@ -0,0 +1,165 @@
+package certrotation
+
+import (
+ "crypto/x509"
+ "testing"
+ "time"
+
+ configv1 "github.com/grafana/loki/operator/apis/config/v1"
+ "github.com/stretchr/testify/require"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/client-go/util/cert"
+)
+
+func TestCertificatesExpired(t *testing.T) {
+ var (
+ stackName = "dev"
+		stackNamespace     = "ns"
+ invalidNotAfter, _ = time.Parse(time.RFC3339, "")
+ invalidNotBefore, _ = time.Parse(time.RFC3339, "")
+ rawCA, caBytes = newTestCABundle(t, "dev-ca")
+ cfg = configv1.BuiltInCertManagement{
+ CACertValidity: "10m",
+ CACertRefresh: "5m",
+ CertValidity: "2m",
+ CertRefresh: "1m",
+ }
+ )
+
+ certBytes, keyBytes, err := rawCA.Config.GetPEMBytes()
+ require.NoError(t, err)
+
+ opts := Options{
+ StackName: stackName,
+		StackNamespace: stackNamespace,
+ Signer: SigningCA{
+ RawCA: rawCA,
+ Secret: &corev1.Secret{
+ Data: map[string][]byte{
+ corev1.TLSCertKey: certBytes,
+ corev1.TLSPrivateKeyKey: keyBytes,
+ },
+ },
+ },
+ CABundle: &corev1.ConfigMap{
+ Data: map[string]string{
+ CAFile: string(caBytes),
+ },
+ },
+ RawCACerts: rawCA.Config.Certs,
+ }
+ err = ApplyDefaultSettings(&opts, cfg)
+ require.NoError(t, err)
+
+ for _, name := range ComponentCertSecretNames(stackName) {
+ cert := opts.Certificates[name]
+ cert.Secret = &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+				Namespace: stackNamespace,
+ Annotations: map[string]string{
+ CertificateIssuer: "dev_ns@signing-ca@10000",
+ CertificateNotAfterAnnotation: invalidNotAfter.Format(time.RFC3339),
+ CertificateNotBeforeAnnotation: invalidNotBefore.Format(time.RFC3339),
+ },
+ },
+ }
+ opts.Certificates[name] = cert
+ }
+
+ var expired *CertExpiredError
+ err = CertificatesExpired(opts)
+
+ require.Error(t, err)
+ require.ErrorAs(t, err, &expired)
+ require.Len(t, err.(*CertExpiredError).Reasons, 15)
+}
+
+func TestBuildTargetCertKeyPairSecrets_Create(t *testing.T) {
+ var (
+ rawCA, _ = newTestCABundle(t, "test-ca")
+ cfg = configv1.BuiltInCertManagement{
+ CACertValidity: "10m",
+ CACertRefresh: "5m",
+ CertValidity: "2m",
+ CertRefresh: "1m",
+ }
+ )
+
+ opts := Options{
+ StackName: "dev",
+ StackNamespace: "ns",
+ Signer: SigningCA{
+ RawCA: rawCA,
+ },
+ RawCACerts: rawCA.Config.Certs,
+ }
+
+ err := ApplyDefaultSettings(&opts, cfg)
+ require.NoError(t, err)
+
+ objs, err := buildTargetCertKeyPairSecrets(opts)
+ require.NoError(t, err)
+ require.Len(t, objs, 15)
+}
+
+func TestBuildTargetCertKeyPairSecrets_Rotate(t *testing.T) {
+ var (
+ rawCA, _ = newTestCABundle(t, "test-ca")
+ invalidNotAfter, _ = time.Parse(time.RFC3339, "")
+ invalidNotBefore, _ = time.Parse(time.RFC3339, "")
+ cfg = configv1.BuiltInCertManagement{
+ CACertValidity: "10m",
+ CACertRefresh: "5m",
+ CertValidity: "2m",
+ CertRefresh: "1m",
+ }
+ )
+
+ opts := Options{
+ StackName: "dev",
+ StackNamespace: "ns",
+ Signer: SigningCA{
+ RawCA: rawCA,
+ },
+ RawCACerts: rawCA.Config.Certs,
+ Certificates: map[string]SelfSignedCertKey{
+ "dev-ingester-grpc": {
+ Secret: &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "dev-ingester-grpc",
+ Namespace: "ns",
+ Annotations: map[string]string{
+ CertificateIssuer: "dev_ns@signing-ca@10000",
+ CertificateNotAfterAnnotation: invalidNotAfter.Format(time.RFC3339),
+ CertificateNotBeforeAnnotation: invalidNotBefore.Format(time.RFC3339),
+ },
+ },
+ },
+ },
+ },
+ }
+ err := ApplyDefaultSettings(&opts, cfg)
+ require.NoError(t, err)
+
+ objs, err := buildTargetCertKeyPairSecrets(opts)
+ require.NoError(t, err)
+ require.Len(t, objs, 15)
+
+ // Check serving certificate rotation
+ s := objs[7].(*corev1.Secret)
+ ss := opts.Certificates["dev-ingester-grpc"]
+
+ require.NotEqual(t, s.Annotations[CertificateIssuer], ss.Secret.Annotations[CertificateIssuer])
+ require.NotEqual(t, s.Annotations[CertificateNotAfterAnnotation], ss.Secret.Annotations[CertificateNotAfterAnnotation])
+ require.NotEqual(t, s.Annotations[CertificateNotBeforeAnnotation], ss.Secret.Annotations[CertificateNotBeforeAnnotation])
+ require.NotEqual(t, s.Annotations[CertificateHostnames], ss.Secret.Annotations[CertificateHostnames])
+ require.NotEqual(t, string(s.Data[corev1.TLSCertKey]), string(ss.Secret.Data[corev1.TLSCertKey]))
+ require.NotEqual(t, string(s.Data[corev1.TLSPrivateKeyKey]), string(ss.Secret.Data[corev1.TLSPrivateKeyKey]))
+
+ certs, err := cert.ParseCertsPEM(s.Data[corev1.TLSCertKey])
+ require.NoError(t, err)
+ require.Contains(t, certs[0].ExtKeyUsage, x509.ExtKeyUsageClientAuth)
+ require.Contains(t, certs[0].ExtKeyUsage, x509.ExtKeyUsageServerAuth)
+}
diff --git a/operator/internal/certrotation/var.go b/operator/internal/certrotation/var.go
new file mode 100644
index 0000000000000..1134ebbd135f8
--- /dev/null
+++ b/operator/internal/certrotation/var.go
@@ -0,0 +1,52 @@
+package certrotation
+
+import (
+ "fmt"
+)
+
+const (
+	// CertificateNotBeforeAnnotation contains the certificate validity start date in RFC3339 format.
+ CertificateNotBeforeAnnotation = "loki.grafana.com/certificate-not-before"
+ // CertificateNotAfterAnnotation contains the certificate expiration date in RFC3339 format.
+ CertificateNotAfterAnnotation = "loki.grafana.com/certificate-not-after"
+ // CertificateIssuer contains the common name of the certificate that signed another certificate.
+ CertificateIssuer = "loki.grafana.com/certificate-issuer"
+ // CertificateHostnames contains the hostnames used by a signer.
+ CertificateHostnames = "loki.grafana.com/certificate-hostnames"
+)
+
+const (
+ // CAFile is the file name of the certificate authority file
+ CAFile = "service-ca.crt"
+)
+
+// SigningCASecretName returns the lokistack signing CA secret name
+func SigningCASecretName(stackName string) string {
+ return fmt.Sprintf("%s-signing-ca", stackName)
+}
+
+// CABundleName returns the lokistack ca bundle configmap name
+func CABundleName(stackName string) string {
+ return fmt.Sprintf("%s-ca-bundle", stackName)
+}
+
+// ComponentCertSecretNames returns a list of all Loki component certificate secret names.
+func ComponentCertSecretNames(stackName string) []string {
+ return []string{
+ fmt.Sprintf("%s-gateway-client-http", stackName),
+ fmt.Sprintf("%s-compactor-http", stackName),
+ fmt.Sprintf("%s-compactor-grpc", stackName),
+ fmt.Sprintf("%s-distributor-http", stackName),
+ fmt.Sprintf("%s-distributor-grpc", stackName),
+ fmt.Sprintf("%s-index-gateway-http", stackName),
+ fmt.Sprintf("%s-index-gateway-grpc", stackName),
+ fmt.Sprintf("%s-ingester-http", stackName),
+ fmt.Sprintf("%s-ingester-grpc", stackName),
+ fmt.Sprintf("%s-querier-http", stackName),
+ fmt.Sprintf("%s-querier-grpc", stackName),
+ fmt.Sprintf("%s-query-frontend-http", stackName),
+ fmt.Sprintf("%s-query-frontend-grpc", stackName),
+ fmt.Sprintf("%s-ruler-http", stackName),
+ fmt.Sprintf("%s-ruler-grpc", stackName),
+ }
+}
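As a quick illustration of the naming scheme above — an example test, not part of this diff, that could live in the `certrotation` package:

```go
package certrotation

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// TestNamingScheme_Example is illustrative only: it spells out the names the
// helpers above generate for a stack called "lokistack-dev".
func TestNamingScheme_Example(t *testing.T) {
	require.Equal(t, "lokistack-dev-signing-ca", SigningCASecretName("lokistack-dev"))
	require.Equal(t, "lokistack-dev-ca-bundle", CABundleName("lokistack-dev"))

	names := ComponentCertSecretNames("lokistack-dev")
	require.Len(t, names, 15)
	require.Contains(t, names, "lokistack-dev-ingester-grpc")
	require.Contains(t, names, "lokistack-dev-gateway-client-http")
}
```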
diff --git a/operator/internal/handlers/internal/certificates/options.go b/operator/internal/handlers/internal/certificates/options.go
new file mode 100644
index 0000000000000..017f3f372d1ad
--- /dev/null
+++ b/operator/internal/handlers/internal/certificates/options.go
@@ -0,0 +1,128 @@
+package certificates
+
+import (
+ "context"
+ "regexp"
+
+ "github.com/ViaQ/logerr/v2/kverrors"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ "github.com/grafana/loki/operator/internal/certrotation"
+ "github.com/grafana/loki/operator/internal/external/k8s"
+
+ corev1 "k8s.io/api/core/v1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+var serviceCAnnotationsRe = regexp.MustCompile(`^service\.(?:alpha|beta)\.openshift\.io\/.+`)
+
+// GetOptions returns a certrotation options struct filled with any client and serving certificate secrets found.
+// It returns an error only if the k8s client returns an error other than IsNotFound, or if merging the options fails.
+func GetOptions(ctx context.Context, k k8s.Client, req ctrl.Request, mode lokiv1.ModeType) (certrotation.Options, error) {
+ name := certrotation.SigningCASecretName(req.Name)
+ ca, err := getSecret(ctx, k, name, req.Namespace)
+ if err != nil {
+ if !apierrors.IsNotFound(err) {
+ return certrotation.Options{}, kverrors.Wrap(err, "failed to get signing ca secret", "name", name)
+ }
+ }
+
+ name = certrotation.CABundleName(req.Name)
+ bundle, err := getConfigMap(ctx, k, name, req.Namespace)
+ if err != nil {
+ if !apierrors.IsNotFound(err) {
+ return certrotation.Options{}, kverrors.Wrap(err, "failed to get ca bundle secret", "name", name)
+ }
+ }
+ configureCABundleForTenantMode(bundle, mode)
+
+ certs, err := getCertificateOptions(ctx, k, req)
+ if err != nil {
+ return certrotation.Options{}, err
+ }
+ configureCertificatesForTenantMode(certs, mode)
+
+ return certrotation.Options{
+ StackName: req.Name,
+ StackNamespace: req.Namespace,
+ Signer: certrotation.SigningCA{
+ Secret: ca,
+ },
+ CABundle: bundle,
+ Certificates: certs,
+ }, nil
+}
+
+func getCertificateOptions(ctx context.Context, k k8s.Client, req ctrl.Request) (certrotation.ComponentCertificates, error) {
+ cs := certrotation.ComponentCertSecretNames(req.Name)
+ certs := make(certrotation.ComponentCertificates, len(cs))
+
+ for _, name := range cs {
+ s, err := getSecret(ctx, k, name, req.Namespace)
+ if err != nil {
+ if !apierrors.IsNotFound(err) {
+ return nil, kverrors.Wrap(err, "failed to get secret", "name", name)
+ }
+ continue
+ }
+
+ certs[name] = certrotation.SelfSignedCertKey{Secret: s}
+ }
+
+ return certs, nil
+}
+
+func getSecret(ctx context.Context, k k8s.Client, name, ns string) (*corev1.Secret, error) {
+ key := client.ObjectKey{Name: name, Namespace: ns}
+ s := &corev1.Secret{}
+ err := k.Get(ctx, key, s)
+ if err != nil {
+ return nil, err
+ }
+
+ return s, nil
+}
+
+func getConfigMap(ctx context.Context, k k8s.Client, name, ns string) (*corev1.ConfigMap, error) {
+ key := client.ObjectKey{Name: name, Namespace: ns}
+ s := &corev1.ConfigMap{}
+ err := k.Get(ctx, key, s)
+ if err != nil {
+ return nil, err
+ }
+
+ return s, nil
+}
+
+func configureCertificatesForTenantMode(certs certrotation.ComponentCertificates, mode lokiv1.ModeType) {
+ switch mode {
+ case "", lokiv1.Dynamic, lokiv1.Static:
+ return
+ case lokiv1.OpenshiftLogging, lokiv1.OpenshiftNetwork:
+ // Remove serviceCA annotations for existing secrets to
+ // enable upgrading secrets to built-in cert management
+ for name := range certs {
+ for key := range certs[name].Secret.Annotations {
+ if serviceCAnnotationsRe.MatchString(key) {
+ delete(certs[name].Secret.Annotations, key)
+ }
+ }
+ }
+ }
+}
+
+func configureCABundleForTenantMode(cm *corev1.ConfigMap, mode lokiv1.ModeType) {
+ switch mode {
+ case "", lokiv1.Dynamic, lokiv1.Static:
+ return
+ case lokiv1.OpenshiftLogging, lokiv1.OpenshiftNetwork:
+ // Remove serviceCA annotations for existing ConfigMap to
+		// enable upgrading the CABundle to built-in cert management
+ for key := range cm.Annotations {
+ if serviceCAnnotationsRe.MatchString(key) {
+ delete(cm.Annotations, key)
+ }
+ }
+ }
+}
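The pruning regexp matches both the legacy `service.alpha.openshift.io/` and the current `service.beta.openshift.io/` annotation prefixes while leaving operator-owned annotations untouched. A standalone demonstration (sketch, standard library only):

```go
package main

import (
	"fmt"
	"regexp"
)

var serviceCAnnotationsRe = regexp.MustCompile(`^service\.(?:alpha|beta)\.openshift\.io\/.+`)

func main() {
	for _, key := range []string{
		"service.beta.openshift.io/inject-cabundle", // pruned
		"service.alpha.openshift.io/expiry",         // pruned
		"loki.grafana.com/certificate-not-after",    // kept
	} {
		fmt.Println(key, "->", serviceCAnnotationsRe.MatchString(key))
	}
}
```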
diff --git a/operator/internal/handlers/internal/certificates/options_test.go b/operator/internal/handlers/internal/certificates/options_test.go
new file mode 100644
index 0000000000000..16603b32712c2
--- /dev/null
+++ b/operator/internal/handlers/internal/certificates/options_test.go
@@ -0,0 +1,217 @@
+package certificates
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "testing"
+
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ "github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
+ "github.com/stretchr/testify/require"
+
+ corev1 "k8s.io/api/core/v1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/types"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+func TestGetOptions_ReturnEmpty_WhenCertificatesNotExisting(t *testing.T) {
+ k := &k8sfakes.FakeClient{}
+
+ req := ctrl.Request{
+ NamespacedName: types.NamespacedName{
+ Name: "lokistack-dev",
+ Namespace: "ns",
+ },
+ }
+
+ k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error {
+ return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found")
+ }
+
+ opts, err := GetOptions(context.TODO(), k, req, lokiv1.Static)
+ require.NoError(t, err)
+ require.NotEmpty(t, opts)
+
+ // Basic options always available
+ require.Equal(t, req.Name, opts.StackName)
+ require.Equal(t, req.Namespace, opts.StackNamespace)
+
+ // Require all resource empty as per not existing
+ require.Nil(t, opts.Signer.Secret)
+ require.Nil(t, opts.CABundle)
+ require.Len(t, opts.Certificates, 0)
+}
+
+func TestGetOptions_ReturnSecrets_WhenCertificatesExisting(t *testing.T) {
+ k := &k8sfakes.FakeClient{}
+
+ req := ctrl.Request{
+ NamespacedName: types.NamespacedName{
+ Name: "lokistack-dev",
+ Namespace: "ns",
+ },
+ }
+
+ k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error {
+ obj, ok := getManagedPKIResource(req.Name, req.Namespace, name.Name)
+ if !ok {
+ return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found")
+ }
+
+ k.SetClientObject(object, obj)
+ return nil
+ }
+
+ opts, err := GetOptions(context.TODO(), k, req, lokiv1.Static)
+ require.NoError(t, err)
+ require.NotEmpty(t, opts)
+
+ // Basic options always available
+ require.Equal(t, req.Name, opts.StackName)
+ require.Equal(t, req.Namespace, opts.StackNamespace)
+
+ // Check signingCA and CABundle populated into options
+ require.NotNil(t, opts.Signer.Secret)
+ require.NotNil(t, opts.CABundle)
+
+ // Check client certificates populated into options
+ for name, cert := range opts.Certificates {
+ require.NotNil(t, cert.Secret, "missing name %s", name)
+ }
+}
+
+func TestGetOptions_PruneServiceCAAnnotations_ForTenantMode(t *testing.T) {
+ k := &k8sfakes.FakeClient{}
+
+ req := ctrl.Request{
+ NamespacedName: types.NamespacedName{
+ Name: "lokistack-dev",
+ Namespace: "ns",
+ },
+ }
+
+ pruned := []string{
+ "service.alpha.openshift.io/expiry",
+ "service.beta.openshift.io/expiry",
+ "service.beta.openshift.io/originating-service-name",
+ "service.beta.openshift.io/originating-service-uid",
+ "service.beta.openshift.io/inject-cabundle",
+ }
+
+ k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error {
+ obj, ok := getManagedPKIResource(req.Name, req.Namespace, name.Name)
+ if !ok {
+ return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found")
+ }
+
+ annotations := map[string]string{}
+ for _, value := range pruned {
+ annotations[value] = "test"
+ }
+ obj.SetAnnotations(annotations)
+
+ k.SetClientObject(object, obj)
+ return nil
+ }
+
+ tt := []struct {
+ mode lokiv1.ModeType
+ wantPrune bool
+ }{
+ {
+ mode: lokiv1.Dynamic,
+ },
+ {
+ mode: lokiv1.Static,
+ },
+ {
+ mode: lokiv1.OpenshiftLogging,
+ wantPrune: true,
+ },
+ {
+ mode: lokiv1.OpenshiftNetwork,
+ wantPrune: true,
+ },
+ }
+
+ for _, tc := range tt {
+ tc := tc
+ t.Run(string(tc.mode), func(t *testing.T) {
+ opts, err := GetOptions(context.TODO(), k, req, tc.mode)
+ require.NoError(t, err)
+ require.NotEmpty(t, opts)
+
+ if !tc.wantPrune {
+ return
+ }
+
+ // Require CABundle ConfigMap annotations to be pruned
+ for _, annotation := range pruned {
+ require.NotContains(t, opts.CABundle.Annotations, annotation)
+ }
+
+ // Require Certificate Secrets annotations to be pruned
+ for _, cert := range opts.Certificates {
+ for _, annotation := range pruned {
+ require.NotContains(t, cert.Secret.Annotations, annotation)
+ }
+ }
+ })
+ }
+}
+
+func getManagedPKIResource(stackName, stackNamespace, name string) (client.Object, bool) {
+ certNames := []string{
+ "signing-ca",
+ "ca-bundle",
+ // client certificates
+ "gateway-client-http",
+ // serving certificates
+ "compactor-http",
+ "compactor-grpc",
+ "distributor-http",
+ "distributor-grpc",
+ "index-gateway-http",
+ "index-gateway-grpc",
+ "ingester-http",
+ "ingester-grpc",
+ "querier-http",
+ "querier-grpc",
+ "query-frontend-http",
+ "query-frontend-grpc",
+ "ruler-http",
+ "ruler-grpc",
+ }
+
+ objsByName := map[string]client.Object{}
+ for _, name := range certNames {
+ secretName := fmt.Sprintf("%s-%s", stackName, name)
+
+ var obj client.Object
+ if strings.HasSuffix(name, "ca-bundle") {
+ obj = &corev1.ConfigMap{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: secretName,
+ Namespace: stackNamespace,
+ },
+ }
+ } else {
+ obj = &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: secretName,
+ Namespace: stackNamespace,
+ },
+ }
+ }
+
+ objsByName[secretName] = obj
+ }
+
+ obj, ok := objsByName[name]
+ return obj, ok
+}
diff --git a/operator/internal/handlers/internal/serviceaccounts/serviceaccounts.go b/operator/internal/handlers/internal/serviceaccounts/serviceaccounts.go
new file mode 100644
index 0000000000000..1a803c03469a8
--- /dev/null
+++ b/operator/internal/handlers/internal/serviceaccounts/serviceaccounts.go
@@ -0,0 +1,22 @@
+package serviceaccounts
+
+import (
+ "context"
+
+ "github.com/ViaQ/logerr/v2/kverrors"
+ "github.com/grafana/loki/operator/internal/external/k8s"
+ corev1 "k8s.io/api/core/v1"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// GetUID returns the server-side generated UID of an existing ServiceAccount, to be
+// associated with a Secret of type corev1.SecretTypeServiceAccountToken. It returns an error if the
+// associated ServiceAccount does not exist or if the get operation fails for any reason.
+func GetUID(ctx context.Context, k k8s.Client, key client.ObjectKey) (string, error) {
+ sa := corev1.ServiceAccount{}
+ if err := k.Get(ctx, key, &sa); err != nil {
+ return "", kverrors.Wrap(err, "failed to fetch associated serviceaccount uid", "key", key)
+ }
+
+ return string(sa.UID), nil
+}
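For context, a hedged sketch of how `GetUID` could be used to stamp a ServiceAccount's UID onto a dependent Secret — the helper name `annotateWithServiceAccountUID` is illustrative; the real call site is `dependentAnnotations` in `lokistack_create_or_update.go` further down in this diff:

```go
package handlers

import (
	"context"

	"github.com/grafana/loki/operator/internal/external/k8s"
	"github.com/grafana/loki/operator/internal/handlers/internal/serviceaccounts"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// annotateWithServiceAccountUID is illustrative only: it looks up the
// ServiceAccount UID and stamps it onto a dependent Secret's annotations so
// the two stay tied together across recreations of the ServiceAccount.
func annotateWithServiceAccountUID(ctx context.Context, k k8s.Client, s *corev1.Secret, saName string) error {
	uid, err := serviceaccounts.GetUID(ctx, k, client.ObjectKey{Name: saName, Namespace: s.Namespace})
	if err != nil {
		return err
	}

	if s.Annotations == nil {
		s.Annotations = map[string]string{}
	}
	s.Annotations[corev1.ServiceAccountNameKey] = saName
	s.Annotations[corev1.ServiceAccountUIDKey] = uid

	return nil
}
```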
diff --git a/operator/internal/handlers/lokistack_check_cert_expiry.go b/operator/internal/handlers/lokistack_check_cert_expiry.go
new file mode 100644
index 0000000000000..1bd2f9703a036
--- /dev/null
+++ b/operator/internal/handlers/lokistack_check_cert_expiry.go
@@ -0,0 +1,58 @@
+package handlers
+
+import (
+ "context"
+
+ "github.com/ViaQ/logerr/v2/kverrors"
+ "github.com/go-logr/logr"
+ configv1 "github.com/grafana/loki/operator/apis/config/v1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ "github.com/grafana/loki/operator/internal/certrotation"
+ "github.com/grafana/loki/operator/internal/external/k8s"
+ "github.com/grafana/loki/operator/internal/handlers/internal/certificates"
+
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ ctrl "sigs.k8s.io/controller-runtime"
+)
+
+// CheckCertExpiry handles the case where the LokiStack-managed signing CA, client and/or serving
+// certificates have expired. It returns an error describing the reason of expiry if any of them
+// have expired, or nil otherwise.
+func CheckCertExpiry(ctx context.Context, log logr.Logger, req ctrl.Request, k k8s.Client, fg configv1.FeatureGates) error {
+ ll := log.WithValues("lokistack", req.String(), "event", "checkCertExpiry")
+
+ var stack lokiv1.LokiStack
+ if err := k.Get(ctx, req.NamespacedName, &stack); err != nil {
+ if apierrors.IsNotFound(err) {
+ // maybe the user deleted it before we could react? Either way this isn't an issue
+ ll.Error(err, "could not find the requested loki stack", "name", req.String())
+ return nil
+ }
+ return kverrors.Wrap(err, "failed to lookup lokistack", "name", req.String())
+ }
+
+ var mode lokiv1.ModeType
+ if stack.Spec.Tenants != nil {
+ mode = stack.Spec.Tenants.Mode
+ }
+
+ opts, err := certificates.GetOptions(ctx, k, req, mode)
+ if err != nil {
+ return kverrors.Wrap(err, "failed to lookup certificates secrets", "name", req.String())
+ }
+
+ if optErr := certrotation.ApplyDefaultSettings(&opts, fg.BuiltInCertManagement); optErr != nil {
+ ll.Error(optErr, "failed to conform options to build settings")
+ return optErr
+ }
+
+ if err := certrotation.SigningCAExpired(opts); err != nil {
+ return err
+ }
+
+ if err := certrotation.CertificatesExpired(opts); err != nil {
+ return err
+ }
+
+ return nil
+}
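A hedged sketch of how a reconciler could consume `CheckCertExpiry`, distinguishing an expiry (rotate now) from a transient failure (retry). The wiring shown here — including setting `manifests.AnnotationCertRotationRequiredAt` — is assumed, not part of this diff:

```go
package controllers

import (
	"context"
	"errors"
	"time"

	"github.com/go-logr/logr"
	configv1 "github.com/grafana/loki/operator/apis/config/v1"
	"github.com/grafana/loki/operator/internal/certrotation"
	"github.com/grafana/loki/operator/internal/external/k8s"
	"github.com/grafana/loki/operator/internal/handlers"

	ctrl "sigs.k8s.io/controller-runtime"
)

// checkExpiry is illustrative only: a CertExpiredError means "rotate now",
// anything else is a plain reconciliation failure that should be retried.
func checkExpiry(ctx context.Context, log logr.Logger, req ctrl.Request, k k8s.Client, fg configv1.FeatureGates) (ctrl.Result, error) {
	err := handlers.CheckCertExpiry(ctx, log, req, k, fg)
	if err == nil {
		return ctrl.Result{RequeueAfter: time.Minute}, nil
	}

	var expired *certrotation.CertExpiredError
	if errors.As(err, &expired) {
		log.Info("certificates expired, triggering rotation", "reasons", expired.Reasons)
		// A real reconciler would now annotate the LokiStack with
		// manifests.AnnotationCertRotationRequiredAt to force a rebuild.
		return ctrl.Result{Requeue: true}, nil
	}

	return ctrl.Result{}, err
}
```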
diff --git a/operator/internal/handlers/lokistack_check_cert_expiry_test.go b/operator/internal/handlers/lokistack_check_cert_expiry_test.go
new file mode 100644
index 0000000000000..dd94394af8c09
--- /dev/null
+++ b/operator/internal/handlers/lokistack_check_cert_expiry_test.go
@@ -0,0 +1,187 @@
+package handlers_test
+
+import (
+ "context"
+ "errors"
+ "testing"
+ "time"
+
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ "github.com/grafana/loki/operator/internal/certrotation"
+ "github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
+ "github.com/grafana/loki/operator/internal/handlers"
+ "github.com/stretchr/testify/require"
+ corev1 "k8s.io/api/core/v1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/utils/pointer"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+func TestCheckCertExpiry_WhenGetReturnsNotFound_DoesNotError(t *testing.T) {
+ sw := &k8sfakes.FakeStatusWriter{}
+ k := &k8sfakes.FakeClient{}
+ r := ctrl.Request{
+ NamespacedName: types.NamespacedName{
+ Name: "my-stack",
+ Namespace: "some-ns",
+ },
+ }
+
+ k.GetStub = func(ctx context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error {
+ return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found")
+ }
+
+ k.StatusStub = func() client.StatusWriter { return sw }
+
+ err := handlers.CheckCertExpiry(context.TODO(), logger, r, k, featureGates)
+ require.NoError(t, err)
+
+ // make sure create was NOT called because the Get failed
+ require.Zero(t, k.CreateCallCount())
+}
+
+func TestCheckCertExpiry_WhenGetReturnsAnErrorOtherThanNotFound_ReturnsTheError(t *testing.T) {
+ sw := &k8sfakes.FakeStatusWriter{}
+ k := &k8sfakes.FakeClient{}
+ r := ctrl.Request{
+ NamespacedName: types.NamespacedName{
+ Name: "my-stack",
+ Namespace: "some-ns",
+ },
+ }
+
+ badRequestErr := apierrors.NewBadRequest("you do not belong here")
+ k.GetStub = func(ctx context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error {
+ return badRequestErr
+ }
+
+ k.StatusStub = func() client.StatusWriter { return sw }
+
+ err := handlers.CheckCertExpiry(context.TODO(), logger, r, k, featureGates)
+
+ require.Equal(t, badRequestErr, errors.Unwrap(err))
+
+ // make sure create was NOT called because the Get failed
+ require.Zero(t, k.CreateCallCount())
+}
+
+func TestCheckCertExpiry_WhenGetOptionsReturnsSignerNotFound_DoesNotError(t *testing.T) {
+ sw := &k8sfakes.FakeStatusWriter{}
+ k := &k8sfakes.FakeClient{}
+ r := ctrl.Request{
+ NamespacedName: types.NamespacedName{
+ Name: "my-stack",
+ Namespace: "some-ns",
+ },
+ }
+
+ stack := lokiv1.LokiStack{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "LokiStack",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "my-stack",
+ Namespace: "some-ns",
+ UID: "b23f9a38-9672-499f-8c29-15ede74d3ece",
+ },
+ }
+
+ k.GetStub = func(ctx context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error {
+ if name.Name == r.Name && name.Namespace == r.Namespace {
+ k.SetClientObject(object, &stack)
+ return nil
+ }
+ return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found")
+ }
+
+ k.StatusStub = func() client.StatusWriter { return sw }
+
+ err := handlers.CheckCertExpiry(context.TODO(), logger, r, k, featureGates)
+ require.NoError(t, err)
+
+ // make sure create was NOT called because the Get failed
+ require.Zero(t, k.CreateCallCount())
+}
+
+func TestCheckCertExpiry_WhenGetOptionsReturnsCABundleNotFound_DoesNotError(t *testing.T) {
+ validNotAfter := time.Now().Add(600 * 24 * time.Hour).UTC().Format(time.RFC3339)
+ validNotBefore := time.Now().Add(600 * 24 * time.Hour).UTC().Format(time.RFC3339)
+
+ sw := &k8sfakes.FakeStatusWriter{}
+ k := &k8sfakes.FakeClient{}
+ r := ctrl.Request{
+ NamespacedName: types.NamespacedName{
+ Name: "my-stack",
+ Namespace: "some-ns",
+ },
+ }
+
+ stack := lokiv1.LokiStack{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "LokiStack",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "my-stack",
+ Namespace: "some-ns",
+ UID: "b23f9a38-9672-499f-8c29-15ede74d3ece",
+ },
+ }
+
+ signer := corev1.Secret{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "Secret",
+ APIVersion: "v1",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "my-stack-signing-ca",
+ Namespace: "some-ns",
+ Labels: map[string]string{
+ "app.kubernetes.io/name": "loki",
+ "app.kubernetes.io/provider": "openshift",
+ "loki.grafana.com/name": "my-stack",
+
+ // Add custom label to fake semantic not equal
+ "test": "test",
+ },
+ Annotations: map[string]string{
+ certrotation.CertificateIssuer: "dev_ns@signing-ca@10000",
+ certrotation.CertificateNotAfterAnnotation: validNotAfter,
+ certrotation.CertificateNotBeforeAnnotation: validNotBefore,
+ },
+ OwnerReferences: []metav1.OwnerReference{
+ {
+ APIVersion: "loki.grafana.com/v1",
+ Kind: "LokiStack",
+ Name: "my-stack",
+ UID: "b23f9a38-9672-499f-8c29-15ede74d3ece",
+ Controller: pointer.Bool(true),
+ BlockOwnerDeletion: pointer.Bool(true),
+ },
+ },
+ },
+ }
+
+ k.GetStub = func(ctx context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error {
+ if name.Name == r.Name && name.Namespace == r.Namespace {
+ k.SetClientObject(object, &stack)
+ return nil
+ }
+ if name.Name == signer.Name && name.Namespace == signer.Namespace {
+ k.SetClientObject(object, &signer)
+ return nil
+ }
+ return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found")
+ }
+
+ k.StatusStub = func() client.StatusWriter { return sw }
+
+ err := handlers.CheckCertExpiry(context.TODO(), logger, r, k, featureGates)
+ require.NoError(t, err)
+
+ // make sure create was NOT called because the Get failed
+ require.Zero(t, k.CreateCallCount())
+}
diff --git a/operator/internal/handlers/lokistack_create_or_update.go b/operator/internal/handlers/lokistack_create_or_update.go
index 52df276ab01e3..af34ec1611a40 100644
--- a/operator/internal/handlers/lokistack_create_or_update.go
+++ b/operator/internal/handlers/lokistack_create_or_update.go
@@ -13,6 +13,7 @@ import (
"github.com/grafana/loki/operator/internal/handlers/internal/gateway"
"github.com/grafana/loki/operator/internal/handlers/internal/openshift"
"github.com/grafana/loki/operator/internal/handlers/internal/rules"
+ "github.com/grafana/loki/operator/internal/handlers/internal/serviceaccounts"
"github.com/grafana/loki/operator/internal/handlers/internal/storage"
"github.com/grafana/loki/operator/internal/handlers/internal/tlsprofile"
"github.com/grafana/loki/operator/internal/manifests"
@@ -29,6 +30,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
+ ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)
// CreateOrUpdateLokiStack handles LokiStack create and update events.
@@ -214,18 +216,24 @@ func CreateOrUpdateLokiStack(
}
+ certRotationRequiredAt := ""
+ if stack.Annotations != nil {
+ certRotationRequiredAt = stack.Annotations[manifests.AnnotationCertRotationRequiredAt]
+ }
+
// Here we will translate the lokiv1.LokiStack options into manifest options
opts := manifests.Options{
- Name: req.Name,
- Namespace: req.Namespace,
- Image: img,
- GatewayImage: gwImg,
- GatewayBaseDomain: baseDomain,
- Stack: stack.Spec,
- Gates: fg,
- ObjectStorage: *objStore,
- AlertingRules: alertingRules,
- RecordingRules: recordingRules,
+ Name: req.Name,
+ Namespace: req.Namespace,
+ Image: img,
+ GatewayImage: gwImg,
+ GatewayBaseDomain: baseDomain,
+ Stack: stack.Spec,
+ Gates: fg,
+ ObjectStorage: *objStore,
+ CertRotationRequiredAt: certRotationRequiredAt,
+ AlertingRules: alertingRules,
+ RecordingRules: recordingRules,
Ruler: manifests.Ruler{
Spec: rulerConfig,
Secret: rulerSecret,
@@ -308,8 +316,13 @@ func CreateOrUpdateLokiStack(
}
}
+ depAnnotations, err := dependentAnnotations(ctx, k, obj)
+ if err != nil {
+ return err
+ }
+
desired := obj.DeepCopyObject().(client.Object)
- mutateFn := manifests.MutateFuncFor(obj, desired)
+ mutateFn := manifests.MutateFuncFor(obj, desired, depAnnotations)
op, err := ctrl.CreateOrUpdate(ctx, k, obj, mutateFn)
if err != nil {
@@ -318,7 +331,13 @@ func CreateOrUpdateLokiStack(
continue
}
- l.Info(fmt.Sprintf("Resource has been %s", op))
+ msg := fmt.Sprintf("Resource has been %s", op)
+ switch op {
+ case ctrlutil.OperationResultNone:
+ l.V(1).Info(msg)
+ default:
+ l.Info(msg)
+ }
}
if errCount > 0 {
@@ -334,6 +353,24 @@ func CreateOrUpdateLokiStack(
return nil
}
+func dependentAnnotations(ctx context.Context, k k8s.Client, obj client.Object) (map[string]string, error) {
+ a := obj.GetAnnotations()
+ saName, ok := a[corev1.ServiceAccountNameKey]
+ if !ok || saName == "" {
+ return nil, nil
+ }
+
+ key := client.ObjectKey{Name: saName, Namespace: obj.GetNamespace()}
+ uid, err := serviceaccounts.GetUID(ctx, k, key)
+ if err != nil {
+ return nil, err
+ }
+
+ return map[string]string{
+ corev1.ServiceAccountUIDKey: uid,
+ }, nil
+}
+
func isNamespaceScoped(obj client.Object) bool {
switch obj.(type) {
case *rbacv1.ClusterRole, *rbacv1.ClusterRoleBinding:
diff --git a/operator/internal/handlers/lokistack_create_or_update_test.go b/operator/internal/handlers/lokistack_create_or_update_test.go
index 6ac288c30b1fe..8ca7e1f1fd3b8 100644
--- a/operator/internal/handlers/lokistack_create_or_update_test.go
+++ b/operator/internal/handlers/lokistack_create_or_update_test.go
@@ -39,8 +39,12 @@ var (
featureGates = configv1.FeatureGates{
ServiceMonitors: false,
ServiceMonitorTLSEndpoints: false,
- OpenShift: configv1.OpenShiftFeatureGates{
- ServingCertsService: false,
+ BuiltInCertManagement: configv1.BuiltInCertManagement{
+ Enabled: true,
+ CACertValidity: "10m",
+ CACertRefresh: "5m",
+ CertValidity: "2m",
+ CertRefresh: "1m",
},
}
diff --git a/operator/internal/handlers/lokistack_rotate_certs.go b/operator/internal/handlers/lokistack_rotate_certs.go
new file mode 100644
index 0000000000000..12a2405800257
--- /dev/null
+++ b/operator/internal/handlers/lokistack_rotate_certs.go
@@ -0,0 +1,110 @@
+package handlers
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/ViaQ/logerr/v2/kverrors"
+ "github.com/go-logr/logr"
+
+ configv1 "github.com/grafana/loki/operator/apis/config/v1"
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ "github.com/grafana/loki/operator/internal/certrotation"
+ "github.com/grafana/loki/operator/internal/external/k8s"
+ "github.com/grafana/loki/operator/internal/handlers/internal/certificates"
+ "github.com/grafana/loki/operator/internal/manifests"
+ "github.com/grafana/loki/operator/internal/status"
+
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/runtime"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+)
+
+// CreateOrRotateCertificates handles creation and rotation of the LokiStack client and serving
+// certificates, including the signing CA and the CA bundle, or else returns an error. It returns a
+// degrade-condition-worthy error only if building the certificate manifests fails for any reason.
+func CreateOrRotateCertificates(ctx context.Context, log logr.Logger, req ctrl.Request, k k8s.Client, s *runtime.Scheme, fg configv1.FeatureGates) error {
+ ll := log.WithValues("lokistack", req.String(), "event", "createOrRotateCerts")
+
+ var stack lokiv1.LokiStack
+ if err := k.Get(ctx, req.NamespacedName, &stack); err != nil {
+ if apierrors.IsNotFound(err) {
+ // maybe the user deleted it before we could react? Either way this isn't an issue
+ ll.Error(err, "could not find the requested LokiStack", "name", req.String())
+ return nil
+ }
+ return kverrors.Wrap(err, "failed to lookup LokiStack", "name", req.String())
+ }
+
+ var mode lokiv1.ModeType
+ if stack.Spec.Tenants != nil {
+ mode = stack.Spec.Tenants.Mode
+ }
+
+ opts, err := certificates.GetOptions(ctx, k, req, mode)
+ if err != nil {
+ return kverrors.Wrap(err, "failed to lookup certificates secrets", "name", req.String())
+ }
+
+ ll.Info("begin building certificate manifests")
+
+ if optErr := certrotation.ApplyDefaultSettings(&opts, fg.BuiltInCertManagement); optErr != nil {
+ ll.Error(optErr, "failed to conform options to build settings")
+ return optErr
+ }
+
+ objects, err := certrotation.BuildAll(opts)
+ if err != nil {
+ ll.Error(err, "failed to build certificate manifests")
+ return &status.DegradedError{
+ Message: "Failed to rotate TLS certificates",
+ Reason: lokiv1.ReasonFailedCertificateRotation,
+ Requeue: true,
+ }
+ }
+
+ ll.Info("certificate manifests built", "count", len(objects))
+
+ var errCount int32
+
+ for _, obj := range objects {
+ l := ll.WithValues(
+ "object_name", obj.GetName(),
+ "object_kind", obj.GetObjectKind(),
+ )
+
+ obj.SetNamespace(req.Namespace)
+
+ if err := ctrl.SetControllerReference(&stack, obj, s); err != nil {
+ l.Error(err, "failed to set controller owner reference to resource")
+ errCount++
+ continue
+ }
+
+ desired := obj.DeepCopyObject().(client.Object)
+ mutateFn := manifests.MutateFuncFor(obj, desired, nil)
+
+ op, err := ctrl.CreateOrUpdate(ctx, k, obj, mutateFn)
+ if err != nil {
+ l.Error(err, "failed to configure resource")
+ errCount++
+ continue
+ }
+
+ msg := fmt.Sprintf("Resource has been %s", op)
+ switch op {
+ case ctrlutil.OperationResultNone:
+ l.V(1).Info(msg)
+ default:
+ l.Info(msg)
+ }
+ }
+
+ if errCount > 0 {
+ return kverrors.New("failed to create or rotate LokiStack certificates", "name", req.String())
+ }
+
+ return nil
+}
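The `status.DegradedError` returned above carries `Message`, `Reason`, and `Requeue`; a sketch of how a caller might translate it into a reconcile result (assumed wiring, illustrative names):

```go
package controllers

import (
	"errors"

	"github.com/grafana/loki/operator/internal/status"

	ctrl "sigs.k8s.io/controller-runtime"
)

// handleRotationError is illustrative only: it maps the error contract of
// CreateOrRotateCertificates onto a reconcile result. A DegradedError is
// requeued according to its Requeue flag (a real reconciler would also set a
// Degraded status condition from Message and Reason); any other error is
// returned so controller-runtime retries with backoff.
func handleRotationError(err error) (ctrl.Result, error) {
	if err == nil {
		return ctrl.Result{}, nil
	}

	var degraded *status.DegradedError
	if errors.As(err, °raded) {
		return ctrl.Result{Requeue: degraded.Requeue}, nil
	}

	return ctrl.Result{}, err
}
```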
diff --git a/operator/internal/handlers/lokistack_rotate_certs_test.go b/operator/internal/handlers/lokistack_rotate_certs_test.go
new file mode 100644
index 0000000000000..9c3507cf8b663
--- /dev/null
+++ b/operator/internal/handlers/lokistack_rotate_certs_test.go
@@ -0,0 +1,566 @@
+package handlers_test
+
+import (
+ "context"
+ "errors"
+ "testing"
+
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ "github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
+ "github.com/grafana/loki/operator/internal/handlers"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ corev1 "k8s.io/api/core/v1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/utils/pointer"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+func TestCreateOrRotateCertificates_WhenGetReturnsNotFound_DoesNotError(t *testing.T) {
+ sw := &k8sfakes.FakeStatusWriter{}
+ k := &k8sfakes.FakeClient{}
+ r := ctrl.Request{
+ NamespacedName: types.NamespacedName{
+ Name: "my-stack",
+ Namespace: "some-ns",
+ },
+ }
+
+ k.GetStub = func(ctx context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error {
+ return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found")
+ }
+
+ k.StatusStub = func() client.StatusWriter { return sw }
+
+ err := handlers.CreateOrRotateCertificates(context.TODO(), logger, r, k, scheme, featureGates)
+ require.NoError(t, err)
+
+ // make sure create was NOT called because the Get failed
+ require.Zero(t, k.CreateCallCount())
+}
+
+func TestCreateOrRotateCertificates_WhenGetReturnsAnErrorOtherThanNotFound_ReturnsTheError(t *testing.T) {
+ sw := &k8sfakes.FakeStatusWriter{}
+ k := &k8sfakes.FakeClient{}
+ r := ctrl.Request{
+ NamespacedName: types.NamespacedName{
+ Name: "my-stack",
+ Namespace: "some-ns",
+ },
+ }
+
+ badRequestErr := apierrors.NewBadRequest("you do not belong here")
+ k.GetStub = func(ctx context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error {
+ return badRequestErr
+ }
+
+ k.StatusStub = func() client.StatusWriter { return sw }
+
+ err := handlers.CreateOrRotateCertificates(context.TODO(), logger, r, k, scheme, featureGates)
+
+ require.Equal(t, badRequestErr, errors.Unwrap(err))
+
+ // make sure create was NOT called because the Get failed
+ require.Zero(t, k.CreateCallCount())
+}
+
+func TestCreateOrRotateCertificates_SetsNamespaceOnAllObjects(t *testing.T) {
+ sw := &k8sfakes.FakeStatusWriter{}
+ k := &k8sfakes.FakeClient{}
+ r := ctrl.Request{
+ NamespacedName: types.NamespacedName{
+ Name: "my-stack",
+ Namespace: "some-ns",
+ },
+ }
+
+ stack := lokiv1.LokiStack{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "LokiStack",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "my-stack",
+ Namespace: "some-ns",
+ UID: "b23f9a38-9672-499f-8c29-15ede74d3ece",
+ },
+ Spec: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXExtraSmall,
+ Storage: lokiv1.ObjectStorageSpec{
+ Schemas: []lokiv1.ObjectStorageSchema{
+ {
+ Version: lokiv1.ObjectStorageSchemaV11,
+ EffectiveDate: "2020-10-11",
+ },
+ },
+ Secret: lokiv1.ObjectStorageSecretSpec{
+ Name: defaultSecret.Name,
+ Type: lokiv1.ObjectStorageSecretS3,
+ },
+ },
+ Tenants: &lokiv1.TenantsSpec{
+ Mode: "dynamic",
+ Authentication: []lokiv1.AuthenticationSpec{
+ {
+ TenantName: "test",
+ TenantID: "1234",
+ OIDC: &lokiv1.OIDCSpec{
+ Secret: &lokiv1.TenantSecretSpec{
+ Name: defaultGatewaySecret.Name,
+ },
+ },
+ },
+ },
+ Authorization: &lokiv1.AuthorizationSpec{
+ OPA: &lokiv1.OPASpec{
+ URL: "some-url",
+ },
+ },
+ },
+ },
+ }
+
+ k.GetStub = func(_ context.Context, name types.NamespacedName, out client.Object, _ ...client.GetOption) error {
+ if r.Name == name.Name && r.Namespace == name.Namespace {
+ k.SetClientObject(out, &stack)
+ return nil
+ }
+ if defaultSecret.Name == name.Name {
+ k.SetClientObject(out, &defaultSecret)
+ return nil
+ }
+ if defaultGatewaySecret.Name == name.Name {
+ k.SetClientObject(out, &defaultGatewaySecret)
+ return nil
+ }
+ return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found")
+ }
+
+ k.CreateStub = func(_ context.Context, o client.Object, _ ...client.CreateOption) error {
+ assert.Equal(t, r.Namespace, o.GetNamespace())
+ return nil
+ }
+
+ k.StatusStub = func() client.StatusWriter { return sw }
+
+ err := handlers.CreateOrRotateCertificates(context.TODO(), logger, r, k, scheme, featureGates)
+ require.NoError(t, err)
+
+ // make sure create was called
+ require.NotZero(t, k.CreateCallCount())
+}
+
+func TestCreateOrRotateCertificates_SetsOwnerRefOnAllObjects(t *testing.T) {
+ sw := &k8sfakes.FakeStatusWriter{}
+ k := &k8sfakes.FakeClient{}
+ r := ctrl.Request{
+ NamespacedName: types.NamespacedName{
+ Name: "my-stack",
+ Namespace: "some-ns",
+ },
+ }
+
+ stack := lokiv1.LokiStack{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "LokiStack",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "someStack",
+ Namespace: "some-ns",
+ UID: "b23f9a38-9672-499f-8c29-15ede74d3ece",
+ },
+ Spec: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXExtraSmall,
+ Storage: lokiv1.ObjectStorageSpec{
+ Schemas: []lokiv1.ObjectStorageSchema{
+ {
+ Version: lokiv1.ObjectStorageSchemaV11,
+ EffectiveDate: "2020-10-11",
+ },
+ },
+ Secret: lokiv1.ObjectStorageSecretSpec{
+ Name: defaultSecret.Name,
+ Type: lokiv1.ObjectStorageSecretS3,
+ },
+ },
+ Tenants: &lokiv1.TenantsSpec{
+ Mode: "dynamic",
+ Authentication: []lokiv1.AuthenticationSpec{
+ {
+ TenantName: "test",
+ TenantID: "1234",
+ OIDC: &lokiv1.OIDCSpec{
+ Secret: &lokiv1.TenantSecretSpec{
+ Name: defaultGatewaySecret.Name,
+ },
+ },
+ },
+ },
+ Authorization: &lokiv1.AuthorizationSpec{
+ OPA: &lokiv1.OPASpec{
+ URL: "some-url",
+ },
+ },
+ },
+ },
+ }
+
+ // Create looks up the CR first, so we need to return our fake stack
+ k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error {
+ if r.Name == name.Name && r.Namespace == name.Namespace {
+ k.SetClientObject(object, &stack)
+ return nil
+ }
+ if defaultSecret.Name == name.Name {
+ k.SetClientObject(object, &defaultSecret)
+ return nil
+ }
+ if defaultGatewaySecret.Name == name.Name {
+ k.SetClientObject(object, &defaultGatewaySecret)
+ return nil
+ }
+ return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found")
+ }
+
+ expected := metav1.OwnerReference{
+ APIVersion: lokiv1.GroupVersion.String(),
+ Kind: stack.Kind,
+ Name: stack.Name,
+ UID: stack.UID,
+ Controller: pointer.BoolPtr(true),
+ BlockOwnerDeletion: pointer.BoolPtr(true),
+ }
+
+ k.CreateStub = func(_ context.Context, o client.Object, _ ...client.CreateOption) error {
+ // OwnerRefs are appended so we have to find ours in the list
+ var ref metav1.OwnerReference
+ var found bool
+ for _, or := range o.GetOwnerReferences() {
+ if or.UID == stack.UID {
+ found = true
+ ref = or
+ break
+ }
+ }
+
+ require.True(t, found, "expected to find a matching ownerRef, but did not")
+ require.EqualValues(t, expected, ref)
+ return nil
+ }
+
+ k.StatusStub = func() client.StatusWriter { return sw }
+
+ err := handlers.CreateOrRotateCertificates(context.TODO(), logger, r, k, scheme, featureGates)
+ require.NoError(t, err)
+
+ // make sure create was called
+ require.NotZero(t, k.CreateCallCount())
+}
+
+func TestCreateOrRotateCertificates_WhenSetControllerRefInvalid_ContinueWithOtherObjects(t *testing.T) {
+ sw := &k8sfakes.FakeStatusWriter{}
+ k := &k8sfakes.FakeClient{}
+ r := ctrl.Request{
+ NamespacedName: types.NamespacedName{
+ Name: "my-stack",
+ Namespace: "some-ns",
+ },
+ }
+
+ stack := lokiv1.LokiStack{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "LokiStack",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "someStack",
+			// Set an invalid namespace here, because
+			// cross-namespace controller references
+			// are not allowed
+ Namespace: "invalid-ns",
+ UID: "b23f9a38-9672-499f-8c29-15ede74d3ece",
+ },
+ Spec: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXExtraSmall,
+ Storage: lokiv1.ObjectStorageSpec{
+ Schemas: []lokiv1.ObjectStorageSchema{
+ {
+ Version: lokiv1.ObjectStorageSchemaV11,
+ EffectiveDate: "2020-10-11",
+ },
+ },
+ Secret: lokiv1.ObjectStorageSecretSpec{
+ Name: defaultSecret.Name,
+ Type: lokiv1.ObjectStorageSecretS3,
+ },
+ },
+ },
+ }
+
+ // Create looks up the CR first, so we need to return our fake stack
+ k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error {
+ if r.Name == name.Name && r.Namespace == name.Namespace {
+ k.SetClientObject(object, &stack)
+ }
+ if defaultSecret.Name == name.Name {
+ k.SetClientObject(object, &defaultSecret)
+ }
+ return nil
+ }
+
+ k.StatusStub = func() client.StatusWriter { return sw }
+
+ err := handlers.CreateOrRotateCertificates(context.TODO(), logger, r, k, scheme, featureGates)
+
+ // make sure error is returned to re-trigger reconciliation
+ require.Error(t, err)
+}
+
+func TestCreateOrRotateCertificates_WhenGetReturnsNoError_UpdateObjects(t *testing.T) {
+ sw := &k8sfakes.FakeStatusWriter{}
+ k := &k8sfakes.FakeClient{}
+ r := ctrl.Request{
+ NamespacedName: types.NamespacedName{
+ Name: "my-stack",
+ Namespace: "some-ns",
+ },
+ }
+
+ stack := lokiv1.LokiStack{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "LokiStack",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "my-stack",
+ Namespace: "some-ns",
+ UID: "b23f9a38-9672-499f-8c29-15ede74d3ece",
+ },
+ Spec: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXExtraSmall,
+ Storage: lokiv1.ObjectStorageSpec{
+ Schemas: []lokiv1.ObjectStorageSchema{
+ {
+ Version: lokiv1.ObjectStorageSchemaV11,
+ EffectiveDate: "2020-10-11",
+ },
+ },
+ Secret: lokiv1.ObjectStorageSecretSpec{
+ Name: defaultSecret.Name,
+ Type: lokiv1.ObjectStorageSecretS3,
+ },
+ },
+ },
+ }
+
+ secret := corev1.Secret{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "Secret",
+ APIVersion: "v1",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "my-stack-signing-ca",
+ Namespace: "some-ns",
+ Labels: map[string]string{
+ "app.kubernetes.io/name": "loki",
+ "app.kubernetes.io/provider": "openshift",
+ "loki.grafana.com/name": "my-stack",
+
+ // Add custom label to fake semantic not equal
+ "test": "test",
+ },
+ OwnerReferences: []metav1.OwnerReference{
+ {
+ APIVersion: "loki.grafana.com/v1",
+ Kind: "LokiStack",
+ Name: "my-stack",
+ UID: "b23f9a38-9672-499f-8c29-15ede74d3ece",
+ Controller: pointer.BoolPtr(true),
+ BlockOwnerDeletion: pointer.BoolPtr(true),
+ },
+ },
+ },
+ }
+
+ // Create looks up the CR first, so we need to return our fake stack
+ k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error {
+ if r.Name == name.Name && r.Namespace == name.Namespace {
+ k.SetClientObject(object, &stack)
+ }
+ if defaultSecret.Name == name.Name {
+ k.SetClientObject(object, &defaultSecret)
+ }
+ if secret.Name == name.Name && secret.Namespace == name.Namespace {
+ k.SetClientObject(object, &secret)
+ }
+ return nil
+ }
+
+ k.StatusStub = func() client.StatusWriter { return sw }
+
+ err := handlers.CreateOrRotateCertificates(context.TODO(), logger, r, k, scheme, featureGates)
+ require.NoError(t, err)
+
+ // make sure create not called
+ require.Zero(t, k.CreateCallCount())
+
+ // make sure update was called
+ require.NotZero(t, k.UpdateCallCount())
+}
+
+func TestCreateOrRotateCertificates_WhenCreateReturnsError_ContinueWithOtherObjects(t *testing.T) {
+ sw := &k8sfakes.FakeStatusWriter{}
+ k := &k8sfakes.FakeClient{}
+ r := ctrl.Request{
+ NamespacedName: types.NamespacedName{
+ Name: "my-stack",
+ Namespace: "some-ns",
+ },
+ }
+
+ stack := lokiv1.LokiStack{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "LokiStack",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "someStack",
+ Namespace: "some-ns",
+ UID: "b23f9a38-9672-499f-8c29-15ede74d3ece",
+ },
+ Spec: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXExtraSmall,
+ Storage: lokiv1.ObjectStorageSpec{
+ Schemas: []lokiv1.ObjectStorageSchema{
+ {
+ Version: lokiv1.ObjectStorageSchemaV11,
+ EffectiveDate: "2020-10-11",
+ },
+ },
+ Secret: lokiv1.ObjectStorageSecretSpec{
+ Name: defaultSecret.Name,
+ Type: lokiv1.ObjectStorageSecretS3,
+ },
+ },
+ },
+ }
+
+ // GetStub looks up the CR first, so we need to return our fake stack
+ // return NotFound for everything else to trigger create.
+ k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error {
+ if r.Name == name.Name && r.Namespace == name.Namespace {
+ k.SetClientObject(object, &stack)
+ return nil
+ }
+ if defaultSecret.Name == name.Name {
+ k.SetClientObject(object, &defaultSecret)
+ return nil
+ }
+ return apierrors.NewNotFound(schema.GroupResource{}, "something is not found")
+ }
+
+	// CreateStub returns an error for each resource to trigger reconciliation anew.
+ k.CreateStub = func(_ context.Context, o client.Object, _ ...client.CreateOption) error {
+ return apierrors.NewTooManyRequestsError("too many create requests")
+ }
+
+ k.StatusStub = func() client.StatusWriter { return sw }
+
+ err := handlers.CreateOrRotateCertificates(context.TODO(), logger, r, k, scheme, featureGates)
+
+ // make sure error is returned to re-trigger reconciliation
+ require.Error(t, err)
+}
+
+func TestCreateOrRotateCertificates_WhenUpdateReturnsError_ContinueWithOtherObjects(t *testing.T) {
+ sw := &k8sfakes.FakeStatusWriter{}
+ k := &k8sfakes.FakeClient{}
+ r := ctrl.Request{
+ NamespacedName: types.NamespacedName{
+ Name: "my-stack",
+ Namespace: "some-ns",
+ },
+ }
+
+ stack := lokiv1.LokiStack{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "LokiStack",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "my-stack",
+ Namespace: "some-ns",
+ UID: "b23f9a38-9672-499f-8c29-15ede74d3ece",
+ },
+ Spec: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXExtraSmall,
+ Storage: lokiv1.ObjectStorageSpec{
+ Schemas: []lokiv1.ObjectStorageSchema{
+ {
+ Version: lokiv1.ObjectStorageSchemaV11,
+ EffectiveDate: "2020-10-11",
+ },
+ },
+ Secret: lokiv1.ObjectStorageSecretSpec{
+ Name: defaultSecret.Name,
+ Type: lokiv1.ObjectStorageSecretS3,
+ },
+ },
+ },
+ }
+
+ secret := corev1.Secret{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "Secret",
+ APIVersion: "v1",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "my-stack-signing-ca",
+ Namespace: "some-ns",
+ Labels: map[string]string{
+ "app.kubernetes.io/name": "loki",
+ "app.kubernetes.io/provider": "openshift",
+ "loki.grafana.com/name": "my-stack",
+
+				// Add a custom label so the existing secret is not semantically equal to the desired one
+ "test": "test",
+ },
+ OwnerReferences: []metav1.OwnerReference{
+ {
+ APIVersion: "loki.grafana.com/v1",
+ Kind: "LokiStack",
+ Name: "someStack",
+ UID: "b23f9a38-9672-499f-8c29-15ede74d3ece",
+ Controller: pointer.BoolPtr(true),
+ BlockOwnerDeletion: pointer.BoolPtr(true),
+ },
+ },
+ },
+ }
+
+	// GetStub looks up the CR first, so we need to return our fake stack and
+	// the existing secret for everything else to trigger an update.
+ k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error {
+ if r.Name == name.Name && r.Namespace == name.Namespace {
+ k.SetClientObject(object, &stack)
+ }
+ if defaultSecret.Name == name.Name {
+ k.SetClientObject(object, &defaultSecret)
+ }
+ if secret.Name == name.Name && secret.Namespace == name.Namespace {
+ k.SetClientObject(object, &secret)
+ }
+ return nil
+ }
+
+	// UpdateStub returns an error for each resource to trigger reconciliation anew.
+ k.UpdateStub = func(_ context.Context, o client.Object, _ ...client.UpdateOption) error {
+ return apierrors.NewTooManyRequestsError("too many create requests")
+ }
+
+ k.StatusStub = func() client.StatusWriter { return sw }
+
+ err := handlers.CreateOrRotateCertificates(context.TODO(), logger, r, k, scheme, featureGates)
+
+ // make sure error is returned to re-trigger reconciliation
+ require.Error(t, err)
+}
diff --git a/operator/internal/manifests/build.go b/operator/internal/manifests/build.go
index 44101d2080398..68781b16be917 100644
--- a/operator/internal/manifests/build.go
+++ b/operator/internal/manifests/build.go
@@ -85,10 +85,6 @@ func BuildAll(opts Options) ([]client.Object, error) {
res = append(res, gatewayObjects...)
}
- if opts.Stack.Tenants != nil {
- res = configureLokiStackObjsForMode(res, opts)
- }
-
if opts.Gates.ServiceMonitors {
res = append(res, BuildServiceMonitors(opts)...)
}
diff --git a/operator/internal/manifests/build_test.go b/operator/internal/manifests/build_test.go
index 0132616ac656d..58aea0fbb4a79 100644
--- a/operator/internal/manifests/build_test.go
+++ b/operator/internal/manifests/build_test.go
@@ -320,20 +320,6 @@ func TestBuildAll_WithFeatureGates_OpenShift_ServingCertsService(t *testing.T) {
require.NoError(t, err)
svcs := []*corev1.Service{
- NewDistributorGRPCService(tst.BuildOptions),
- NewDistributorHTTPService(tst.BuildOptions),
- NewIngesterGRPCService(tst.BuildOptions),
- NewIngesterHTTPService(tst.BuildOptions),
- NewQuerierGRPCService(tst.BuildOptions),
- NewQuerierHTTPService(tst.BuildOptions),
- NewQueryFrontendGRPCService(tst.BuildOptions),
- NewQueryFrontendHTTPService(tst.BuildOptions),
- NewCompactorGRPCService(tst.BuildOptions),
- NewCompactorHTTPService(tst.BuildOptions),
- NewIndexGatewayGRPCService(tst.BuildOptions),
- NewIndexGatewayHTTPService(tst.BuildOptions),
- NewRulerHTTPService(tst.BuildOptions),
- NewRulerGRPCService(tst.BuildOptions),
NewGatewayHTTPService(tst.BuildOptions),
}
@@ -421,14 +407,14 @@ func TestBuildAll_WithFeatureGates_HTTPEncryption(t *testing.T) {
expVolumeMount := corev1.VolumeMount{
Name: secretName,
ReadOnly: false,
- MountPath: "/var/run/tls/http",
+ MountPath: "/var/run/tls/http/server",
}
require.Contains(t, vms, expVolumeMount)
require.Contains(t, args, "-server.tls-min-version=VersionTLS12")
require.Contains(t, args, fmt.Sprintf("-server.tls-cipher-suites=%s", ciphers))
- require.Contains(t, args, "-server.http-tls-cert-path=/var/run/tls/http/tls.crt")
- require.Contains(t, args, "-server.http-tls-key-path=/var/run/tls/http/tls.key")
+ require.Contains(t, args, "-server.http-tls-cert-path=/var/run/tls/http/server/tls.crt")
+ require.Contains(t, args, "-server.http-tls-key-path=/var/run/tls/http/server/tls.key")
require.Equal(t, corev1.URISchemeHTTPS, rps)
require.Equal(t, corev1.URISchemeHTTPS, lps)
}
@@ -500,12 +486,12 @@ func TestBuildAll_WithFeatureGates_ServiceMonitorTLSEndpoints(t *testing.T) {
expVolumeMount := corev1.VolumeMount{
Name: secretName,
ReadOnly: false,
- MountPath: "/var/run/tls/http",
+ MountPath: "/var/run/tls/http/server",
}
require.Contains(t, vms, expVolumeMount)
- require.Contains(t, args, "-server.http-tls-cert-path=/var/run/tls/http/tls.crt")
- require.Contains(t, args, "-server.http-tls-key-path=/var/run/tls/http/tls.key")
+ require.Contains(t, args, "-server.http-tls-cert-path=/var/run/tls/http/server/tls.crt")
+ require.Contains(t, args, "-server.http-tls-key-path=/var/run/tls/http/server/tls.key")
require.Equal(t, corev1.URISchemeHTTPS, rps)
require.Equal(t, corev1.URISchemeHTTPS, lps)
}
@@ -658,8 +644,8 @@ func TestBuildAll_WithFeatureGates_GRPCEncryption(t *testing.T) {
t.Run(name, func(t *testing.T) {
secretName := secretsMap[name]
args := []string{
- "-server.grpc-tls-cert-path=/var/run/tls/grpc/tls.crt",
- "-server.grpc-tls-key-path=/var/run/tls/grpc/tls.key",
+ "-server.grpc-tls-cert-path=/var/run/tls/grpc/server/tls.crt",
+ "-server.grpc-tls-key-path=/var/run/tls/grpc/server/tls.key",
"-server.tls-min-version=VersionTLS12",
fmt.Sprintf("-server.tls-cipher-suites=%s", ciphers),
}
@@ -667,7 +653,7 @@ func TestBuildAll_WithFeatureGates_GRPCEncryption(t *testing.T) {
vm := corev1.VolumeMount{
Name: secretName,
ReadOnly: false,
- MountPath: "/var/run/tls/grpc",
+ MountPath: "/var/run/tls/grpc/server",
}
v := corev1.Volume{
diff --git a/operator/internal/manifests/compactor.go b/operator/internal/manifests/compactor.go
index 11874f36baede..c32a76fa65497 100644
--- a/operator/internal/manifests/compactor.go
+++ b/operator/internal/manifests/compactor.go
@@ -36,6 +36,13 @@ func BuildCompactor(opts Options) ([]client.Object, error) {
}
}
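+	// Mount the signing CA bundle into the compactor pods when either encryption feature gate is enabled.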
+ if opts.Gates.HTTPEncryption || opts.Gates.GRPCEncryption {
+ caBundleName := signingCABundleName(opts.Name)
+ if err := configureServiceCA(&statefulSet.Spec.Template.Spec, caBundleName); err != nil {
+ return nil, err
+ }
+ }
+
return []client.Object{
statefulSet,
NewCompactorGRPCService(opts),
@@ -121,7 +128,7 @@ func NewCompactorStatefulSet(opts Options) *appsv1.StatefulSet {
}
l := ComponentLabels(LabelCompactorComponent, opts.Name)
- a := commonAnnotations(opts.ConfigSHA1)
+ a := commonAnnotations(opts.ConfigSHA1, opts.CertRotationRequiredAt)
return &appsv1.StatefulSet{
TypeMeta: metav1.TypeMeta{
Kind: "StatefulSet",
@@ -182,9 +189,8 @@ func NewCompactorGRPCService(opts Options) *corev1.Service {
APIVersion: corev1.SchemeGroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
- Name: serviceName,
- Labels: labels,
- Annotations: serviceAnnotations(serviceName, opts.Gates.OpenShift.ServingCertsService),
+ Name: serviceName,
+ Labels: labels,
},
Spec: corev1.ServiceSpec{
ClusterIP: "None",
@@ -212,9 +218,8 @@ func NewCompactorHTTPService(opts Options) *corev1.Service {
APIVersion: corev1.SchemeGroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
- Name: serviceName,
- Labels: labels,
- Annotations: serviceAnnotations(serviceName, opts.Gates.OpenShift.ServingCertsService),
+ Name: serviceName,
+ Labels: labels,
},
Spec: corev1.ServiceSpec{
Ports: []corev1.ServicePort{
@@ -232,7 +237,7 @@ func NewCompactorHTTPService(opts Options) *corev1.Service {
func configureCompactorHTTPServicePKI(statefulSet *appsv1.StatefulSet, opts Options) error {
serviceName := serviceNameCompactorHTTP(opts.Name)
- return configureHTTPServicePKI(&statefulSet.Spec.Template.Spec, serviceName)
+ return configureHTTPServicePKI(&statefulSet.Spec.Template.Spec, serviceName, opts.TLSProfile.MinTLSVersion, opts.TLSCipherSuites())
}
func configureCompactorGRPCServicePKI(sts *appsv1.StatefulSet, opts Options) error {
diff --git a/operator/internal/manifests/compactor_test.go b/operator/internal/manifests/compactor_test.go
index 81fc8c7562830..1dbaf2cd6129e 100644
--- a/operator/internal/manifests/compactor_test.go
+++ b/operator/internal/manifests/compactor_test.go
@@ -54,3 +54,23 @@ func TestNewCompactorStatefulSet_HasTemplateConfigHashAnnotation(t *testing.T) {
require.Contains(t, annotations, expected)
require.Equal(t, annotations[expected], "deadbeef")
}
+
+func TestNewCompactorStatefulSet_HasTemplateCertRotationRequiredAtAnnotation(t *testing.T) {
+ ss := manifests.NewCompactorStatefulSet(manifests.Options{
+ Name: "abcd",
+ Namespace: "efgh",
+ CertRotationRequiredAt: "deadbeef",
+ Stack: lokiv1.LokiStackSpec{
+ StorageClassName: "standard",
+ Template: &lokiv1.LokiTemplateSpec{
+ Compactor: &lokiv1.LokiComponentSpec{
+ Replicas: 1,
+ },
+ },
+ },
+ })
+ expected := "loki.grafana.com/certRotationRequiredAt"
+ annotations := ss.Spec.Template.Annotations
+ require.Contains(t, annotations, expected)
+ require.Equal(t, annotations[expected], "deadbeef")
+}
diff --git a/operator/internal/manifests/distributor.go b/operator/internal/manifests/distributor.go
index 174bb6dde33bb..afba9c8798e32 100644
--- a/operator/internal/manifests/distributor.go
+++ b/operator/internal/manifests/distributor.go
@@ -32,6 +32,13 @@ func BuildDistributor(opts Options) ([]client.Object, error) {
}
}
+ if opts.Gates.HTTPEncryption || opts.Gates.GRPCEncryption {
+ caBundleName := signingCABundleName(opts.Name)
+ if err := configureServiceCA(&deployment.Spec.Template.Spec, caBundleName); err != nil {
+ return nil, err
+ }
+ }
+
return []client.Object{
deployment,
NewDistributorGRPCService(opts),
@@ -117,7 +124,7 @@ func NewDistributorDeployment(opts Options) *appsv1.Deployment {
}
l := ComponentLabels(LabelDistributorComponent, opts.Name)
- a := commonAnnotations(opts.ConfigSHA1)
+ a := commonAnnotations(opts.ConfigSHA1, opts.CertRotationRequiredAt)
return &appsv1.Deployment{
TypeMeta: metav1.TypeMeta{
@@ -159,9 +166,8 @@ func NewDistributorGRPCService(opts Options) *corev1.Service {
APIVersion: corev1.SchemeGroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
- Name: serviceName,
- Labels: labels,
- Annotations: serviceAnnotations(serviceName, opts.Gates.OpenShift.ServingCertsService),
+ Name: serviceName,
+ Labels: labels,
},
Spec: corev1.ServiceSpec{
ClusterIP: "None",
@@ -189,9 +195,8 @@ func NewDistributorHTTPService(opts Options) *corev1.Service {
APIVersion: corev1.SchemeGroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
- Name: serviceName,
- Labels: labels,
- Annotations: serviceAnnotations(serviceName, opts.Gates.OpenShift.ServingCertsService),
+ Name: serviceName,
+ Labels: labels,
},
Spec: corev1.ServiceSpec{
Ports: []corev1.ServicePort{
@@ -209,48 +214,23 @@ func NewDistributorHTTPService(opts Options) *corev1.Service {
func configureDistributorHTTPServicePKI(deployment *appsv1.Deployment, opts Options) error {
serviceName := serviceNameDistributorHTTP(opts.Name)
- return configureHTTPServicePKI(&deployment.Spec.Template.Spec, serviceName)
+ return configureHTTPServicePKI(&deployment.Spec.Template.Spec, serviceName, opts.TLSProfile.MinTLSVersion, opts.TLSCipherSuites())
}
func configureDistributorGRPCServicePKI(deployment *appsv1.Deployment, opts Options) error {
- caBundleName := signingCABundleName(opts.Name)
- secretVolumeSpec := corev1.PodSpec{
- Volumes: []corev1.Volume{
- {
- Name: caBundleName,
- VolumeSource: corev1.VolumeSource{
- ConfigMap: &corev1.ConfigMapVolumeSource{
- LocalObjectReference: corev1.LocalObjectReference{
- Name: caBundleName,
- },
- },
- },
- },
- },
- }
-
secretContainerSpec := corev1.Container{
- VolumeMounts: []corev1.VolumeMount{
- {
- Name: caBundleName,
- ReadOnly: false,
- MountPath: caBundleDir,
- },
- },
Args: []string{
// Enable GRPC over TLS for ingester client
"-ingester.client.tls-enabled=true",
fmt.Sprintf("-ingester.client.tls-cipher-suites=%s", opts.TLSCipherSuites()),
fmt.Sprintf("-ingester.client.tls-min-version=%s", opts.TLSProfile.MinTLSVersion),
fmt.Sprintf("-ingester.client.tls-ca-path=%s", signingCAPath()),
+ fmt.Sprintf("-ingester.client.tls-cert-path=%s", lokiServerGRPCTLSCert()),
+ fmt.Sprintf("-ingester.client.tls-key-path=%s", lokiServerGRPCTLSKey()),
fmt.Sprintf("-ingester.client.tls-server-name=%s", fqdn(serviceNameIngesterGRPC(opts.Name), opts.Namespace)),
},
}
- if err := mergo.Merge(&deployment.Spec.Template.Spec, secretVolumeSpec, mergo.WithAppendSlice); err != nil {
- return kverrors.Wrap(err, "failed to merge volumes")
- }
-
if err := mergo.Merge(&deployment.Spec.Template.Spec.Containers[0], secretContainerSpec, mergo.WithAppendSlice); err != nil {
return kverrors.Wrap(err, "failed to merge container")
}
diff --git a/operator/internal/manifests/distributor_test.go b/operator/internal/manifests/distributor_test.go
index acbdfc2cc55ae..876c5406756c4 100644
--- a/operator/internal/manifests/distributor_test.go
+++ b/operator/internal/manifests/distributor_test.go
@@ -28,7 +28,7 @@ func TestNewDistributorDeployment_SelectorMatchesLabels(t *testing.T) {
}
}
-func TestNewDistributorDeployme_HasTemplateConfigHashAnnotation(t *testing.T) {
+func TestNewDistributorDeployment_HasTemplateConfigHashAnnotation(t *testing.T) {
ss := manifests.NewDistributorDeployment(manifests.Options{
Name: "abcd",
Namespace: "efgh",
@@ -47,3 +47,23 @@ func TestNewDistributorDeployme_HasTemplateConfigHashAnnotation(t *testing.T) {
require.Contains(t, annotations, expected)
require.Equal(t, annotations[expected], "deadbeef")
}
+
+func TestNewDistributorDeployment_HasTemplateCertRotationRequiredAtAnnotation(t *testing.T) {
+ ss := manifests.NewDistributorDeployment(manifests.Options{
+ Name: "abcd",
+ Namespace: "efgh",
+ CertRotationRequiredAt: "deadbeef",
+ Stack: lokiv1.LokiStackSpec{
+ Template: &lokiv1.LokiTemplateSpec{
+ Distributor: &lokiv1.LokiComponentSpec{
+ Replicas: 1,
+ },
+ },
+ },
+ })
+
+ expected := "loki.grafana.com/certRotationRequiredAt"
+ annotations := ss.Spec.Template.Annotations
+ require.Contains(t, annotations, expected)
+ require.Equal(t, annotations[expected], "deadbeef")
+}
diff --git a/operator/internal/manifests/gateway.go b/operator/internal/manifests/gateway.go
index 39e323d85525e..7790de834ae37 100644
--- a/operator/internal/manifests/gateway.go
+++ b/operator/internal/manifests/gateway.go
@@ -4,6 +4,7 @@ import (
"crypto/sha1"
"fmt"
"path"
+ "regexp"
"strings"
"github.com/ViaQ/logerr/v2/kverrors"
@@ -25,6 +26,8 @@ const (
tlsSecretVolume = "tls-secret"
)
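+// logsEndpointRe matches the gateway's --logs.read/tail/write/rules endpoint flags so their URL scheme can be switched to HTTPS.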
+var logsEndpointRe = regexp.MustCompile(`^--logs\.(?:read|tail|write|rules)\.endpoint=http://.+`)
+
// BuildGateway returns a list of k8s objects for Loki Stack Gateway
func BuildGateway(opts Options) ([]client.Object, error) {
cm, sha1C, err := gatewayConfigMap(opts)
@@ -33,6 +36,8 @@ func BuildGateway(opts Options) ([]client.Object, error) {
}
dpl := NewGatewayDeployment(opts, sha1C)
+ sa := NewServiceAccount(opts)
+ saToken := NewServiceAccountTokenSecret(opts)
svc := NewGatewayHTTPService(opts)
ing, err := NewGatewayIngress(opts)
@@ -40,29 +45,31 @@ func BuildGateway(opts Options) ([]client.Object, error) {
return nil, err
}
- objs := []client.Object{cm, dpl, svc, ing}
+ objs := []client.Object{cm, dpl, sa, saToken, svc, ing}
minTLSVersion := opts.TLSProfile.MinTLSVersion
ciphersList := opts.TLSProfile.Ciphers
ciphers := strings.Join(ciphersList, `,`)
- if opts.Gates.HTTPEncryption {
- serviceName := serviceNameGatewayHTTP(opts.Name)
- if err := configureGatewayMetricsPKI(&dpl.Spec.Template.Spec, serviceName, minTLSVersion, ciphers); err != nil {
+ if opts.Stack.Rules != nil && opts.Stack.Rules.Enabled {
+ if err := configureGatewayRulesAPI(&dpl.Spec.Template.Spec, opts.Name, opts.Namespace); err != nil {
return nil, err
}
}
- if opts.Stack.Rules != nil && opts.Stack.Rules.Enabled {
- if err := configureGatewayRulesAPI(&dpl.Spec.Template.Spec, opts.Name, opts.Namespace); err != nil {
+ if opts.Gates.HTTPEncryption {
+ serviceName := serviceNameGatewayHTTP(opts.Name)
+ serverCAName := gatewaySigningCABundleName(GatewayName(opts.Name))
+ upstreamCAName := signingCABundleName(opts.Name)
+ upstreamClientName := gatewayClientSecretName(opts.Name)
+ if err := configureGatewayServerPKI(&dpl.Spec.Template.Spec, opts.Namespace, serviceName, serverCAName, upstreamCAName, upstreamClientName, minTLSVersion, ciphers); err != nil {
return nil, err
}
}
if opts.Stack.Tenants != nil {
mode := opts.Stack.Tenants.Mode
-
- if err := configureGatewayDeploymentForMode(dpl, mode, opts.Gates, opts.Name, opts.Namespace, minTLSVersion, ciphers); err != nil {
+ if err := configureGatewayDeploymentForMode(dpl, mode, opts.Gates, minTLSVersion, ciphers); err != nil {
return nil, err
}
@@ -81,7 +88,8 @@ func BuildGateway(opts Options) ([]client.Object, error) {
// NewGatewayDeployment creates a deployment object for a lokiStack-gateway
func NewGatewayDeployment(opts Options, sha1C string) *appsv1.Deployment {
podSpec := corev1.PodSpec{
- Affinity: defaultAffinity(opts.Gates.DefaultNodeAffinity),
+ ServiceAccountName: GatewayName(opts.Name),
+ Affinity: defaultAffinity(opts.Gates.DefaultNodeAffinity),
Volumes: []corev1.Volume{
{
Name: "rbac",
@@ -193,7 +201,7 @@ func NewGatewayDeployment(opts Options, sha1C string) *appsv1.Deployment {
}
l := ComponentLabels(LabelGatewayComponent, opts.Name)
- a := commonAnnotations(sha1C)
+ a := commonAnnotations(sha1C, opts.CertRotationRequiredAt)
return &appsv1.Deployment{
TypeMeta: metav1.TypeMeta{
@@ -301,6 +309,46 @@ func NewGatewayIngress(opts Options) (*networkingv1.Ingress, error) {
}, nil
}
+// NewServiceAccount returns a k8s object for the LokiStack Gateway
+// serviceaccount.
+func NewServiceAccount(opts Options) client.Object {
+ l := ComponentLabels(LabelGatewayComponent, opts.Name)
+ return &corev1.ServiceAccount{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "ServiceAccount",
+ APIVersion: corev1.SchemeGroupVersion.String(),
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Labels: l,
+ Name: GatewayName(opts.Name),
+ Namespace: opts.Namespace,
+ },
+ AutomountServiceAccountToken: pointer.Bool(true),
+ }
+}
+
+// NewServiceAccountTokenSecret returns a k8s object for the LokiStack
+// Gateway secret. This secret represents the ServiceAccountToken.
+func NewServiceAccountTokenSecret(opts Options) client.Object {
+ l := ComponentLabels(LabelGatewayComponent, opts.Name)
+
+ return &corev1.Secret{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "Secret",
+ APIVersion: corev1.SchemeGroupVersion.String(),
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Annotations: map[string]string{
+ corev1.ServiceAccountNameKey: GatewayName(opts.Name),
+ },
+ Labels: l,
+ Name: gatewayTokenSecretName(GatewayName(opts.Name)),
+ Namespace: opts.Namespace,
+ },
+ Type: corev1.SecretTypeServiceAccountToken,
+ }
+}
+
// gatewayConfigMap creates a configMap for rbac.yaml and tenants.yaml
func gatewayConfigMap(opt Options) (*corev1.ConfigMap, string, error) {
cfg := gatewayConfigOptions(opt)
@@ -355,7 +403,12 @@ func gatewayConfigOptions(opt Options) gateway.Options {
}
}
-func configureGatewayMetricsPKI(podSpec *corev1.PodSpec, serviceName, minTLSVersion, ciphers string) error {
+func configureGatewayServerPKI(
+ podSpec *corev1.PodSpec,
+ namespace, serviceName, serverCAName string,
+ upstreamCAName, upstreamClientName string,
+ minTLSVersion, ciphers string,
+) error {
var gwIndex int
for i, c := range podSpec.Containers {
if c.Name == gatewayContainerName {
@@ -364,63 +417,115 @@ func configureGatewayMetricsPKI(podSpec *corev1.PodSpec, serviceName, minTLSVers
}
}
- certFile := path.Join(httpTLSDir, tlsCertFile)
- keyFile := path.Join(httpTLSDir, tlsKeyFile)
+ gwContainer := podSpec.Containers[gwIndex].DeepCopy()
+ gwArgs := gwContainer.Args
+ gwVolumes := podSpec.Volumes
- secretVolumeSpec := corev1.PodSpec{
- Volumes: []corev1.Volume{
- {
- Name: tlsSecretVolume,
- VolumeSource: corev1.VolumeSource{
- Secret: &corev1.SecretVolumeSource{
- SecretName: serviceName,
- },
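+	// Rewrite the health check URL and any logs endpoint flags from HTTP to HTTPS.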
+ for i, a := range gwArgs {
+ if strings.HasPrefix(a, "--web.healthchecks.url=") {
+ gwArgs[i] = fmt.Sprintf("--web.healthchecks.url=https://localhost:%d", gatewayHTTPPort)
+ }
+
+ if logsEndpointRe.MatchString(a) {
+ gwArgs[i] = strings.Replace(a, "http", "https", 1)
+ }
+ }
+
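+	// Serve the public and internal gateway endpoints over TLS, verify health checks against the gateway signing CA,
+	// and present a client certificate to the upstream Loki endpoints.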
+ serverName := fqdn(serviceName, namespace)
+ gwArgs = append(gwArgs,
+ "--tls.client-auth-type=NoClientCert",
+ "--tls.min-version=VersionTLS12",
+ fmt.Sprintf("--tls.server.cert-file=%s", gatewayServerHTTPTLSCert()),
+ fmt.Sprintf("--tls.server.key-file=%s", gatewayServerHTTPTLSKey()),
+ fmt.Sprintf("--tls.healthchecks.server-ca-file=%s", gatewaySigningCAPath()),
+ fmt.Sprintf("--tls.healthchecks.server-name=%s", serverName),
+ fmt.Sprintf("--tls.internal.server.cert-file=%s", gatewayServerHTTPTLSCert()),
+ fmt.Sprintf("--tls.internal.server.key-file=%s", gatewayServerHTTPTLSKey()),
+ fmt.Sprintf("--tls.min-version=%s", minTLSVersion),
+ fmt.Sprintf("--tls.cipher-suites=%s", ciphers),
+ fmt.Sprintf("--logs.tls.ca-file=%s", gatewayUpstreamCAPath()),
+ fmt.Sprintf("--logs.tls.cert-file=%s", gatewayUpstreamHTTPTLSCert()),
+ fmt.Sprintf("--logs.tls.key-file=%s", gatewayUpstreamHTTPTLSKey()),
+ )
+
+ gwContainer.ReadinessProbe.ProbeHandler.HTTPGet.Scheme = corev1.URISchemeHTTPS
+ gwContainer.LivenessProbe.ProbeHandler.HTTPGet.Scheme = corev1.URISchemeHTTPS
+ gwContainer.Args = gwArgs
+
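+	// Add volumes for the serving certificate secret, the upstream client certificate secret, and both CA bundles.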
+ gwVolumes = append(gwVolumes,
+ corev1.Volume{
+ Name: tlsSecretVolume,
+ VolumeSource: corev1.VolumeSource{
+ Secret: &corev1.SecretVolumeSource{
+ SecretName: serviceName,
},
},
},
- }
- secretContainerSpec := corev1.Container{
- VolumeMounts: []corev1.VolumeMount{
- {
- Name: tlsSecretVolume,
- ReadOnly: true,
- MountPath: httpTLSDir,
+ corev1.Volume{
+ Name: upstreamClientName,
+ VolumeSource: corev1.VolumeSource{
+ Secret: &corev1.SecretVolumeSource{
+ SecretName: upstreamClientName,
+ },
},
},
- Args: []string{
- fmt.Sprintf("--tls.internal.server.cert-file=%s", certFile),
- fmt.Sprintf("--tls.internal.server.key-file=%s", keyFile),
- fmt.Sprintf("--tls.min-version=%s", minTLSVersion),
- fmt.Sprintf("--tls.cipher-suites=%s", ciphers),
- },
- }
- uriSchemeContainerSpec := corev1.Container{
- ReadinessProbe: &corev1.Probe{
- ProbeHandler: corev1.ProbeHandler{
- HTTPGet: &corev1.HTTPGetAction{
- Scheme: corev1.URISchemeHTTPS,
+ corev1.Volume{
+ Name: upstreamCAName,
+ VolumeSource: corev1.VolumeSource{
+ ConfigMap: &corev1.ConfigMapVolumeSource{
+ DefaultMode: &defaultConfigMapMode,
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: upstreamCAName,
+ },
},
},
},
- LivenessProbe: &corev1.Probe{
- ProbeHandler: corev1.ProbeHandler{
- HTTPGet: &corev1.HTTPGetAction{
- Scheme: corev1.URISchemeHTTPS,
+ corev1.Volume{
+ Name: serverCAName,
+ VolumeSource: corev1.VolumeSource{
+ ConfigMap: &corev1.ConfigMapVolumeSource{
+ DefaultMode: &defaultConfigMapMode,
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: serverCAName,
+ },
},
},
},
- }
-
- if err := mergo.Merge(podSpec, secretVolumeSpec, mergo.WithAppendSlice); err != nil {
- return kverrors.Wrap(err, "failed to merge volumes")
- }
+ )
+
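+	// Mount the certificates and CA bundles into the gateway container at the expected TLS directories.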
+ gwContainer.VolumeMounts = append(
+ gwContainer.VolumeMounts,
+ corev1.VolumeMount{
+ Name: tlsSecretVolume,
+ ReadOnly: true,
+ MountPath: gatewayServerHTTPTLSDir(),
+ },
+ corev1.VolumeMount{
+ Name: upstreamClientName,
+ ReadOnly: true,
+ MountPath: gatewayUpstreamHTTPTLSDir(),
+ },
+ corev1.VolumeMount{
+ Name: upstreamCAName,
+ ReadOnly: true,
+ MountPath: gatewayUpstreamCADir(),
+ },
+ corev1.VolumeMount{
+ Name: serverCAName,
+ ReadOnly: true,
+ MountPath: gatewaySigningCADir(),
+ },
+ )
- if err := mergo.Merge(&podSpec.Containers[gwIndex], secretContainerSpec, mergo.WithAppendSlice); err != nil {
- return kverrors.Wrap(err, "failed to merge container")
+ p := corev1.PodSpec{
+ Containers: []corev1.Container{
+ *gwContainer,
+ },
+ Volumes: gwVolumes,
}
- if err := mergo.Merge(&podSpec.Containers[gwIndex], uriSchemeContainerSpec, mergo.WithOverride); err != nil {
- return kverrors.Wrap(err, "failed to merge container")
+ if err := mergo.Merge(podSpec, p, mergo.WithOverride); err != nil {
+		return kverrors.Wrap(err, "failed to merge server PKI into container spec")
}
return nil
diff --git a/operator/internal/manifests/gateway_tenants.go b/operator/internal/manifests/gateway_tenants.go
index 6adcc11d5f670..9598dba13f81f 100644
--- a/operator/internal/manifests/gateway_tenants.go
+++ b/operator/internal/manifests/gateway_tenants.go
@@ -59,38 +59,13 @@ func ApplyGatewayDefaultOptions(opts *Options) error {
return nil
}
-func configureGatewayDeploymentForMode(
- d *appsv1.Deployment, mode lokiv1.ModeType,
- fg configv1.FeatureGates, stackName, stackNs string,
- minTLSVersion string, ciphers string,
-) error {
+func configureGatewayDeploymentForMode(d *appsv1.Deployment, mode lokiv1.ModeType, fg configv1.FeatureGates, minTLSVersion string, ciphers string) error {
switch mode {
case lokiv1.Static, lokiv1.Dynamic:
return nil // nothing to configure
case lokiv1.OpenshiftLogging, lokiv1.OpenshiftNetwork:
- caBundleName := signingCABundleName(stackName)
- serviceName := serviceNameGatewayHTTP(stackName)
- secretName := signingServiceSecretName(serviceName)
- serverName := fqdn(serviceName, stackNs)
- return openshift.ConfigureGatewayDeployment(
- d,
- mode,
- gatewayContainerName,
- tlsSecretVolume,
- httpTLSDir,
- tlsCertFile,
- tlsKeyFile,
- caBundleName,
- caBundleDir,
- caFile,
- fg.HTTPEncryption,
- fg.OpenShift.ServingCertsService,
- secretName,
- serverName,
- gatewayHTTPPort,
- minTLSVersion,
- ciphers,
- )
+ tlsDir := gatewayServerHTTPTLSDir()
+ return openshift.ConfigureGatewayDeployment(d, mode, tlsSecretVolume, tlsDir, minTLSVersion, ciphers, fg.HTTPEncryption)
}
return nil
@@ -107,23 +82,25 @@ func configureGatewayServiceForMode(s *corev1.ServiceSpec, mode lokiv1.ModeType)
return nil
}
-func configureLokiStackObjsForMode(objs []client.Object, opts Options) []client.Object {
- switch opts.Stack.Tenants.Mode {
- case lokiv1.Static, lokiv1.Dynamic:
- // nothing to configure
- case lokiv1.OpenshiftLogging, lokiv1.OpenshiftNetwork:
- openShiftObjs := openshift.BuildLokiStackObjects(opts.OpenShiftOptions)
- objs = append(objs, openShiftObjs...)
- }
-
- return objs
-}
-
func configureGatewayObjsForMode(objs []client.Object, opts Options) []client.Object {
switch opts.Stack.Tenants.Mode {
case lokiv1.Static, lokiv1.Dynamic:
// nothing to configure
case lokiv1.OpenshiftLogging, lokiv1.OpenshiftNetwork:
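+		// Copy the mode-specific OpenShift annotations onto the gateway ServiceAccount.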
+ for _, o := range objs {
+ switch sa := o.(type) {
+ case *corev1.ServiceAccount:
+ if sa.Annotations == nil {
+ sa.Annotations = map[string]string{}
+ }
+
+ a := openshift.ServiceAccountAnnotations(opts.OpenShiftOptions)
+ for key, value := range a {
+ sa.Annotations[key] = value
+ }
+ }
+ }
+
openShiftObjs := openshift.BuildGatewayObjects(opts.OpenShiftOptions)
var cObjs []client.Object
@@ -145,12 +122,12 @@ func configureGatewayObjsForMode(objs []client.Object, opts Options) []client.Ob
return objs
}
-func configureGatewayServiceMonitorForMode(sm *monitoringv1.ServiceMonitor, mode lokiv1.ModeType, fg configv1.FeatureGates) error {
- switch mode {
+func configureGatewayServiceMonitorForMode(sm *monitoringv1.ServiceMonitor, opts Options) error {
+ switch opts.Stack.Tenants.Mode {
case lokiv1.Static, lokiv1.Dynamic:
return nil // nothing to configure
case lokiv1.OpenshiftLogging, lokiv1.OpenshiftNetwork:
- return openshift.ConfigureGatewayServiceMonitor(sm, fg.ServiceMonitorTLSEndpoints)
+ return openshift.ConfigureGatewayServiceMonitor(sm, opts.Gates.ServiceMonitorTLSEndpoints)
}
return nil
diff --git a/operator/internal/manifests/gateway_tenants_test.go b/operator/internal/manifests/gateway_tenants_test.go
index c50101f3bd6b9..da6c3813c50d9 100644
--- a/operator/internal/manifests/gateway_tenants_test.go
+++ b/operator/internal/manifests/gateway_tenants_test.go
@@ -1,7 +1,7 @@
package manifests
import (
- "fmt"
+ "path"
"testing"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
@@ -280,26 +280,6 @@ func TestConfigureDeploymentForMode(t *testing.T) {
Containers: []corev1.Container{
{
Name: gatewayContainerName,
- Args: []string{
- "--logs.read.endpoint=http://example.com",
- "--logs.tail.endpoint=http://example.com",
- "--logs.write.endpoint=http://example.com",
- fmt.Sprintf("--web.healthchecks.url=http://localhost:%d", gatewayHTTPPort),
- },
- ReadinessProbe: &corev1.Probe{
- ProbeHandler: corev1.ProbeHandler{
- HTTPGet: &corev1.HTTPGetAction{
- Scheme: corev1.URISchemeHTTP,
- },
- },
- },
- LivenessProbe: &corev1.Probe{
- ProbeHandler: corev1.ProbeHandler{
- HTTPGet: &corev1.HTTPGetAction{
- Scheme: corev1.URISchemeHTTP,
- },
- },
- },
},
},
},
@@ -316,40 +296,6 @@ func TestConfigureDeploymentForMode(t *testing.T) {
Containers: []corev1.Container{
{
Name: gatewayContainerName,
- Args: []string{
- "--logs.read.endpoint=http://example.com",
- "--logs.tail.endpoint=http://example.com",
- "--logs.write.endpoint=http://example.com",
- fmt.Sprintf("--web.healthchecks.url=https://localhost:%d", gatewayHTTPPort),
- "--tls.client-auth-type=NoClientCert",
- "--tls.server.cert-file=/var/run/tls/http/tls.crt",
- "--tls.server.key-file=/var/run/tls/http/tls.key",
- "--tls.healthchecks.server-ca-file=/var/run/ca/service-ca.crt",
- fmt.Sprintf("--tls.healthchecks.server-name=%s", "test-gateway-http.test-ns.svc.cluster.local"),
- "--tls.min-version=min-version",
- "--tls.cipher-suites=cipher1,cipher2",
- },
- VolumeMounts: []corev1.VolumeMount{
- {
- Name: tlsSecretVolume,
- ReadOnly: true,
- MountPath: httpTLSDir,
- },
- },
- ReadinessProbe: &corev1.Probe{
- ProbeHandler: corev1.ProbeHandler{
- HTTPGet: &corev1.HTTPGetAction{
- Scheme: corev1.URISchemeHTTPS,
- },
- },
- },
- LivenessProbe: &corev1.Probe{
- ProbeHandler: corev1.ProbeHandler{
- HTTPGet: &corev1.HTTPGetAction{
- Scheme: corev1.URISchemeHTTPS,
- },
- },
- },
},
{
Name: "opa",
@@ -405,23 +351,13 @@ func TestConfigureDeploymentForMode(t *testing.T) {
},
},
},
- Volumes: []corev1.Volume{
- {
- Name: tlsSecretVolume,
- VolumeSource: corev1.VolumeSource{
- Secret: &corev1.SecretVolumeSource{
- SecretName: "test-gateway-http-tls",
- },
- },
- },
- },
},
},
},
},
},
{
- desc: "openshift-logging mode with-tls-service-monitor-config",
+ desc: "openshift-logging mode with http encryption",
mode: lokiv1.OpenshiftLogging,
stackName: "test",
stackNs: "test-ns",
@@ -431,6 +367,7 @@ func TestConfigureDeploymentForMode(t *testing.T) {
},
dpl: &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
+ Name: "test-gateway",
Namespace: "test-ns",
},
Spec: appsv1.DeploymentSpec{
@@ -439,229 +376,6 @@ func TestConfigureDeploymentForMode(t *testing.T) {
Containers: []corev1.Container{
{
Name: gatewayContainerName,
- Args: []string{
- "--logs.read.endpoint=http://example.com",
- "--logs.tail.endpoint=http://example.com",
- "--logs.write.endpoint=http://example.com",
- fmt.Sprintf("--web.healthchecks.url=http://localhost:%d", gatewayHTTPPort),
- "--tls.min-version=min-version",
- "--tls.cipher-suites=cipher1,cipher2",
- },
- VolumeMounts: []corev1.VolumeMount{
- {
- Name: tlsSecretVolume,
- ReadOnly: true,
- MountPath: httpTLSDir,
- },
- },
- ReadinessProbe: &corev1.Probe{
- ProbeHandler: corev1.ProbeHandler{
- HTTPGet: &corev1.HTTPGetAction{
- Scheme: corev1.URISchemeHTTP,
- },
- },
- },
- LivenessProbe: &corev1.Probe{
- ProbeHandler: corev1.ProbeHandler{
- HTTPGet: &corev1.HTTPGetAction{
- Scheme: corev1.URISchemeHTTP,
- },
- },
- },
- },
- },
- Volumes: []corev1.Volume{
- {
- Name: tlsSecretVolume,
- VolumeSource: corev1.VolumeSource{
- Secret: &corev1.SecretVolumeSource{
- SecretName: "test-gateway-http-tls",
- },
- },
- },
- },
- },
- },
- },
- },
- want: &appsv1.Deployment{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: "test-ns",
- },
- Spec: appsv1.DeploymentSpec{
- Template: corev1.PodTemplateSpec{
- Spec: corev1.PodSpec{
- Containers: []corev1.Container{
- {
- Name: gatewayContainerName,
- Args: []string{
- "--logs.read.endpoint=http://example.com",
- "--logs.tail.endpoint=http://example.com",
- "--logs.write.endpoint=http://example.com",
- fmt.Sprintf("--web.healthchecks.url=https://localhost:%d", gatewayHTTPPort),
- "--tls.min-version=min-version",
- "--tls.cipher-suites=cipher1,cipher2",
- "--tls.client-auth-type=NoClientCert",
- "--tls.server.cert-file=/var/run/tls/http/tls.crt",
- "--tls.server.key-file=/var/run/tls/http/tls.key",
- "--tls.healthchecks.server-ca-file=/var/run/ca/service-ca.crt",
- fmt.Sprintf("--tls.healthchecks.server-name=%s", "test-gateway-http.test-ns.svc.cluster.local"),
- },
- VolumeMounts: []corev1.VolumeMount{
- {
- Name: tlsSecretVolume,
- ReadOnly: true,
- MountPath: httpTLSDir,
- },
- },
- ReadinessProbe: &corev1.Probe{
- ProbeHandler: corev1.ProbeHandler{
- HTTPGet: &corev1.HTTPGetAction{
- Scheme: corev1.URISchemeHTTPS,
- },
- },
- },
- LivenessProbe: &corev1.Probe{
- ProbeHandler: corev1.ProbeHandler{
- HTTPGet: &corev1.HTTPGetAction{
- Scheme: corev1.URISchemeHTTPS,
- },
- },
- },
- },
- {
- Name: "opa",
- Image: "quay.io/observatorium/opa-openshift:latest",
- Args: []string{
- "--log.level=warn",
- "--opa.skip-tenants=audit,infrastructure",
- "--opa.admin-groups=system:cluster-admins,cluster-admin,dedicated-admin",
- "--web.listen=:8082",
- "--web.internal.listen=:8083",
- "--web.healthchecks.url=http://localhost:8082",
- "--opa.package=lokistack",
- "--opa.matcher=kubernetes_namespace_name",
- "--tls.internal.server.cert-file=/var/run/tls/http/tls.crt",
- "--tls.internal.server.key-file=/var/run/tls/http/tls.key",
- "--tls.min-version=min-version",
- "--tls.cipher-suites=cipher1,cipher2",
- `--openshift.mappings=application=loki.grafana.com`,
- `--openshift.mappings=infrastructure=loki.grafana.com`,
- `--openshift.mappings=audit=loki.grafana.com`,
- },
- Ports: []corev1.ContainerPort{
- {
- Name: openshift.GatewayOPAHTTPPortName,
- ContainerPort: openshift.GatewayOPAHTTPPort,
- Protocol: corev1.ProtocolTCP,
- },
- {
- Name: openshift.GatewayOPAInternalPortName,
- ContainerPort: openshift.GatewayOPAInternalPort,
- Protocol: corev1.ProtocolTCP,
- },
- },
- LivenessProbe: &corev1.Probe{
- ProbeHandler: corev1.ProbeHandler{
- HTTPGet: &corev1.HTTPGetAction{
- Path: "/live",
- Port: intstr.FromInt(int(openshift.GatewayOPAInternalPort)),
- Scheme: corev1.URISchemeHTTPS,
- },
- },
- TimeoutSeconds: 2,
- PeriodSeconds: 30,
- FailureThreshold: 10,
- },
- ReadinessProbe: &corev1.Probe{
- ProbeHandler: corev1.ProbeHandler{
- HTTPGet: &corev1.HTTPGetAction{
- Path: "/ready",
- Port: intstr.FromInt(int(openshift.GatewayOPAInternalPort)),
- Scheme: corev1.URISchemeHTTPS,
- },
- },
- TimeoutSeconds: 1,
- PeriodSeconds: 5,
- FailureThreshold: 12,
- },
- VolumeMounts: []corev1.VolumeMount{
- {
- Name: tlsSecretVolume,
- ReadOnly: true,
- MountPath: httpTLSDir,
- },
- },
- },
- },
- Volumes: []corev1.Volume{
- {
- Name: tlsSecretVolume,
- VolumeSource: corev1.VolumeSource{
- Secret: &corev1.SecretVolumeSource{
- SecretName: "test-gateway-http-tls",
- },
- },
- },
- },
- },
- },
- },
- },
- },
- {
- desc: "openshift-logging mode with-cert-signing-service",
- mode: lokiv1.OpenshiftLogging,
- stackName: "test",
- stackNs: "test-ns",
- featureGates: configv1.FeatureGates{
- HTTPEncryption: true,
- ServiceMonitorTLSEndpoints: true,
- OpenShift: configv1.OpenShiftFeatureGates{
- ServingCertsService: true,
- },
- },
- dpl: &appsv1.Deployment{
- ObjectMeta: metav1.ObjectMeta{
- Name: "gateway",
- Namespace: "test-ns",
- },
- Spec: appsv1.DeploymentSpec{
- Template: corev1.PodTemplateSpec{
- Spec: corev1.PodSpec{
- Containers: []corev1.Container{
- {
- Name: gatewayContainerName,
- Args: []string{
- "--other.args=foo-bar",
- "--logs.read.endpoint=http://example.com",
- "--logs.tail.endpoint=http://example.com",
- "--logs.write.endpoint=http://example.com",
- fmt.Sprintf("--web.healthchecks.url=http://localhost:%d", gatewayHTTPPort),
- "--tls.min-version=min-version",
- "--tls.cipher-suites=cipher1,cipher2",
- },
- VolumeMounts: []corev1.VolumeMount{
- {
- Name: "tls-secret",
- ReadOnly: true,
- MountPath: "/var/run/tls/http",
- },
- },
- ReadinessProbe: &corev1.Probe{
- ProbeHandler: corev1.ProbeHandler{
- HTTPGet: &corev1.HTTPGetAction{
- Scheme: corev1.URISchemeHTTP,
- },
- },
- },
- LivenessProbe: &corev1.Probe{
- ProbeHandler: corev1.ProbeHandler{
- HTTPGet: &corev1.HTTPGetAction{
- Scheme: corev1.URISchemeHTTP,
- },
- },
- },
},
},
Volumes: []corev1.Volume{
@@ -672,232 +386,19 @@ func TestConfigureDeploymentForMode(t *testing.T) {
},
},
},
- },
- want: &appsv1.Deployment{
- ObjectMeta: metav1.ObjectMeta{
- Name: "gateway",
- Namespace: "test-ns",
- },
- Spec: appsv1.DeploymentSpec{
- Template: corev1.PodTemplateSpec{
- Spec: corev1.PodSpec{
- ServiceAccountName: "gateway",
- Containers: []corev1.Container{
- {
- Name: gatewayContainerName,
- Args: []string{
- "--other.args=foo-bar",
- "--logs.read.endpoint=https://example.com",
- "--logs.tail.endpoint=https://example.com",
- "--logs.write.endpoint=https://example.com",
- fmt.Sprintf("--web.healthchecks.url=https://localhost:%d", gatewayHTTPPort),
- "--tls.min-version=min-version",
- "--tls.cipher-suites=cipher1,cipher2",
- "--logs.tls.ca-file=/var/run/ca/service-ca.crt",
- "--tls.client-auth-type=NoClientCert",
- "--tls.server.cert-file=/var/run/tls/http/tls.crt",
- "--tls.server.key-file=/var/run/tls/http/tls.key",
- "--tls.healthchecks.server-ca-file=/var/run/ca/service-ca.crt",
- fmt.Sprintf("--tls.healthchecks.server-name=%s", "test-gateway-http.test-ns.svc.cluster.local"),
- },
- VolumeMounts: []corev1.VolumeMount{
- {
- Name: "tls-secret",
- ReadOnly: true,
- MountPath: "/var/run/tls/http",
- },
- {
- Name: "test-ca-bundle",
- ReadOnly: true,
- MountPath: "/var/run/ca",
- },
- },
- ReadinessProbe: &corev1.Probe{
- ProbeHandler: corev1.ProbeHandler{
- HTTPGet: &corev1.HTTPGetAction{
- Scheme: corev1.URISchemeHTTPS,
- },
- },
- },
- LivenessProbe: &corev1.Probe{
- ProbeHandler: corev1.ProbeHandler{
- HTTPGet: &corev1.HTTPGetAction{
- Scheme: corev1.URISchemeHTTPS,
- },
- },
- },
- },
- {
- Name: "opa",
- Image: "quay.io/observatorium/opa-openshift:latest",
- Args: []string{
- "--log.level=warn",
- "--opa.skip-tenants=audit,infrastructure",
- "--opa.admin-groups=system:cluster-admins,cluster-admin,dedicated-admin",
- "--web.listen=:8082",
- "--web.internal.listen=:8083",
- "--web.healthchecks.url=http://localhost:8082",
- "--opa.package=lokistack",
- "--opa.matcher=kubernetes_namespace_name",
- "--tls.internal.server.cert-file=/var/run/tls/http/tls.crt",
- "--tls.internal.server.key-file=/var/run/tls/http/tls.key",
- "--tls.min-version=min-version",
- "--tls.cipher-suites=cipher1,cipher2",
- `--openshift.mappings=application=loki.grafana.com`,
- `--openshift.mappings=infrastructure=loki.grafana.com`,
- `--openshift.mappings=audit=loki.grafana.com`,
- },
- Ports: []corev1.ContainerPort{
- {
- Name: openshift.GatewayOPAHTTPPortName,
- ContainerPort: openshift.GatewayOPAHTTPPort,
- Protocol: corev1.ProtocolTCP,
- },
- {
- Name: openshift.GatewayOPAInternalPortName,
- ContainerPort: openshift.GatewayOPAInternalPort,
- Protocol: corev1.ProtocolTCP,
- },
- },
- LivenessProbe: &corev1.Probe{
- ProbeHandler: corev1.ProbeHandler{
- HTTPGet: &corev1.HTTPGetAction{
- Path: "/live",
- Port: intstr.FromInt(int(openshift.GatewayOPAInternalPort)),
- Scheme: corev1.URISchemeHTTPS,
- },
- },
- TimeoutSeconds: 2,
- PeriodSeconds: 30,
- FailureThreshold: 10,
- },
- ReadinessProbe: &corev1.Probe{
- ProbeHandler: corev1.ProbeHandler{
- HTTPGet: &corev1.HTTPGetAction{
- Path: "/ready",
- Port: intstr.FromInt(int(openshift.GatewayOPAInternalPort)),
- Scheme: corev1.URISchemeHTTPS,
- },
- },
- TimeoutSeconds: 1,
- PeriodSeconds: 5,
- FailureThreshold: 12,
- },
- VolumeMounts: []corev1.VolumeMount{
- {
- Name: tlsSecretVolume,
- ReadOnly: true,
- MountPath: httpTLSDir,
- },
- },
- },
- },
- Volumes: []corev1.Volume{
- {
- Name: "tls-secret-volume",
- },
- {
- Name: "test-ca-bundle",
- VolumeSource: corev1.VolumeSource{
- ConfigMap: &corev1.ConfigMapVolumeSource{
- DefaultMode: &defaultConfigMapMode,
- LocalObjectReference: corev1.LocalObjectReference{
- Name: "test-ca-bundle",
- },
- },
- },
- },
- },
- },
- },
- },
- },
- },
- {
- desc: "openshift-network mode",
- mode: lokiv1.OpenshiftNetwork,
- stackName: "test",
- stackNs: "test-ns",
- dpl: &appsv1.Deployment{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: "test-ns",
- },
- Spec: appsv1.DeploymentSpec{
- Template: corev1.PodTemplateSpec{
- Spec: corev1.PodSpec{
- Containers: []corev1.Container{
- {
- Name: gatewayContainerName,
- Args: []string{
- "--logs.read.endpoint=http://example.com",
- "--logs.tail.endpoint=http://example.com",
- "--logs.write.endpoint=http://example.com",
- fmt.Sprintf("--web.healthchecks.url=http://localhost:%d", gatewayHTTPPort),
- },
- ReadinessProbe: &corev1.Probe{
- ProbeHandler: corev1.ProbeHandler{
- HTTPGet: &corev1.HTTPGetAction{
- Scheme: corev1.URISchemeHTTP,
- },
- },
- },
- LivenessProbe: &corev1.Probe{
- ProbeHandler: corev1.ProbeHandler{
- HTTPGet: &corev1.HTTPGetAction{
- Scheme: corev1.URISchemeHTTP,
- },
- },
- },
- },
- },
- },
- },
- },
- },
- want: &appsv1.Deployment{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: "test-ns",
- },
- Spec: appsv1.DeploymentSpec{
- Template: corev1.PodTemplateSpec{
- Spec: corev1.PodSpec{
- Containers: []corev1.Container{
- {
- Name: gatewayContainerName,
- Args: []string{
- "--logs.read.endpoint=http://example.com",
- "--logs.tail.endpoint=http://example.com",
- "--logs.write.endpoint=http://example.com",
- fmt.Sprintf("--web.healthchecks.url=https://localhost:%d", gatewayHTTPPort),
- "--tls.client-auth-type=NoClientCert",
- "--tls.server.cert-file=/var/run/tls/http/tls.crt",
- "--tls.server.key-file=/var/run/tls/http/tls.key",
- "--tls.healthchecks.server-ca-file=/var/run/ca/service-ca.crt",
- fmt.Sprintf("--tls.healthchecks.server-name=%s", "test-gateway-http.test-ns.svc.cluster.local"),
- "--tls.min-version=min-version",
- "--tls.cipher-suites=cipher1,cipher2",
- },
- VolumeMounts: []corev1.VolumeMount{
- {
- Name: tlsSecretVolume,
- ReadOnly: true,
- MountPath: httpTLSDir,
- },
- },
- ReadinessProbe: &corev1.Probe{
- ProbeHandler: corev1.ProbeHandler{
- HTTPGet: &corev1.HTTPGetAction{
- Scheme: corev1.URISchemeHTTPS,
- },
- },
- },
- LivenessProbe: &corev1.Probe{
- ProbeHandler: corev1.ProbeHandler{
- HTTPGet: &corev1.HTTPGetAction{
- Scheme: corev1.URISchemeHTTPS,
- },
- },
- },
+ },
+ want: &appsv1.Deployment{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-gateway",
+ Namespace: "test-ns",
+ },
+ Spec: appsv1.DeploymentSpec{
+ Template: corev1.PodTemplateSpec{
+ Spec: corev1.PodSpec{
+ ServiceAccountName: "test-gateway",
+ Containers: []corev1.Container{
+ {
+ Name: gatewayContainerName,
},
{
Name: "opa",
@@ -910,7 +411,14 @@ func TestConfigureDeploymentForMode(t *testing.T) {
"--web.internal.listen=:8083",
"--web.healthchecks.url=http://localhost:8082",
"--opa.package=lokistack",
- `--openshift.mappings=network=loki.grafana.com`,
+ "--opa.matcher=kubernetes_namespace_name",
+ "--tls.internal.server.cert-file=/var/run/tls/http/server/tls.crt",
+ "--tls.internal.server.key-file=/var/run/tls/http/server/tls.key",
+ "--tls.min-version=min-version",
+ "--tls.cipher-suites=cipher1,cipher2",
+ `--openshift.mappings=application=loki.grafana.com`,
+ `--openshift.mappings=infrastructure=loki.grafana.com`,
+ `--openshift.mappings=audit=loki.grafana.com`,
},
Ports: []corev1.ContainerPort{
{
@@ -929,7 +437,7 @@ func TestConfigureDeploymentForMode(t *testing.T) {
HTTPGet: &corev1.HTTPGetAction{
Path: "/live",
Port: intstr.FromInt(int(openshift.GatewayOPAInternalPort)),
- Scheme: corev1.URISchemeHTTP,
+ Scheme: corev1.URISchemeHTTPS,
},
},
TimeoutSeconds: 2,
@@ -941,23 +449,25 @@ func TestConfigureDeploymentForMode(t *testing.T) {
HTTPGet: &corev1.HTTPGetAction{
Path: "/ready",
Port: intstr.FromInt(int(openshift.GatewayOPAInternalPort)),
- Scheme: corev1.URISchemeHTTP,
+ Scheme: corev1.URISchemeHTTPS,
},
},
TimeoutSeconds: 1,
PeriodSeconds: 5,
FailureThreshold: 12,
},
+ VolumeMounts: []corev1.VolumeMount{
+ {
+ Name: tlsSecretVolume,
+ ReadOnly: true,
+ MountPath: gatewayServerHTTPTLSDir(),
+ },
+ },
},
},
Volumes: []corev1.Volume{
{
- Name: tlsSecretVolume,
- VolumeSource: corev1.VolumeSource{
- Secret: &corev1.SecretVolumeSource{
- SecretName: "test-gateway-http-tls",
- },
- },
+ Name: "tls-secret-volume",
},
},
},
@@ -966,14 +476,10 @@ func TestConfigureDeploymentForMode(t *testing.T) {
},
},
{
- desc: "openshift-network mode with-tls-service-monitor-config",
+ desc: "openshift-network mode",
mode: lokiv1.OpenshiftNetwork,
stackName: "test",
stackNs: "test-ns",
- featureGates: configv1.FeatureGates{
- HTTPEncryption: true,
- ServiceMonitorTLSEndpoints: true,
- },
dpl: &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Namespace: "test-ns",
@@ -984,45 +490,11 @@ func TestConfigureDeploymentForMode(t *testing.T) {
Containers: []corev1.Container{
{
Name: gatewayContainerName,
- Args: []string{
- "--logs.read.endpoint=http://example.com",
- "--logs.tail.endpoint=http://example.com",
- "--logs.write.endpoint=http://example.com",
- fmt.Sprintf("--web.healthchecks.url=http://localhost:%d", gatewayHTTPPort),
- "--tls.min-version=min-version",
- "--tls.cipher-suites=cipher1,cipher2",
- },
- VolumeMounts: []corev1.VolumeMount{
- {
- Name: tlsSecretVolume,
- ReadOnly: true,
- MountPath: httpTLSDir,
- },
- },
- ReadinessProbe: &corev1.Probe{
- ProbeHandler: corev1.ProbeHandler{
- HTTPGet: &corev1.HTTPGetAction{
- Scheme: corev1.URISchemeHTTP,
- },
- },
- },
- LivenessProbe: &corev1.Probe{
- ProbeHandler: corev1.ProbeHandler{
- HTTPGet: &corev1.HTTPGetAction{
- Scheme: corev1.URISchemeHTTP,
- },
- },
- },
},
},
Volumes: []corev1.Volume{
{
- Name: tlsSecretVolume,
- VolumeSource: corev1.VolumeSource{
- Secret: &corev1.SecretVolumeSource{
- SecretName: "test-gateway-http-tls",
- },
- },
+ Name: "tls-secret-volume",
},
},
},
@@ -1039,40 +511,6 @@ func TestConfigureDeploymentForMode(t *testing.T) {
Containers: []corev1.Container{
{
Name: gatewayContainerName,
- Args: []string{
- "--logs.read.endpoint=http://example.com",
- "--logs.tail.endpoint=http://example.com",
- "--logs.write.endpoint=http://example.com",
- fmt.Sprintf("--web.healthchecks.url=https://localhost:%d", gatewayHTTPPort),
- "--tls.min-version=min-version",
- "--tls.cipher-suites=cipher1,cipher2",
- "--tls.client-auth-type=NoClientCert",
- "--tls.server.cert-file=/var/run/tls/http/tls.crt",
- "--tls.server.key-file=/var/run/tls/http/tls.key",
- "--tls.healthchecks.server-ca-file=/var/run/ca/service-ca.crt",
- fmt.Sprintf("--tls.healthchecks.server-name=%s", "test-gateway-http.test-ns.svc.cluster.local"),
- },
- VolumeMounts: []corev1.VolumeMount{
- {
- Name: tlsSecretVolume,
- ReadOnly: true,
- MountPath: httpTLSDir,
- },
- },
- ReadinessProbe: &corev1.Probe{
- ProbeHandler: corev1.ProbeHandler{
- HTTPGet: &corev1.HTTPGetAction{
- Scheme: corev1.URISchemeHTTPS,
- },
- },
- },
- LivenessProbe: &corev1.Probe{
- ProbeHandler: corev1.ProbeHandler{
- HTTPGet: &corev1.HTTPGetAction{
- Scheme: corev1.URISchemeHTTPS,
- },
- },
- },
},
{
Name: "opa",
@@ -1085,10 +523,6 @@ func TestConfigureDeploymentForMode(t *testing.T) {
"--web.internal.listen=:8083",
"--web.healthchecks.url=http://localhost:8082",
"--opa.package=lokistack",
- "--tls.internal.server.cert-file=/var/run/tls/http/tls.crt",
- "--tls.internal.server.key-file=/var/run/tls/http/tls.key",
- "--tls.min-version=min-version",
- "--tls.cipher-suites=cipher1,cipher2",
`--openshift.mappings=network=loki.grafana.com`,
},
Ports: []corev1.ContainerPort{
@@ -1108,7 +542,7 @@ func TestConfigureDeploymentForMode(t *testing.T) {
HTTPGet: &corev1.HTTPGetAction{
Path: "/live",
Port: intstr.FromInt(int(openshift.GatewayOPAInternalPort)),
- Scheme: corev1.URISchemeHTTPS,
+ Scheme: corev1.URISchemeHTTP,
},
},
TimeoutSeconds: 2,
@@ -1120,30 +554,18 @@ func TestConfigureDeploymentForMode(t *testing.T) {
HTTPGet: &corev1.HTTPGetAction{
Path: "/ready",
Port: intstr.FromInt(int(openshift.GatewayOPAInternalPort)),
- Scheme: corev1.URISchemeHTTPS,
+ Scheme: corev1.URISchemeHTTP,
},
},
TimeoutSeconds: 1,
PeriodSeconds: 5,
FailureThreshold: 12,
},
- VolumeMounts: []corev1.VolumeMount{
- {
- Name: tlsSecretVolume,
- ReadOnly: true,
- MountPath: httpTLSDir,
- },
- },
},
},
Volumes: []corev1.Volume{
{
- Name: tlsSecretVolume,
- VolumeSource: corev1.VolumeSource{
- Secret: &corev1.SecretVolumeSource{
- SecretName: "test-gateway-http-tls",
- },
- },
+ Name: "tls-secret-volume",
},
},
},
@@ -1152,20 +574,16 @@ func TestConfigureDeploymentForMode(t *testing.T) {
},
},
{
- desc: "openshift-network mode with-cert-signing-service",
+ desc: "openshift-network mode with http encryption",
mode: lokiv1.OpenshiftNetwork,
stackName: "test",
stackNs: "test-ns",
featureGates: configv1.FeatureGates{
HTTPEncryption: true,
ServiceMonitorTLSEndpoints: true,
- OpenShift: configv1.OpenShiftFeatureGates{
- ServingCertsService: true,
- },
},
dpl: &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
- Name: "gateway",
Namespace: "test-ns",
},
Spec: appsv1.DeploymentSpec{
@@ -1174,36 +592,6 @@ func TestConfigureDeploymentForMode(t *testing.T) {
Containers: []corev1.Container{
{
Name: gatewayContainerName,
- Args: []string{
- "--other.args=foo-bar",
- "--logs.read.endpoint=http://example.com",
- "--logs.tail.endpoint=http://example.com",
- "--logs.write.endpoint=http://example.com",
- fmt.Sprintf("--web.healthchecks.url=http://localhost:%d", gatewayHTTPPort),
- "--tls.min-version=min-version",
- "--tls.cipher-suites=cipher1,cipher2",
- },
- VolumeMounts: []corev1.VolumeMount{
- {
- Name: "tls-secret",
- ReadOnly: true,
- MountPath: "/var/run/tls/http",
- },
- },
- ReadinessProbe: &corev1.Probe{
- ProbeHandler: corev1.ProbeHandler{
- HTTPGet: &corev1.HTTPGetAction{
- Scheme: corev1.URISchemeHTTP,
- },
- },
- },
- LivenessProbe: &corev1.Probe{
- ProbeHandler: corev1.ProbeHandler{
- HTTPGet: &corev1.HTTPGetAction{
- Scheme: corev1.URISchemeHTTP,
- },
- },
- },
},
},
Volumes: []corev1.Volume{
@@ -1217,57 +605,14 @@ func TestConfigureDeploymentForMode(t *testing.T) {
},
want: &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
- Name: "gateway",
Namespace: "test-ns",
},
Spec: appsv1.DeploymentSpec{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
- ServiceAccountName: "gateway",
Containers: []corev1.Container{
{
Name: gatewayContainerName,
- Args: []string{
- "--other.args=foo-bar",
- "--logs.read.endpoint=https://example.com",
- "--logs.tail.endpoint=https://example.com",
- "--logs.write.endpoint=https://example.com",
- fmt.Sprintf("--web.healthchecks.url=https://localhost:%d", gatewayHTTPPort),
- "--tls.min-version=min-version",
- "--tls.cipher-suites=cipher1,cipher2",
- "--logs.tls.ca-file=/var/run/ca/service-ca.crt",
- "--tls.client-auth-type=NoClientCert",
- "--tls.server.cert-file=/var/run/tls/http/tls.crt",
- "--tls.server.key-file=/var/run/tls/http/tls.key",
- "--tls.healthchecks.server-ca-file=/var/run/ca/service-ca.crt",
- fmt.Sprintf("--tls.healthchecks.server-name=%s", "test-gateway-http.test-ns.svc.cluster.local"),
- },
- VolumeMounts: []corev1.VolumeMount{
- {
- Name: "tls-secret",
- ReadOnly: true,
- MountPath: "/var/run/tls/http",
- },
- {
- Name: "test-ca-bundle",
- ReadOnly: true,
- MountPath: "/var/run/ca",
- },
- },
- ReadinessProbe: &corev1.Probe{
- ProbeHandler: corev1.ProbeHandler{
- HTTPGet: &corev1.HTTPGetAction{
- Scheme: corev1.URISchemeHTTPS,
- },
- },
- },
- LivenessProbe: &corev1.Probe{
- ProbeHandler: corev1.ProbeHandler{
- HTTPGet: &corev1.HTTPGetAction{
- Scheme: corev1.URISchemeHTTPS,
- },
- },
- },
},
{
Name: "opa",
@@ -1280,8 +625,8 @@ func TestConfigureDeploymentForMode(t *testing.T) {
"--web.internal.listen=:8083",
"--web.healthchecks.url=http://localhost:8082",
"--opa.package=lokistack",
- "--tls.internal.server.cert-file=/var/run/tls/http/tls.crt",
- "--tls.internal.server.key-file=/var/run/tls/http/tls.key",
+ "--tls.internal.server.cert-file=/var/run/tls/http/server/tls.crt",
+ "--tls.internal.server.key-file=/var/run/tls/http/server/tls.key",
"--tls.min-version=min-version",
"--tls.cipher-suites=cipher1,cipher2",
`--openshift.mappings=network=loki.grafana.com`,
@@ -1326,7 +671,7 @@ func TestConfigureDeploymentForMode(t *testing.T) {
{
Name: tlsSecretVolume,
ReadOnly: true,
- MountPath: httpTLSDir,
+ MountPath: path.Join(httpTLSDir, "server"),
},
},
},
@@ -1335,17 +680,6 @@ func TestConfigureDeploymentForMode(t *testing.T) {
{
Name: "tls-secret-volume",
},
- {
- Name: "test-ca-bundle",
- VolumeSource: corev1.VolumeSource{
- ConfigMap: &corev1.ConfigMapVolumeSource{
- DefaultMode: &defaultConfigMapMode,
- LocalObjectReference: corev1.LocalObjectReference{
- Name: "test-ca-bundle",
- },
- },
- },
- },
},
},
},
@@ -1358,7 +692,7 @@ func TestConfigureDeploymentForMode(t *testing.T) {
tc := tc
t.Run(tc.desc, func(t *testing.T) {
t.Parallel()
- err := configureGatewayDeploymentForMode(tc.dpl, tc.mode, tc.featureGates, "test", "test-ns", "min-version", "cipher1,cipher2")
+ err := configureGatewayDeploymentForMode(tc.dpl, tc.mode, tc.featureGates, "min-version", "cipher1,cipher2")
require.NoError(t, err)
require.Equal(t, tc.want, tc.dpl)
})
@@ -1427,6 +761,7 @@ func TestConfigureServiceForMode(t *testing.T) {
func TestConfigureServiceMonitorForMode(t *testing.T) {
type tt struct {
desc string
+ opts Options
mode lokiv1.ModeType
featureGates configv1.FeatureGates
sm *monitoringv1.ServiceMonitor
@@ -1436,20 +771,38 @@ func TestConfigureServiceMonitorForMode(t *testing.T) {
tc := []tt{
{
desc: "static mode",
- mode: lokiv1.Static,
+ opts: Options{
+ Stack: lokiv1.LokiStackSpec{
+ Tenants: &lokiv1.TenantsSpec{
+ Mode: lokiv1.Static,
+ },
+ },
+ },
sm: &monitoringv1.ServiceMonitor{},
want: &monitoringv1.ServiceMonitor{},
},
{
desc: "dynamic mode",
- mode: lokiv1.Dynamic,
+ opts: Options{
+ Stack: lokiv1.LokiStackSpec{
+ Tenants: &lokiv1.TenantsSpec{
+ Mode: lokiv1.Dynamic,
+ },
+ },
+ },
sm: &monitoringv1.ServiceMonitor{},
want: &monitoringv1.ServiceMonitor{},
},
{
desc: "openshift-logging mode",
- mode: lokiv1.OpenshiftLogging,
- sm: &monitoringv1.ServiceMonitor{},
+ opts: Options{
+ Stack: lokiv1.LokiStackSpec{
+ Tenants: &lokiv1.TenantsSpec{
+ Mode: lokiv1.OpenshiftLogging,
+ },
+ },
+ },
+ sm: &monitoringv1.ServiceMonitor{},
want: &monitoringv1.ServiceMonitor{
Spec: monitoringv1.ServiceMonitorSpec{
Endpoints: []monitoringv1.Endpoint{
@@ -1464,8 +817,14 @@ func TestConfigureServiceMonitorForMode(t *testing.T) {
},
{
desc: "openshift-network mode",
- mode: lokiv1.OpenshiftNetwork,
- sm: &monitoringv1.ServiceMonitor{},
+ opts: Options{
+ Stack: lokiv1.LokiStackSpec{
+ Tenants: &lokiv1.TenantsSpec{
+ Mode: lokiv1.OpenshiftNetwork,
+ },
+ },
+ },
+ sm: &monitoringv1.ServiceMonitor{},
want: &monitoringv1.ServiceMonitor{
Spec: monitoringv1.ServiceMonitorSpec{
Endpoints: []monitoringv1.Endpoint{
@@ -1480,10 +839,89 @@ func TestConfigureServiceMonitorForMode(t *testing.T) {
},
{
desc: "openshift-logging mode with-tls-service-monitor-config",
- mode: lokiv1.OpenshiftLogging,
- featureGates: configv1.FeatureGates{
- HTTPEncryption: true,
- ServiceMonitorTLSEndpoints: true,
+ opts: Options{
+ Name: "abcd",
+ Namespace: "ns",
+ Stack: lokiv1.LokiStackSpec{
+ Tenants: &lokiv1.TenantsSpec{
+ Mode: lokiv1.OpenshiftLogging,
+ },
+ },
+ Gates: configv1.FeatureGates{
+ HTTPEncryption: true,
+ ServiceMonitorTLSEndpoints: true,
+ },
+ },
+ sm: &monitoringv1.ServiceMonitor{
+ Spec: monitoringv1.ServiceMonitorSpec{
+ Endpoints: []monitoringv1.Endpoint{
+ {
+ TLSConfig: &monitoringv1.TLSConfig{
+ CAFile: "/path/to/ca/file",
+ CertFile: "/path/to/cert/file",
+ KeyFile: "/path/to/key/file",
+ },
+ BearerTokenSecret: corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "abcd-gateway-token",
+ },
+ Key: corev1.ServiceAccountTokenKey,
+ },
+ },
+ },
+ },
+ },
+ want: &monitoringv1.ServiceMonitor{
+ Spec: monitoringv1.ServiceMonitorSpec{
+ Endpoints: []monitoringv1.Endpoint{
+ {
+ TLSConfig: &monitoringv1.TLSConfig{
+ CAFile: "/path/to/ca/file",
+ CertFile: "/path/to/cert/file",
+ KeyFile: "/path/to/key/file",
+ },
+ BearerTokenSecret: corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "abcd-gateway-token",
+ },
+ Key: corev1.ServiceAccountTokenKey,
+ },
+ },
+ {
+ Port: openshift.GatewayOPAInternalPortName,
+ Path: "/metrics",
+ Scheme: "https",
+ BearerTokenSecret: corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "abcd-gateway-token",
+ },
+ Key: corev1.ServiceAccountTokenKey,
+ },
+ TLSConfig: &monitoringv1.TLSConfig{
+ CAFile: "/path/to/ca/file",
+ CertFile: "/path/to/cert/file",
+ KeyFile: "/path/to/key/file",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ desc: "openshift-network mode with-tls-service-monitor-config",
+ mode: lokiv1.OpenshiftNetwork,
+ opts: Options{
+ Name: "abcd",
+ Namespace: "ns",
+ Stack: lokiv1.LokiStackSpec{
+ Tenants: &lokiv1.TenantsSpec{
+ Mode: lokiv1.OpenshiftNetwork,
+ },
+ },
+ Gates: configv1.FeatureGates{
+ HTTPEncryption: true,
+ ServiceMonitorTLSEndpoints: true,
+ },
},
sm: &monitoringv1.ServiceMonitor{
Spec: monitoringv1.ServiceMonitorSpec{
@@ -1494,6 +932,12 @@ func TestConfigureServiceMonitorForMode(t *testing.T) {
CertFile: "/path/to/cert/file",
KeyFile: "/path/to/key/file",
},
+ BearerTokenSecret: corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "abcd-gateway-token",
+ },
+ Key: corev1.ServiceAccountTokenKey,
+ },
},
},
},
@@ -1507,12 +951,23 @@ func TestConfigureServiceMonitorForMode(t *testing.T) {
CertFile: "/path/to/cert/file",
KeyFile: "/path/to/key/file",
},
+ BearerTokenSecret: corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "abcd-gateway-token",
+ },
+ Key: corev1.ServiceAccountTokenKey,
+ },
},
{
- Port: openshift.GatewayOPAInternalPortName,
- Path: "/metrics",
- Scheme: "https",
- BearerTokenFile: BearerTokenFile,
+ Port: openshift.GatewayOPAInternalPortName,
+ Path: "/metrics",
+ Scheme: "https",
+ BearerTokenSecret: corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "abcd-gateway-token",
+ },
+ Key: corev1.ServiceAccountTokenKey,
+ },
TLSConfig: &monitoringv1.TLSConfig{
CAFile: "/path/to/ca/file",
CertFile: "/path/to/cert/file",
@@ -1528,7 +983,7 @@ func TestConfigureServiceMonitorForMode(t *testing.T) {
tc := tc
t.Run(tc.desc, func(t *testing.T) {
t.Parallel()
- err := configureGatewayServiceMonitorForMode(tc.sm, tc.mode, tc.featureGates)
+ err := configureGatewayServiceMonitorForMode(tc.sm, tc.opts)
require.NoError(t, err)
require.Equal(t, tc.want, tc.sm)
})
diff --git a/operator/internal/manifests/gateway_test.go b/operator/internal/manifests/gateway_test.go
index 82cd8003a234c..e8df8722fce2e 100644
--- a/operator/internal/manifests/gateway_test.go
+++ b/operator/internal/manifests/gateway_test.go
@@ -2,11 +2,13 @@ package manifests
import (
"math/rand"
+ "path"
"reflect"
"testing"
configv1 "github.com/grafana/loki/operator/apis/config/v1"
lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+ "github.com/grafana/loki/operator/internal/manifests/internal/gateway"
"github.com/grafana/loki/operator/internal/manifests/openshift"
"github.com/google/uuid"
@@ -49,6 +51,39 @@ func TestNewGatewayDeployment_HasTemplateConfigHashAnnotation(t *testing.T) {
require.Equal(t, annotations[expected], sha1C)
}
+func TestNewGatewayDeployment_HasTemplateCertRotationRequiredAtAnnotation(t *testing.T) {
+ sha1C := "deadbeef"
+ ss := NewGatewayDeployment(Options{
+ Name: "abcd",
+ Namespace: "efgh",
+ CertRotationRequiredAt: "deadbeef",
+ Stack: lokiv1.LokiStackSpec{
+ Template: &lokiv1.LokiTemplateSpec{
+ Compactor: &lokiv1.LokiComponentSpec{
+ Replicas: rand.Int31(),
+ },
+ Distributor: &lokiv1.LokiComponentSpec{
+ Replicas: rand.Int31(),
+ },
+ Ingester: &lokiv1.LokiComponentSpec{
+ Replicas: rand.Int31(),
+ },
+ Querier: &lokiv1.LokiComponentSpec{
+ Replicas: rand.Int31(),
+ },
+ QueryFrontend: &lokiv1.LokiComponentSpec{
+ Replicas: rand.Int31(),
+ },
+ },
+ },
+ }, sha1C)
+
+ expected := "loki.grafana.com/certRotationRequiredAt"
+ annotations := ss.Spec.Template.Annotations
+ require.Contains(t, annotations, expected)
+ require.Equal(t, annotations[expected], "deadbeef")
+}
+
func TestGatewayConfigMap_ReturnsSHA1OfBinaryContents(t *testing.T) {
opts := Options{
Name: uuid.New().String(),
@@ -166,7 +201,7 @@ func TestBuildGateway_HasExtraObjectsForTenantMode(t *testing.T) {
})
require.NoError(t, err)
- require.Len(t, objs, 9)
+ require.Len(t, objs, 11)
}
func TestBuildGateway_WithExtraObjectsForTenantMode_RouteSvcMatches(t *testing.T) {
@@ -199,8 +234,8 @@ func TestBuildGateway_WithExtraObjectsForTenantMode_RouteSvcMatches(t *testing.T
require.NoError(t, err)
- svc := objs[2].(*corev1.Service)
- rt := objs[3].(*routev1.Route)
+ svc := objs[4].(*corev1.Service)
+ rt := objs[5].(*routev1.Route)
require.Equal(t, svc.Kind, rt.Spec.To.Kind)
require.Equal(t, svc.Name, rt.Spec.To.Name)
require.Equal(t, svc.Spec.Ports[0].Name, rt.Spec.Port.TargetPort.StrVal)
@@ -237,7 +272,7 @@ func TestBuildGateway_WithExtraObjectsForTenantMode_ServiceAccountNameMatches(t
require.NoError(t, err)
dpl := objs[1].(*appsv1.Deployment)
- sa := objs[4].(*corev1.ServiceAccount)
+ sa := objs[2].(*corev1.ServiceAccount)
require.Equal(t, dpl.Spec.Template.Spec.ServiceAccountName, sa.Name)
}
@@ -627,3 +662,183 @@ func TestBuildGateway_WithRulesEnabled(t *testing.T) {
})
}
}
+
+func TestBuildGateway_WithHTTPEncryption(t *testing.T) {
+ objs, err := BuildGateway(Options{
+ Name: "abcd",
+ Namespace: "efgh",
+ Gates: configv1.FeatureGates{
+ LokiStackGateway: true,
+ HTTPEncryption: true,
+ },
+ Stack: lokiv1.LokiStackSpec{
+ Template: &lokiv1.LokiTemplateSpec{
+ Gateway: &lokiv1.LokiComponentSpec{
+ Replicas: rand.Int31(),
+ },
+ Ruler: &lokiv1.LokiComponentSpec{
+ Replicas: rand.Int31(),
+ },
+ },
+ Rules: &lokiv1.RulesSpec{
+ Enabled: true,
+ },
+ Tenants: &lokiv1.TenantsSpec{
+ Mode: lokiv1.Static,
+ Authorization: &lokiv1.AuthorizationSpec{},
+ Authentication: []lokiv1.AuthenticationSpec{},
+ },
+ },
+ })
+
+ require.NoError(t, err)
+
+ dpl := objs[1].(*appsv1.Deployment)
+ require.NotNil(t, dpl)
+ require.Len(t, dpl.Spec.Template.Spec.Containers, 1)
+
+ c := dpl.Spec.Template.Spec.Containers[0]
+
+ expectedArgs := []string{
+ "--debug.name=lokistack-gateway",
+ "--web.listen=0.0.0.0:8080",
+ "--web.internal.listen=0.0.0.0:8081",
+ "--web.healthchecks.url=https://localhost:8080",
+ "--log.level=warn",
+ "--logs.read.endpoint=https://abcd-query-frontend-http.efgh.svc.cluster.local:3100",
+ "--logs.tail.endpoint=https://abcd-query-frontend-http.efgh.svc.cluster.local:3100",
+ "--logs.write.endpoint=https://abcd-distributor-http.efgh.svc.cluster.local:3100",
+ "--rbac.config=/etc/lokistack-gateway/rbac.yaml",
+ "--tenants.config=/etc/lokistack-gateway/tenants.yaml",
+ "--logs.rules.endpoint=https://abcd-ruler-http.efgh.svc.cluster.local:3100",
+ "--logs.rules.read-only=true",
+ "--tls.client-auth-type=NoClientCert",
+ "--tls.min-version=VersionTLS12",
+ "--tls.server.cert-file=/var/run/tls/http/server/tls.crt",
+ "--tls.server.key-file=/var/run/tls/http/server/tls.key",
+ "--tls.healthchecks.server-ca-file=/var/run/ca/server/service-ca.crt",
+ "--tls.healthchecks.server-name=abcd-gateway-http.efgh.svc.cluster.local",
+ "--tls.internal.server.cert-file=/var/run/tls/http/server/tls.crt",
+ "--tls.internal.server.key-file=/var/run/tls/http/server/tls.key",
+ "--tls.min-version=",
+ "--tls.cipher-suites=",
+ "--logs.tls.ca-file=/var/run/ca/upstream/service-ca.crt",
+ "--logs.tls.cert-file=/var/run/tls/http/upstream/tls.crt",
+ "--logs.tls.key-file=/var/run/tls/http/upstream/tls.key",
+ }
+ require.Equal(t, expectedArgs, c.Args)
+
+ expectedVolumeMounts := []corev1.VolumeMount{
+ {
+ Name: "rbac",
+ ReadOnly: true,
+ MountPath: path.Join(gateway.LokiGatewayMountDir, gateway.LokiGatewayRbacFileName),
+ SubPath: "rbac.yaml",
+ },
+ {
+ Name: "tenants",
+ ReadOnly: true,
+ MountPath: path.Join(gateway.LokiGatewayMountDir, gateway.LokiGatewayTenantFileName),
+ SubPath: "tenants.yaml",
+ },
+ {
+ Name: "lokistack-gateway",
+ ReadOnly: true,
+ MountPath: path.Join(gateway.LokiGatewayMountDir, gateway.LokiGatewayRegoFileName),
+ SubPath: "lokistack-gateway.rego",
+ },
+ {
+ Name: "tls-secret",
+ ReadOnly: true,
+ MountPath: "/var/run/tls/http/server",
+ },
+ {
+ Name: "abcd-gateway-client-http",
+ ReadOnly: true,
+ MountPath: "/var/run/tls/http/upstream",
+ },
+ {
+ Name: "abcd-ca-bundle",
+ ReadOnly: true,
+ MountPath: "/var/run/ca/upstream",
+ },
+ {
+ Name: "abcd-gateway-ca-bundle",
+ ReadOnly: true,
+ MountPath: "/var/run/ca/server",
+ },
+ }
+ require.Equal(t, expectedVolumeMounts, c.VolumeMounts)
+
+ expectedVolumes := []corev1.Volume{
+ {
+ Name: "rbac",
+ VolumeSource: corev1.VolumeSource{
+ ConfigMap: &corev1.ConfigMapVolumeSource{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "abcd-gateway",
+ },
+ },
+ },
+ },
+ {
+ Name: "tenants",
+ VolumeSource: corev1.VolumeSource{
+ ConfigMap: &corev1.ConfigMapVolumeSource{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "abcd-gateway",
+ },
+ },
+ },
+ },
+ {
+ Name: "lokistack-gateway",
+ VolumeSource: corev1.VolumeSource{
+ ConfigMap: &corev1.ConfigMapVolumeSource{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "abcd-gateway",
+ },
+ },
+ },
+ },
+ {
+ Name: "tls-secret",
+ VolumeSource: corev1.VolumeSource{
+ Secret: &corev1.SecretVolumeSource{
+ SecretName: "abcd-gateway-http",
+ },
+ },
+ },
+ {
+ Name: "abcd-gateway-client-http",
+ VolumeSource: corev1.VolumeSource{
+ Secret: &corev1.SecretVolumeSource{
+ SecretName: "abcd-gateway-client-http",
+ },
+ },
+ },
+ {
+ Name: "abcd-ca-bundle",
+ VolumeSource: corev1.VolumeSource{
+ ConfigMap: &corev1.ConfigMapVolumeSource{
+ DefaultMode: &defaultConfigMapMode,
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "abcd-ca-bundle",
+ },
+ },
+ },
+ },
+ {
+ Name: "abcd-gateway-ca-bundle",
+ VolumeSource: corev1.VolumeSource{
+ ConfigMap: &corev1.ConfigMapVolumeSource{
+ DefaultMode: &defaultConfigMapMode,
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "abcd-gateway-ca-bundle",
+ },
+ },
+ },
+ },
+ }
+ require.Equal(t, expectedVolumes, dpl.Spec.Template.Spec.Volumes)
+}
diff --git a/operator/internal/manifests/indexgateway.go b/operator/internal/manifests/indexgateway.go
index 5c3a685930526..49ed7afdd7ec6 100644
--- a/operator/internal/manifests/indexgateway.go
+++ b/operator/internal/manifests/indexgateway.go
@@ -36,6 +36,13 @@ func BuildIndexGateway(opts Options) ([]client.Object, error) {
}
}
+ if opts.Gates.HTTPEncryption || opts.Gates.GRPCEncryption {
+ caBundleName := signingCABundleName(opts.Name)
+ if err := configureServiceCA(&statefulSet.Spec.Template.Spec, caBundleName); err != nil {
+ return nil, err
+ }
+ }
+
return []client.Object{
statefulSet,
NewIndexGatewayGRPCService(opts),
@@ -121,7 +128,7 @@ func NewIndexGatewayStatefulSet(opts Options) *appsv1.StatefulSet {
}
l := ComponentLabels(LabelIndexGatewayComponent, opts.Name)
- a := commonAnnotations(opts.ConfigSHA1)
+ a := commonAnnotations(opts.ConfigSHA1, opts.CertRotationRequiredAt)
return &appsv1.StatefulSet{
TypeMeta: metav1.TypeMeta{
@@ -183,9 +190,8 @@ func NewIndexGatewayGRPCService(opts Options) *corev1.Service {
APIVersion: corev1.SchemeGroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
- Name: serviceName,
- Labels: labels,
- Annotations: serviceAnnotations(serviceName, opts.Gates.OpenShift.ServingCertsService),
+ Name: serviceName,
+ Labels: labels,
},
Spec: corev1.ServiceSpec{
ClusterIP: "None",
@@ -213,9 +219,8 @@ func NewIndexGatewayHTTPService(opts Options) *corev1.Service {
APIVersion: corev1.SchemeGroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
- Name: serviceName,
- Labels: labels,
- Annotations: serviceAnnotations(serviceName, opts.Gates.OpenShift.ServingCertsService),
+ Name: serviceName,
+ Labels: labels,
},
Spec: corev1.ServiceSpec{
Ports: []corev1.ServicePort{
@@ -233,7 +238,7 @@ func NewIndexGatewayHTTPService(opts Options) *corev1.Service {
func configureIndexGatewayHTTPServicePKI(statefulSet *appsv1.StatefulSet, opts Options) error {
serviceName := serviceNameIndexGatewayHTTP(opts.Name)
- return configureHTTPServicePKI(&statefulSet.Spec.Template.Spec, serviceName)
+ return configureHTTPServicePKI(&statefulSet.Spec.Template.Spec, serviceName, opts.TLSProfile.MinTLSVersion, opts.TLSCipherSuites())
}
func configureIndexGatewayGRPCServicePKI(sts *appsv1.StatefulSet, opts Options) error {
diff --git a/operator/internal/manifests/indexgateway_test.go b/operator/internal/manifests/indexgateway_test.go
index aec2df5cbf98d..48499e4b196a6 100644
--- a/operator/internal/manifests/indexgateway_test.go
+++ b/operator/internal/manifests/indexgateway_test.go
@@ -29,6 +29,26 @@ func TestNewIndexGatewayStatefulSet_HasTemplateConfigHashAnnotation(t *testing.T
require.Equal(t, annotations[expected], "deadbeef")
}
+func TestNewIndexGatewayStatefulSet_HasTemplateCertRotationRequiredAtAnnotation(t *testing.T) {
+ ss := manifests.NewIndexGatewayStatefulSet(manifests.Options{
+ Name: "abcd",
+ Namespace: "efgh",
+ CertRotationRequiredAt: "deadbeef",
+ Stack: lokiv1.LokiStackSpec{
+ StorageClassName: "standard",
+ Template: &lokiv1.LokiTemplateSpec{
+ IndexGateway: &lokiv1.LokiComponentSpec{
+ Replicas: 1,
+ },
+ },
+ },
+ })
+ expected := "loki.grafana.com/certRotationRequiredAt"
+ annotations := ss.Spec.Template.Annotations
+ require.Contains(t, annotations, expected)
+ require.Equal(t, annotations[expected], "deadbeef")
+}
+
func TestNewIndexGatewayStatefulSet_SelectorMatchesLabels(t *testing.T) {
// You must set the .spec.selector field of a StatefulSet to match the labels of
// its .spec.template.metadata.labels. Prior to Kubernetes 1.8, the
diff --git a/operator/internal/manifests/ingester.go b/operator/internal/manifests/ingester.go
index 58ab9c98b853d..1eb5e5ec4ef92 100644
--- a/operator/internal/manifests/ingester.go
+++ b/operator/internal/manifests/ingester.go
@@ -38,6 +38,13 @@ func BuildIngester(opts Options) ([]client.Object, error) {
}
}
+ if opts.Gates.HTTPEncryption || opts.Gates.GRPCEncryption {
+ caBundleName := signingCABundleName(opts.Name)
+ if err := configureServiceCA(&statefulSet.Spec.Template.Spec, caBundleName); err != nil {
+ return nil, err
+ }
+ }
+
return []client.Object{
statefulSet,
NewIngesterGRPCService(opts),
@@ -133,7 +140,7 @@ func NewIngesterStatefulSet(opts Options) *appsv1.StatefulSet {
}
l := ComponentLabels(LabelIngesterComponent, opts.Name)
- a := commonAnnotations(opts.ConfigSHA1)
+ a := commonAnnotations(opts.ConfigSHA1, opts.CertRotationRequiredAt)
return &appsv1.StatefulSet{
TypeMeta: metav1.TypeMeta{
Kind: "StatefulSet",
@@ -204,7 +211,6 @@ func NewIngesterStatefulSet(opts Options) *appsv1.StatefulSet {
// NewIngesterGRPCService creates a k8s service for the ingester GRPC endpoint
func NewIngesterGRPCService(opts Options) *corev1.Service {
- serviceName := serviceNameIngesterGRPC(opts.Name)
labels := ComponentLabels(LabelIngesterComponent, opts.Name)
return &corev1.Service{
@@ -213,9 +219,8 @@ func NewIngesterGRPCService(opts Options) *corev1.Service {
APIVersion: corev1.SchemeGroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
- Name: serviceNameIngesterGRPC(opts.Name),
- Labels: labels,
- Annotations: serviceAnnotations(serviceName, opts.Gates.OpenShift.ServingCertsService),
+ Name: serviceNameIngesterGRPC(opts.Name),
+ Labels: labels,
},
Spec: corev1.ServiceSpec{
ClusterIP: "None",
@@ -243,9 +248,8 @@ func NewIngesterHTTPService(opts Options) *corev1.Service {
APIVersion: corev1.SchemeGroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
- Name: serviceName,
- Labels: labels,
- Annotations: serviceAnnotations(serviceName, opts.Gates.OpenShift.ServingCertsService),
+ Name: serviceName,
+ Labels: labels,
},
Spec: corev1.ServiceSpec{
Ports: []corev1.ServicePort{
@@ -263,54 +267,31 @@ func NewIngesterHTTPService(opts Options) *corev1.Service {
func configureIngesterHTTPServicePKI(statefulSet *appsv1.StatefulSet, opts Options) error {
serviceName := serviceNameIngesterHTTP(opts.Name)
- return configureHTTPServicePKI(&statefulSet.Spec.Template.Spec, serviceName)
+ return configureHTTPServicePKI(&statefulSet.Spec.Template.Spec, serviceName, opts.TLSProfile.MinTLSVersion, opts.TLSCipherSuites())
}
func configureIngesterGRPCServicePKI(sts *appsv1.StatefulSet, opts Options) error {
- caBundleName := signingCABundleName(opts.Name)
- secretVolumeSpec := corev1.PodSpec{
- Volumes: []corev1.Volume{
- {
- Name: caBundleName,
- VolumeSource: corev1.VolumeSource{
- ConfigMap: &corev1.ConfigMapVolumeSource{
- LocalObjectReference: corev1.LocalObjectReference{
- Name: caBundleName,
- },
- },
- },
- },
- },
- }
-
secretContainerSpec := corev1.Container{
- VolumeMounts: []corev1.VolumeMount{
- {
- Name: caBundleName,
- ReadOnly: false,
- MountPath: caBundleDir,
- },
- },
Args: []string{
// Enable GRPC over TLS for ingester client
"-ingester.client.tls-enabled=true",
fmt.Sprintf("-ingester.client.tls-cipher-suites=%s", opts.TLSCipherSuites()),
fmt.Sprintf("-ingester.client.tls-min-version=%s", opts.TLSProfile.MinTLSVersion),
fmt.Sprintf("-ingester.client.tls-ca-path=%s", signingCAPath()),
+ fmt.Sprintf("-ingester.client.tls-cert-path=%s", lokiServerGRPCTLSCert()),
+ fmt.Sprintf("-ingester.client.tls-key-path=%s", lokiServerGRPCTLSKey()),
fmt.Sprintf("-ingester.client.tls-server-name=%s", fqdn(serviceNameIngesterGRPC(opts.Name), opts.Namespace)),
			// Enable GRPC over TLS for boltdb-shipper index-gateway client
"-boltdb.shipper.index-gateway-client.grpc.tls-enabled=true",
fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-cipher-suites=%s", opts.TLSCipherSuites()),
fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-min-version=%s", opts.TLSProfile.MinTLSVersion),
fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-ca-path=%s", signingCAPath()),
+ fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-cert-path=%s", lokiServerGRPCTLSCert()),
+ fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-key-path=%s", lokiServerGRPCTLSKey()),
fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-server-name=%s", fqdn(serviceNameIndexGatewayGRPC(opts.Name), opts.Namespace)),
},
}
- if err := mergo.Merge(&sts.Spec.Template.Spec, secretVolumeSpec, mergo.WithAppendSlice); err != nil {
- return kverrors.Wrap(err, "failed to merge volumes")
- }
-
if err := mergo.Merge(&sts.Spec.Template.Spec.Containers[0], secretContainerSpec, mergo.WithAppendSlice); err != nil {
return kverrors.Wrap(err, "failed to merge container")
}
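
Note: the PKI helpers above lean on mergo's append semantics to graft the TLS flags onto the first Loki container without touching its existing arguments. A minimal, self-contained sketch of that behaviour (the flag values are illustrative, not the operator's real flag set):

package main

import (
	"fmt"

	"github.com/imdario/mergo"
	corev1 "k8s.io/api/core/v1"
)

func main() {
	// Container as it comes out of the StatefulSet template.
	dst := corev1.Container{Args: []string{"-target=ingester"}}
	// Extra client-TLS flags, in the spirit of configureIngesterGRPCServicePKI.
	src := corev1.Container{Args: []string{"-ingester.client.tls-enabled=true"}}

	// WithAppendSlice appends slice fields instead of overwriting them.
	if err := mergo.Merge(&dst, src, mergo.WithAppendSlice); err != nil {
		panic(err)
	}
	fmt.Println(dst.Args) // [-target=ingester -ingester.client.tls-enabled=true]
}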
diff --git a/operator/internal/manifests/ingester_test.go b/operator/internal/manifests/ingester_test.go
index 84314226b59b4..657c7aadc2986 100644
--- a/operator/internal/manifests/ingester_test.go
+++ b/operator/internal/manifests/ingester_test.go
@@ -29,6 +29,26 @@ func TestNewIngesterStatefulSet_HasTemplateConfigHashAnnotation(t *testing.T) {
require.Equal(t, annotations[expected], "deadbeef")
}
+func TestNewIngesterStatefulSet_HasTemplateCertRotationRequiredAtAnnotation(t *testing.T) {
+ ss := manifests.NewIngesterStatefulSet(manifests.Options{
+ Name: "abcd",
+ Namespace: "efgh",
+ CertRotationRequiredAt: "deadbeef",
+ Stack: lokiv1.LokiStackSpec{
+ StorageClassName: "standard",
+ Template: &lokiv1.LokiTemplateSpec{
+ Ingester: &lokiv1.LokiComponentSpec{
+ Replicas: 1,
+ },
+ },
+ },
+ })
+ expected := "loki.grafana.com/certRotationRequiredAt"
+ annotations := ss.Spec.Template.Annotations
+ require.Contains(t, annotations, expected)
+ require.Equal(t, annotations[expected], "deadbeef")
+}
+
func TestNewIngesterStatefulSet_SelectorMatchesLabels(t *testing.T) {
// You must set the .spec.selector field of a StatefulSet to match the labels of
// its .spec.template.metadata.labels. Prior to Kubernetes 1.8, the
diff --git a/operator/internal/manifests/mutate.go b/operator/internal/manifests/mutate.go
index f9bcde48d729e..6ba0c77c891c4 100644
--- a/operator/internal/manifests/mutate.go
+++ b/operator/internal/manifests/mutate.go
@@ -23,7 +23,7 @@ import (
// - Deployment
// - StatefulSet
// - ServiceMonitor
-func MutateFuncFor(existing, desired client.Object) controllerutil.MutateFn {
+func MutateFuncFor(existing, desired client.Object, depAnnotations map[string]string) controllerutil.MutateFn {
return func() error {
existingAnnotations := existing.GetAnnotations()
if err := mergeWithOverride(&existingAnnotations, desired.GetAnnotations()); err != nil {
@@ -47,6 +47,16 @@ func MutateFuncFor(existing, desired client.Object) controllerutil.MutateFn {
wantCm := desired.(*corev1.ConfigMap)
mutateConfigMap(cm, wantCm)
+ case *corev1.Secret:
+ s := existing.(*corev1.Secret)
+ wantS := desired.(*corev1.Secret)
+ mutateSecret(s, wantS)
+ existingAnnotations := s.GetAnnotations()
+ if err := mergeWithOverride(&existingAnnotations, depAnnotations); err != nil {
+ return err
+ }
+ s.SetAnnotations(existingAnnotations)
+
case *corev1.Service:
svc := existing.(*corev1.Service)
wantSvc := desired.(*corev1.Service)
@@ -124,10 +134,18 @@ func mergeWithOverride(dst, src interface{}) error {
}
func mutateConfigMap(existing, desired *corev1.ConfigMap) {
+ existing.Annotations = desired.Annotations
+ existing.Labels = desired.Labels
existing.BinaryData = desired.BinaryData
existing.Data = desired.Data
}
+func mutateSecret(existing, desired *corev1.Secret) {
+ existing.Annotations = desired.Annotations
+ existing.Labels = desired.Labels
+ existing.Data = desired.Data
+}
+
func mutateServiceAccount(existing, desired *corev1.ServiceAccount) {
existing.Annotations = desired.Annotations
existing.Labels = desired.Labels
@@ -160,6 +178,10 @@ func mutateRoleBinding(existing, desired *rbacv1.RoleBinding) {
func mutateServiceMonitor(existing, desired *monitoringv1.ServiceMonitor) {
// ServiceMonitor selector is immutable so we set this value only if
// a new object is going to be created
+ existing.Annotations = desired.Annotations
+ existing.Labels = desired.Labels
+ existing.Spec.Endpoints = desired.Spec.Endpoints
+ existing.Spec.JobLabel = desired.Spec.JobLabel
}
func mutateIngress(existing, desired *networkingv1.Ingress) {
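
MutateFuncFor now takes a third argument so callers can layer dependency-driven annotations (for example cert-rotation metadata) onto Secrets. A rough usage sketch with controller-runtime's CreateOrUpdate; the function name and the annotation value are illustrative, not taken from the operator's reconcilers:

package reconcile

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

	"github.com/grafana/loki/operator/internal/manifests"
)

// reconcileSecret is a sketch, not the operator's actual reconcile code.
func reconcileSecret(ctx context.Context, c client.Client, desired *corev1.Secret) error {
	existing := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{Name: desired.Name, Namespace: desired.Namespace},
	}
	// Annotations propagated onto the Secret by the mutate function.
	depAnnotations := map[string]string{
		"loki.grafana.com/certRotationRequiredAt": "deadbeef", // illustrative value
	}
	_, err := controllerutil.CreateOrUpdate(ctx, c, existing, manifests.MutateFuncFor(existing, desired, depAnnotations))
	return err
}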
diff --git a/operator/internal/manifests/mutate_test.go b/operator/internal/manifests/mutate_test.go
index 419a01a86e49f..08c8583dc7ce6 100644
--- a/operator/internal/manifests/mutate_test.go
+++ b/operator/internal/manifests/mutate_test.go
@@ -40,7 +40,7 @@ func TestGetMutateFunc_MutateObjectMeta(t *testing.T) {
}
got := &corev1.ConfigMap{}
- f := manifests.MutateFuncFor(got, want)
+ f := manifests.MutateFuncFor(got, want, nil)
err := f()
require.NoError(t, err)
@@ -53,7 +53,7 @@ func TestGetMutateFunc_MutateObjectMeta(t *testing.T) {
func TestGetMutateFunc_ReturnErrOnNotSupportedType(t *testing.T) {
got := &corev1.Endpoints{}
want := &corev1.Endpoints{}
- f := manifests.MutateFuncFor(got, want)
+ f := manifests.MutateFuncFor(got, want, nil)
require.Error(t, f())
}
@@ -69,7 +69,7 @@ func TestGetMutateFunc_MutateConfigMap(t *testing.T) {
BinaryData: map[string][]byte{"btest": []byte("btestss")},
}
- f := manifests.MutateFuncFor(got, want)
+ f := manifests.MutateFuncFor(got, want, nil)
err := f()
require.NoError(t, err)
@@ -116,7 +116,7 @@ func TestGetMutateFunc_MutateServiceSpec(t *testing.T) {
},
}
- f := manifests.MutateFuncFor(got, want)
+ f := manifests.MutateFuncFor(got, want, nil)
err := f()
require.NoError(t, err)
@@ -231,7 +231,7 @@ func TestGetMutateFunc_MutateServiceAccountObjectMeta(t *testing.T) {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
- f := manifests.MutateFuncFor(tt.got, tt.want)
+ f := manifests.MutateFuncFor(tt.got, tt.want, nil)
err := f()
require.NoError(t, err)
@@ -293,7 +293,7 @@ func TestGetMutateFunc_MutateClusterRole(t *testing.T) {
},
}
- f := manifests.MutateFuncFor(got, want)
+ f := manifests.MutateFuncFor(got, want, nil)
err := f()
require.NoError(t, err)
@@ -358,7 +358,7 @@ func TestGetMutateFunc_MutateClusterRoleBinding(t *testing.T) {
},
}
- f := manifests.MutateFuncFor(got, want)
+ f := manifests.MutateFuncFor(got, want, nil)
err := f()
require.NoError(t, err)
@@ -413,7 +413,7 @@ func TestGetMutateFunc_MutateRole(t *testing.T) {
},
}
- f := manifests.MutateFuncFor(got, want)
+ f := manifests.MutateFuncFor(got, want, nil)
err := f()
require.NoError(t, err)
@@ -478,7 +478,7 @@ func TestGetMutateFunc_MutateRoleBinding(t *testing.T) {
},
}
- f := manifests.MutateFuncFor(got, want)
+ f := manifests.MutateFuncFor(got, want, nil)
err := f()
require.NoError(t, err)
@@ -597,7 +597,7 @@ func TestGeMutateFunc_MutateDeploymentSpec(t *testing.T) {
tst := tst
t.Run(tst.name, func(t *testing.T) {
t.Parallel()
- f := manifests.MutateFuncFor(tst.got, tst.want)
+ f := manifests.MutateFuncFor(tst.got, tst.want, nil)
err := f()
require.NoError(t, err)
@@ -754,7 +754,7 @@ func TestGeMutateFunc_MutateStatefulSetSpec(t *testing.T) {
tst := tst
t.Run(tst.name, func(t *testing.T) {
t.Parallel()
- f := manifests.MutateFuncFor(tst.got, tst.want)
+ f := manifests.MutateFuncFor(tst.got, tst.want, nil)
err := f()
require.NoError(t, err)
@@ -881,7 +881,11 @@ func TestGetMutateFunc_MutateServiceMonitorSpec(t *testing.T) {
},
},
want: &monitoringv1.ServiceMonitor{
- ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.Now()},
+ ObjectMeta: metav1.ObjectMeta{
+ CreationTimestamp: metav1.Now(),
+ Labels: map[string]string{"test": "label"},
+ Annotations: map[string]string{"test": "annotations"},
+ },
Spec: monitoringv1.ServiceMonitorSpec{
JobLabel: "some-job-new",
Endpoints: []monitoringv1.Endpoint{
@@ -927,14 +931,18 @@ func TestGetMutateFunc_MutateServiceMonitorSpec(t *testing.T) {
tst := tst
t.Run(tst.name, func(t *testing.T) {
t.Parallel()
- f := manifests.MutateFuncFor(tst.got, tst.want)
+ f := manifests.MutateFuncFor(tst.got, tst.want, nil)
err := f()
require.NoError(t, err)
// Ensure not mutated
- require.NotEqual(t, tst.got.Spec.JobLabel, tst.want.Spec.JobLabel)
- require.NotEqual(t, tst.got.Spec.Endpoints, tst.want.Spec.Endpoints)
+ require.Equal(t, tst.got.Annotations, tst.want.Annotations)
+ require.Equal(t, tst.got.Labels, tst.want.Labels)
+ require.Equal(t, tst.got.Spec.Endpoints, tst.want.Spec.Endpoints)
+ require.Equal(t, tst.got.Spec.JobLabel, tst.want.Spec.JobLabel)
+ require.Equal(t, tst.got.Spec.Endpoints, tst.want.Spec.Endpoints)
require.NotEqual(t, tst.got.Spec.NamespaceSelector, tst.want.Spec.NamespaceSelector)
+ require.NotEqual(t, tst.got.Spec.Selector, tst.want.Spec.Selector)
})
}
}
@@ -995,7 +1003,7 @@ func TestGetMutateFunc_MutateIngress(t *testing.T) {
},
}
- f := manifests.MutateFuncFor(got, want)
+ f := manifests.MutateFuncFor(got, want, nil)
err := f()
require.NoError(t, err)
@@ -1047,8 +1055,8 @@ func TestGetMutateFunc_MutateRoute(t *testing.T) {
},
},
}
-	f := manifests.MutateFuncFor(got, want)
+	f := manifests.MutateFuncFor(got, want, nil)
err := f()
require.NoError(t, err)
diff --git a/operator/internal/manifests/openshift/build.go b/operator/internal/manifests/openshift/build.go
index f203c621a1601..4d92348910090 100644
--- a/operator/internal/manifests/openshift/build.go
+++ b/operator/internal/manifests/openshift/build.go
@@ -9,7 +9,7 @@ import (
func BuildGatewayObjects(opts Options) []client.Object {
return []client.Object{
BuildRoute(opts),
- BuildGatewayServiceAccount(opts),
+ BuildGatewayCAConfigMap(opts),
BuildGatewayClusterRole(opts),
BuildGatewayClusterRoleBinding(opts),
BuildMonitoringRole(opts),
@@ -17,18 +17,11 @@ func BuildGatewayObjects(opts Options) []client.Object {
}
}
-// BuildLokiStackObjects returns a list of auxiliary openshift/k8s objects
-// for lokistack deployments on OpenShift.
-func BuildLokiStackObjects(opts Options) []client.Object {
- return []client.Object{
- BuildServiceCAConfigMap(opts),
- }
-}
-
// BuildRulerObjects returns a list of auxiliary openshift/k8s objects
// for lokistack ruler deployments on OpenShift.
func BuildRulerObjects(opts Options) []client.Object {
return []client.Object{
+ BuildAlertManagerCAConfigMap(opts),
BuildRulerServiceAccount(opts),
BuildRulerClusterRole(opts),
BuildRulerClusterRoleBinding(opts),
diff --git a/operator/internal/manifests/openshift/build_test.go b/operator/internal/manifests/openshift/build_test.go
index 1180383d91078..16170d28b9050 100644
--- a/operator/internal/manifests/openshift/build_test.go
+++ b/operator/internal/manifests/openshift/build_test.go
@@ -1,29 +1,15 @@
package openshift
import (
- "encoding/json"
"testing"
"github.com/stretchr/testify/require"
lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
- routev1 "github.com/openshift/api/route/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
)
-func TestBuildGatewayObjects_ServiceAccountRefMatches(t *testing.T) {
- opts := NewOptions(lokiv1.OpenshiftLogging, "abc", "ns", "abc", "example.com", "abc", "abc", map[string]string{}, map[string]TenantData{}, "abc")
-
- objs := BuildGatewayObjects(opts)
- sa := objs[1].(*corev1.ServiceAccount)
- rb := objs[3].(*rbacv1.ClusterRoleBinding)
-
- require.Equal(t, sa.Kind, rb.Subjects[0].Kind)
- require.Equal(t, sa.Name, rb.Subjects[0].Name)
- require.Equal(t, sa.Namespace, rb.Subjects[0].Namespace)
-}
-
func TestBuildGatewayObjects_ClusterRoleRefMatches(t *testing.T) {
opts := NewOptions(lokiv1.OpenshiftLogging, "abc", "ns", "abc", "example.com", "abc", "abc", map[string]string{}, map[string]TenantData{}, "abc")
@@ -46,39 +32,13 @@ func TestBuildGatewayObjects_MonitoringClusterRoleRefMatches(t *testing.T) {
require.Equal(t, cr.Name, rb.RoleRef.Name)
}
-func TestBuildGatewayObjects_ServiceAccountAnnotationsRouteRefMatches(t *testing.T) {
- opts := NewOptions(lokiv1.OpenshiftLogging, "abc", "ns", "abc", "example.com", "abc", "abc", map[string]string{}, map[string]TenantData{}, "abc")
-
- objs := BuildGatewayObjects(opts)
- rt := objs[0].(*routev1.Route)
- sa := objs[1].(*corev1.ServiceAccount)
-
- type oauthRedirectReference struct {
- Kind string `json:"kind"`
- APIVersion string `json:"apiVersion"`
- Ref *struct {
- Kind string `json:"kind"`
- Name string `json:"name"`
- } `json:"reference"`
- }
-
- for _, a := range sa.Annotations {
- oauthRef := oauthRedirectReference{}
- err := json.Unmarshal([]byte(a), &oauthRef)
- require.NoError(t, err)
-
- require.Equal(t, rt.Name, oauthRef.Ref.Name)
- require.Equal(t, rt.Kind, oauthRef.Ref.Kind)
- }
-}
-
-func TestBuildRulerObjects(t *testing.T) {
+func TestBuildRulerObjects_ClusterRoleRefMatches(t *testing.T) {
opts := NewOptions(lokiv1.OpenshiftLogging, "abc", "ns", "abc", "example.com", "abc", "abc", map[string]string{}, map[string]TenantData{}, "abc")
objs := BuildRulerObjects(opts)
- sa := objs[0].(*corev1.ServiceAccount)
- cr := objs[1].(*rbacv1.ClusterRole)
- rb := objs[2].(*rbacv1.ClusterRoleBinding)
+ sa := objs[1].(*corev1.ServiceAccount)
+ cr := objs[2].(*rbacv1.ClusterRole)
+ rb := objs[3].(*rbacv1.ClusterRoleBinding)
require.Equal(t, sa.Kind, rb.Subjects[0].Kind)
require.Equal(t, sa.Name, rb.Subjects[0].Name)
diff --git a/operator/internal/manifests/openshift/configure.go b/operator/internal/manifests/openshift/configure.go
index 0b03674c8eab1..ab710a548c4c8 100644
--- a/operator/internal/manifests/openshift/configure.go
+++ b/operator/internal/manifests/openshift/configure.go
@@ -2,9 +2,6 @@ package openshift
import (
"fmt"
- "path"
- "regexp"
- "strings"
"github.com/ViaQ/logerr/v2/kverrors"
"github.com/imdario/mergo"
@@ -39,8 +36,6 @@ var (
networkTenants = []string{
tenantNetwork,
}
-
- logsEndpointRe = regexp.MustCompile(`.*logs..*.endpoint.*`)
)
// GetTenants return the slice of all supported tenants for a specified mode
@@ -62,110 +57,18 @@ func GetTenants(mode lokiv1.ModeType) []string {
func ConfigureGatewayDeployment(
d *appsv1.Deployment,
mode lokiv1.ModeType,
- gwContainerName string,
- secretVolumeName, tlsDir, certFile, keyFile string,
- caBundleVolumeName, caDir, caFile string,
- withTLS, withCertSigningService bool,
- secretName, serverName string,
- gatewayHTTPPort int,
- minTLSVersion string,
- ciphers string,
+ secretVolumeName, tlsDir string,
+ minTLSVersion, ciphers string,
+ withTLS bool,
) error {
- var gwIndex int
- for i, c := range d.Spec.Template.Spec.Containers {
- if c.Name == gwContainerName {
- gwIndex = i
- break
- }
- }
-
- gwContainer := d.Spec.Template.Spec.Containers[gwIndex].DeepCopy()
- gwArgs := gwContainer.Args
- gwVolumes := d.Spec.Template.Spec.Volumes
-
- if withCertSigningService {
- for i, a := range gwArgs {
- if logsEndpointRe.MatchString(a) {
- gwContainer.Args[i] = strings.Replace(a, "http", "https", 1)
- }
- }
-
- gwArgs = append(gwArgs, fmt.Sprintf("--logs.tls.ca-file=%s/%s", caDir, caFile))
-
- gwContainer.VolumeMounts = append(gwContainer.VolumeMounts, corev1.VolumeMount{
- Name: caBundleVolumeName,
- ReadOnly: true,
- MountPath: caDir,
- })
-
- gwVolumes = append(gwVolumes, corev1.Volume{
- Name: caBundleVolumeName,
- VolumeSource: corev1.VolumeSource{
- ConfigMap: &corev1.ConfigMapVolumeSource{
- DefaultMode: &defaultConfigMapMode,
- LocalObjectReference: corev1.LocalObjectReference{
- Name: caBundleVolumeName,
- },
- },
- },
- })
- }
-
- for i, a := range gwArgs {
- if strings.HasPrefix(a, "--web.healthchecks.url=") {
- gwArgs[i] = fmt.Sprintf("--web.healthchecks.url=https://localhost:%d", gatewayHTTPPort)
- break
- }
- }
-
- certFilePath := path.Join(tlsDir, certFile)
- keyFilePath := path.Join(tlsDir, keyFile)
- caFilePath := path.Join(caDir, caFile)
- gwArgs = append(gwArgs,
- "--tls.client-auth-type=NoClientCert",
- fmt.Sprintf("--tls.server.cert-file=%s", certFilePath),
- fmt.Sprintf("--tls.server.key-file=%s", keyFilePath),
- fmt.Sprintf("--tls.healthchecks.server-ca-file=%s", caFilePath),
- fmt.Sprintf("--tls.healthchecks.server-name=%s", serverName))
-
- gwContainer.ReadinessProbe.ProbeHandler.HTTPGet.Scheme = corev1.URISchemeHTTPS
- gwContainer.LivenessProbe.ProbeHandler.HTTPGet.Scheme = corev1.URISchemeHTTPS
-
- // Create and mount TLS secrets volumes if not already created.
- if !withTLS {
- gwVolumes = append(gwVolumes, corev1.Volume{
- Name: secretVolumeName,
- VolumeSource: corev1.VolumeSource{
- Secret: &corev1.SecretVolumeSource{
- SecretName: secretName,
- },
- },
- })
-
- gwContainer.VolumeMounts = append(gwContainer.VolumeMounts, corev1.VolumeMount{
- Name: secretVolumeName,
- ReadOnly: true,
- MountPath: tlsDir,
- })
-
- // Add TLS profile info args since openshift gateway always uses TLS.
- gwArgs = append(gwArgs,
- fmt.Sprintf("--tls.min-version=%s", minTLSVersion),
- fmt.Sprintf("--tls.cipher-suites=%s", ciphers))
- }
-
- gwContainer.Args = gwArgs
-
p := corev1.PodSpec{
ServiceAccountName: d.GetName(),
Containers: []corev1.Container{
- *gwContainer,
- newOPAOpenShiftContainer(mode, secretVolumeName, tlsDir, certFile, keyFile, minTLSVersion, ciphers, withTLS),
+ newOPAOpenShiftContainer(mode, secretVolumeName, tlsDir, minTLSVersion, ciphers, withTLS),
},
- Volumes: gwVolumes,
}
- if err := mergo.Merge(&d.Spec.Template.Spec, p, mergo.WithOverride); err != nil {
+ if err := mergo.Merge(&d.Spec.Template.Spec, p, mergo.WithAppendSlice); err != nil {
return kverrors.Wrap(err, "failed to merge sidecar container spec ")
}
@@ -198,13 +101,15 @@ func ConfigureGatewayServiceMonitor(sm *monitoringv1.ServiceMonitor, withTLS boo
var opaEndpoint monitoringv1.Endpoint
if withTLS {
+ bearerTokenSecret := sm.Spec.Endpoints[0].BearerTokenSecret
tlsConfig := sm.Spec.Endpoints[0].TLSConfig
+
opaEndpoint = monitoringv1.Endpoint{
- Port: opaMetricsPortName,
- Path: "/metrics",
- Scheme: "https",
- BearerTokenFile: bearerTokenFile,
- TLSConfig: tlsConfig,
+ Port: opaMetricsPortName,
+ Path: "/metrics",
+ Scheme: "https",
+ BearerTokenSecret: bearerTokenSecret,
+ TLSConfig: tlsConfig,
}
} else {
opaEndpoint = monitoringv1.Endpoint{
@@ -225,44 +130,30 @@ func ConfigureGatewayServiceMonitor(sm *monitoringv1.ServiceMonitor, withTLS boo
return nil
}
-// ConfigureQueryFrontendDeployment configures use of TLS when enabled.
-func ConfigureQueryFrontendDeployment(
- d *appsv1.Deployment,
- proxyURL string,
- qfContainerName string,
- caBundleVolumeName, caDir, caFile string,
+// ConfigureRulerStatefulSet configures the ruler to use the cluster monitoring alertmanager.
+func ConfigureRulerStatefulSet(
+ ss *appsv1.StatefulSet,
+ alertmanagerCABundleName string,
+ token, caDir, caPath string,
+ monitorServerName, rulerContainerName string,
) error {
- var qfIdx int
- for i, c := range d.Spec.Template.Spec.Containers {
- if c.Name == qfContainerName {
- qfIdx = i
+ var rulerIndex int
+ for i, c := range ss.Spec.Template.Spec.Containers {
+ if c.Name == rulerContainerName {
+ rulerIndex = i
break
}
}
- containerSpec := corev1.Container{
- Args: []string{
- fmt.Sprintf("-frontend.tail-proxy-url=%s", proxyURL),
- fmt.Sprintf("-frontend.tail-tls-config.tls-ca-path=%s/%s", caDir, caFile),
- },
- VolumeMounts: []corev1.VolumeMount{
- {
- Name: caBundleVolumeName,
- ReadOnly: true,
- MountPath: caDir,
- },
- },
- }
-
- p := corev1.PodSpec{
+ secretVolumeSpec := corev1.PodSpec{
Volumes: []corev1.Volume{
{
- Name: caBundleVolumeName,
+ Name: alertmanagerCABundleName,
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
DefaultMode: &defaultConfigMapMode,
LocalObjectReference: corev1.LocalObjectReference{
- Name: caBundleVolumeName,
+ Name: alertmanagerCABundleName,
},
},
},
@@ -270,39 +161,20 @@ func ConfigureQueryFrontendDeployment(
},
}
- if err := mergo.Merge(&d.Spec.Template.Spec.Containers[qfIdx], containerSpec, mergo.WithAppendSlice); err != nil {
- return kverrors.Wrap(err, "failed to add tls config args")
- }
-
- if err := mergo.Merge(&d.Spec.Template.Spec, p, mergo.WithAppendSlice); err != nil {
- return kverrors.Wrap(err, "failed to add tls volumes")
- }
-
- return nil
-}
-
-// ConfigureRulerStatefulSet configures the ruler to use the cluster monitoring alertmanager.
-func ConfigureRulerStatefulSet(
- ss *appsv1.StatefulSet,
- token, caBundleVolumeName, caDir, caFile string,
- monitorServerName, rulerContainerName string,
-) error {
- var rulerIndex int
- for i, c := range ss.Spec.Template.Spec.Containers {
- if c.Name == rulerContainerName {
- rulerIndex = i
- break
- }
- }
-
rulerContainer := ss.Spec.Template.Spec.Containers[rulerIndex].DeepCopy()
rulerContainer.Args = append(rulerContainer.Args,
- fmt.Sprintf("-ruler.alertmanager-client.tls-ca-path=%s/%s", caDir, caFile),
+ fmt.Sprintf("-ruler.alertmanager-client.tls-ca-path=%s", caPath),
fmt.Sprintf("-ruler.alertmanager-client.tls-server-name=%s", monitorServerName),
fmt.Sprintf("-ruler.alertmanager-client.credentials-file=%s", token),
)
+ rulerContainer.VolumeMounts = append(rulerContainer.VolumeMounts, corev1.VolumeMount{
+ Name: alertmanagerCABundleName,
+ ReadOnly: true,
+ MountPath: caDir,
+ })
+
p := corev1.PodSpec{
ServiceAccountName: ss.GetName(),
Containers: []corev1.Container{
@@ -310,6 +182,10 @@ func ConfigureRulerStatefulSet(
},
}
+ if err := mergo.Merge(&ss.Spec.Template.Spec, secretVolumeSpec, mergo.WithAppendSlice); err != nil {
+ return kverrors.Wrap(err, "failed to merge volumes")
+ }
+
if err := mergo.Merge(&ss.Spec.Template.Spec, p, mergo.WithOverride); err != nil {
return kverrors.Wrap(err, "failed to merge ruler container spec ")
}
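
For orientation, a call sketch for the new ConfigureRulerStatefulSet signature; every string literal below is a placeholder chosen for illustration, not a value taken from the operator:

package main

import (
	appsv1 "k8s.io/api/apps/v1"

	"github.com/grafana/loki/operator/internal/manifests/openshift"
)

// configureRuler is a sketch; the literals are illustrative placeholders.
func configureRuler(ss *appsv1.StatefulSet) error {
	return openshift.ConfigureRulerStatefulSet(
		ss,
		"lokistack-dev-ruler-ca-bundle",                       // alertmanagerCABundleName
		"/var/run/secrets/kubernetes.io/serviceaccount/token", // token
		"/var/run/ca/alertmanager",                            // caDir
		"/var/run/ca/alertmanager/service-ca.crt",             // caPath
		"alertmanager-main.openshift-monitoring.svc",          // monitorServerName
		"loki-ruler",                                          // rulerContainerName
	)
}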
diff --git a/operator/internal/manifests/openshift/opa_openshift.go b/operator/internal/manifests/openshift/opa_openshift.go
index 40faf39788182..3104a17d22829 100644
--- a/operator/internal/manifests/openshift/opa_openshift.go
+++ b/operator/internal/manifests/openshift/opa_openshift.go
@@ -21,7 +21,7 @@ const (
opaDefaultLabelMatcher = "kubernetes_namespace_name"
)
-func newOPAOpenShiftContainer(mode lokiv1.ModeType, secretVolumeName, tlsDir, certFile, keyFile, minTLSVersion, ciphers string, withTLS bool) corev1.Container {
+func newOPAOpenShiftContainer(mode lokiv1.ModeType, secretVolumeName, tlsDir, minTLSVersion, ciphers string, withTLS bool) corev1.Container {
var (
image string
args []string
@@ -52,8 +52,8 @@ func newOPAOpenShiftContainer(mode lokiv1.ModeType, secretVolumeName, tlsDir, ce
}
if withTLS {
- certFilePath := path.Join(tlsDir, certFile)
- keyFilePath := path.Join(tlsDir, keyFile)
+ certFilePath := path.Join(tlsDir, corev1.TLSCertKey)
+ keyFilePath := path.Join(tlsDir, corev1.TLSPrivateKeyKey)
args = append(args, []string{
fmt.Sprintf("--tls.internal.server.cert-file=%s", certFilePath),
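
The sidecar now derives its file names from the upstream API constants rather than caller-supplied strings; corev1.TLSCertKey and corev1.TLSPrivateKeyKey are the standard keys of a kubernetes.io/tls Secret. A tiny sketch (the mount path is made up):

package main

import (
	"fmt"
	"path"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// The constants map to the standard kubernetes.io/tls Secret keys.
	tlsDir := "/var/run/tls/http" // illustrative mount path
	fmt.Println(path.Join(tlsDir, corev1.TLSCertKey))       // /var/run/tls/http/tls.crt
	fmt.Println(path.Join(tlsDir, corev1.TLSPrivateKeyKey)) // /var/run/tls/http/tls.key
}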
diff --git a/operator/internal/manifests/openshift/service_ca.go b/operator/internal/manifests/openshift/service_ca.go
index bb201d2b761f0..247c1e856c0da 100644
--- a/operator/internal/manifests/openshift/service_ca.go
+++ b/operator/internal/manifests/openshift/service_ca.go
@@ -5,10 +5,10 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
-// BuildServiceCAConfigMap returns a k8s configmap for the LokiStack
+// BuildGatewayCAConfigMap returns a k8s configmap for the LokiStack
// serviceCA configmap. This configmap is used to configure
// the gateway and components to verify TLS certificates.
-func BuildServiceCAConfigMap(opts Options) *corev1.ConfigMap {
+func BuildGatewayCAConfigMap(opts Options) *corev1.ConfigMap {
return &corev1.ConfigMap{
TypeMeta: metav1.TypeMeta{
Kind: "ConfigMap",
@@ -24,3 +24,23 @@ func BuildServiceCAConfigMap(opts Options) *corev1.ConfigMap {
},
}
}
+
+// BuildAlertManagerCAConfigMap returns a k8s configmap for the LokiStack
+// alertmanager serviceCA configmap. This configmap is used to configure
+// the ruler to verify AlertManager TLS certificates.
+func BuildAlertManagerCAConfigMap(opts Options) *corev1.ConfigMap {
+ return &corev1.ConfigMap{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "ConfigMap",
+ APIVersion: corev1.SchemeGroupVersion.String(),
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Annotations: map[string]string{
+ InjectCABundleKey: "true",
+ },
+ Labels: opts.BuildOpts.Labels,
+ Name: alertmanagerCABundleName(opts),
+ Namespace: opts.BuildOpts.LokiStackNamespace,
+ },
+ }
+}
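
Both CA bundle ConfigMaps are created empty and rely on the OpenShift service-ca operator to inject the signing CA, keyed off the InjectCABundleKey annotation. A hedged, test-style sketch of the new builder, mirroring the NewOptions call used by the existing tests and asserting only what the builder above clearly sets:

package openshift

import (
	"testing"

	"github.com/stretchr/testify/require"

	lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
)

// Sketch of a possible test, not part of the change itself.
func TestBuildAlertManagerCAConfigMap_HasInjectAnnotation(t *testing.T) {
	opts := NewOptions(lokiv1.OpenshiftLogging, "abc", "ns", "abc", "example.com", "abc", "abc", map[string]string{}, map[string]TenantData{}, "abc")

	cm := BuildAlertManagerCAConfigMap(opts)
	require.Equal(t, "true", cm.Annotations[InjectCABundleKey])
}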
diff --git a/operator/internal/manifests/openshift/serviceaccount.go b/operator/internal/manifests/openshift/serviceaccount.go
index 82b3805a0ab2f..216331cf13a2b 100644
--- a/operator/internal/manifests/openshift/serviceaccount.go
+++ b/operator/internal/manifests/openshift/serviceaccount.go
@@ -7,25 +7,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
)
-// BuildGatewayServiceAccount returns a k8s object for the LokiStack Gateway
-// serviceaccount. This ServiceAccount is used in parallel as an
-// OpenShift OAuth Client.
-func BuildGatewayServiceAccount(opts Options) client.Object {
- return &corev1.ServiceAccount{
- TypeMeta: metav1.TypeMeta{
- Kind: "ServiceAccount",
- APIVersion: corev1.SchemeGroupVersion.String(),
- },
- ObjectMeta: metav1.ObjectMeta{
- Annotations: serviceAccountAnnotations(opts),
- Labels: opts.BuildOpts.Labels,
- Name: gatewayServiceAccountName(opts),
- Namespace: opts.BuildOpts.LokiStackNamespace,
- },
- AutomountServiceAccountToken: pointer.Bool(true),
- }
-}
-
// BuildRulerServiceAccount returns a k8s object for the LokiStack Ruler
// serviceaccount.
// This ServiceAccount is used to authenticate and access the alertmanager host.
@@ -36,10 +17,9 @@ func BuildRulerServiceAccount(opts Options) client.Object {
APIVersion: corev1.SchemeGroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
- Annotations: serviceAccountAnnotations(opts),
- Labels: opts.BuildOpts.Labels,
- Name: rulerServiceAccountName(opts),
- Namespace: opts.BuildOpts.LokiStackNamespace,
+ Labels: opts.BuildOpts.Labels,
+ Name: rulerServiceAccountName(opts),
+ Namespace: opts.BuildOpts.LokiStackNamespace,
},
AutomountServiceAccountToken: pointer.Bool(true),
}
diff --git a/operator/internal/manifests/openshift/serviceaccount_test.go b/operator/internal/manifests/openshift/serviceaccount_test.go
deleted file mode 100644
index bb857b4b7798b..0000000000000
--- a/operator/internal/manifests/openshift/serviceaccount_test.go
+++ /dev/null
@@ -1,44 +0,0 @@
-package openshift
-
-import (
- "fmt"
- "testing"
-
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
-
- "github.com/stretchr/testify/require"
-)
-
-func TestBuildServiceAccount_AnnotationsMatchLoggingTenants(t *testing.T) {
- opts := NewOptions(lokiv1.OpenshiftLogging, "abc", "ns", "abc", "example.com", "abc", "abc", map[string]string{}, map[string]TenantData{}, "abc")
-
- sa := BuildGatewayServiceAccount(opts)
- require.Len(t, sa.GetAnnotations(), len(loggingTenants))
-
- var keys []string
- for key := range sa.GetAnnotations() {
- keys = append(keys, key)
- }
-
- for _, name := range loggingTenants {
- v := fmt.Sprintf("serviceaccounts.openshift.io/oauth-redirectreference.%s", name)
- require.Contains(t, keys, v)
- }
-}
-
-func TestBuildServiceAccount_AnnotationsMatchNetworkTenants(t *testing.T) {
- opts := NewOptions(lokiv1.OpenshiftNetwork, "def", "ns2", "def", "example2.com", "def", "def", map[string]string{}, map[string]TenantData{}, "abc")
-
- sa := BuildGatewayServiceAccount(opts)
- require.Len(t, sa.GetAnnotations(), len(networkTenants))
-
- var keys []string
- for key := range sa.GetAnnotations() {
- keys = append(keys, key)
- }
-
- for _, name := range networkTenants {
- v := fmt.Sprintf("serviceaccounts.openshift.io/oauth-redirectreference.%s", name)
- require.Contains(t, keys, v)
- }
-}
diff --git a/operator/internal/manifests/openshift/var.go b/operator/internal/manifests/openshift/var.go
index 7bac13007e1b3..e0711f45d7ab9 100644
--- a/operator/internal/manifests/openshift/var.go
+++ b/operator/internal/manifests/openshift/var.go
@@ -15,8 +15,6 @@ var (
// GatewayOPAInternalPortName is the HTTP container metrics port name of the OpenPolicyAgent sidecar.
GatewayOPAInternalPortName = "opa-metrics"
- bearerTokenFile = "/var/run/secrets/kubernetes.io/serviceaccount/token"
-
cookieSecretLength = 32
allowedRunes = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")
@@ -65,10 +63,17 @@ func rulerServiceAccountName(opts Options) string {
}
func serviceCABundleName(opts Options) string {
- return fmt.Sprintf("%s-ca-bundle", opts.BuildOpts.LokiStackName)
+ return fmt.Sprintf("%s-ca-bundle", opts.BuildOpts.GatewayName)
+}
+
+func alertmanagerCABundleName(opts Options) string {
+ return fmt.Sprintf("%s-ca-bundle", opts.BuildOpts.RulerName)
}
-func serviceAccountAnnotations(opts Options) map[string]string {
+// ServiceAccountAnnotations returns a map of OpenShift-specific annotations for ServiceAccounts.
+// Specifically the serviceaccount will be annotated for each tenant with the OAuthRedirectReference
+// to make the serviceaccount a valid oauth-client.
+func ServiceAccountAnnotations(opts Options) map[string]string {
a := make(map[string]string, len(opts.Authentication))
for _, auth := range opts.Authentication {
key := fmt.Sprintf("serviceaccounts.openshift.io/oauth-redirectreference.%s", auth.TenantName)
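
The resulting map holds one serviceaccounts.openshift.io/oauth-redirectreference.<tenant> key per configured tenant, each valued with a JSON OAuthRedirectReference pointing at the gateway Route (the shape asserted by the removed build_test.go test). A small consumer-side sketch; the tenant name in the comment is only an example:

package main

import (
	"fmt"

	"github.com/grafana/loki/operator/internal/manifests/openshift"
)

// printGatewayOAuthAnnotations is a sketch of how a caller outside this
// package can now reuse the exported helper when building the gateway
// ServiceAccount.
func printGatewayOAuthAnnotations(opts openshift.Options) {
	for k, v := range openshift.ServiceAccountAnnotations(opts) {
		// k: e.g. "serviceaccounts.openshift.io/oauth-redirectreference.application"
		// v: JSON redirect reference targeting the gateway Route
		fmt.Println(k, v)
	}
}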
diff --git a/operator/internal/manifests/options.go b/operator/internal/manifests/options.go
index ea7c48638d0cd..b1857d3431039 100644
--- a/operator/internal/manifests/options.go
+++ b/operator/internal/manifests/options.go
@@ -14,12 +14,13 @@ import (
// Options is a set of configuration values to use when building manifests such as resource sizes, etc.
// Most of this should be provided - either directly or indirectly - by the user.
type Options struct {
- Name string
- Namespace string
- Image string
- GatewayImage string
- GatewayBaseDomain string
- ConfigSHA1 string
+ Name string
+ Namespace string
+ Image string
+ GatewayImage string
+ GatewayBaseDomain string
+ ConfigSHA1 string
+ CertRotationRequiredAt string
Gates configv1.FeatureGates
Stack lokiv1.LokiStackSpec
diff --git a/operator/internal/manifests/querier.go b/operator/internal/manifests/querier.go
index a0a3cc98c7725..be11bf7d2ce67 100644
--- a/operator/internal/manifests/querier.go
+++ b/operator/internal/manifests/querier.go
@@ -37,6 +37,13 @@ func BuildQuerier(opts Options) ([]client.Object, error) {
}
}
+ if opts.Gates.HTTPEncryption || opts.Gates.GRPCEncryption {
+ caBundleName := signingCABundleName(opts.Name)
+ if err := configureServiceCA(&deployment.Spec.Template.Spec, caBundleName); err != nil {
+ return nil, err
+ }
+ }
+
return []client.Object{
deployment,
NewQuerierGRPCService(opts),
@@ -122,7 +129,7 @@ func NewQuerierDeployment(opts Options) *appsv1.Deployment {
}
l := ComponentLabels(LabelQuerierComponent, opts.Name)
- a := commonAnnotations(opts.ConfigSHA1)
+ a := commonAnnotations(opts.ConfigSHA1, opts.CertRotationRequiredAt)
return &appsv1.Deployment{
TypeMeta: metav1.TypeMeta{
@@ -164,9 +171,8 @@ func NewQuerierGRPCService(opts Options) *corev1.Service {
APIVersion: corev1.SchemeGroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
- Name: serviceName,
- Labels: labels,
- Annotations: serviceAnnotations(serviceName, opts.Gates.OpenShift.ServingCertsService),
+ Name: serviceName,
+ Labels: labels,
},
Spec: corev1.ServiceSpec{
ClusterIP: "None",
@@ -194,9 +200,8 @@ func NewQuerierHTTPService(opts Options) *corev1.Service {
APIVersion: corev1.SchemeGroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
- Name: serviceName,
- Labels: labels,
- Annotations: serviceAnnotations(serviceName, opts.Gates.OpenShift.ServingCertsService),
+ Name: serviceName,
+ Labels: labels,
},
Spec: corev1.ServiceSpec{
Ports: []corev1.ServicePort{
@@ -214,60 +219,39 @@ func NewQuerierHTTPService(opts Options) *corev1.Service {
func configureQuerierHTTPServicePKI(deployment *appsv1.Deployment, opts Options) error {
serviceName := serviceNameQuerierHTTP(opts.Name)
- return configureHTTPServicePKI(&deployment.Spec.Template.Spec, serviceName)
+ return configureHTTPServicePKI(&deployment.Spec.Template.Spec, serviceName, opts.TLSProfile.MinTLSVersion, opts.TLSCipherSuites())
}
func configureQuerierGRPCServicePKI(deployment *appsv1.Deployment, opts Options) error {
- caBundleName := signingCABundleName(opts.Name)
- secretVolumeSpec := corev1.PodSpec{
- Volumes: []corev1.Volume{
- {
- Name: caBundleName,
- VolumeSource: corev1.VolumeSource{
- ConfigMap: &corev1.ConfigMapVolumeSource{
- LocalObjectReference: corev1.LocalObjectReference{
- Name: caBundleName,
- },
- },
- },
- },
- },
- }
-
secretContainerSpec := corev1.Container{
- VolumeMounts: []corev1.VolumeMount{
- {
- Name: caBundleName,
- ReadOnly: false,
- MountPath: caBundleDir,
- },
- },
Args: []string{
// Enable GRPC over TLS for ingester client
"-ingester.client.tls-enabled=true",
fmt.Sprintf("-ingester.client.tls-cipher-suites=%s", opts.TLSCipherSuites()),
fmt.Sprintf("-ingester.client.tls-min-version=%s", opts.TLSProfile.MinTLSVersion),
fmt.Sprintf("-ingester.client.tls-ca-path=%s", signingCAPath()),
+ fmt.Sprintf("-ingester.client.tls-cert-path=%s", lokiServerGRPCTLSCert()),
+ fmt.Sprintf("-ingester.client.tls-key-path=%s", lokiServerGRPCTLSKey()),
fmt.Sprintf("-ingester.client.tls-server-name=%s", fqdn(serviceNameIngesterGRPC(opts.Name), opts.Namespace)),
// Enable GRPC over TLS for query frontend client
"-querier.frontend-client.tls-enabled=true",
fmt.Sprintf("-querier.frontend-client.tls-cipher-suites=%s", opts.TLSCipherSuites()),
fmt.Sprintf("-querier.frontend-client.tls-min-version=%s", opts.TLSProfile.MinTLSVersion),
fmt.Sprintf("-querier.frontend-client.tls-ca-path=%s", signingCAPath()),
+ fmt.Sprintf("-querier.frontend-client.tls-cert-path=%s", lokiServerGRPCTLSCert()),
+ fmt.Sprintf("-querier.frontend-client.tls-key-path=%s", lokiServerGRPCTLSKey()),
fmt.Sprintf("-querier.frontend-client.tls-server-name=%s", fqdn(serviceNameQueryFrontendGRPC(opts.Name), opts.Namespace)),
			// Enable GRPC over TLS for boltdb-shipper index-gateway client
"-boltdb.shipper.index-gateway-client.grpc.tls-enabled=true",
fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-cipher-suites=%s", opts.TLSCipherSuites()),
fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-min-version=%s", opts.TLSProfile.MinTLSVersion),
fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-ca-path=%s", signingCAPath()),
+ fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-cert-path=%s", lokiServerGRPCTLSCert()),
+ fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-key-path=%s", lokiServerGRPCTLSKey()),
fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-server-name=%s", fqdn(serviceNameIndexGatewayGRPC(opts.Name), opts.Namespace)),
},
}
- if err := mergo.Merge(&deployment.Spec.Template.Spec, secretVolumeSpec, mergo.WithAppendSlice); err != nil {
- return kverrors.Wrap(err, "failed to merge volumes")
- }
-
if err := mergo.Merge(&deployment.Spec.Template.Spec.Containers[0], secretContainerSpec, mergo.WithAppendSlice); err != nil {
return kverrors.Wrap(err, "failed to merge container")
}
diff --git a/operator/internal/manifests/querier_test.go b/operator/internal/manifests/querier_test.go
index a452f0906825d..fe81d32a52f71 100644
--- a/operator/internal/manifests/querier_test.go
+++ b/operator/internal/manifests/querier_test.go
@@ -29,6 +29,26 @@ func TestNewQuerierDeployment_HasTemplateConfigHashAnnotation(t *testing.T) {
require.Equal(t, annotations[expected], "deadbeef")
}
+func TestNewQuerierDeployment_HasTemplateCertRotationRequiredAtAnnotation(t *testing.T) {
+ ss := manifests.NewQuerierDeployment(manifests.Options{
+ Name: "abcd",
+ Namespace: "efgh",
+ CertRotationRequiredAt: "deadbeef",
+ Stack: lokiv1.LokiStackSpec{
+ Template: &lokiv1.LokiTemplateSpec{
+ Querier: &lokiv1.LokiComponentSpec{
+ Replicas: 1,
+ },
+ },
+ },
+ })
+
+ expected := "loki.grafana.com/certRotationRequiredAt"
+ annotations := ss.Spec.Template.Annotations
+ require.Contains(t, annotations, expected)
+ require.Equal(t, annotations[expected], "deadbeef")
+}
+
func TestNewQuerierDeployment_SelectorMatchesLabels(t *testing.T) {
// You must set the .spec.selector field of a Deployment to match the labels of
// its .spec.template.metadata.labels. Prior to Kubernetes 1.8, the
diff --git a/operator/internal/manifests/query-frontend.go b/operator/internal/manifests/query-frontend.go
index 4785db7b77f4f..935d061cde193 100644
--- a/operator/internal/manifests/query-frontend.go
+++ b/operator/internal/manifests/query-frontend.go
@@ -32,6 +32,13 @@ func BuildQueryFrontend(opts Options) ([]client.Object, error) {
}
}
+ if opts.Gates.HTTPEncryption || opts.Gates.GRPCEncryption {
+ caBundleName := signingCABundleName(opts.Name)
+ if err := configureServiceCA(&deployment.Spec.Template.Spec, caBundleName); err != nil {
+ return nil, err
+ }
+ }
+
return []client.Object{
deployment,
NewQueryFrontendGRPCService(opts),
@@ -129,7 +136,7 @@ func NewQueryFrontendDeployment(opts Options) *appsv1.Deployment {
}
l := ComponentLabels(LabelQueryFrontendComponent, opts.Name)
- a := commonAnnotations(opts.ConfigSHA1)
+ a := commonAnnotations(opts.ConfigSHA1, opts.CertRotationRequiredAt)
return &appsv1.Deployment{
TypeMeta: metav1.TypeMeta{
@@ -171,9 +178,8 @@ func NewQueryFrontendGRPCService(opts Options) *corev1.Service {
APIVersion: corev1.SchemeGroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
- Name: serviceName,
- Labels: labels,
- Annotations: serviceAnnotations(serviceName, opts.Gates.OpenShift.ServingCertsService),
+ Name: serviceName,
+ Labels: labels,
},
Spec: corev1.ServiceSpec{
ClusterIP: "None",
@@ -201,9 +207,8 @@ func NewQueryFrontendHTTPService(opts Options) *corev1.Service {
APIVersion: corev1.SchemeGroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
- Name: serviceName,
- Labels: labels,
- Annotations: serviceAnnotations(serviceName, opts.Gates.OpenShift.ServingCertsService),
+ Name: serviceName,
+ Labels: labels,
},
Spec: corev1.ServiceSpec{
Ports: []corev1.ServicePort{
@@ -220,80 +225,36 @@ func NewQueryFrontendHTTPService(opts Options) *corev1.Service {
}
func configureQueryFrontendHTTPServicePKI(deployment *appsv1.Deployment, opts Options) error {
- serviceName := serviceNameQueryFrontendHTTP(opts.Name)
- caBundleName := signingCABundleName(opts.Name)
-
- err := configureTailCA(
- deployment,
- lokiFrontendContainerName,
- caBundleName,
- caBundleDir,
- caFile,
- opts.TLSProfile.MinTLSVersion,
- opts.TLSCipherSuites(),
- )
- if err != nil {
- return err
- }
-
- return configureHTTPServicePKI(&deployment.Spec.Template.Spec, serviceName)
-}
-
-func configureQueryFrontendGRPCServicePKI(deployment *appsv1.Deployment, opts Options) error {
- serviceName := serviceNameQueryFrontendGRPC(opts.Name)
- return configureGRPCServicePKI(&deployment.Spec.Template.Spec, serviceName)
-}
-
-// ConfigureQueryFrontendDeployment configures CA certificate when TLS is enabled.
-func configureTailCA(d *appsv1.Deployment,
- qfContainerName, caBundleVolumeName, caDir, caFile, minTLSVersion, cipherSuites string,
-) error {
var qfIdx int
- for i, c := range d.Spec.Template.Spec.Containers {
- if c.Name == qfContainerName {
+ for i, c := range deployment.Spec.Template.Spec.Containers {
+ if c.Name == lokiFrontendContainerName {
qfIdx = i
break
}
}
+ url := fmt.Sprintf("https://%s:%d", fqdn(serviceNameQuerierHTTP(opts.Name), opts.Namespace), httpPort)
+
containerSpec := corev1.Container{
Args: []string{
- fmt.Sprintf("-frontend.tail-tls-config.tls-cipher-suites=%s", cipherSuites),
- fmt.Sprintf("-frontend.tail-tls-config.tls-min-version=%s", minTLSVersion),
- fmt.Sprintf("-frontend.tail-tls-config.tls-ca-path=%s/%s", caDir, caFile),
- },
- VolumeMounts: []corev1.VolumeMount{
- {
- Name: caBundleVolumeName,
- ReadOnly: true,
- MountPath: caDir,
- },
- },
- }
-
- p := corev1.PodSpec{
- Volumes: []corev1.Volume{
- {
- Name: caBundleVolumeName,
- VolumeSource: corev1.VolumeSource{
- ConfigMap: &corev1.ConfigMapVolumeSource{
- DefaultMode: &defaultConfigMapMode,
- LocalObjectReference: corev1.LocalObjectReference{
- Name: caBundleVolumeName,
- },
- },
- },
- },
+ fmt.Sprintf("-frontend.tail-proxy-url=%s", url),
+ fmt.Sprintf("-frontend.tail-tls-config.tls-min-version=%s", opts.TLSProfile.MinTLSVersion),
+ fmt.Sprintf("-frontend.tail-tls-config.tls-cipher-suites=%s", opts.TLSCipherSuites()),
+ fmt.Sprintf("-frontend.tail-tls-config.tls-ca-path=%s", signingCAPath()),
+ fmt.Sprintf("-frontend.tail-tls-config.tls-cert-path=%s", lokiServerHTTPTLSCert()),
+ fmt.Sprintf("-frontend.tail-tls-config.tls-key-path=%s", lokiServerHTTPTLSKey()),
},
}
- if err := mergo.Merge(&d.Spec.Template.Spec.Containers[qfIdx], containerSpec, mergo.WithAppendSlice); err != nil {
+ if err := mergo.Merge(&deployment.Spec.Template.Spec.Containers[qfIdx], containerSpec, mergo.WithAppendSlice); err != nil {
return kverrors.Wrap(err, "failed to add tls config args")
}
- if err := mergo.Merge(&d.Spec.Template.Spec, p, mergo.WithAppendSlice); err != nil {
- return kverrors.Wrap(err, "failed to add tls volumes")
- }
+ serviceName := serviceNameQueryFrontendHTTP(opts.Name)
+ return configureHTTPServicePKI(&deployment.Spec.Template.Spec, serviceName, opts.TLSProfile.MinTLSVersion, opts.TLSCipherSuites())
+}
- return nil
+func configureQueryFrontendGRPCServicePKI(deployment *appsv1.Deployment, opts Options) error {
+ serviceName := serviceNameQueryFrontendGRPC(opts.Name)
+ return configureGRPCServicePKI(&deployment.Spec.Template.Spec, serviceName)
}
diff --git a/operator/internal/manifests/query-frontend_test.go b/operator/internal/manifests/query-frontend_test.go
index fdb811e4e5b6d..c31bc1004aef6 100644
--- a/operator/internal/manifests/query-frontend_test.go
+++ b/operator/internal/manifests/query-frontend_test.go
@@ -1,17 +1,10 @@
package manifests
import (
- "fmt"
- "path"
"testing"
lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
- "github.com/grafana/loki/operator/internal/manifests/internal/config"
-
"github.com/stretchr/testify/require"
- appsv1 "k8s.io/api/apps/v1"
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func TestNewQueryFrontendDeployment_SelectorMatchesLabels(t *testing.T) {
@@ -52,10 +45,11 @@ func TestNewQueryFrontendDeployment_HasTemplateConfigHashAnnotation(t *testing.T
require.Equal(t, annotations[expected], "deadbeef")
}
-func TestConfigureQueryFrontendHTTPServicePKI(t *testing.T) {
- opts := Options{
- Name: "abcd",
- Namespace: "efgh",
+func TestNewQueryFrontendDeployment_HasTemplateCertRotationRequiredAtAnnotation(t *testing.T) {
+ ss := NewQueryFrontendDeployment(Options{
+ Name: "abcd",
+ Namespace: "efgh",
+ CertRotationRequiredAt: "deadbeef",
Stack: lokiv1.LokiStackSpec{
Template: &lokiv1.LokiTemplateSpec{
QueryFrontend: &lokiv1.LokiComponentSpec{
@@ -63,144 +57,10 @@ func TestConfigureQueryFrontendHTTPServicePKI(t *testing.T) {
},
},
},
- TLSProfile: TLSProfileSpec{
- MinTLSVersion: "TLSVersion1.2",
- Ciphers: []string{"TLS_RSA_WITH_AES_128_CBC_SHA"},
- },
- }
- d := appsv1.Deployment{
- TypeMeta: metav1.TypeMeta{
- Kind: "Deployment",
- APIVersion: appsv1.SchemeGroupVersion.String(),
- },
- Spec: appsv1.DeploymentSpec{
- Template: corev1.PodTemplateSpec{
- Spec: corev1.PodSpec{
- Containers: []corev1.Container{
- {
- Name: lokiFrontendContainerName,
- Args: []string{
- "-target=query-frontend",
- },
- VolumeMounts: []corev1.VolumeMount{
- {
- Name: configVolumeName,
- ReadOnly: false,
- MountPath: config.LokiConfigMountDir,
- },
- },
- },
- },
- Volumes: []corev1.Volume{
- {
- Name: configVolumeName,
- VolumeSource: corev1.VolumeSource{
- ConfigMap: &corev1.ConfigMapVolumeSource{
- DefaultMode: &defaultConfigMapMode,
- LocalObjectReference: corev1.LocalObjectReference{
- Name: lokiConfigMapName(opts.Name),
- },
- },
- },
- },
- },
- },
- },
- },
- }
-
- caBundleVolumeName := signingCABundleName(opts.Name)
- serviceName := serviceNameQueryFrontendHTTP(opts.Name)
- expected := appsv1.Deployment{
- TypeMeta: metav1.TypeMeta{
- Kind: "Deployment",
- APIVersion: appsv1.SchemeGroupVersion.String(),
- },
- Spec: appsv1.DeploymentSpec{
- Template: corev1.PodTemplateSpec{
- Spec: corev1.PodSpec{
- Containers: []corev1.Container{
- {
- Name: lokiFrontendContainerName,
- Args: []string{
- "-target=query-frontend",
- "-frontend.tail-tls-config.tls-cipher-suites=TLS_RSA_WITH_AES_128_CBC_SHA",
- "-frontend.tail-tls-config.tls-min-version=TLSVersion1.2",
- fmt.Sprintf("-frontend.tail-tls-config.tls-ca-path=%s/%s", caBundleDir, caFile),
- fmt.Sprintf("-server.http-tls-cert-path=%s", path.Join(httpTLSDir, tlsCertFile)),
- fmt.Sprintf("-server.http-tls-key-path=%s", path.Join(httpTLSDir, tlsKeyFile)),
- },
- VolumeMounts: []corev1.VolumeMount{
- {
- Name: configVolumeName,
- ReadOnly: false,
- MountPath: config.LokiConfigMountDir,
- },
- {
- Name: caBundleVolumeName,
- ReadOnly: true,
- MountPath: caBundleDir,
- },
- {
- Name: serviceName,
- ReadOnly: false,
- MountPath: httpTLSDir,
- },
- },
- ReadinessProbe: &corev1.Probe{
- ProbeHandler: corev1.ProbeHandler{
- HTTPGet: &corev1.HTTPGetAction{
- Scheme: corev1.URISchemeHTTPS,
- },
- },
- },
- LivenessProbe: &corev1.Probe{
- ProbeHandler: corev1.ProbeHandler{
- HTTPGet: &corev1.HTTPGetAction{
- Scheme: corev1.URISchemeHTTPS,
- },
- },
- },
- },
- },
- Volumes: []corev1.Volume{
- {
- Name: configVolumeName,
- VolumeSource: corev1.VolumeSource{
- ConfigMap: &corev1.ConfigMapVolumeSource{
- DefaultMode: &defaultConfigMapMode,
- LocalObjectReference: corev1.LocalObjectReference{
- Name: lokiConfigMapName(opts.Name),
- },
- },
- },
- },
- {
- Name: caBundleVolumeName,
- VolumeSource: corev1.VolumeSource{
- ConfigMap: &corev1.ConfigMapVolumeSource{
- DefaultMode: &defaultConfigMapMode,
- LocalObjectReference: corev1.LocalObjectReference{
- Name: caBundleVolumeName,
- },
- },
- },
- },
- {
- Name: serviceName,
- VolumeSource: corev1.VolumeSource{
- Secret: &corev1.SecretVolumeSource{
- SecretName: serviceName,
- },
- },
- },
- },
- },
- },
- },
- }
+ })
- err := configureQueryFrontendHTTPServicePKI(&d, opts)
- require.Nil(t, err)
- require.Equal(t, expected, d)
+ expected := "loki.grafana.com/certRotationRequiredAt"
+ annotations := ss.Spec.Template.Annotations
+ require.Contains(t, annotations, expected)
+ require.Equal(t, annotations[expected], "deadbeef")
}
diff --git a/operator/internal/manifests/ruler.go b/operator/internal/manifests/ruler.go
index e8537279f54ee..6bdf510348b3b 100644
--- a/operator/internal/manifests/ruler.go
+++ b/operator/internal/manifests/ruler.go
@@ -35,8 +35,14 @@ func BuildRuler(opts Options) ([]client.Object, error) {
}
}
- objs := []client.Object{}
+ if opts.Gates.HTTPEncryption || opts.Gates.GRPCEncryption {
+ caBundleName := signingCABundleName(opts.Name)
+ if err := configureServiceCA(&statefulSet.Spec.Template.Spec, caBundleName); err != nil {
+ return nil, err
+ }
+ }
+ objs := []client.Object{}
if opts.Stack.Tenants != nil {
if err := configureRulerStatefulSetForMode(statefulSet, opts.Stack.Tenants.Mode, opts.Name); err != nil {
return nil, err
@@ -157,7 +163,7 @@ func NewRulerStatefulSet(opts Options) *appsv1.StatefulSet {
}
l := ComponentLabels(LabelRulerComponent, opts.Name)
- a := commonAnnotations(opts.ConfigSHA1)
+ a := commonAnnotations(opts.ConfigSHA1, opts.CertRotationRequiredAt)
return &appsv1.StatefulSet{
TypeMeta: metav1.TypeMeta{
@@ -230,43 +236,6 @@ func NewRulerStatefulSet(opts Options) *appsv1.StatefulSet {
}
}
-func configureRulerStatefulSetForMode(
- ss *appsv1.StatefulSet, mode lokiv1.ModeType,
- stackName string,
-) error {
- switch mode {
- case lokiv1.Static, lokiv1.Dynamic:
- return nil // nothing to configure
- case lokiv1.OpenshiftLogging, lokiv1.OpenshiftNetwork:
- caBundleName := signingCABundleName(stackName)
- monitorServerName := fqdn(openshift.MonitoringSVCMain, openshift.MonitoringNS)
- return openshift.ConfigureRulerStatefulSet(
- ss,
- BearerTokenFile,
- caBundleName,
- caBundleDir,
- caFile,
- monitorServerName,
- rulerContainerName,
- )
- }
-
- return nil
-}
-
-func configureRulerObjsForMode(opts Options) []client.Object {
- openShiftObjs := []client.Object{}
-
- switch opts.Stack.Tenants.Mode {
- case lokiv1.Static, lokiv1.Dynamic:
- // nothing to configure
- case lokiv1.OpenshiftLogging, lokiv1.OpenshiftNetwork:
- openShiftObjs = openshift.BuildRulerObjects(opts.OpenShiftOptions)
- }
-
- return openShiftObjs
-}
-
// NewRulerGRPCService creates a k8s service for the ruler GRPC endpoint
func NewRulerGRPCService(opts Options) *corev1.Service {
serviceName := serviceNameRulerGRPC(opts.Name)
@@ -278,9 +247,8 @@ func NewRulerGRPCService(opts Options) *corev1.Service {
APIVersion: corev1.SchemeGroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
- Name: serviceName,
- Labels: labels,
- Annotations: serviceAnnotations(serviceName, opts.Gates.OpenShift.ServingCertsService),
+ Name: serviceName,
+ Labels: labels,
},
Spec: corev1.ServiceSpec{
ClusterIP: "None",
@@ -308,9 +276,8 @@ func NewRulerHTTPService(opts Options) *corev1.Service {
APIVersion: corev1.SchemeGroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
- Name: serviceName,
- Labels: labels,
- Annotations: serviceAnnotations(serviceName, opts.Gates.OpenShift.ServingCertsService),
+ Name: serviceName,
+ Labels: labels,
},
Spec: corev1.ServiceSpec{
Ports: []corev1.ServicePort{
@@ -328,60 +295,39 @@ func NewRulerHTTPService(opts Options) *corev1.Service {
func configureRulerHTTPServicePKI(statefulSet *appsv1.StatefulSet, opts Options) error {
serviceName := serviceNameRulerHTTP(opts.Name)
- return configureHTTPServicePKI(&statefulSet.Spec.Template.Spec, serviceName)
+ return configureHTTPServicePKI(&statefulSet.Spec.Template.Spec, serviceName, opts.TLSProfile.MinTLSVersion, opts.TLSCipherSuites())
}
func configureRulerGRPCServicePKI(sts *appsv1.StatefulSet, opts Options) error {
- caBundleName := signingCABundleName(opts.Name)
- secretVolumeSpec := corev1.PodSpec{
- Volumes: []corev1.Volume{
- {
- Name: caBundleName,
- VolumeSource: corev1.VolumeSource{
- ConfigMap: &corev1.ConfigMapVolumeSource{
- LocalObjectReference: corev1.LocalObjectReference{
- Name: caBundleName,
- },
- },
- },
- },
- },
- }
-
secretContainerSpec := corev1.Container{
- VolumeMounts: []corev1.VolumeMount{
- {
- Name: caBundleName,
- ReadOnly: false,
- MountPath: caBundleDir,
- },
- },
Args: []string{
- // Enable GRPC over TLS for ruler client
- "-ruler.client.tls-enabled=true",
- fmt.Sprintf("-ruler.client.tls-cipher-suites=%s", opts.TLSCipherSuites()),
- fmt.Sprintf("-ruler.client.tls-min-version=%s", opts.TLSProfile.MinTLSVersion),
- fmt.Sprintf("-ruler.client.tls-ca-path=%s", signingCAPath()),
- fmt.Sprintf("-ruler.client.tls-server-name=%s", fqdn(serviceNameRulerGRPC(opts.Name), opts.Namespace)),
- // Enable GRPC over TLS for ingester client
- "-ingester.client.tls-enabled=true",
- fmt.Sprintf("-ingester.client.tls-cipher-suites=%s", opts.TLSCipherSuites()),
- fmt.Sprintf("-ingester.client.tls-min-version=%s", opts.TLSProfile.MinTLSVersion),
- fmt.Sprintf("-ingester.client.tls-ca-path=%s", signingCAPath()),
- fmt.Sprintf("-ingester.client.tls-server-name=%s", fqdn(serviceNameIngesterGRPC(opts.Name), opts.Namespace)),
		// Enable GRPC over TLS for boltdb-shipper index-gateway client
"-boltdb.shipper.index-gateway-client.grpc.tls-enabled=true",
fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-cipher-suites=%s", opts.TLSCipherSuites()),
fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-min-version=%s", opts.TLSProfile.MinTLSVersion),
fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-ca-path=%s", signingCAPath()),
+ fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-cert-path=%s", lokiServerGRPCTLSCert()),
+ fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-key-path=%s", lokiServerGRPCTLSKey()),
fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-server-name=%s", fqdn(serviceNameIndexGatewayGRPC(opts.Name), opts.Namespace)),
+ // Enable GRPC over TLS for ingester client
+ "-ingester.client.tls-enabled=true",
+ fmt.Sprintf("-ingester.client.tls-cipher-suites=%s", opts.TLSCipherSuites()),
+ fmt.Sprintf("-ingester.client.tls-min-version=%s", opts.TLSProfile.MinTLSVersion),
+ fmt.Sprintf("-ingester.client.tls-ca-path=%s", signingCAPath()),
+ fmt.Sprintf("-ingester.client.tls-cert-path=%s", lokiServerGRPCTLSCert()),
+ fmt.Sprintf("-ingester.client.tls-key-path=%s", lokiServerGRPCTLSKey()),
+ fmt.Sprintf("-ingester.client.tls-server-name=%s", fqdn(serviceNameIngesterGRPC(opts.Name), opts.Namespace)),
+ // Enable GRPC over TLS for ruler client
+ "-ruler.client.tls-enabled=true",
+ fmt.Sprintf("-ruler.client.tls-cipher-suites=%s", opts.TLSCipherSuites()),
+ fmt.Sprintf("-ruler.client.tls-min-version=%s", opts.TLSProfile.MinTLSVersion),
+ fmt.Sprintf("-ruler.client.tls-ca-path=%s", signingCAPath()),
+ fmt.Sprintf("-ruler.client.tls-cert-path=%s", lokiServerGRPCTLSCert()),
+ fmt.Sprintf("-ruler.client.tls-key-path=%s", lokiServerGRPCTLSKey()),
+ fmt.Sprintf("-ruler.client.tls-server-name=%s", fqdn(serviceNameRulerGRPC(opts.Name), opts.Namespace)),
},
}
- if err := mergo.Merge(&sts.Spec.Template.Spec, secretVolumeSpec, mergo.WithAppendSlice); err != nil {
- return kverrors.Wrap(err, "failed to merge volumes")
- }
-
if err := mergo.Merge(&sts.Spec.Template.Spec.Containers[0], secretContainerSpec, mergo.WithAppendSlice); err != nil {
return kverrors.Wrap(err, "failed to merge container")
}
@@ -390,6 +336,43 @@ func configureRulerGRPCServicePKI(sts *appsv1.StatefulSet, opts Options) error {
return configureGRPCServicePKI(&sts.Spec.Template.Spec, serviceName)
}
+func configureRulerStatefulSetForMode(
+ ss *appsv1.StatefulSet, mode lokiv1.ModeType,
+ stackName string,
+) error {
+ switch mode {
+ case lokiv1.Static, lokiv1.Dynamic:
+ return nil // nothing to configure
+ case lokiv1.OpenshiftLogging, lokiv1.OpenshiftNetwork:
+ bundleName := alertmanagerSigningCABundleName(ss.Name)
+ monitorServerName := fqdn(openshift.MonitoringSVCMain, openshift.MonitoringNS)
+ return openshift.ConfigureRulerStatefulSet(
+ ss,
+ bundleName,
+ BearerTokenFile,
+ alertmanagerUpstreamCADir(),
+ alertmanagerUpstreamCAPath(),
+ monitorServerName,
+ rulerContainerName,
+ )
+ }
+
+ return nil
+}
+
+func configureRulerObjsForMode(opts Options) []client.Object {
+ openShiftObjs := []client.Object{}
+
+ switch opts.Stack.Tenants.Mode {
+ case lokiv1.Static, lokiv1.Dynamic:
+ // nothing to configure
+ case lokiv1.OpenshiftLogging, lokiv1.OpenshiftNetwork:
+ openShiftObjs = openshift.BuildRulerObjects(opts.OpenShiftOptions)
+ }
+
+ return openShiftObjs
+}
+
func ruleVolumeItems(tenants map[string]TenantConfig) []corev1.KeyToPath {
var items []corev1.KeyToPath
diff --git a/operator/internal/manifests/ruler_test.go b/operator/internal/manifests/ruler_test.go
index ff35916375406..2288ab0465354 100644
--- a/operator/internal/manifests/ruler_test.go
+++ b/operator/internal/manifests/ruler_test.go
@@ -1,10 +1,12 @@
package manifests_test
import (
+ "math/rand"
"testing"
lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/manifests"
+ "github.com/grafana/loki/operator/internal/manifests/openshift"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
)
@@ -30,6 +32,55 @@ func TestNewRulerStatefulSet_HasTemplateConfigHashAnnotation(t *testing.T) {
require.Equal(t, annotations[expected], "deadbeef")
}
+func TestNewRulerStatefulSet_HasTemplateCertRotationRequiredAtAnnotation(t *testing.T) {
+ ss := manifests.NewRulerStatefulSet(manifests.Options{
+ Name: "abcd",
+ Namespace: "efgh",
+ CertRotationRequiredAt: "deadbeef",
+ Stack: lokiv1.LokiStackSpec{
+ StorageClassName: "standard",
+ Template: &lokiv1.LokiTemplateSpec{
+ Ruler: &lokiv1.LokiComponentSpec{
+ Replicas: 1,
+ },
+ },
+ },
+ })
+ expected := "loki.grafana.com/certRotationRequiredAt"
+ annotations := ss.Spec.Template.Annotations
+ require.Contains(t, annotations, expected)
+ require.Equal(t, annotations[expected], "deadbeef")
+}
+
+func TestBuildRuler_HasExtraObjectsForTenantMode(t *testing.T) {
+ objs, err := manifests.BuildRuler(manifests.Options{
+ Name: "abcd",
+ Namespace: "efgh",
+ OpenShiftOptions: openshift.Options{
+ BuildOpts: openshift.BuildOptions{
+ LokiStackName: "abc",
+ LokiStackNamespace: "efgh",
+ },
+ },
+ Stack: lokiv1.LokiStackSpec{
+ Template: &lokiv1.LokiTemplateSpec{
+ Ruler: &lokiv1.LokiComponentSpec{
+ Replicas: rand.Int31(),
+ },
+ },
+ Rules: &lokiv1.RulesSpec{
+ Enabled: true,
+ },
+ Tenants: &lokiv1.TenantsSpec{
+ Mode: lokiv1.OpenshiftLogging,
+ },
+ },
+ })
+
+ require.NoError(t, err)
+ require.Len(t, objs, 7)
+}
+
func TestNewRulerStatefulSet_SelectorMatchesLabels(t *testing.T) {
// You must set the .spec.selector field of a StatefulSet to match the labels of
// its .spec.template.metadata.labels. Prior to Kubernetes 1.8, the
diff --git a/operator/internal/manifests/service.go b/operator/internal/manifests/service.go
index 19ce5fea05549..8ed6623452718 100644
--- a/operator/internal/manifests/service.go
+++ b/operator/internal/manifests/service.go
@@ -2,13 +2,50 @@ package manifests
import (
"fmt"
- "path"
"github.com/ViaQ/logerr/v2/kverrors"
"github.com/imdario/mergo"
corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/util/intstr"
)
+func configureServiceCA(podSpec *corev1.PodSpec, caBundleName string) error {
+ secretVolumeSpec := corev1.PodSpec{
+ Volumes: []corev1.Volume{
+ {
+ Name: caBundleName,
+ VolumeSource: corev1.VolumeSource{
+ ConfigMap: &corev1.ConfigMapVolumeSource{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: caBundleName,
+ },
+ },
+ },
+ },
+ },
+ }
+
+ secretContainerSpec := corev1.Container{
+ VolumeMounts: []corev1.VolumeMount{
+ {
+ Name: caBundleName,
+ ReadOnly: false,
+ MountPath: caBundleDir,
+ },
+ },
+ }
+
+ if err := mergo.Merge(podSpec, secretVolumeSpec, mergo.WithAppendSlice); err != nil {
+ return kverrors.Wrap(err, "failed to merge volumes")
+ }
+
+ if err := mergo.Merge(&podSpec.Containers[0], secretContainerSpec, mergo.WithAppendSlice); err != nil {
+ return kverrors.Wrap(err, "failed to merge container")
+ }
+
+ return nil
+}
+
func configureGRPCServicePKI(podSpec *corev1.PodSpec, serviceName string) error {
secretVolumeSpec := corev1.PodSpec{
Volumes: []corev1.Volume{
@@ -27,12 +64,14 @@ func configureGRPCServicePKI(podSpec *corev1.PodSpec, serviceName string) error
{
Name: serviceName,
ReadOnly: false,
- MountPath: grpcTLSDir,
+ MountPath: lokiServerGRPCTLSDir(),
},
},
Args: []string{
- fmt.Sprintf("-server.grpc-tls-cert-path=%s", path.Join(grpcTLSDir, tlsCertFile)),
- fmt.Sprintf("-server.grpc-tls-key-path=%s", path.Join(grpcTLSDir, tlsKeyFile)),
+ fmt.Sprintf("-server.grpc-tls-ca-path=%s", signingCAPath()),
+ fmt.Sprintf("-server.grpc-tls-cert-path=%s", lokiServerGRPCTLSCert()),
+ fmt.Sprintf("-server.grpc-tls-key-path=%s", lokiServerGRPCTLSKey()),
+ "-server.grpc-tls-client-auth=RequireAndVerifyClientCert",
},
}
@@ -47,7 +86,7 @@ func configureGRPCServicePKI(podSpec *corev1.PodSpec, serviceName string) error
return nil
}
-func configureHTTPServicePKI(podSpec *corev1.PodSpec, serviceName string) error {
+func configureHTTPServicePKI(podSpec *corev1.PodSpec, serviceName, minTLSVersion, tlsCipherSuites string) error {
secretVolumeSpec := corev1.PodSpec{
Volumes: []corev1.Volume{
{
@@ -60,24 +99,44 @@ func configureHTTPServicePKI(podSpec *corev1.PodSpec, serviceName string) error
},
},
}
+
secretContainerSpec := corev1.Container{
VolumeMounts: []corev1.VolumeMount{
{
Name: serviceName,
ReadOnly: false,
- MountPath: httpTLSDir,
+ MountPath: lokiServerHTTPTLSDir(),
},
},
Args: []string{
- fmt.Sprintf("-server.http-tls-cert-path=%s", path.Join(httpTLSDir, tlsCertFile)),
- fmt.Sprintf("-server.http-tls-key-path=%s", path.Join(httpTLSDir, tlsKeyFile)),
+ // Expose ready handler through internal server without requiring mTLS
+ "-internal-server.enable=true",
+ "-internal-server.http-listen-address=",
+ fmt.Sprintf("-internal-server.http-tls-min-version=%s", minTLSVersion),
+ fmt.Sprintf("-internal-server.http-tls-cipher-suites=%s", tlsCipherSuites),
+ fmt.Sprintf("-internal-server.http-tls-cert-path=%s", lokiServerHTTPTLSCert()),
+ fmt.Sprintf("-internal-server.http-tls-key-path=%s", lokiServerHTTPTLSKey()),
+ // Require mTLS for any other handler
+ fmt.Sprintf("-server.http-tls-ca-path=%s", signingCAPath()),
+ fmt.Sprintf("-server.http-tls-cert-path=%s", lokiServerHTTPTLSCert()),
+ fmt.Sprintf("-server.http-tls-key-path=%s", lokiServerHTTPTLSKey()),
+ "-server.http-tls-client-auth=RequireAndVerifyClientCert",
+ },
+ Ports: []corev1.ContainerPort{
+ {
+ Name: lokiInternalHTTPPortName,
+ ContainerPort: internalHTTPPort,
+ Protocol: protocolTCP,
+ },
},
}
+
uriSchemeContainerSpec := corev1.Container{
ReadinessProbe: &corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Scheme: corev1.URISchemeHTTPS,
+ Port: intstr.FromInt(internalHTTPPort),
},
},
},
@@ -85,6 +144,7 @@ func configureHTTPServicePKI(podSpec *corev1.PodSpec, serviceName string) error
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Scheme: corev1.URISchemeHTTPS,
+ Port: intstr.FromInt(internalHTTPPort),
},
},
},
diff --git a/operator/internal/manifests/service_monitor.go b/operator/internal/manifests/service_monitor.go
index bf8b62a90ba5d..9d9b3636cc6c2 100644
--- a/operator/internal/manifests/service_monitor.go
+++ b/operator/internal/manifests/service_monitor.go
@@ -28,7 +28,7 @@ func NewDistributorServiceMonitor(opts Options) *monitoringv1.ServiceMonitor {
serviceMonitorName := serviceMonitorName(DistributorName(opts.Name))
serviceName := serviceNameDistributorHTTP(opts.Name)
- lokiEndpoint := serviceMonitorEndpoint(lokiHTTPPortName, serviceName, opts.Namespace, opts.Gates.ServiceMonitorTLSEndpoints)
+ lokiEndpoint := lokiServiceMonitorEndpoint(opts.Name, lokiHTTPPortName, serviceName, opts.Namespace, opts.Gates.ServiceMonitorTLSEndpoints)
return newServiceMonitor(opts.Namespace, serviceMonitorName, l, lokiEndpoint)
}
@@ -39,7 +39,7 @@ func NewIngesterServiceMonitor(opts Options) *monitoringv1.ServiceMonitor {
serviceMonitorName := serviceMonitorName(IngesterName(opts.Name))
serviceName := serviceNameIngesterHTTP(opts.Name)
- lokiEndpoint := serviceMonitorEndpoint(lokiHTTPPortName, serviceName, opts.Namespace, opts.Gates.ServiceMonitorTLSEndpoints)
+ lokiEndpoint := lokiServiceMonitorEndpoint(opts.Name, lokiHTTPPortName, serviceName, opts.Namespace, opts.Gates.ServiceMonitorTLSEndpoints)
return newServiceMonitor(opts.Namespace, serviceMonitorName, l, lokiEndpoint)
}
@@ -50,7 +50,7 @@ func NewQuerierServiceMonitor(opts Options) *monitoringv1.ServiceMonitor {
serviceMonitorName := serviceMonitorName(QuerierName(opts.Name))
serviceName := serviceNameQuerierHTTP(opts.Name)
- lokiEndpoint := serviceMonitorEndpoint(lokiHTTPPortName, serviceName, opts.Namespace, opts.Gates.ServiceMonitorTLSEndpoints)
+ lokiEndpoint := lokiServiceMonitorEndpoint(opts.Name, lokiHTTPPortName, serviceName, opts.Namespace, opts.Gates.ServiceMonitorTLSEndpoints)
return newServiceMonitor(opts.Namespace, serviceMonitorName, l, lokiEndpoint)
}
@@ -61,7 +61,7 @@ func NewCompactorServiceMonitor(opts Options) *monitoringv1.ServiceMonitor {
serviceMonitorName := serviceMonitorName(CompactorName(opts.Name))
serviceName := serviceNameCompactorHTTP(opts.Name)
- lokiEndpoint := serviceMonitorEndpoint(lokiHTTPPortName, serviceName, opts.Namespace, opts.Gates.ServiceMonitorTLSEndpoints)
+ lokiEndpoint := lokiServiceMonitorEndpoint(opts.Name, lokiHTTPPortName, serviceName, opts.Namespace, opts.Gates.ServiceMonitorTLSEndpoints)
return newServiceMonitor(opts.Namespace, serviceMonitorName, l, lokiEndpoint)
}
@@ -72,7 +72,7 @@ func NewQueryFrontendServiceMonitor(opts Options) *monitoringv1.ServiceMonitor {
serviceMonitorName := serviceMonitorName(QueryFrontendName(opts.Name))
serviceName := serviceNameQueryFrontendHTTP(opts.Name)
- lokiEndpoint := serviceMonitorEndpoint(lokiHTTPPortName, serviceName, opts.Namespace, opts.Gates.ServiceMonitorTLSEndpoints)
+ lokiEndpoint := lokiServiceMonitorEndpoint(opts.Name, lokiHTTPPortName, serviceName, opts.Namespace, opts.Gates.ServiceMonitorTLSEndpoints)
return newServiceMonitor(opts.Namespace, serviceMonitorName, l, lokiEndpoint)
}
@@ -83,7 +83,7 @@ func NewIndexGatewayServiceMonitor(opts Options) *monitoringv1.ServiceMonitor {
serviceMonitorName := serviceMonitorName(IndexGatewayName(opts.Name))
serviceName := serviceNameIndexGatewayHTTP(opts.Name)
- lokiEndpoint := serviceMonitorEndpoint(lokiHTTPPortName, serviceName, opts.Namespace, opts.Gates.ServiceMonitorTLSEndpoints)
+ lokiEndpoint := lokiServiceMonitorEndpoint(opts.Name, lokiHTTPPortName, serviceName, opts.Namespace, opts.Gates.ServiceMonitorTLSEndpoints)
return newServiceMonitor(opts.Namespace, serviceMonitorName, l, lokiEndpoint)
}
@@ -94,7 +94,7 @@ func NewRulerServiceMonitor(opts Options) *monitoringv1.ServiceMonitor {
serviceMonitorName := serviceMonitorName(RulerName(opts.Name))
serviceName := serviceNameRulerHTTP(opts.Name)
- lokiEndpoint := serviceMonitorEndpoint(lokiHTTPPortName, serviceName, opts.Namespace, opts.Gates.ServiceMonitorTLSEndpoints)
+ lokiEndpoint := lokiServiceMonitorEndpoint(opts.Name, lokiHTTPPortName, serviceName, opts.Namespace, opts.Gates.ServiceMonitorTLSEndpoints)
return newServiceMonitor(opts.Namespace, serviceMonitorName, l, lokiEndpoint)
}
@@ -103,14 +103,15 @@ func NewRulerServiceMonitor(opts Options) *monitoringv1.ServiceMonitor {
func NewGatewayServiceMonitor(opts Options) *monitoringv1.ServiceMonitor {
l := ComponentLabels(LabelGatewayComponent, opts.Name)
- serviceMonitorName := serviceMonitorName(GatewayName(opts.Name))
+ gatewayName := GatewayName(opts.Name)
+ serviceMonitorName := serviceMonitorName(gatewayName)
serviceName := serviceNameGatewayHTTP(opts.Name)
- gwEndpoint := serviceMonitorEndpoint(gatewayInternalPortName, serviceName, opts.Namespace, opts.Gates.ServiceMonitorTLSEndpoints)
+ gwEndpoint := gatewayServiceMonitorEndpoint(gatewayName, gatewayInternalPortName, serviceName, opts.Namespace, opts.Gates.ServiceMonitorTLSEndpoints)
sm := newServiceMonitor(opts.Namespace, serviceMonitorName, l, gwEndpoint)
if opts.Stack.Tenants != nil {
- if err := configureGatewayServiceMonitorForMode(sm, opts.Stack.Tenants.Mode, opts.Gates); err != nil {
+ if err := configureGatewayServiceMonitorForMode(sm, opts); err != nil {
return sm
}
}
diff --git a/operator/internal/manifests/service_monitor_test.go b/operator/internal/manifests/service_monitor_test.go
index 34661dce10aab..de46dfbda14f7 100644
--- a/operator/internal/manifests/service_monitor_test.go
+++ b/operator/internal/manifests/service_monitor_test.go
@@ -22,11 +22,9 @@ func TestServiceMonitorMatchLabels(t *testing.T) {
}
featureGates := configv1.FeatureGates{
+ BuiltInCertManagement: configv1.BuiltInCertManagement{Enabled: true},
ServiceMonitors: true,
ServiceMonitorTLSEndpoints: true,
- OpenShift: configv1.OpenShiftFeatureGates{
- ServingCertsService: true,
- },
}
opt := Options{
@@ -114,14 +112,16 @@ func TestServiceMonitorMatchLabels(t *testing.T) {
}
}
-func TestServiceMonitorEndpoints_ForOpenShiftLoggingMode(t *testing.T) {
+func TestServiceMonitorEndpoints_ForBuiltInCertRotation(t *testing.T) {
+ type test struct {
+ Service *corev1.Service
+ ServiceMonitor *monitoringv1.ServiceMonitor
+ }
+
featureGates := configv1.FeatureGates{
- LokiStackGateway: true,
+ BuiltInCertManagement: configv1.BuiltInCertManagement{Enabled: true},
ServiceMonitors: true,
ServiceMonitorTLSEndpoints: true,
- OpenShift: configv1.OpenShiftFeatureGates{
- ServingCertsService: true,
- },
}
opt := Options{
@@ -131,17 +131,398 @@ func TestServiceMonitorEndpoints_ForOpenShiftLoggingMode(t *testing.T) {
Gates: featureGates,
Stack: lokiv1.LokiStackSpec{
Size: lokiv1.SizeOneXExtraSmall,
- Tenants: &lokiv1.TenantsSpec{
- Mode: lokiv1.OpenshiftLogging,
- },
Template: &lokiv1.LokiTemplateSpec{
- Gateway: &lokiv1.LokiComponentSpec{
+ Compactor: &lokiv1.LokiComponentSpec{
+ Replicas: 1,
+ },
+ Distributor: &lokiv1.LokiComponentSpec{
+ Replicas: 1,
+ },
+ Ingester: &lokiv1.LokiComponentSpec{
+ Replicas: 1,
+ },
+ Querier: &lokiv1.LokiComponentSpec{
Replicas: 1,
},
+ QueryFrontend: &lokiv1.LokiComponentSpec{
+ Replicas: 1,
+ },
+ IndexGateway: &lokiv1.LokiComponentSpec{
+ Replicas: 1,
+ },
+ Ruler: &lokiv1.LokiComponentSpec{
+ Replicas: 1,
+ },
+ },
+ },
+ }
+
+ table := []test{
+ {
+ Service: NewDistributorHTTPService(opt),
+ ServiceMonitor: NewDistributorServiceMonitor(opt),
+ },
+ {
+ Service: NewIngesterHTTPService(opt),
+ ServiceMonitor: NewIngesterServiceMonitor(opt),
+ },
+ {
+ Service: NewQuerierHTTPService(opt),
+ ServiceMonitor: NewQuerierServiceMonitor(opt),
+ },
+ {
+ Service: NewQueryFrontendHTTPService(opt),
+ ServiceMonitor: NewQueryFrontendServiceMonitor(opt),
+ },
+ {
+ Service: NewCompactorHTTPService(opt),
+ ServiceMonitor: NewCompactorServiceMonitor(opt),
+ },
+ {
+ Service: NewIndexGatewayHTTPService(opt),
+ ServiceMonitor: NewIndexGatewayServiceMonitor(opt),
+ },
+ {
+ Service: NewRulerHTTPService(opt),
+ ServiceMonitor: NewRulerServiceMonitor(opt),
+ },
+ }
+
+ for _, tst := range table {
+ testName := fmt.Sprintf("%s_%s", tst.Service.GetName(), tst.ServiceMonitor.GetName())
+ t.Run(testName, func(t *testing.T) {
+ t.Parallel()
+
+ require.NotNil(t, tst.ServiceMonitor.Spec.Endpoints)
+ require.NotNil(t, tst.ServiceMonitor.Spec.Endpoints[0].TLSConfig)
+
+ // Do not use bearer authentication for loki endpoints
+ require.Empty(t, tst.ServiceMonitor.Spec.Endpoints[0].BearerTokenFile)
+ require.Empty(t, tst.ServiceMonitor.Spec.Endpoints[0].BearerTokenSecret)
+
+ // Check using built-in PKI
+ c := tst.ServiceMonitor.Spec.Endpoints[0].TLSConfig
+ require.Equal(t, c.CA.ConfigMap.LocalObjectReference.Name, signingCABundleName(opt.Name))
+ require.Equal(t, c.Cert.Secret.LocalObjectReference.Name, tst.Service.Name)
+ require.Equal(t, c.KeySecret.LocalObjectReference.Name, tst.Service.Name)
+ })
+ }
+}
+
+func TestServiceMonitorEndpoints_ForGatewayServiceMonitor(t *testing.T) {
+ tt := []struct {
+ desc string
+ opts Options
+ total int
+ want []monitoringv1.Endpoint
+ }{
+ {
+ desc: "default",
+ opts: Options{
+ Name: "test",
+ Namespace: "test",
+ Image: "test",
+ Stack: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXExtraSmall,
+ Tenants: &lokiv1.TenantsSpec{
+ Mode: lokiv1.Static,
+ },
+ Template: &lokiv1.LokiTemplateSpec{
+ Gateway: &lokiv1.LokiComponentSpec{
+ Replicas: 1,
+ },
+ },
+ },
+ },
+ total: 1,
+ want: []monitoringv1.Endpoint{
+ {
+ Port: gatewayInternalPortName,
+ Path: "/metrics",
+ Scheme: "http",
+ },
+ },
+ },
+ {
+ desc: "with http encryption",
+ opts: Options{
+ Name: "test",
+ Namespace: "test",
+ Image: "test",
+ Gates: configv1.FeatureGates{
+ LokiStackGateway: true,
+ BuiltInCertManagement: configv1.BuiltInCertManagement{Enabled: true},
+ ServiceMonitors: true,
+ ServiceMonitorTLSEndpoints: true,
+ },
+ Stack: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXExtraSmall,
+ Tenants: &lokiv1.TenantsSpec{
+ Mode: lokiv1.Static,
+ },
+ Template: &lokiv1.LokiTemplateSpec{
+ Gateway: &lokiv1.LokiComponentSpec{
+ Replicas: 1,
+ },
+ },
+ },
+ },
+ total: 1,
+ want: []monitoringv1.Endpoint{
+ {
+ Port: gatewayInternalPortName,
+ Path: "/metrics",
+ Scheme: "https",
+ BearerTokenSecret: corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "test-gateway-token",
+ },
+ Key: corev1.ServiceAccountTokenKey,
+ },
+ TLSConfig: &monitoringv1.TLSConfig{
+ SafeTLSConfig: monitoringv1.SafeTLSConfig{
+ CA: monitoringv1.SecretOrConfigMap{
+ ConfigMap: &corev1.ConfigMapKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: gatewaySigningCABundleName("test-gateway"),
+ },
+ Key: caFile,
+ },
+ },
+ ServerName: "test-gateway-http.test.svc.cluster.local",
+ },
+ },
+ },
+ },
+ },
+ {
+ desc: "openshift-logging",
+ opts: Options{
+ Name: "test",
+ Namespace: "test",
+ Image: "test",
+ Stack: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXExtraSmall,
+ Tenants: &lokiv1.TenantsSpec{
+ Mode: lokiv1.OpenshiftLogging,
+ },
+ Template: &lokiv1.LokiTemplateSpec{
+ Gateway: &lokiv1.LokiComponentSpec{
+ Replicas: 1,
+ },
+ },
+ },
+ },
+ total: 2,
+ want: []monitoringv1.Endpoint{
+ {
+ Port: gatewayInternalPortName,
+ Path: "/metrics",
+ Scheme: "http",
+ },
+ {
+ Port: "opa-metrics",
+ Path: "/metrics",
+ Scheme: "http",
+ },
+ },
+ },
+ {
+ desc: "openshift-logging with http encryption",
+ opts: Options{
+ Name: "test",
+ Namespace: "test",
+ Image: "test",
+ Gates: configv1.FeatureGates{
+ LokiStackGateway: true,
+ BuiltInCertManagement: configv1.BuiltInCertManagement{Enabled: true},
+ ServiceMonitors: true,
+ ServiceMonitorTLSEndpoints: true,
+ },
+ Stack: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXExtraSmall,
+ Tenants: &lokiv1.TenantsSpec{
+ Mode: lokiv1.OpenshiftLogging,
+ },
+ Template: &lokiv1.LokiTemplateSpec{
+ Gateway: &lokiv1.LokiComponentSpec{
+ Replicas: 1,
+ },
+ },
+ },
+ },
+ total: 2,
+ want: []monitoringv1.Endpoint{
+ {
+ Port: gatewayInternalPortName,
+ Path: "/metrics",
+ Scheme: "https",
+ BearerTokenSecret: corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "test-gateway-token",
+ },
+ Key: corev1.ServiceAccountTokenKey,
+ },
+ TLSConfig: &monitoringv1.TLSConfig{
+ SafeTLSConfig: monitoringv1.SafeTLSConfig{
+ CA: monitoringv1.SecretOrConfigMap{
+ ConfigMap: &corev1.ConfigMapKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: gatewaySigningCABundleName("test-gateway"),
+ },
+ Key: caFile,
+ },
+ },
+ ServerName: "test-gateway-http.test.svc.cluster.local",
+ },
+ },
+ },
+ {
+ Port: "opa-metrics",
+ Path: "/metrics",
+ Scheme: "https",
+ BearerTokenSecret: corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "test-gateway-token",
+ },
+ Key: corev1.ServiceAccountTokenKey,
+ },
+ TLSConfig: &monitoringv1.TLSConfig{
+ SafeTLSConfig: monitoringv1.SafeTLSConfig{
+ CA: monitoringv1.SecretOrConfigMap{
+ ConfigMap: &corev1.ConfigMapKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: gatewaySigningCABundleName("test-gateway"),
+ },
+ Key: caFile,
+ },
+ },
+ ServerName: "test-gateway-http.test.svc.cluster.local",
+ },
+ },
+ },
+ },
+ },
+ {
+ desc: "openshift-network",
+ opts: Options{
+ Name: "test",
+ Namespace: "test",
+ Image: "test",
+ Stack: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXExtraSmall,
+ Tenants: &lokiv1.TenantsSpec{
+ Mode: lokiv1.OpenshiftNetwork,
+ },
+ Template: &lokiv1.LokiTemplateSpec{
+ Gateway: &lokiv1.LokiComponentSpec{
+ Replicas: 1,
+ },
+ },
+ },
+ },
+ total: 2,
+ want: []monitoringv1.Endpoint{
+ {
+ Port: gatewayInternalPortName,
+ Path: "/metrics",
+ Scheme: "http",
+ },
+ {
+ Port: "opa-metrics",
+ Path: "/metrics",
+ Scheme: "http",
+ },
+ },
+ },
+ {
+ desc: "openshift-network with http encryption",
+ opts: Options{
+ Name: "test",
+ Namespace: "test",
+ Image: "test",
+ Gates: configv1.FeatureGates{
+ LokiStackGateway: true,
+ BuiltInCertManagement: configv1.BuiltInCertManagement{Enabled: true},
+ ServiceMonitors: true,
+ ServiceMonitorTLSEndpoints: true,
+ },
+ Stack: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXExtraSmall,
+ Tenants: &lokiv1.TenantsSpec{
+ Mode: lokiv1.OpenshiftNetwork,
+ },
+ Template: &lokiv1.LokiTemplateSpec{
+ Gateway: &lokiv1.LokiComponentSpec{
+ Replicas: 1,
+ },
+ },
+ },
+ },
+ total: 2,
+ want: []monitoringv1.Endpoint{
+ {
+ Port: gatewayInternalPortName,
+ Path: "/metrics",
+ Scheme: "https",
+ BearerTokenSecret: corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "test-gateway-token",
+ },
+ Key: corev1.ServiceAccountTokenKey,
+ },
+ TLSConfig: &monitoringv1.TLSConfig{
+ SafeTLSConfig: monitoringv1.SafeTLSConfig{
+ CA: monitoringv1.SecretOrConfigMap{
+ ConfigMap: &corev1.ConfigMapKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: gatewaySigningCABundleName("test-gateway"),
+ },
+ Key: caFile,
+ },
+ },
+ ServerName: "test-gateway-http.test.svc.cluster.local",
+ },
+ },
+ },
+ {
+ Port: "opa-metrics",
+ Path: "/metrics",
+ Scheme: "https",
+ BearerTokenSecret: corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "test-gateway-token",
+ },
+ Key: corev1.ServiceAccountTokenKey,
+ },
+ TLSConfig: &monitoringv1.TLSConfig{
+ SafeTLSConfig: monitoringv1.SafeTLSConfig{
+ CA: monitoringv1.SecretOrConfigMap{
+ ConfigMap: &corev1.ConfigMapKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: gatewaySigningCABundleName("test-gateway"),
+ },
+ Key: caFile,
+ },
+ },
+ ServerName: "test-gateway-http.test.svc.cluster.local",
+ },
+ },
+ },
},
},
}
- sm := NewGatewayServiceMonitor(opt)
- require.Len(t, sm.Spec.Endpoints, 2)
+ for _, tc := range tt {
+ tc := tc
+ t.Run(tc.desc, func(t *testing.T) {
+ t.Parallel()
+ sm := NewGatewayServiceMonitor(tc.opts)
+ require.Len(t, sm.Spec.Endpoints, tc.total)
+
+ for _, endpoint := range tc.want {
+ require.Contains(t, sm.Spec.Endpoints, endpoint)
+ }
+ })
+ }
}
diff --git a/operator/internal/manifests/service_test.go b/operator/internal/manifests/service_test.go
index 41a197650ba78..4f935b5bfd5d1 100644
--- a/operator/internal/manifests/service_test.go
+++ b/operator/internal/manifests/service_test.go
@@ -2,10 +2,14 @@ package manifests
import (
"fmt"
+ "strings"
"testing"
+ configv1 "github.com/grafana/loki/operator/apis/config/v1"
lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
)
@@ -257,3 +261,710 @@ func TestServicesMatchLabels(t *testing.T) {
}
}
}
+
+func TestServices_WithEncryption(t *testing.T) {
+ const (
+ stackName = "test"
+ stackNs = "ns"
+ )
+
+ opts := Options{
+ Name: stackName,
+ Namespace: stackNs,
+ Gates: configv1.FeatureGates{
+ HTTPEncryption: true,
+ GRPCEncryption: true,
+ },
+ Stack: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXExtraSmall,
+ Template: &lokiv1.LokiTemplateSpec{
+ Compactor: &lokiv1.LokiComponentSpec{
+ Replicas: 1,
+ },
+ Distributor: &lokiv1.LokiComponentSpec{
+ Replicas: 1,
+ },
+ Ingester: &lokiv1.LokiComponentSpec{
+ Replicas: 1,
+ },
+ Querier: &lokiv1.LokiComponentSpec{
+ Replicas: 1,
+ },
+ QueryFrontend: &lokiv1.LokiComponentSpec{
+ Replicas: 1,
+ },
+ Gateway: &lokiv1.LokiComponentSpec{
+ Replicas: 1,
+ },
+ IndexGateway: &lokiv1.LokiComponentSpec{
+ Replicas: 1,
+ },
+ Ruler: &lokiv1.LokiComponentSpec{
+ Replicas: 1,
+ },
+ },
+ },
+ TLSProfile: TLSProfileSpec{
+ MinTLSVersion: "VersionTLS12",
+ Ciphers: []string{"cipher1", "cipher2"},
+ },
+ }
+
+ tt := []struct {
+ desc string
+ buildFunc func(Options) ([]client.Object, error)
+ wantArgs []string
+ wantPorts []corev1.ContainerPort
+ wantVolumeMounts []corev1.VolumeMount
+ wantVolumes []corev1.Volume
+ }{
+ {
+ desc: "compactor",
+ buildFunc: BuildCompactor,
+ wantArgs: []string{
+ "-internal-server.enable=true",
+ "-internal-server.http-listen-address=",
+ fmt.Sprintf("-internal-server.http-tls-cert-path=%s", lokiServerHTTPTLSCert()),
+ fmt.Sprintf("-internal-server.http-tls-key-path=%s", lokiServerHTTPTLSKey()),
+ "-internal-server.http-tls-cipher-suites=cipher1,cipher2",
+ "-internal-server.http-tls-min-version=VersionTLS12",
+ "-server.tls-cipher-suites=cipher1,cipher2",
+ "-server.tls-min-version=VersionTLS12",
+ fmt.Sprintf("-server.http-tls-ca-path=%s", signingCAPath()),
+ fmt.Sprintf("-server.http-tls-cert-path=%s", lokiServerHTTPTLSCert()),
+ fmt.Sprintf("-server.http-tls-key-path=%s", lokiServerHTTPTLSKey()),
+ "-server.http-tls-client-auth=RequireAndVerifyClientCert",
+ fmt.Sprintf("-server.grpc-tls-ca-path=%s", signingCAPath()),
+ fmt.Sprintf("-server.grpc-tls-cert-path=%s", lokiServerGRPCTLSCert()),
+ fmt.Sprintf("-server.grpc-tls-key-path=%s", lokiServerGRPCTLSKey()),
+ "-server.grpc-tls-client-auth=RequireAndVerifyClientCert",
+ },
+ wantPorts: []corev1.ContainerPort{
+ {
+ Name: lokiInternalHTTPPortName,
+ ContainerPort: internalHTTPPort,
+ Protocol: protocolTCP,
+ },
+ },
+ wantVolumeMounts: []corev1.VolumeMount{
+ {
+ Name: serviceNameCompactorHTTP(stackName),
+ ReadOnly: false,
+ MountPath: lokiServerHTTPTLSDir(),
+ },
+ {
+ Name: serviceNameCompactorGRPC(stackName),
+ ReadOnly: false,
+ MountPath: lokiServerGRPCTLSDir(),
+ },
+ {
+ Name: signingCABundleName(stackName),
+ ReadOnly: false,
+ MountPath: caBundleDir,
+ },
+ },
+ wantVolumes: []corev1.Volume{
+ {
+ Name: serviceNameCompactorHTTP(stackName),
+ VolumeSource: corev1.VolumeSource{
+ Secret: &corev1.SecretVolumeSource{
+ SecretName: serviceNameCompactorHTTP(stackName),
+ },
+ },
+ },
+ {
+ Name: serviceNameCompactorGRPC(stackName),
+ VolumeSource: corev1.VolumeSource{
+ Secret: &corev1.SecretVolumeSource{
+ SecretName: serviceNameCompactorGRPC(stackName),
+ },
+ },
+ },
+ {
+ Name: signingCABundleName(stackName),
+ VolumeSource: corev1.VolumeSource{
+ ConfigMap: &corev1.ConfigMapVolumeSource{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: signingCABundleName(stackName),
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ desc: "distributor",
+ buildFunc: BuildDistributor,
+ wantArgs: []string{
+ "-ingester.client.tls-enabled=true",
+ fmt.Sprintf("-ingester.client.tls-ca-path=%s", signingCAPath()),
+ fmt.Sprintf("-ingester.client.tls-cert-path=%s", lokiServerGRPCTLSCert()),
+ fmt.Sprintf("-ingester.client.tls-key-path=%s", lokiServerGRPCTLSKey()),
+ fmt.Sprintf("-ingester.client.tls-server-name=%s", fqdn(serviceNameIngesterGRPC(stackName), stackNs)),
+ "-ingester.client.tls-min-version=VersionTLS12",
+ "-ingester.client.tls-cipher-suites=cipher1,cipher2",
+ "-internal-server.enable=true",
+ "-internal-server.http-listen-address=",
+ fmt.Sprintf("-internal-server.http-tls-cert-path=%s", lokiServerHTTPTLSCert()),
+ fmt.Sprintf("-internal-server.http-tls-key-path=%s", lokiServerHTTPTLSKey()),
+ "-internal-server.http-tls-cipher-suites=cipher1,cipher2",
+ "-internal-server.http-tls-min-version=VersionTLS12",
+ "-server.tls-cipher-suites=cipher1,cipher2",
+ "-server.tls-min-version=VersionTLS12",
+ fmt.Sprintf("-server.http-tls-ca-path=%s", signingCAPath()),
+ fmt.Sprintf("-server.http-tls-cert-path=%s", lokiServerHTTPTLSCert()),
+ fmt.Sprintf("-server.http-tls-key-path=%s", lokiServerHTTPTLSKey()),
+ "-server.http-tls-client-auth=RequireAndVerifyClientCert",
+ fmt.Sprintf("-server.grpc-tls-ca-path=%s", signingCAPath()),
+ fmt.Sprintf("-server.grpc-tls-cert-path=%s", lokiServerGRPCTLSCert()),
+ fmt.Sprintf("-server.grpc-tls-key-path=%s", lokiServerGRPCTLSKey()),
+ "-server.grpc-tls-client-auth=RequireAndVerifyClientCert",
+ },
+ wantPorts: []corev1.ContainerPort{
+ {
+ Name: lokiInternalHTTPPortName,
+ ContainerPort: internalHTTPPort,
+ Protocol: protocolTCP,
+ },
+ },
+ wantVolumeMounts: []corev1.VolumeMount{
+ {
+ Name: serviceNameDistributorHTTP(stackName),
+ ReadOnly: false,
+ MountPath: lokiServerHTTPTLSDir(),
+ },
+ {
+ Name: serviceNameDistributorGRPC(stackName),
+ ReadOnly: false,
+ MountPath: lokiServerGRPCTLSDir(),
+ },
+ {
+ Name: signingCABundleName(stackName),
+ ReadOnly: false,
+ MountPath: caBundleDir,
+ },
+ },
+ wantVolumes: []corev1.Volume{
+ {
+ Name: serviceNameDistributorHTTP(stackName),
+ VolumeSource: corev1.VolumeSource{
+ Secret: &corev1.SecretVolumeSource{
+ SecretName: serviceNameDistributorHTTP(stackName),
+ },
+ },
+ },
+ {
+ Name: serviceNameDistributorGRPC(stackName),
+ VolumeSource: corev1.VolumeSource{
+ Secret: &corev1.SecretVolumeSource{
+ SecretName: serviceNameDistributorGRPC(stackName),
+ },
+ },
+ },
+ {
+ Name: signingCABundleName(stackName),
+ VolumeSource: corev1.VolumeSource{
+ ConfigMap: &corev1.ConfigMapVolumeSource{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: signingCABundleName(stackName),
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ desc: "index-gateway",
+ buildFunc: BuildIndexGateway,
+ wantArgs: []string{
+ "-internal-server.enable=true",
+ "-internal-server.http-listen-address=",
+ fmt.Sprintf("-internal-server.http-tls-cert-path=%s", lokiServerHTTPTLSCert()),
+ fmt.Sprintf("-internal-server.http-tls-key-path=%s", lokiServerHTTPTLSKey()),
+ "-internal-server.http-tls-cipher-suites=cipher1,cipher2",
+ "-internal-server.http-tls-min-version=VersionTLS12",
+ "-server.tls-cipher-suites=cipher1,cipher2",
+ "-server.tls-min-version=VersionTLS12",
+ fmt.Sprintf("-server.http-tls-ca-path=%s", signingCAPath()),
+ fmt.Sprintf("-server.http-tls-cert-path=%s", lokiServerHTTPTLSCert()),
+ fmt.Sprintf("-server.http-tls-key-path=%s", lokiServerHTTPTLSKey()),
+ "-server.http-tls-client-auth=RequireAndVerifyClientCert",
+ fmt.Sprintf("-server.grpc-tls-ca-path=%s", signingCAPath()),
+ fmt.Sprintf("-server.grpc-tls-cert-path=%s", lokiServerGRPCTLSCert()),
+ fmt.Sprintf("-server.grpc-tls-key-path=%s", lokiServerGRPCTLSKey()),
+ "-server.grpc-tls-client-auth=RequireAndVerifyClientCert",
+ },
+ wantPorts: []corev1.ContainerPort{
+ {
+ Name: lokiInternalHTTPPortName,
+ ContainerPort: internalHTTPPort,
+ Protocol: protocolTCP,
+ },
+ },
+ wantVolumeMounts: []corev1.VolumeMount{
+ {
+ Name: serviceNameIndexGatewayHTTP(stackName),
+ ReadOnly: false,
+ MountPath: lokiServerHTTPTLSDir(),
+ },
+ {
+ Name: serviceNameIndexGatewayGRPC(stackName),
+ ReadOnly: false,
+ MountPath: lokiServerGRPCTLSDir(),
+ },
+ {
+ Name: signingCABundleName(stackName),
+ ReadOnly: false,
+ MountPath: caBundleDir,
+ },
+ },
+ wantVolumes: []corev1.Volume{
+ {
+ Name: serviceNameIndexGatewayHTTP(stackName),
+ VolumeSource: corev1.VolumeSource{
+ Secret: &corev1.SecretVolumeSource{
+ SecretName: serviceNameIndexGatewayHTTP(stackName),
+ },
+ },
+ },
+ {
+ Name: serviceNameIndexGatewayGRPC(stackName),
+ VolumeSource: corev1.VolumeSource{
+ Secret: &corev1.SecretVolumeSource{
+ SecretName: serviceNameIndexGatewayGRPC(stackName),
+ },
+ },
+ },
+ {
+ Name: signingCABundleName(stackName),
+ VolumeSource: corev1.VolumeSource{
+ ConfigMap: &corev1.ConfigMapVolumeSource{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: signingCABundleName(stackName),
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ desc: "ingester",
+ buildFunc: BuildIngester,
+ wantArgs: []string{
+ "-ingester.client.tls-enabled=true",
+ fmt.Sprintf("-ingester.client.tls-ca-path=%s", signingCAPath()),
+ fmt.Sprintf("-ingester.client.tls-cert-path=%s", lokiServerGRPCTLSCert()),
+ fmt.Sprintf("-ingester.client.tls-key-path=%s", lokiServerGRPCTLSKey()),
+ fmt.Sprintf("-ingester.client.tls-server-name=%s", fqdn(serviceNameIngesterGRPC(stackName), stackNs)),
+ "-ingester.client.tls-min-version=VersionTLS12",
+ "-ingester.client.tls-cipher-suites=cipher1,cipher2",
+ "-boltdb.shipper.index-gateway-client.grpc.tls-enabled=true",
+ fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-ca-path=%s", signingCAPath()),
+ fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-cert-path=%s", lokiServerGRPCTLSCert()),
+ fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-key-path=%s", lokiServerGRPCTLSKey()),
+ fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-server-name=%s", fqdn(serviceNameIndexGatewayGRPC(stackName), stackNs)),
+ "-boltdb.shipper.index-gateway-client.grpc.tls-min-version=VersionTLS12",
+ "-boltdb.shipper.index-gateway-client.grpc.tls-cipher-suites=cipher1,cipher2",
+ "-internal-server.enable=true",
+ "-internal-server.http-listen-address=",
+ fmt.Sprintf("-internal-server.http-tls-cert-path=%s", lokiServerHTTPTLSCert()),
+ fmt.Sprintf("-internal-server.http-tls-key-path=%s", lokiServerHTTPTLSKey()),
+ "-internal-server.http-tls-cipher-suites=cipher1,cipher2",
+ "-internal-server.http-tls-min-version=VersionTLS12",
+ "-server.tls-cipher-suites=cipher1,cipher2",
+ "-server.tls-min-version=VersionTLS12",
+ fmt.Sprintf("-server.http-tls-ca-path=%s", signingCAPath()),
+ fmt.Sprintf("-server.http-tls-cert-path=%s", lokiServerHTTPTLSCert()),
+ fmt.Sprintf("-server.http-tls-key-path=%s", lokiServerHTTPTLSKey()),
+ "-server.http-tls-client-auth=RequireAndVerifyClientCert",
+ fmt.Sprintf("-server.grpc-tls-ca-path=%s", signingCAPath()),
+ fmt.Sprintf("-server.grpc-tls-cert-path=%s", lokiServerGRPCTLSCert()),
+ fmt.Sprintf("-server.grpc-tls-key-path=%s", lokiServerGRPCTLSKey()),
+ "-server.grpc-tls-client-auth=RequireAndVerifyClientCert",
+ },
+ wantPorts: []corev1.ContainerPort{
+ {
+ Name: lokiInternalHTTPPortName,
+ ContainerPort: internalHTTPPort,
+ Protocol: protocolTCP,
+ },
+ },
+ wantVolumeMounts: []corev1.VolumeMount{
+ {
+ Name: serviceNameIngesterHTTP(stackName),
+ ReadOnly: false,
+ MountPath: lokiServerHTTPTLSDir(),
+ },
+ {
+ Name: serviceNameIngesterGRPC(stackName),
+ ReadOnly: false,
+ MountPath: lokiServerGRPCTLSDir(),
+ },
+ {
+ Name: signingCABundleName(stackName),
+ ReadOnly: false,
+ MountPath: caBundleDir,
+ },
+ },
+ wantVolumes: []corev1.Volume{
+ {
+ Name: serviceNameIngesterHTTP(stackName),
+ VolumeSource: corev1.VolumeSource{
+ Secret: &corev1.SecretVolumeSource{
+ SecretName: serviceNameIngesterHTTP(stackName),
+ },
+ },
+ },
+ {
+ Name: serviceNameIngesterGRPC(stackName),
+ VolumeSource: corev1.VolumeSource{
+ Secret: &corev1.SecretVolumeSource{
+ SecretName: serviceNameIngesterGRPC(stackName),
+ },
+ },
+ },
+ {
+ Name: signingCABundleName(stackName),
+ VolumeSource: corev1.VolumeSource{
+ ConfigMap: &corev1.ConfigMapVolumeSource{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: signingCABundleName(stackName),
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ desc: "querier",
+ buildFunc: BuildQuerier,
+ wantArgs: []string{
+ "-ingester.client.tls-enabled=true",
+ fmt.Sprintf("-ingester.client.tls-ca-path=%s", signingCAPath()),
+ fmt.Sprintf("-ingester.client.tls-cert-path=%s", lokiServerGRPCTLSCert()),
+ fmt.Sprintf("-ingester.client.tls-key-path=%s", lokiServerGRPCTLSKey()),
+ fmt.Sprintf("-ingester.client.tls-server-name=%s", fqdn(serviceNameIngesterGRPC(stackName), stackNs)),
+ "-ingester.client.tls-min-version=VersionTLS12",
+ "-ingester.client.tls-cipher-suites=cipher1,cipher2",
+ "-querier.frontend-client.tls-enabled=true",
+ fmt.Sprintf("-querier.frontend-client.tls-ca-path=%s", signingCAPath()),
+ fmt.Sprintf("-querier.frontend-client.tls-cert-path=%s", lokiServerGRPCTLSCert()),
+ fmt.Sprintf("-querier.frontend-client.tls-key-path=%s", lokiServerGRPCTLSKey()),
+ fmt.Sprintf("-querier.frontend-client.tls-server-name=%s", fqdn(serviceNameQueryFrontendGRPC(stackName), stackNs)),
+ "-querier.frontend-client.tls-min-version=VersionTLS12",
+ "-querier.frontend-client.tls-cipher-suites=cipher1,cipher2",
+ "-boltdb.shipper.index-gateway-client.grpc.tls-enabled=true",
+ fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-ca-path=%s", signingCAPath()),
+ fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-cert-path=%s", lokiServerGRPCTLSCert()),
+ fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-key-path=%s", lokiServerGRPCTLSKey()),
+ fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-server-name=%s", fqdn(serviceNameIndexGatewayGRPC(stackName), stackNs)),
+ "-boltdb.shipper.index-gateway-client.grpc.tls-min-version=VersionTLS12",
+ "-boltdb.shipper.index-gateway-client.grpc.tls-cipher-suites=cipher1,cipher2",
+ "-internal-server.enable=true",
+ "-internal-server.http-listen-address=",
+ fmt.Sprintf("-internal-server.http-tls-cert-path=%s", lokiServerHTTPTLSCert()),
+ fmt.Sprintf("-internal-server.http-tls-key-path=%s", lokiServerHTTPTLSKey()),
+ "-internal-server.http-tls-cipher-suites=cipher1,cipher2",
+ "-internal-server.http-tls-min-version=VersionTLS12",
+ "-server.tls-cipher-suites=cipher1,cipher2",
+ "-server.tls-min-version=VersionTLS12",
+ fmt.Sprintf("-server.http-tls-ca-path=%s", signingCAPath()),
+ fmt.Sprintf("-server.http-tls-cert-path=%s", lokiServerHTTPTLSCert()),
+ fmt.Sprintf("-server.http-tls-key-path=%s", lokiServerHTTPTLSKey()),
+ "-server.http-tls-client-auth=RequireAndVerifyClientCert",
+ fmt.Sprintf("-server.grpc-tls-ca-path=%s", signingCAPath()),
+ fmt.Sprintf("-server.grpc-tls-cert-path=%s", lokiServerGRPCTLSCert()),
+ fmt.Sprintf("-server.grpc-tls-key-path=%s", lokiServerGRPCTLSKey()),
+ "-server.grpc-tls-client-auth=RequireAndVerifyClientCert",
+ },
+ wantPorts: []corev1.ContainerPort{
+ {
+ Name: lokiInternalHTTPPortName,
+ ContainerPort: internalHTTPPort,
+ Protocol: protocolTCP,
+ },
+ },
+ wantVolumeMounts: []corev1.VolumeMount{
+ {
+ Name: serviceNameQuerierHTTP(stackName),
+ ReadOnly: false,
+ MountPath: lokiServerHTTPTLSDir(),
+ },
+ {
+ Name: serviceNameQuerierGRPC(stackName),
+ ReadOnly: false,
+ MountPath: lokiServerGRPCTLSDir(),
+ },
+ {
+ Name: signingCABundleName(stackName),
+ ReadOnly: false,
+ MountPath: caBundleDir,
+ },
+ },
+ wantVolumes: []corev1.Volume{
+ {
+ Name: serviceNameQuerierHTTP(stackName),
+ VolumeSource: corev1.VolumeSource{
+ Secret: &corev1.SecretVolumeSource{
+ SecretName: serviceNameQuerierHTTP(stackName),
+ },
+ },
+ },
+ {
+ Name: serviceNameQuerierGRPC(stackName),
+ VolumeSource: corev1.VolumeSource{
+ Secret: &corev1.SecretVolumeSource{
+ SecretName: serviceNameQuerierGRPC(stackName),
+ },
+ },
+ },
+ {
+ Name: signingCABundleName(stackName),
+ VolumeSource: corev1.VolumeSource{
+ ConfigMap: &corev1.ConfigMapVolumeSource{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: signingCABundleName(stackName),
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ desc: "query-frontend",
+ buildFunc: BuildQueryFrontend,
+ wantArgs: []string{
+ "-frontend.tail-tls-config.tls-min-version=VersionTLS12",
+ "-frontend.tail-tls-config.tls-cipher-suites=cipher1,cipher2",
+ fmt.Sprintf("-frontend.tail-tls-config.tls-cert-path=%s", lokiServerHTTPTLSCert()),
+ fmt.Sprintf("-frontend.tail-tls-config.tls-key-path=%s", lokiServerHTTPTLSKey()),
+ "-frontend.tail-proxy-url=https://test-querier-http.ns.svc.cluster.local:3100",
+ fmt.Sprintf("-frontend.tail-tls-config.tls-ca-path=%s", signingCAPath()),
+ "-internal-server.enable=true",
+ "-internal-server.http-listen-address=",
+ fmt.Sprintf("-internal-server.http-tls-cert-path=%s", lokiServerHTTPTLSCert()),
+ fmt.Sprintf("-internal-server.http-tls-key-path=%s", lokiServerHTTPTLSKey()),
+ "-internal-server.http-tls-cipher-suites=cipher1,cipher2",
+ "-internal-server.http-tls-min-version=VersionTLS12",
+ "-server.tls-cipher-suites=cipher1,cipher2",
+ "-server.tls-min-version=VersionTLS12",
+ fmt.Sprintf("-server.http-tls-ca-path=%s", signingCAPath()),
+ fmt.Sprintf("-server.http-tls-cert-path=%s", lokiServerHTTPTLSCert()),
+ fmt.Sprintf("-server.http-tls-key-path=%s", lokiServerHTTPTLSKey()),
+ "-server.http-tls-client-auth=RequireAndVerifyClientCert",
+ fmt.Sprintf("-server.grpc-tls-ca-path=%s", signingCAPath()),
+ fmt.Sprintf("-server.grpc-tls-cert-path=%s", lokiServerGRPCTLSCert()),
+ fmt.Sprintf("-server.grpc-tls-key-path=%s", lokiServerGRPCTLSKey()),
+ "-server.grpc-tls-client-auth=RequireAndVerifyClientCert",
+ },
+ wantPorts: []corev1.ContainerPort{
+ {
+ Name: lokiInternalHTTPPortName,
+ ContainerPort: internalHTTPPort,
+ Protocol: protocolTCP,
+ },
+ },
+ wantVolumeMounts: []corev1.VolumeMount{
+ {
+ Name: serviceNameQueryFrontendHTTP(stackName),
+ ReadOnly: false,
+ MountPath: lokiServerHTTPTLSDir(),
+ },
+ {
+ Name: serviceNameQueryFrontendGRPC(stackName),
+ ReadOnly: false,
+ MountPath: lokiServerGRPCTLSDir(),
+ },
+ {
+ Name: signingCABundleName(stackName),
+ ReadOnly: false,
+ MountPath: caBundleDir,
+ },
+ },
+ wantVolumes: []corev1.Volume{
+ {
+ Name: serviceNameQueryFrontendHTTP(stackName),
+ VolumeSource: corev1.VolumeSource{
+ Secret: &corev1.SecretVolumeSource{
+ SecretName: serviceNameQueryFrontendHTTP(stackName),
+ },
+ },
+ },
+ {
+ Name: serviceNameQueryFrontendGRPC(stackName),
+ VolumeSource: corev1.VolumeSource{
+ Secret: &corev1.SecretVolumeSource{
+ SecretName: serviceNameQueryFrontendGRPC(stackName),
+ },
+ },
+ },
+ {
+ Name: signingCABundleName(stackName),
+ VolumeSource: corev1.VolumeSource{
+ ConfigMap: &corev1.ConfigMapVolumeSource{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: signingCABundleName(stackName),
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ desc: "ruler",
+ buildFunc: BuildRuler,
+ wantArgs: []string{
+ "-boltdb.shipper.index-gateway-client.grpc.tls-enabled=true",
+ fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-ca-path=%s", signingCAPath()),
+ fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-cert-path=%s", lokiServerGRPCTLSCert()),
+ fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-key-path=%s", lokiServerGRPCTLSKey()),
+ fmt.Sprintf("-boltdb.shipper.index-gateway-client.grpc.tls-server-name=%s", fqdn(serviceNameIndexGatewayGRPC(stackName), stackNs)),
+ "-boltdb.shipper.index-gateway-client.grpc.tls-min-version=VersionTLS12",
+ "-boltdb.shipper.index-gateway-client.grpc.tls-cipher-suites=cipher1,cipher2",
+ "-ingester.client.tls-enabled=true",
+ fmt.Sprintf("-ingester.client.tls-ca-path=%s", signingCAPath()),
+ fmt.Sprintf("-ingester.client.tls-cert-path=%s", lokiServerGRPCTLSCert()),
+ fmt.Sprintf("-ingester.client.tls-key-path=%s", lokiServerGRPCTLSKey()),
+ fmt.Sprintf("-ingester.client.tls-server-name=%s", fqdn(serviceNameIngesterGRPC(stackName), stackNs)),
+ "-ingester.client.tls-min-version=VersionTLS12",
+ "-ingester.client.tls-cipher-suites=cipher1,cipher2",
+ "-ruler.client.tls-enabled=true",
+ fmt.Sprintf("-ruler.client.tls-ca-path=%s", signingCAPath()),
+ fmt.Sprintf("-ruler.client.tls-cert-path=%s", lokiServerGRPCTLSCert()),
+ fmt.Sprintf("-ruler.client.tls-key-path=%s", lokiServerGRPCTLSKey()),
+ fmt.Sprintf("-ruler.client.tls-server-name=%s", fqdn(serviceNameRulerGRPC(stackName), stackNs)),
+ "-ruler.client.tls-min-version=VersionTLS12",
+ "-ruler.client.tls-cipher-suites=cipher1,cipher2",
+ "-internal-server.enable=true",
+ "-internal-server.http-listen-address=",
+ fmt.Sprintf("-internal-server.http-tls-cert-path=%s", lokiServerHTTPTLSCert()),
+ fmt.Sprintf("-internal-server.http-tls-key-path=%s", lokiServerHTTPTLSKey()),
+ "-internal-server.http-tls-cipher-suites=cipher1,cipher2",
+ "-internal-server.http-tls-min-version=VersionTLS12",
+ "-server.tls-cipher-suites=cipher1,cipher2",
+ "-server.tls-min-version=VersionTLS12",
+ fmt.Sprintf("-server.http-tls-ca-path=%s", signingCAPath()),
+ fmt.Sprintf("-server.http-tls-cert-path=%s", lokiServerHTTPTLSCert()),
+ fmt.Sprintf("-server.http-tls-key-path=%s", lokiServerHTTPTLSKey()),
+ "-server.http-tls-client-auth=RequireAndVerifyClientCert",
+ fmt.Sprintf("-server.grpc-tls-ca-path=%s", signingCAPath()),
+ fmt.Sprintf("-server.grpc-tls-cert-path=%s", lokiServerGRPCTLSCert()),
+ fmt.Sprintf("-server.grpc-tls-key-path=%s", lokiServerGRPCTLSKey()),
+ "-server.grpc-tls-client-auth=RequireAndVerifyClientCert",
+ },
+ wantPorts: []corev1.ContainerPort{
+ {
+ Name: lokiInternalHTTPPortName,
+ ContainerPort: internalHTTPPort,
+ Protocol: protocolTCP,
+ },
+ },
+ wantVolumeMounts: []corev1.VolumeMount{
+ {
+ Name: serviceNameRulerHTTP(stackName),
+ ReadOnly: false,
+ MountPath: lokiServerHTTPTLSDir(),
+ },
+ {
+ Name: serviceNameRulerGRPC(stackName),
+ ReadOnly: false,
+ MountPath: lokiServerGRPCTLSDir(),
+ },
+ {
+ Name: signingCABundleName(stackName),
+ ReadOnly: false,
+ MountPath: caBundleDir,
+ },
+ },
+ wantVolumes: []corev1.Volume{
+ {
+ Name: serviceNameRulerHTTP(stackName),
+ VolumeSource: corev1.VolumeSource{
+ Secret: &corev1.SecretVolumeSource{
+ SecretName: serviceNameRulerHTTP(stackName),
+ },
+ },
+ },
+ {
+ Name: serviceNameRulerGRPC(stackName),
+ VolumeSource: corev1.VolumeSource{
+ Secret: &corev1.SecretVolumeSource{
+ SecretName: serviceNameRulerGRPC(stackName),
+ },
+ },
+ },
+ {
+ Name: signingCABundleName(stackName),
+ VolumeSource: corev1.VolumeSource{
+ ConfigMap: &corev1.ConfigMapVolumeSource{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: signingCABundleName(stackName),
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+ for _, test := range tt {
+ test := test
+ t.Run(test.desc, func(t *testing.T) {
+ t.Parallel()
+ objs, err := test.buildFunc(opts)
+ require.NoError(t, err)
+
+ var pod *corev1.PodSpec
+ switch o := objs[0].(type) {
+ case *appsv1.Deployment:
+ pod = &o.Spec.Template.Spec
+ case *appsv1.StatefulSet:
+ pod = &o.Spec.Template.Spec
+ default:
+ t.Fatal("Wrong object type given")
+ }
+
+ isEncryptionRelated := func(s string) bool {
+ return strings.Contains(s, "internal-server") || // Healthcheck server
+ strings.Contains(s, "client") || // Client certificates
+ strings.Contains(s, "-http") || // Serving HTTP certificates
+ strings.Contains(s, "-grpc") || // Serving GRPC certificates
+ strings.Contains(s, "ca") // Certificate authorities
+ }
+
+ // Check args not missing
+ for _, arg := range test.wantArgs {
+ require.Contains(t, pod.Containers[0].Args, arg)
+ }
+ for _, arg := range pod.Containers[0].Args {
+ if isEncryptionRelated(arg) {
+ require.Contains(t, test.wantArgs, arg)
+ }
+ }
+
+ // Check ports not missing
+ for _, port := range test.wantPorts {
+ require.Contains(t, pod.Containers[0].Ports, port)
+ }
+
+ // Check mounts not missing
+ for _, mount := range test.wantVolumeMounts {
+ require.Contains(t, pod.Containers[0].VolumeMounts, mount)
+ }
+ for _, mount := range pod.Containers[0].VolumeMounts {
+ if isEncryptionRelated(mount.Name) {
+ require.Contains(t, test.wantVolumeMounts, mount)
+ }
+ }
+
+ // Check volumes not missing
+ for _, volume := range test.wantVolumes {
+ require.Contains(t, pod.Volumes, volume)
+ }
+ for _, volume := range pod.Volumes {
+ if isEncryptionRelated(volume.Name) {
+ require.Contains(t, test.wantVolumes, volume)
+ }
+ }
+ })
+ }
+}
diff --git a/operator/internal/manifests/var.go b/operator/internal/manifests/var.go
index d211f98ebfc6f..e6eff29f4b7b2 100644
--- a/operator/internal/manifests/var.go
+++ b/operator/internal/manifests/var.go
@@ -13,14 +13,16 @@ import (
)
const (
- gossipPort = 7946
- httpPort = 3100
- grpcPort = 9095
- protocolTCP = "TCP"
+ gossipPort = 7946
+ httpPort = 3100
+ internalHTTPPort = 3101
+ grpcPort = 9095
+ protocolTCP = "TCP"
- lokiHTTPPortName = "metrics"
- lokiGRPCPortName = "grpclb"
- lokiGossipPortName = "gossip-ring"
+ lokiHTTPPortName = "metrics"
+ lokiInternalHTTPPortName = "healthchecks"
+ lokiGRPCPortName = "grpclb"
+ lokiGossipPortName = "gossip-ring"
lokiLivenessPath = "/loki/api/v1/status/buildinfo"
lokiReadinessPath = "/ready"
@@ -63,6 +65,11 @@ const (
// labelJobComponent is a ServiceMonitor.Spec.JobLabel.
labelJobComponent string = "loki.grafana.com/component"
+ // AnnotationCertRotationRequiredAt stores the point in time the last cert rotation happened
+ AnnotationCertRotationRequiredAt string = "loki.grafana.com/certRotationRequiredAt"
+ // AnnotationLokiConfigHash stores the last SHA1 hash of the loki configuration
+ AnnotationLokiConfigHash string = "loki.grafana.com/config-hash"
+
// LabelCompactorComponent is the label value for the compactor component
LabelCompactorComponent string = "compactor"
// LabelDistributorComponent is the label value for the distributor component
@@ -84,10 +91,6 @@ const (
httpTLSDir = "/var/run/tls/http"
// grpcTLSDir is the path that is mounted from the secret for TLS
grpcTLSDir = "/var/run/tls/grpc"
- // tlsCertFile is the file of the X509 server certificate file
- tlsCertFile = "tls.crt"
- // tlsKeyFile is the file name of the server private key
- tlsKeyFile = "tls.key"
// LokiStackCABundleDir is the path that is mounted from the configmap for TLS
caBundleDir = "/var/run/ca"
// caFile is the file name of the certificate authority file
@@ -102,9 +105,10 @@ var (
volumeFileSystemMode = corev1.PersistentVolumeFilesystem
)
-func commonAnnotations(h string) map[string]string {
+func commonAnnotations(configHash, rotationRequiredAt string) map[string]string {
return map[string]string{
- "loki.grafana.com/config-hash": h,
+ AnnotationLokiConfigHash: configHash,
+ AnnotationCertRotationRequiredAt: rotationRequiredAt,
}
}
@@ -193,6 +197,58 @@ func lokiConfigMapName(stackName string) string {
return fmt.Sprintf("%s-config", stackName)
}
+func lokiServerGRPCTLSDir() string {
+ return path.Join(grpcTLSDir, "server")
+}
+
+func lokiServerGRPCTLSCert() string {
+ return path.Join(lokiServerGRPCTLSDir(), corev1.TLSCertKey)
+}
+
+func lokiServerGRPCTLSKey() string {
+ return path.Join(lokiServerGRPCTLSDir(), corev1.TLSPrivateKeyKey)
+}
+
+func lokiServerHTTPTLSDir() string {
+ return path.Join(httpTLSDir, "server")
+}
+
+func lokiServerHTTPTLSCert() string {
+ return path.Join(lokiServerHTTPTLSDir(), corev1.TLSCertKey)
+}
+
+func lokiServerHTTPTLSKey() string {
+ return path.Join(lokiServerHTTPTLSDir(), corev1.TLSPrivateKeyKey)
+}
+
+func gatewayServerHTTPTLSDir() string {
+ return path.Join(httpTLSDir, "server")
+}
+
+func gatewayServerHTTPTLSCert() string {
+ return path.Join(gatewayServerHTTPTLSDir(), corev1.TLSCertKey)
+}
+
+func gatewayServerHTTPTLSKey() string {
+ return path.Join(gatewayServerHTTPTLSDir(), corev1.TLSPrivateKeyKey)
+}
+
+func gatewayUpstreamHTTPTLSDir() string {
+ return path.Join(httpTLSDir, "upstream")
+}
+
+func gatewayUpstreamHTTPTLSCert() string {
+ return path.Join(gatewayUpstreamHTTPTLSDir(), corev1.TLSCertKey)
+}
+
+func gatewayUpstreamHTTPTLSKey() string {
+ return path.Join(gatewayUpstreamHTTPTLSDir(), corev1.TLSPrivateKeyKey)
+}
+
+func gatewayClientSecretName(stackName string) string {
+ return fmt.Sprintf("%s-gateway-client-http", stackName)
+}
+
func serviceNameQuerierHTTP(stackName string) string {
return fmt.Sprintf("%s-querier-http", stackName)
}
@@ -257,14 +313,46 @@ func serviceMonitorName(componentName string) string {
return fmt.Sprintf("%s-monitor", componentName)
}
-func signingServiceSecretName(serviceName string) string {
- return fmt.Sprintf("%s-tls", serviceName)
-}
-
func signingCABundleName(stackName string) string {
return fmt.Sprintf("%s-ca-bundle", stackName)
}
+func gatewaySigningCABundleName(gwName string) string {
+ return fmt.Sprintf("%s-ca-bundle", gwName)
+}
+
+func gatewaySigningCADir() string {
+ return path.Join(caBundleDir, "server")
+}
+
+func gatewaySigningCAPath() string {
+ return path.Join(gatewaySigningCADir(), caFile)
+}
+
+func gatewayUpstreamCADir() string {
+ return path.Join(caBundleDir, "upstream")
+}
+
+func gatewayUpstreamCAPath() string {
+ return path.Join(gatewayUpstreamCADir(), caFile)
+}
+
+func gatewayTokenSecretName(gwName string) string {
+ return fmt.Sprintf("%s-token", gwName)
+}
+
+func alertmanagerSigningCABundleName(rulerName string) string {
+ return fmt.Sprintf("%s-ca-bundle", rulerName)
+}
+
+func alertmanagerUpstreamCADir() string {
+ return path.Join(caBundleDir, "alertmanager")
+}
+
+func alertmanagerUpstreamCAPath() string {
+ return path.Join(alertmanagerUpstreamCADir(), caFile)
+}
+
func signingCAPath() string {
return path.Join(caBundleDir, caFile)
}
@@ -273,27 +361,82 @@ func fqdn(serviceName, namespace string) string {
return fmt.Sprintf("%s.%s.svc.cluster.local", serviceName, namespace)
}
-// serviceMonitorTLSConfig returns the TLS configuration for service monitors.
-func serviceMonitorTLSConfig(serviceName, namespace string) monitoringv1.TLSConfig {
- return monitoringv1.TLSConfig{
- SafeTLSConfig: monitoringv1.SafeTLSConfig{
- // ServerName can be e.g. loki-distributor-http.openshift-logging.svc.cluster.local
- ServerName: fqdn(serviceName, namespace),
- },
- CAFile: PrometheusCAFile,
+// lokiServiceMonitorEndpoint returns the lokistack endpoint for service monitors.
+func lokiServiceMonitorEndpoint(stackName, portName, serviceName, namespace string, enableTLS bool) monitoringv1.Endpoint {
+ if enableTLS {
+ tlsConfig := monitoringv1.TLSConfig{
+ SafeTLSConfig: monitoringv1.SafeTLSConfig{
+ CA: monitoringv1.SecretOrConfigMap{
+ ConfigMap: &corev1.ConfigMapKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: signingCABundleName(stackName),
+ },
+ Key: caFile,
+ },
+ },
+ Cert: monitoringv1.SecretOrConfigMap{
+ Secret: &corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: serviceName,
+ },
+ Key: corev1.TLSCertKey,
+ },
+ },
+ KeySecret: &corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: serviceName,
+ },
+ Key: corev1.TLSPrivateKeyKey,
+ },
+ // ServerName can be e.g. loki-distributor-http.openshift-logging.svc.cluster.local
+ ServerName: fqdn(serviceName, namespace),
+ },
+ }
+
+ return monitoringv1.Endpoint{
+ Port: portName,
+ Path: "/metrics",
+ Scheme: "https",
+ TLSConfig: &tlsConfig,
+ }
+ }
+
+ return monitoringv1.Endpoint{
+ Port: portName,
+ Path: "/metrics",
+ Scheme: "http",
}
}
-// serviceMonitorEndpoint returns the lokistack endpoint for service monitors.
-func serviceMonitorEndpoint(portName, serviceName, namespace string, enableTLS bool) monitoringv1.Endpoint {
+// gatewayServiceMonitorEndpoint returns the lokistack endpoint for service monitors.
+func gatewayServiceMonitorEndpoint(gatewayName, portName, serviceName, namespace string, enableTLS bool) monitoringv1.Endpoint {
if enableTLS {
- tlsConfig := serviceMonitorTLSConfig(serviceName, namespace)
+ tlsConfig := monitoringv1.TLSConfig{
+ SafeTLSConfig: monitoringv1.SafeTLSConfig{
+ CA: monitoringv1.SecretOrConfigMap{
+ ConfigMap: &corev1.ConfigMapKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: gatewaySigningCABundleName(gatewayName),
+ },
+ Key: caFile,
+ },
+ },
+ // ServerName can be e.g. lokistack-dev-gateway-http.openshift-logging.svc.cluster.local
+ ServerName: fqdn(serviceName, namespace),
+ },
+ }
+
return monitoringv1.Endpoint{
- Port: portName,
- Path: "/metrics",
- Scheme: "https",
- BearerTokenFile: BearerTokenFile,
- TLSConfig: &tlsConfig,
+ Port: portName,
+ Path: "/metrics",
+ Scheme: "https",
+ BearerTokenSecret: corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: gatewayTokenSecretName(gatewayName),
+ },
+ Key: corev1.ServiceAccountTokenKey,
+ },
+ TLSConfig: &tlsConfig,
}
}
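
Worth noting about the var.go change above: commonAnnotations now stamps pod templates with both a config hash and a certRotationRequiredAt value, so either a config edit or a certificate rotation changes the pod template and triggers a rollout. A minimal sketch of producing those two values (the helper names and the timestamp format are illustrative assumptions, not the operator's actual code; only the annotation keys are taken from the diff):

```go
package main

import (
	"crypto/sha1"
	"fmt"
	"time"
)

// configHash mimics the idea behind the config-hash annotation: hash the
// rendered Loki config so any config change yields a new annotation value.
func configHash(rendered []byte) string {
	return fmt.Sprintf("%x", sha1.Sum(rendered))
}

// annotations mirrors the shape of commonAnnotations(configHash, rotationRequiredAt).
// A change to either value alters the pod template, which makes the
// Deployment/StatefulSet controller roll the pods.
func annotations(hash string, rotationRequiredAt time.Time) map[string]string {
	return map[string]string{
		"loki.grafana.com/config-hash":            hash, // AnnotationLokiConfigHash
		"loki.grafana.com/certRotationRequiredAt": rotationRequiredAt.UTC().Format(time.RFC3339), // format is an assumption
	}
}

func main() {
	cfg := []byte("auth_enabled: false\n")
	fmt.Println(annotations(configHash(cfg), time.Now()))
}
```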
diff --git a/operator/main.go b/operator/main.go
index 4c258abfc8eda..f7049495464ec 100644
--- a/operator/main.go
+++ b/operator/main.go
@@ -102,25 +102,25 @@ func main() {
if err = (&lokictrl.LokiStackReconciler{
Client: mgr.GetClient(),
- Log: logger.WithName("controllers").WithName("LokiStack"),
+ Log: logger.WithName("controllers").WithName("lokistack"),
Scheme: mgr.GetScheme(),
FeatureGates: ctrlCfg.Gates,
}).SetupWithManager(mgr); err != nil {
- logger.Error(err, "unable to create controller", "controller", "LokiStack")
+ logger.Error(err, "unable to create controller", "controller", "lokistack")
os.Exit(1)
}
if ctrlCfg.Gates.LokiStackWebhook {
if err = (&lokiv1.LokiStack{}).SetupWebhookWithManager(mgr); err != nil {
- logger.Error(err, "unable to create webhook", "webhook", "LokiStack")
+ logger.Error(err, "unable to create webhook", "webhook", "lokistack")
os.Exit(1)
}
}
if err = (&lokictrl.AlertingRuleReconciler{
Client: mgr.GetClient(),
- Log: logger.WithName("controllers").WithName("AlertingRule"),
+ Log: logger.WithName("controllers").WithName("alertingrule"),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
- logger.Error(err, "unable to create controller", "controller", "AlertingRule")
+ logger.Error(err, "unable to create controller", "controller", "alertingrule")
os.Exit(1)
}
if ctrlCfg.Gates.AlertingRuleWebhook {
@@ -130,16 +130,16 @@ func main() {
}
if err = v.SetupWebhookWithManager(mgr); err != nil {
- logger.Error(err, "unable to create webhook", "webhook", "AlertingRule")
+ logger.Error(err, "unable to create webhook", "webhook", "alertingrule")
os.Exit(1)
}
}
if err = (&lokictrl.RecordingRuleReconciler{
Client: mgr.GetClient(),
- Log: logger.WithName("controllers").WithName("RecordingRule"),
+ Log: logger.WithName("controllers").WithName("recordingrule"),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
- logger.Error(err, "unable to create controller", "controller", "RecordingRule")
+ logger.Error(err, "unable to create controller", "controller", "recordingrule")
os.Exit(1)
}
if ctrlCfg.Gates.RecordingRuleWebhook {
@@ -149,7 +149,7 @@ func main() {
}
if err = v.SetupWebhookWithManager(mgr); err != nil {
- logger.Error(err, "unable to create webhook", "webhook", "RecordingRule")
+ logger.Error(err, "unable to create webhook", "webhook", "recordingrule")
os.Exit(1)
}
}
@@ -157,9 +157,20 @@ func main() {
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
- logger.Error(err, "unable to create controller", "controller", "RulerConfig")
+ logger.Error(err, "unable to create controller", "controller", "rulerconfig")
os.Exit(1)
}
+ if ctrlCfg.Gates.BuiltInCertManagement.Enabled {
+ if err = (&lokictrl.CertRotationReconciler{
+ Client: mgr.GetClient(),
+ Log: logger.WithName("controllers").WithName("certrotation"),
+ Scheme: mgr.GetScheme(),
+ FeatureGates: ctrlCfg.Gates,
+ }).SetupWithManager(mgr); err != nil {
+ logger.Error(err, "unable to create controller", "controller", "certrotation")
+ os.Exit(1)
+ }
+ }
// +kubebuilder:scaffold:builder
if err = mgr.AddHealthzCheck("health", healthz.Ping); err != nil {
diff --git a/operator/quickstart.sh b/operator/quickstart.sh
index 58bf086729550..70ccae1f74472 100755
--- a/operator/quickstart.sh
+++ b/operator/quickstart.sh
@@ -67,8 +67,9 @@ certificates() {
kubectl -n cert-manager rollout status deployment cert-manager-webhook
kubectl apply -f ./hack/addons_kind_certs.yaml
- kubectl wait --timeout=180s --for=condition=ready certificate/lokistack-dev-ca-bundle
- kubectl create configmap lokistack-dev-ca-bundle --from-literal service-ca.crt="$(kubectl get secret lokistack-dev-ca-bundle -o json | jq -r '.data."ca.crt"' | base64 -d -)"
+ kubectl wait --timeout=180s --for=condition=ready certificate/lokistack-dev-signing-ca
+ kubectl create configmap lokistack-dev-ca-bundle --from-literal service-ca.crt="$(kubectl get secret lokistack-dev-signing-ca -o json | jq -r '.data."ca.crt"' | base64 -d -)"
+ kubectl create configmap lokistack-dev-gateway-ca-bundle --from-literal service-ca.crt="$(kubectl get secret lokistack-dev-signing-ca -o json | jq -r '.data."ca.crt"' | base64 -d -)"
}
check() {
|
operator
|
Add support for built-in-cert-rotation for all internal lokistack encryption (#7064)
|
246623f7d09039761d9950414064d6c1bf74207f
|
2024-03-30 04:35:36
|
Trevor Whitney
|
fix(detected_fields): fix issues with frontend integration (#12406)
| false
|
diff --git a/cmd/loki/loki-local-experimental-config.yaml b/cmd/loki/loki-local-experimental-config.yaml
deleted file mode 100644
index 03734fec08b49..0000000000000
--- a/cmd/loki/loki-local-experimental-config.yaml
+++ /dev/null
@@ -1,53 +0,0 @@
-auth_enabled: false
-
-server:
- http_listen_port: 3100
- grpc_listen_port: 9096
-
-common:
- instance_addr: 127.0.0.1
- path_prefix: /tmp/loki
- storage:
- filesystem:
- chunks_directory: /tmp/loki/chunks
- rules_directory: /tmp/loki/rules
- replication_factor: 1
- ring:
- kvstore:
- store: inmemory
-
-query_range:
- results_cache:
- cache:
- embedded_cache:
- enabled: true
- max_size_mb: 100
-
-schema_config:
- configs:
- - from: 2020-10-24
- store: tsdb
- object_store: filesystem
- schema: v13
- index:
- prefix: index_
- period: 24h
-
-frontend:
- experimental_apis_enabled: true
-
-ruler:
- alertmanager_url: http://localhost:9093
-
-# By default, Loki will send anonymous, but uniquely-identifiable usage and configuration
-# analytics to Grafana Labs. These statistics are sent to https://stats.grafana.org/
-#
-# Statistics help us better understand how Loki is used, and they show us performance
-# levels for most users. This helps us prioritize features and documentation.
-# For more information on what's sent, look at
-# https://github.com/grafana/loki/blob/main/pkg/analytics/stats.go
-# Refer to the buildReport method to see what goes into a report.
-#
-# If you would like to disable reporting, uncomment the following lines:
-#analytics:
-# reporting_enabled: false
diff --git a/docs/sources/configure/_index.md b/docs/sources/configure/_index.md
index dc0716a34fa23..82cb0ecadea03 100644
--- a/docs/sources/configure/_index.md
+++ b/docs/sources/configure/_index.md
@@ -817,10 +817,6 @@ The `frontend` block configures the Loki query-frontend.
# The TLS configuration.
[tail_tls_config: <tls_config>]
-
-# Whether to enable experimental APIs in the frontend.
-# CLI flag: -frontend.experimental-apis-enabled
-[experimental_apis_enabled: <boolean> | default = false]
```
### query_range
diff --git a/pkg/logproto/extensions.go b/pkg/logproto/extensions.go
index e3996fbe69cbd..9c0e5c3d432d5 100644
--- a/pkg/logproto/extensions.go
+++ b/pkg/logproto/extensions.go
@@ -141,11 +141,17 @@ func (m *Shard) SpaceFor(stats *IndexStatsResponse, targetShardBytes uint64) boo
return newDelta <= curDelta
}
+type DetectedFieldType string
+
const (
- DetectedFieldString DetectedFieldType = 0
- DetectedFieldInt DetectedFieldType = 1
- DetectedFieldFloat DetectedFieldType = 2
- DetectedFieldBoolean DetectedFieldType = 3
- DetectedFieldDuration DetectedFieldType = 4
- DetectedFieldBytes DetectedFieldType = 5
+ DetectedFieldString DetectedFieldType = "string"
+ DetectedFieldInt DetectedFieldType = "int"
+ DetectedFieldFloat DetectedFieldType = "float"
+ DetectedFieldBoolean DetectedFieldType = "boolean"
+ DetectedFieldDuration DetectedFieldType = "duration"
+ DetectedFieldBytes DetectedFieldType = "bytes"
)
+
+func (d DetectedFieldType) String() string {
+ return string(d)
+}
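
The extensions.go change above replaces the generated protobuf enum with a string-backed DetectedFieldType, so the value stays human-readable on the wire and in JSON. A standalone sketch of using such a type (constants copied here for illustration; the toy classifier is not Loki's actual detection logic):

```go
package main

import (
	"fmt"
	"strconv"
	"time"
)

type DetectedFieldType string

const (
	DetectedFieldString   DetectedFieldType = "string"
	DetectedFieldInt      DetectedFieldType = "int"
	DetectedFieldFloat    DetectedFieldType = "float"
	DetectedFieldBoolean  DetectedFieldType = "boolean"
	DetectedFieldDuration DetectedFieldType = "duration"
	DetectedFieldBytes    DetectedFieldType = "bytes"
)

// guessType is a toy classifier: with a string-backed type the result
// serializes as-is and comparisons read naturally.
func guessType(v string) DetectedFieldType {
	if _, err := strconv.ParseInt(v, 10, 64); err == nil {
		return DetectedFieldInt
	}
	if _, err := strconv.ParseFloat(v, 64); err == nil {
		return DetectedFieldFloat
	}
	if _, err := strconv.ParseBool(v); err == nil {
		return DetectedFieldBoolean
	}
	if _, err := time.ParseDuration(v); err == nil {
		return DetectedFieldDuration
	}
	return DetectedFieldString
}

func main() {
	for _, v := range []string{"42", "3.14", "true", "250ms", "hello"} {
		fmt.Printf("%q -> %s\n", v, guessType(v))
	}
}
```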
diff --git a/pkg/logproto/logproto.pb.go b/pkg/logproto/logproto.pb.go
index 1747d4dda7d5e..2b794a5f899c2 100644
--- a/pkg/logproto/logproto.pb.go
+++ b/pkg/logproto/logproto.pb.go
@@ -63,39 +63,6 @@ func (Direction) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_c28a5f14f1f4c79a, []int{0}
}
-type DetectedFieldType int32
-
-const (
- STRING DetectedFieldType = 0
- INT DetectedFieldType = 1
- FLOAT DetectedFieldType = 2
- BOOL DetectedFieldType = 3
- DURATION DetectedFieldType = 4
- BYTES DetectedFieldType = 5
-)
-
-var DetectedFieldType_name = map[int32]string{
- 0: "STRING",
- 1: "INT",
- 2: "FLOAT",
- 3: "BOOL",
- 4: "DURATION",
- 5: "BYTES",
-}
-
-var DetectedFieldType_value = map[string]int32{
- "STRING": 0,
- "INT": 1,
- "FLOAT": 2,
- "BOOL": 3,
- "DURATION": 4,
- "BYTES": 5,
-}
-
-func (DetectedFieldType) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_c28a5f14f1f4c79a, []int{1}
-}
-
type StreamRatesRequest struct {
}
@@ -2649,7 +2616,7 @@ func (m *DetectedFieldsResponse) GetFields() []*DetectedField {
type DetectedField struct {
Label string `protobuf:"bytes,1,opt,name=label,proto3" json:"label,omitempty"`
- Type DetectedFieldType `protobuf:"varint,2,opt,name=type,proto3,enum=logproto.DetectedFieldType" json:"type,omitempty"`
+ Type DetectedFieldType `protobuf:"bytes,2,opt,name=type,proto3,casttype=DetectedFieldType" json:"type,omitempty"`
Cardinality uint64 `protobuf:"varint,3,opt,name=cardinality,proto3" json:"cardinality,omitempty"`
}
@@ -2696,7 +2663,7 @@ func (m *DetectedField) GetType() DetectedFieldType {
if m != nil {
return m.Type
}
- return STRING
+ return ""
}
func (m *DetectedField) GetCardinality() uint64 {
@@ -2708,7 +2675,6 @@ func (m *DetectedField) GetCardinality() uint64 {
func init() {
proto.RegisterEnum("logproto.Direction", Direction_name, Direction_value)
- proto.RegisterEnum("logproto.DetectedFieldType", DetectedFieldType_name, DetectedFieldType_value)
proto.RegisterType((*StreamRatesRequest)(nil), "logproto.StreamRatesRequest")
proto.RegisterType((*StreamRatesResponse)(nil), "logproto.StreamRatesResponse")
proto.RegisterType((*StreamRate)(nil), "logproto.StreamRate")
@@ -2763,161 +2729,157 @@ func init() {
func init() { proto.RegisterFile("pkg/logproto/logproto.proto", fileDescriptor_c28a5f14f1f4c79a) }
var fileDescriptor_c28a5f14f1f4c79a = []byte{
- // 2455 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x19, 0x4d, 0x6f, 0x1b, 0xc7,
- 0x95, 0xcb, 0x5d, 0x7e, 0x3d, 0x52, 0x32, 0x3d, 0x62, 0x6c, 0x82, 0x76, 0x48, 0x79, 0x90, 0x3a,
- 0x82, 0xe3, 0x88, 0xb1, 0xdc, 0xb8, 0xa9, 0xdd, 0xa0, 0x15, 0x25, 0x4b, 0x91, 0x2d, 0x4b, 0xce,
- 0x48, 0x71, 0x53, 0xa3, 0xad, 0xb1, 0x22, 0x47, 0xd4, 0x42, 0xe4, 0x2e, 0xbd, 0x3b, 0x8c, 0x4d,
- 0xa0, 0x87, 0xfe, 0x81, 0xa0, 0xb9, 0x15, 0xbd, 0x14, 0x2d, 0x50, 0x20, 0x05, 0x8a, 0x5e, 0xfa,
- 0x03, 0xda, 0x4b, 0x0f, 0xee, 0xcd, 0xb9, 0x05, 0x39, 0xb0, 0xb5, 0x7c, 0x29, 0x74, 0xca, 0xad,
- 0xd7, 0x62, 0x3e, 0xf6, 0x8b, 0xa2, 0xdc, 0x50, 0x75, 0x51, 0xf8, 0xc2, 0x9d, 0x79, 0xf3, 0xe6,
- 0xcd, 0xfb, 0x9a, 0xf7, 0x31, 0x84, 0x73, 0xbd, 0xfd, 0x76, 0xbd, 0xe3, 0xb4, 0x7b, 0xae, 0xc3,
- 0x9c, 0x60, 0x30, 0x2f, 0x7e, 0x51, 0xd6, 0x9f, 0x57, 0x4a, 0x6d, 0xa7, 0xed, 0x48, 0x1c, 0x3e,
- 0x92, 0xeb, 0x95, 0x5a, 0xdb, 0x71, 0xda, 0x1d, 0x5a, 0x17, 0xb3, 0x9d, 0xfe, 0x6e, 0x9d, 0x59,
- 0x5d, 0xea, 0x31, 0xb3, 0xdb, 0x53, 0x08, 0xb3, 0x8a, 0xfa, 0xc3, 0x4e, 0xd7, 0x69, 0xd1, 0x4e,
- 0xdd, 0x63, 0x26, 0xf3, 0xe4, 0xaf, 0xc2, 0x98, 0xe1, 0x18, 0xbd, 0xbe, 0xb7, 0x27, 0x7e, 0x24,
- 0x10, 0x97, 0x00, 0x6d, 0x31, 0x97, 0x9a, 0x5d, 0x62, 0x32, 0xea, 0x11, 0xfa, 0xb0, 0x4f, 0x3d,
- 0x86, 0xef, 0xc0, 0x4c, 0x0c, 0xea, 0xf5, 0x1c, 0xdb, 0xa3, 0xe8, 0x1a, 0xe4, 0xbd, 0x10, 0x5c,
- 0xd6, 0x66, 0xf5, 0xb9, 0xfc, 0x42, 0x69, 0x3e, 0x10, 0x25, 0xdc, 0x43, 0xa2, 0x88, 0xf8, 0xd7,
- 0x1a, 0x40, 0xb8, 0x86, 0xaa, 0x00, 0x72, 0xf5, 0x03, 0xd3, 0xdb, 0x2b, 0x6b, 0xb3, 0xda, 0x9c,
- 0x41, 0x22, 0x10, 0x74, 0x19, 0x4e, 0x87, 0xb3, 0x0d, 0x67, 0x6b, 0xcf, 0x74, 0x5b, 0xe5, 0xa4,
- 0x40, 0x3b, 0xba, 0x80, 0x10, 0x18, 0xae, 0xc9, 0x68, 0x59, 0x9f, 0xd5, 0xe6, 0x74, 0x22, 0xc6,
- 0xe8, 0x0c, 0xa4, 0x19, 0xb5, 0x4d, 0x9b, 0x95, 0x8d, 0x59, 0x6d, 0x2e, 0x47, 0xd4, 0x8c, 0xc3,
- 0xb9, 0xec, 0xd4, 0x2b, 0xa7, 0x66, 0xb5, 0xb9, 0x29, 0xa2, 0x66, 0xf8, 0x73, 0x1d, 0x0a, 0x1f,
- 0xf6, 0xa9, 0x3b, 0x50, 0x0a, 0x40, 0x55, 0xc8, 0x7a, 0xb4, 0x43, 0x9b, 0xcc, 0x71, 0x05, 0x83,
- 0xb9, 0x46, 0xb2, 0xac, 0x91, 0x00, 0x86, 0x4a, 0x90, 0xea, 0x58, 0x5d, 0x8b, 0x09, 0xb6, 0xa6,
- 0x88, 0x9c, 0xa0, 0xeb, 0x90, 0xf2, 0x98, 0xe9, 0x32, 0xc1, 0x4b, 0x7e, 0xa1, 0x32, 0x2f, 0x8d,
- 0x36, 0xef, 0x1b, 0x6d, 0x7e, 0xdb, 0x37, 0x5a, 0x23, 0xfb, 0x64, 0x58, 0x4b, 0x7c, 0xf6, 0xf7,
- 0x9a, 0x46, 0xe4, 0x16, 0x74, 0x0d, 0x74, 0x6a, 0xb7, 0x04, 0xbf, 0xdf, 0x74, 0x27, 0xdf, 0x80,
- 0xae, 0x40, 0xae, 0x65, 0xb9, 0xb4, 0xc9, 0x2c, 0xc7, 0x16, 0x52, 0x4d, 0x2f, 0xcc, 0x84, 0x16,
- 0x59, 0xf6, 0x97, 0x48, 0x88, 0x85, 0x2e, 0x43, 0xda, 0xe3, 0xaa, 0xf3, 0xca, 0x99, 0x59, 0x7d,
- 0x2e, 0xd7, 0x28, 0x1d, 0x0e, 0x6b, 0x45, 0x09, 0xb9, 0xec, 0x74, 0x2d, 0x46, 0xbb, 0x3d, 0x36,
- 0x20, 0x0a, 0x07, 0x5d, 0x82, 0x4c, 0x8b, 0x76, 0x28, 0x37, 0x78, 0x56, 0x18, 0xbc, 0x18, 0x21,
- 0x2f, 0x16, 0x88, 0x8f, 0x80, 0xee, 0x83, 0xd1, 0xeb, 0x98, 0x76, 0x39, 0x27, 0xa4, 0x98, 0x0e,
- 0x11, 0xef, 0x76, 0x4c, 0xbb, 0x71, 0xed, 0xab, 0x61, 0x6d, 0xa1, 0x6d, 0xb1, 0xbd, 0xfe, 0xce,
- 0x7c, 0xd3, 0xe9, 0xd6, 0xdb, 0xae, 0xb9, 0x6b, 0xda, 0x66, 0xbd, 0xe3, 0xec, 0x5b, 0x75, 0xee,
- 0x9c, 0x0f, 0xfb, 0xd4, 0xb5, 0xa8, 0x5b, 0xe7, 0x34, 0xe6, 0x85, 0x3d, 0xf8, 0x3e, 0x22, 0x68,
- 0xde, 0x32, 0xb2, 0xe9, 0x62, 0x06, 0x0f, 0x93, 0x80, 0xb6, 0xcc, 0x6e, 0xaf, 0x43, 0x27, 0xb2,
- 0x57, 0x60, 0x99, 0xe4, 0x89, 0x2d, 0xa3, 0x4f, 0x6a, 0x99, 0x50, 0xcd, 0xc6, 0x64, 0x6a, 0x4e,
- 0x7d, 0x53, 0x35, 0xa7, 0x5f, 0xbe, 0x9a, 0x71, 0x19, 0x0c, 0x3e, 0x43, 0x45, 0xd0, 0x5d, 0xf3,
- 0x91, 0x50, 0x66, 0x81, 0xf0, 0x21, 0x5e, 0x87, 0xb4, 0x64, 0x04, 0x55, 0x46, 0xb5, 0x1d, 0xbf,
- 0x19, 0xa1, 0xa6, 0x75, 0x5f, 0x87, 0xc5, 0x50, 0x87, 0xba, 0xd0, 0x0e, 0xfe, 0x8d, 0x06, 0x53,
- 0xca, 0x84, 0x2a, 0xba, 0xec, 0x40, 0x46, 0xde, 0x6e, 0x3f, 0xb2, 0x9c, 0x1d, 0x8d, 0x2c, 0x8b,
- 0x2d, 0xb3, 0xc7, 0xa8, 0xdb, 0xa8, 0x3f, 0x19, 0xd6, 0xb4, 0xaf, 0x86, 0xb5, 0x37, 0x5f, 0x24,
- 0xa5, 0x08, 0x72, 0x2a, 0xea, 0xf8, 0x84, 0xd1, 0x5b, 0x82, 0x3b, 0xe6, 0x29, 0x3f, 0x38, 0x35,
- 0x2f, 0x03, 0xe4, 0x9a, 0xdd, 0xa6, 0x1e, 0xa7, 0x6c, 0x70, 0x13, 0x12, 0x89, 0x83, 0x7f, 0x06,
- 0x33, 0x31, 0x57, 0x53, 0x7c, 0xbe, 0x07, 0x69, 0x8f, 0x2b, 0xd0, 0x67, 0x33, 0x62, 0xa8, 0x2d,
- 0x01, 0x6f, 0x4c, 0x2b, 0xfe, 0xd2, 0x72, 0x4e, 0x14, 0xfe, 0x64, 0xa7, 0xff, 0x55, 0x83, 0xc2,
- 0xba, 0xb9, 0x43, 0x3b, 0xbe, 0x8f, 0x23, 0x30, 0x6c, 0xb3, 0x4b, 0x95, 0xc6, 0xc5, 0x98, 0x07,
- 0xb4, 0x4f, 0xcc, 0x4e, 0x9f, 0x4a, 0x92, 0x59, 0xa2, 0x66, 0x93, 0x46, 0x22, 0xed, 0xc4, 0x91,
- 0x48, 0x0b, 0xfd, 0xbd, 0x04, 0x29, 0xee, 0x59, 0x03, 0x11, 0x85, 0x72, 0x44, 0x4e, 0xf0, 0x9b,
- 0x30, 0xa5, 0xa4, 0x50, 0xea, 0x0b, 0x59, 0xe6, 0xea, 0xcb, 0xf9, 0x2c, 0xe3, 0x2e, 0xa4, 0xa5,
- 0xb6, 0xd1, 0x1b, 0x90, 0x0b, 0xb2, 0x9b, 0x90, 0x56, 0x6f, 0xa4, 0x0f, 0x87, 0xb5, 0x24, 0xf3,
- 0x48, 0xb8, 0x80, 0x6a, 0x90, 0x12, 0x3b, 0x85, 0xe4, 0x5a, 0x23, 0x77, 0x38, 0xac, 0x49, 0x00,
- 0x91, 0x1f, 0x74, 0x1e, 0x8c, 0x3d, 0x9e, 0x60, 0xb8, 0x0a, 0x8c, 0x46, 0xf6, 0x70, 0x58, 0x13,
- 0x73, 0x22, 0x7e, 0xf1, 0x2a, 0x14, 0xd6, 0x69, 0xdb, 0x6c, 0x0e, 0xd4, 0xa1, 0x25, 0x9f, 0x1c,
- 0x3f, 0x50, 0xf3, 0x69, 0x5c, 0x80, 0x42, 0x70, 0xe2, 0x83, 0xae, 0xa7, 0x9c, 0x3a, 0x1f, 0xc0,
- 0xee, 0x78, 0xf8, 0x57, 0x1a, 0x28, 0x3b, 0x23, 0x0c, 0xe9, 0x0e, 0x97, 0xd5, 0x53, 0x31, 0x08,
- 0x0e, 0x87, 0x35, 0x05, 0x21, 0xea, 0x8b, 0x6e, 0x40, 0xc6, 0x13, 0x27, 0x72, 0x62, 0xa3, 0xee,
- 0x23, 0x16, 0x1a, 0xa7, 0xb8, 0x1b, 0x1c, 0x0e, 0x6b, 0x3e, 0x22, 0xf1, 0x07, 0x68, 0x3e, 0x96,
- 0x39, 0xa5, 0x60, 0xd3, 0x87, 0xc3, 0x5a, 0x04, 0x1a, 0xcd, 0xa4, 0xf8, 0x5f, 0x1a, 0xe4, 0xb7,
- 0x4d, 0x2b, 0x70, 0xa1, 0xb2, 0x6f, 0xa2, 0x30, 0x46, 0x4a, 0x00, 0xbf, 0xd2, 0x2d, 0xda, 0x31,
- 0x07, 0x2b, 0x8e, 0x2b, 0xe8, 0x4e, 0x91, 0x60, 0x1e, 0x26, 0x3b, 0x63, 0x6c, 0xb2, 0x4b, 0x4d,
- 0x1e, 0x52, 0xff, 0x87, 0x01, 0xec, 0x96, 0x91, 0x4d, 0x16, 0x75, 0xfc, 0x47, 0x0d, 0x0a, 0x52,
- 0x72, 0xe5, 0x76, 0x3f, 0x86, 0xb4, 0x54, 0x8c, 0x90, 0xfd, 0x05, 0xc1, 0xe5, 0xad, 0x49, 0x02,
- 0x8b, 0xa2, 0x89, 0xbe, 0x0f, 0xd3, 0x2d, 0xd7, 0xe9, 0xf5, 0x68, 0x6b, 0x4b, 0x85, 0xb0, 0xe4,
- 0x68, 0x08, 0x5b, 0x8e, 0xae, 0x93, 0x11, 0x74, 0xfc, 0x37, 0x0d, 0xa6, 0x54, 0xb4, 0x50, 0xb6,
- 0x0a, 0xf4, 0xab, 0x9d, 0x38, 0x65, 0x25, 0x27, 0x4d, 0x59, 0x67, 0x20, 0xdd, 0x76, 0x9d, 0x7e,
- 0xcf, 0x2b, 0xeb, 0xf2, 0x6e, 0xca, 0xd9, 0x64, 0xa9, 0x0c, 0xdf, 0x82, 0x69, 0x5f, 0x94, 0x63,
- 0x42, 0x66, 0x65, 0x34, 0x64, 0xae, 0xb5, 0xa8, 0xcd, 0xac, 0x5d, 0x2b, 0x08, 0x82, 0x0a, 0x1f,
- 0xff, 0x42, 0x83, 0xe2, 0x28, 0x0a, 0x5a, 0x8e, 0xdc, 0x33, 0x4e, 0xee, 0xe2, 0xf1, 0xe4, 0xe6,
- 0x45, 0xf0, 0xf1, 0x6e, 0xda, 0xcc, 0x1d, 0xf8, 0xa4, 0xe5, 0xde, 0xca, 0xbb, 0x90, 0x8f, 0x2c,
- 0xf2, 0x14, 0xb5, 0x4f, 0xd5, 0xcd, 0x20, 0x7c, 0x18, 0x86, 0x84, 0xa4, 0x0c, 0x68, 0x62, 0x82,
- 0x7f, 0xa9, 0xc1, 0x54, 0xcc, 0x96, 0xe8, 0x3d, 0x30, 0x76, 0x5d, 0xa7, 0x3b, 0x91, 0xa1, 0xc4,
- 0x0e, 0xf4, 0x6d, 0x48, 0x32, 0x67, 0x22, 0x33, 0x25, 0x99, 0xc3, 0xad, 0xa4, 0xc4, 0xd7, 0x65,
- 0x75, 0x2b, 0x67, 0xf8, 0x5d, 0xc8, 0x09, 0x81, 0xee, 0x9a, 0x96, 0x3b, 0x36, 0x5b, 0x8c, 0x17,
- 0xe8, 0x06, 0x9c, 0x92, 0x91, 0x70, 0xfc, 0xe6, 0xc2, 0xb8, 0xcd, 0x05, 0x7f, 0xf3, 0x39, 0x48,
- 0x2d, 0xed, 0xf5, 0xed, 0x7d, 0xbe, 0xa5, 0x65, 0x32, 0xd3, 0xdf, 0xc2, 0xc7, 0xf8, 0x35, 0x98,
- 0xe1, 0x77, 0x90, 0xba, 0xde, 0x92, 0xd3, 0xb7, 0x99, 0xdf, 0x5d, 0x5c, 0x86, 0x52, 0x1c, 0xac,
- 0xbc, 0xa4, 0x04, 0xa9, 0x26, 0x07, 0x08, 0x1a, 0x53, 0x44, 0x4e, 0xf0, 0xef, 0x34, 0x40, 0xab,
- 0x94, 0x89, 0x53, 0xd6, 0x96, 0x83, 0xeb, 0x51, 0x81, 0x6c, 0xd7, 0x64, 0xcd, 0x3d, 0xea, 0x7a,
- 0x7e, 0x0d, 0xe2, 0xcf, 0xff, 0x1f, 0xd5, 0x1e, 0xbe, 0x02, 0x33, 0x31, 0x2e, 0x95, 0x4c, 0x15,
- 0xc8, 0x36, 0x15, 0x4c, 0xe5, 0xbb, 0x60, 0x8e, 0xff, 0x94, 0x84, 0xac, 0xd8, 0x40, 0xe8, 0x2e,
- 0xba, 0x02, 0xf9, 0x5d, 0xcb, 0x6e, 0x53, 0xb7, 0xe7, 0x5a, 0x4a, 0x05, 0x46, 0xe3, 0xd4, 0xe1,
- 0xb0, 0x16, 0x05, 0x93, 0xe8, 0x04, 0xbd, 0x0d, 0x99, 0xbe, 0x47, 0xdd, 0x07, 0x96, 0xbc, 0xe9,
- 0xb9, 0x46, 0xe9, 0x60, 0x58, 0x4b, 0x7f, 0xe4, 0x51, 0x77, 0x6d, 0x99, 0x67, 0x9e, 0xbe, 0x18,
- 0x11, 0xf9, 0x6d, 0xa1, 0xdb, 0xca, 0x4d, 0x45, 0x11, 0xd6, 0xf8, 0x0e, 0x67, 0x7f, 0x24, 0xd4,
- 0xf5, 0x5c, 0xa7, 0x4b, 0xd9, 0x1e, 0xed, 0x7b, 0xf5, 0xa6, 0xd3, 0xed, 0x3a, 0x76, 0x5d, 0xf4,
- 0x92, 0x42, 0x68, 0x9e, 0x3e, 0xf9, 0x76, 0xe5, 0xb9, 0xdb, 0x90, 0x61, 0x7b, 0xae, 0xd3, 0x6f,
- 0xef, 0x89, 0xac, 0xa0, 0x37, 0xae, 0x4f, 0x4e, 0xcf, 0xa7, 0x40, 0xfc, 0x01, 0xba, 0xc0, 0xb5,
- 0x45, 0x9b, 0xfb, 0x5e, 0xbf, 0x2b, 0x3b, 0xb4, 0x46, 0xea, 0x70, 0x58, 0xd3, 0xde, 0x26, 0x01,
- 0x18, 0x7f, 0x9a, 0x84, 0x9a, 0x70, 0xd4, 0x7b, 0xa2, 0x6c, 0x58, 0x71, 0xdc, 0x3b, 0x94, 0xb9,
- 0x56, 0x73, 0xc3, 0xec, 0x52, 0xdf, 0x37, 0x6a, 0x90, 0xef, 0x0a, 0xe0, 0x83, 0xc8, 0x15, 0x80,
- 0x6e, 0x80, 0x87, 0x5e, 0x07, 0x10, 0x77, 0x46, 0xae, 0xcb, 0xdb, 0x90, 0x13, 0x10, 0xb1, 0xbc,
- 0x14, 0xd3, 0x54, 0x7d, 0x42, 0xc9, 0x94, 0x86, 0xd6, 0x46, 0x35, 0x34, 0x31, 0x9d, 0x40, 0x2d,
- 0x51, 0x5f, 0x4f, 0xc5, 0x7d, 0x1d, 0x7f, 0xa1, 0x41, 0x75, 0xdd, 0xe7, 0xfc, 0x84, 0xea, 0xf0,
- 0xe5, 0x4d, 0xbe, 0x24, 0x79, 0xf5, 0xff, 0x4e, 0x5e, 0x5c, 0x05, 0x58, 0xb7, 0x6c, 0xba, 0x62,
- 0x75, 0x18, 0x75, 0xc7, 0x74, 0x22, 0x9f, 0xea, 0x61, 0x48, 0x20, 0x74, 0xd7, 0x97, 0x73, 0x29,
- 0x12, 0x87, 0x5f, 0x86, 0x18, 0xc9, 0x97, 0x68, 0x36, 0x7d, 0x24, 0x44, 0xed, 0x43, 0x66, 0x57,
- 0x88, 0x27, 0x53, 0x6a, 0xec, 0x19, 0x25, 0x94, 0xbd, 0x71, 0x43, 0x1d, 0x7e, 0xf5, 0x45, 0x05,
- 0x89, 0x78, 0xf5, 0xa9, 0x7b, 0x03, 0x9b, 0x99, 0x8f, 0x23, 0x9b, 0x89, 0x7f, 0x02, 0xfa, 0xa9,
- 0x2a, 0xb7, 0x52, 0x63, 0xcb, 0x2d, 0xff, 0xe6, 0x9e, 0xbc, 0x67, 0x7c, 0x3f, 0x8c, 0x7d, 0xc2,
- 0x1c, 0x2a, 0xf6, 0x5d, 0x04, 0xc3, 0xa5, 0xbb, 0x7e, 0x92, 0x46, 0xe1, 0xb1, 0x01, 0xa6, 0x58,
- 0xc7, 0x7f, 0xd6, 0xa0, 0xb8, 0x4a, 0x59, 0xbc, 0xfc, 0x79, 0x85, 0x8c, 0x89, 0x3f, 0x80, 0xd3,
- 0x11, 0xfe, 0x95, 0xf4, 0x57, 0x47, 0x6a, 0x9e, 0xd7, 0x42, 0xf9, 0xd7, 0xec, 0x16, 0x7d, 0xac,
- 0x7a, 0xc5, 0x78, 0xb9, 0x73, 0x17, 0xf2, 0x91, 0x45, 0xb4, 0x38, 0x52, 0xe8, 0x44, 0x5e, 0x76,
- 0x82, 0x64, 0xdd, 0x28, 0x29, 0x99, 0x64, 0xb7, 0xa8, 0xca, 0xd8, 0xa0, 0x28, 0xd8, 0x02, 0x24,
- 0xcc, 0x25, 0xc8, 0x46, 0xd3, 0x92, 0x80, 0xde, 0x0e, 0x2a, 0x9e, 0x60, 0x8e, 0x2e, 0x80, 0xe1,
- 0x3a, 0x8f, 0xfc, 0x0a, 0x76, 0x2a, 0x3c, 0x92, 0x38, 0x8f, 0x88, 0x58, 0xc2, 0x37, 0x40, 0x27,
- 0xce, 0x23, 0x54, 0x05, 0x70, 0x4d, 0xbb, 0x4d, 0xef, 0x05, 0x8d, 0x53, 0x81, 0x44, 0x20, 0xc7,
- 0x94, 0x0c, 0x4b, 0x70, 0x3a, 0xca, 0x91, 0x34, 0xf7, 0x3c, 0x64, 0x3e, 0xec, 0x47, 0xd5, 0x55,
- 0x1a, 0x51, 0x97, 0xec, 0xc1, 0x7d, 0x24, 0xee, 0x33, 0x10, 0xc2, 0xd1, 0x79, 0xc8, 0x31, 0x73,
- 0xa7, 0x43, 0x37, 0xc2, 0x00, 0x17, 0x02, 0xf8, 0x2a, 0xef, 0xf9, 0xee, 0x45, 0x6a, 0x9f, 0x10,
- 0x80, 0x2e, 0x41, 0x31, 0xe4, 0xf9, 0xae, 0x4b, 0x77, 0xad, 0xc7, 0xc2, 0xc2, 0x05, 0x72, 0x04,
- 0x8e, 0xe6, 0xe0, 0x54, 0x08, 0xdb, 0x12, 0x35, 0x86, 0x21, 0x50, 0x47, 0xc1, 0x5c, 0x37, 0x42,
- 0xdc, 0x9b, 0x0f, 0xfb, 0x66, 0x47, 0xdc, 0xbc, 0x02, 0x89, 0x40, 0xf0, 0x5f, 0x34, 0x38, 0x2d,
- 0x4d, 0xcd, 0xbb, 0xfd, 0x57, 0xd1, 0xeb, 0x3f, 0xd7, 0x00, 0x45, 0x25, 0x50, 0xae, 0xf5, 0xad,
- 0xe8, 0x33, 0x0e, 0x2f, 0x62, 0xf2, 0xa2, 0x95, 0x95, 0xa0, 0xf0, 0x25, 0x06, 0x43, 0x5a, 0x14,
- 0x42, 0xb2, 0xa7, 0x36, 0x64, 0xaf, 0x2c, 0x21, 0x44, 0x7d, 0x79, 0x8b, 0xbf, 0x33, 0x60, 0xd4,
- 0x53, 0x9d, 0xae, 0x68, 0xf1, 0x05, 0x80, 0xc8, 0x0f, 0x3f, 0x8b, 0xda, 0x4c, 0x78, 0x8d, 0x11,
- 0x9e, 0xa5, 0x40, 0xc4, 0x1f, 0xe0, 0x3f, 0x24, 0x61, 0xea, 0x9e, 0xd3, 0xe9, 0x87, 0x29, 0xf1,
- 0x55, 0x4a, 0x15, 0xb1, 0xf6, 0x3b, 0xe5, 0xb7, 0xdf, 0x08, 0x0c, 0x8f, 0xd1, 0x9e, 0xf0, 0x2c,
- 0x9d, 0x88, 0x31, 0xc2, 0x50, 0x60, 0xa6, 0xdb, 0xa6, 0x4c, 0xf6, 0x35, 0xe5, 0xb4, 0x28, 0x38,
- 0x63, 0x30, 0x34, 0x0b, 0x79, 0xb3, 0xdd, 0x76, 0x69, 0xdb, 0x64, 0xb4, 0x31, 0x28, 0x67, 0xc4,
- 0x61, 0x51, 0x10, 0xfe, 0x18, 0xa6, 0x7d, 0x65, 0x29, 0x93, 0xbe, 0x03, 0x99, 0x4f, 0x04, 0x64,
- 0xcc, 0x93, 0x97, 0x44, 0x55, 0x61, 0xcc, 0x47, 0x8b, 0xbf, 0x8f, 0xfb, 0x3c, 0xe3, 0x5b, 0x90,
- 0x96, 0xe8, 0xe8, 0x7c, 0xb4, 0x3b, 0x91, 0x6f, 0x33, 0x7c, 0xae, 0x5a, 0x0d, 0x0c, 0x69, 0x49,
- 0x48, 0x19, 0x5e, 0xf8, 0x86, 0x84, 0x10, 0xf5, 0xc5, 0xbf, 0xd5, 0xe0, 0xb5, 0x65, 0xca, 0x68,
- 0x93, 0xd1, 0xd6, 0x8a, 0x45, 0x3b, 0xad, 0x93, 0x36, 0xce, 0xda, 0x89, 0x1b, 0xe7, 0x71, 0x6f,
- 0x5f, 0x7a, 0xf4, 0xed, 0x6b, 0x0d, 0xce, 0x8c, 0xb2, 0xa8, 0x34, 0x5a, 0x87, 0xf4, 0xae, 0x80,
- 0x1c, 0x7d, 0xea, 0x8c, 0xed, 0x20, 0x0a, 0x0d, 0x3f, 0x86, 0xa9, 0xd8, 0x82, 0xd0, 0x30, 0xb7,
- 0xa8, 0x8a, 0x76, 0x72, 0x82, 0xea, 0x60, 0xb0, 0x41, 0x4f, 0x06, 0xb9, 0xe9, 0x85, 0x73, 0xc7,
- 0x50, 0xdd, 0x1e, 0xf4, 0x28, 0x11, 0x88, 0xdc, 0x1d, 0x9a, 0xa6, 0xdb, 0xb2, 0x6c, 0xb3, 0x63,
- 0x31, 0xc9, 0xbe, 0x41, 0xa2, 0xa0, 0x4b, 0x17, 0x21, 0x17, 0xfc, 0x8b, 0x80, 0xf2, 0x90, 0x59,
- 0xd9, 0x24, 0x3f, 0x5c, 0x24, 0xcb, 0xc5, 0x04, 0x2a, 0x40, 0xb6, 0xb1, 0xb8, 0x74, 0x5b, 0xcc,
- 0xb4, 0x4b, 0xf7, 0xe0, 0xf4, 0x91, 0x43, 0x10, 0x40, 0x7a, 0x6b, 0x9b, 0xac, 0x6d, 0xac, 0x16,
- 0x13, 0x28, 0x03, 0xfa, 0xda, 0xc6, 0x76, 0x51, 0x43, 0x39, 0x48, 0xad, 0xac, 0x6f, 0x2e, 0x6e,
- 0x17, 0x93, 0x28, 0x0b, 0x46, 0x63, 0x73, 0x73, 0xbd, 0xa8, 0x73, 0x62, 0xcb, 0x1f, 0x91, 0xc5,
- 0xed, 0xb5, 0xcd, 0x8d, 0xa2, 0xc1, 0x51, 0x1a, 0x3f, 0xda, 0xbe, 0xb9, 0x55, 0x4c, 0x2d, 0x7c,
- 0x91, 0xf2, 0x53, 0x83, 0x8b, 0xbe, 0x07, 0x29, 0x19, 0xef, 0xcf, 0x84, 0x92, 0x45, 0xff, 0x07,
- 0xa8, 0x9c, 0x3d, 0x02, 0x97, 0x0a, 0xc7, 0x89, 0x77, 0x34, 0xb4, 0x01, 0x79, 0x01, 0x54, 0x2f,
- 0x7e, 0xe7, 0x47, 0x1f, 0xde, 0x62, 0x94, 0x5e, 0x3f, 0x66, 0x35, 0x42, 0xef, 0x3a, 0xa4, 0xc4,
- 0xa5, 0x8a, 0x72, 0x13, 0x7d, 0xb1, 0x8d, 0x72, 0x13, 0x7b, 0x03, 0xc5, 0x09, 0xf4, 0x5d, 0x30,
- 0x78, 0x0f, 0x8c, 0x22, 0x55, 0x41, 0xe4, 0xa1, 0xae, 0x72, 0x66, 0x14, 0x1c, 0x39, 0xf6, 0xfd,
- 0xe0, 0xbd, 0xf1, 0xec, 0xe8, 0xbb, 0x87, 0xbf, 0xbd, 0x7c, 0x74, 0x21, 0x38, 0x79, 0x53, 0x3e,
- 0x8c, 0xf9, 0xdd, 0x37, 0x7a, 0x3d, 0x7e, 0xd4, 0x48, 0xb3, 0x5e, 0xa9, 0x1e, 0xb7, 0x1c, 0x10,
- 0x5c, 0x87, 0x7c, 0xa4, 0xf3, 0x8d, 0xaa, 0xf5, 0x68, 0xdb, 0x1e, 0x55, 0xeb, 0x98, 0x76, 0x19,
- 0x27, 0xd0, 0x2a, 0x64, 0x79, 0x2d, 0xc5, 0x53, 0x0a, 0x3a, 0x37, 0x5a, 0x32, 0x45, 0x52, 0x65,
- 0xe5, 0xfc, 0xf8, 0xc5, 0x80, 0xd0, 0x0f, 0x20, 0xb7, 0x4a, 0x99, 0x8a, 0x37, 0x67, 0x47, 0x03,
- 0xd6, 0x18, 0x4d, 0xc5, 0x83, 0x1e, 0x4e, 0xa0, 0x8f, 0x45, 0x59, 0x17, 0xbf, 0xc1, 0xa8, 0x76,
- 0xcc, 0x9d, 0x0a, 0xf8, 0x9a, 0x3d, 0x1e, 0xc1, 0xa7, 0xbc, 0xf0, 0x13, 0xff, 0xff, 0xd0, 0x65,
- 0x93, 0x99, 0x68, 0x13, 0xa6, 0x85, 0xc8, 0xc1, 0x1f, 0xa6, 0x31, 0xd7, 0x3c, 0xf2, 0xef, 0x6c,
- 0xcc, 0x35, 0x8f, 0xfe, 0x4b, 0x8b, 0x13, 0x8d, 0xfb, 0x4f, 0x9f, 0x55, 0x13, 0x5f, 0x3e, 0xab,
- 0x26, 0xbe, 0x7e, 0x56, 0xd5, 0x7e, 0x7e, 0x50, 0xd5, 0x7e, 0x7f, 0x50, 0xd5, 0x9e, 0x1c, 0x54,
- 0xb5, 0xa7, 0x07, 0x55, 0xed, 0x1f, 0x07, 0x55, 0xed, 0x9f, 0x07, 0xd5, 0xc4, 0xd7, 0x07, 0x55,
- 0xed, 0xb3, 0xe7, 0xd5, 0xc4, 0xd3, 0xe7, 0xd5, 0xc4, 0x97, 0xcf, 0xab, 0x89, 0xfb, 0x6f, 0xfc,
- 0x87, 0x1e, 0x43, 0x46, 0xc1, 0xb4, 0xf8, 0x5c, 0xfd, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0x10,
- 0x3a, 0x9f, 0xc0, 0xce, 0x1e, 0x00, 0x00,
+ // 2395 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x19, 0x4b, 0x6f, 0x1b, 0xc7,
+ 0x99, 0x4b, 0x2e, 0x5f, 0x1f, 0x29, 0x59, 0x1e, 0xd1, 0x36, 0xc1, 0xd8, 0xa4, 0x3c, 0x48, 0x1d,
+ 0xd5, 0x71, 0xc8, 0x58, 0x6e, 0xdc, 0xd4, 0x6e, 0xd0, 0x9a, 0x52, 0xec, 0xc8, 0x96, 0x1f, 0x19,
+ 0xb9, 0x6e, 0x60, 0xb4, 0x35, 0x56, 0xe4, 0x90, 0x5a, 0x88, 0xdc, 0xa5, 0x77, 0x87, 0xb1, 0x09,
+ 0xf4, 0xd0, 0x3f, 0x10, 0x34, 0xb7, 0xa2, 0x97, 0xa2, 0x05, 0x0a, 0xa4, 0x40, 0xd1, 0x4b, 0x7f,
+ 0x40, 0x7b, 0xe9, 0xc1, 0xbd, 0x39, 0xb7, 0x20, 0x07, 0xb6, 0x96, 0x2f, 0x85, 0x4e, 0xb9, 0x15,
+ 0xe8, 0xa9, 0x98, 0xd7, 0x3e, 0x28, 0xca, 0x0d, 0x55, 0x17, 0x85, 0x2f, 0xdc, 0x99, 0x6f, 0xbe,
+ 0xf9, 0xe6, 0x7b, 0xcd, 0xf7, 0x18, 0xc2, 0x6b, 0x83, 0x9d, 0x6e, 0xa3, 0xe7, 0x76, 0x07, 0x9e,
+ 0xcb, 0xdc, 0x60, 0x50, 0x17, 0xbf, 0x28, 0xa7, 0xe7, 0x95, 0x52, 0xd7, 0xed, 0xba, 0x12, 0x87,
+ 0x8f, 0xe4, 0x7a, 0xa5, 0xd6, 0x75, 0xdd, 0x6e, 0x8f, 0x36, 0xc4, 0x6c, 0x6b, 0xd8, 0x69, 0x30,
+ 0xbb, 0x4f, 0x7d, 0x66, 0xf5, 0x07, 0x0a, 0x61, 0x49, 0x51, 0x7f, 0xd8, 0xeb, 0xbb, 0x6d, 0xda,
+ 0x6b, 0xf8, 0xcc, 0x62, 0xbe, 0xfc, 0x55, 0x18, 0x8b, 0x1c, 0x63, 0x30, 0xf4, 0xb7, 0xc5, 0x8f,
+ 0x04, 0xe2, 0x12, 0xa0, 0x4d, 0xe6, 0x51, 0xab, 0x4f, 0x2c, 0x46, 0x7d, 0x42, 0x1f, 0x0e, 0xa9,
+ 0xcf, 0xf0, 0x4d, 0x58, 0x8c, 0x41, 0xfd, 0x81, 0xeb, 0xf8, 0x14, 0x5d, 0x84, 0x82, 0x1f, 0x82,
+ 0xcb, 0xc6, 0x52, 0x6a, 0xb9, 0xb0, 0x52, 0xaa, 0x07, 0xa2, 0x84, 0x7b, 0x48, 0x14, 0x11, 0xff,
+ 0xca, 0x00, 0x08, 0xd7, 0x50, 0x15, 0x40, 0xae, 0x7e, 0x60, 0xf9, 0xdb, 0x65, 0x63, 0xc9, 0x58,
+ 0x36, 0x49, 0x04, 0x82, 0xce, 0xc1, 0xd1, 0x70, 0x76, 0xcb, 0xdd, 0xdc, 0xb6, 0xbc, 0x76, 0x39,
+ 0x29, 0xd0, 0xf6, 0x2f, 0x20, 0x04, 0xa6, 0x67, 0x31, 0x5a, 0x4e, 0x2d, 0x19, 0xcb, 0x29, 0x22,
+ 0xc6, 0xe8, 0x38, 0x64, 0x18, 0x75, 0x2c, 0x87, 0x95, 0xcd, 0x25, 0x63, 0x39, 0x4f, 0xd4, 0x8c,
+ 0xc3, 0xb9, 0xec, 0xd4, 0x2f, 0xa7, 0x97, 0x8c, 0xe5, 0x39, 0xa2, 0x66, 0xf8, 0xb3, 0x14, 0x14,
+ 0x3f, 0x1c, 0x52, 0x6f, 0xa4, 0x14, 0x80, 0xaa, 0x90, 0xf3, 0x69, 0x8f, 0xb6, 0x98, 0xeb, 0x09,
+ 0x06, 0xf3, 0xcd, 0x64, 0xd9, 0x20, 0x01, 0x0c, 0x95, 0x20, 0xdd, 0xb3, 0xfb, 0x36, 0x13, 0x6c,
+ 0xcd, 0x11, 0x39, 0x41, 0x97, 0x20, 0xed, 0x33, 0xcb, 0x63, 0x82, 0x97, 0xc2, 0x4a, 0xa5, 0x2e,
+ 0x8d, 0x56, 0xd7, 0x46, 0xab, 0xdf, 0xd5, 0x46, 0x6b, 0xe6, 0x9e, 0x8c, 0x6b, 0x89, 0x4f, 0xff,
+ 0x56, 0x33, 0x88, 0xdc, 0x82, 0x2e, 0x42, 0x8a, 0x3a, 0x6d, 0xc1, 0xef, 0xd7, 0xdd, 0xc9, 0x37,
+ 0xa0, 0xf3, 0x90, 0x6f, 0xdb, 0x1e, 0x6d, 0x31, 0xdb, 0x75, 0x84, 0x54, 0xf3, 0x2b, 0x8b, 0xa1,
+ 0x45, 0xd6, 0xf4, 0x12, 0x09, 0xb1, 0xd0, 0x39, 0xc8, 0xf8, 0x5c, 0x75, 0x7e, 0x39, 0xbb, 0x94,
+ 0x5a, 0xce, 0x37, 0x4b, 0x7b, 0xe3, 0xda, 0x82, 0x84, 0x9c, 0x73, 0xfb, 0x36, 0xa3, 0xfd, 0x01,
+ 0x1b, 0x11, 0x85, 0x83, 0xce, 0x42, 0xb6, 0x4d, 0x7b, 0x94, 0x1b, 0x3c, 0x27, 0x0c, 0xbe, 0x10,
+ 0x21, 0x2f, 0x16, 0x88, 0x46, 0x40, 0xf7, 0xc1, 0x1c, 0xf4, 0x2c, 0xa7, 0x9c, 0x17, 0x52, 0xcc,
+ 0x87, 0x88, 0x77, 0x7a, 0x96, 0xd3, 0xbc, 0xf8, 0xe5, 0xb8, 0xb6, 0xd2, 0xb5, 0xd9, 0xf6, 0x70,
+ 0xab, 0xde, 0x72, 0xfb, 0x8d, 0xae, 0x67, 0x75, 0x2c, 0xc7, 0x6a, 0xf4, 0xdc, 0x1d, 0xbb, 0xc1,
+ 0x9d, 0xf3, 0xe1, 0x90, 0x7a, 0x36, 0xf5, 0x1a, 0x9c, 0x46, 0x5d, 0xd8, 0x83, 0xef, 0x23, 0x82,
+ 0xe6, 0x75, 0x33, 0x97, 0x59, 0xc8, 0xe2, 0x71, 0x12, 0xd0, 0xa6, 0xd5, 0x1f, 0xf4, 0xe8, 0x4c,
+ 0xf6, 0x0a, 0x2c, 0x93, 0x3c, 0xb4, 0x65, 0x52, 0xb3, 0x5a, 0x26, 0x54, 0xb3, 0x39, 0x9b, 0x9a,
+ 0xd3, 0x5f, 0x57, 0xcd, 0x99, 0x97, 0xaf, 0x66, 0x5c, 0x06, 0x93, 0xcf, 0xd0, 0x02, 0xa4, 0x3c,
+ 0xeb, 0x91, 0x50, 0x66, 0x91, 0xf0, 0x21, 0xde, 0x80, 0x8c, 0x64, 0x04, 0x55, 0x26, 0xb5, 0x1d,
+ 0xbf, 0x19, 0xa1, 0xa6, 0x53, 0x5a, 0x87, 0x0b, 0xa1, 0x0e, 0x53, 0x42, 0x3b, 0xf8, 0xd7, 0x06,
+ 0xcc, 0x29, 0x13, 0xaa, 0xe8, 0xb2, 0x05, 0x59, 0x79, 0xbb, 0x75, 0x64, 0x39, 0x31, 0x19, 0x59,
+ 0xae, 0xb4, 0xad, 0x01, 0xa3, 0x5e, 0xb3, 0xf1, 0x64, 0x5c, 0x33, 0xbe, 0x1c, 0xd7, 0xde, 0x78,
+ 0x91, 0x94, 0x22, 0xc8, 0xa9, 0xa8, 0xa3, 0x09, 0xa3, 0x37, 0x05, 0x77, 0xcc, 0x57, 0x7e, 0x70,
+ 0xa4, 0x2e, 0x03, 0xe4, 0xba, 0xd3, 0xa5, 0x3e, 0xa7, 0x6c, 0x72, 0x13, 0x12, 0x89, 0x83, 0x7f,
+ 0x0a, 0x8b, 0x31, 0x57, 0x53, 0x7c, 0xbe, 0x0b, 0x19, 0x9f, 0x2b, 0x50, 0xb3, 0x19, 0x31, 0xd4,
+ 0xa6, 0x80, 0x37, 0xe7, 0x15, 0x7f, 0x19, 0x39, 0x27, 0x0a, 0x7f, 0xb6, 0xd3, 0xff, 0x62, 0x40,
+ 0x71, 0xc3, 0xda, 0xa2, 0x3d, 0xed, 0xe3, 0x08, 0x4c, 0xc7, 0xea, 0x53, 0xa5, 0x71, 0x31, 0xe6,
+ 0x01, 0xed, 0x63, 0xab, 0x37, 0xa4, 0x92, 0x64, 0x8e, 0xa8, 0xd9, 0xac, 0x91, 0xc8, 0x38, 0x74,
+ 0x24, 0x32, 0x42, 0x7f, 0x2f, 0x41, 0x9a, 0x7b, 0xd6, 0x48, 0x44, 0xa1, 0x3c, 0x91, 0x13, 0xfc,
+ 0x06, 0xcc, 0x29, 0x29, 0x94, 0xfa, 0x42, 0x96, 0xb9, 0xfa, 0xf2, 0x9a, 0x65, 0xdc, 0x87, 0x8c,
+ 0xd4, 0x36, 0x7a, 0x1d, 0xf2, 0x41, 0x76, 0x13, 0xd2, 0xa6, 0x9a, 0x99, 0xbd, 0x71, 0x2d, 0xc9,
+ 0x7c, 0x12, 0x2e, 0xa0, 0x1a, 0xa4, 0xc5, 0x4e, 0x21, 0xb9, 0xd1, 0xcc, 0xef, 0x8d, 0x6b, 0x12,
+ 0x40, 0xe4, 0x07, 0x9d, 0x04, 0x73, 0x9b, 0x27, 0x18, 0xae, 0x02, 0xb3, 0x99, 0xdb, 0x1b, 0xd7,
+ 0xc4, 0x9c, 0x88, 0x5f, 0x7c, 0x0d, 0x8a, 0x1b, 0xb4, 0x6b, 0xb5, 0x46, 0xea, 0xd0, 0x92, 0x26,
+ 0xc7, 0x0f, 0x34, 0x34, 0x8d, 0xd3, 0x50, 0x0c, 0x4e, 0x7c, 0xd0, 0xf7, 0x95, 0x53, 0x17, 0x02,
+ 0xd8, 0x4d, 0x1f, 0xff, 0xd2, 0x00, 0x65, 0x67, 0x84, 0x21, 0xd3, 0xe3, 0xb2, 0xfa, 0x2a, 0x06,
+ 0xc1, 0xde, 0xb8, 0xa6, 0x20, 0x44, 0x7d, 0xd1, 0x65, 0xc8, 0xfa, 0xe2, 0x44, 0x4e, 0x6c, 0xd2,
+ 0x7d, 0xc4, 0x42, 0xf3, 0x08, 0x77, 0x83, 0xbd, 0x71, 0x4d, 0x23, 0x12, 0x3d, 0x40, 0xf5, 0x58,
+ 0xe6, 0x94, 0x82, 0xcd, 0xef, 0x8d, 0x6b, 0x11, 0x68, 0x34, 0x93, 0xe2, 0x7f, 0x1a, 0x50, 0xb8,
+ 0x6b, 0xd9, 0x81, 0x0b, 0x95, 0xb5, 0x89, 0xc2, 0x18, 0x29, 0x01, 0xfc, 0x4a, 0xb7, 0x69, 0xcf,
+ 0x1a, 0x5d, 0x75, 0x3d, 0x41, 0x77, 0x8e, 0x04, 0xf3, 0x30, 0xd9, 0x99, 0x53, 0x93, 0x5d, 0x7a,
+ 0xf6, 0x90, 0xfa, 0x3f, 0x0c, 0x60, 0xd7, 0xcd, 0x5c, 0x72, 0x21, 0x85, 0xff, 0x60, 0x40, 0x51,
+ 0x4a, 0xae, 0xdc, 0xee, 0x47, 0x90, 0x91, 0x8a, 0x11, 0xb2, 0xbf, 0x20, 0xb8, 0xbc, 0x39, 0x4b,
+ 0x60, 0x51, 0x34, 0xd1, 0xf7, 0x60, 0xbe, 0xed, 0xb9, 0x83, 0x01, 0x6d, 0x6f, 0xaa, 0x10, 0x96,
+ 0x9c, 0x0c, 0x61, 0x6b, 0xd1, 0x75, 0x32, 0x81, 0x8e, 0xff, 0x6a, 0xc0, 0x9c, 0x8a, 0x16, 0xca,
+ 0x56, 0x81, 0x7e, 0x8d, 0x43, 0xa7, 0xac, 0xe4, 0xac, 0x29, 0xeb, 0x38, 0x64, 0xba, 0x9e, 0x3b,
+ 0x1c, 0xf8, 0xe5, 0x94, 0xbc, 0x9b, 0x72, 0x36, 0x5b, 0x2a, 0xc3, 0xd7, 0x61, 0x5e, 0x8b, 0x72,
+ 0x40, 0xc8, 0xac, 0x4c, 0x86, 0xcc, 0xf5, 0x36, 0x75, 0x98, 0xdd, 0xb1, 0x83, 0x20, 0xa8, 0xf0,
+ 0xf1, 0xcf, 0x0d, 0x58, 0x98, 0x44, 0x41, 0x6b, 0x91, 0x7b, 0xc6, 0xc9, 0x9d, 0x39, 0x98, 0x5c,
+ 0x5d, 0x04, 0x1f, 0xff, 0x7d, 0x87, 0x79, 0x23, 0x4d, 0x5a, 0xee, 0xad, 0xbc, 0x03, 0x85, 0xc8,
+ 0x22, 0x4f, 0x51, 0x3b, 0x54, 0xdd, 0x0c, 0xc2, 0x87, 0x61, 0x48, 0x48, 0xca, 0x80, 0x26, 0x26,
+ 0xf8, 0x17, 0x06, 0xcc, 0xc5, 0x6c, 0x89, 0xde, 0x05, 0xb3, 0xe3, 0xb9, 0xfd, 0x99, 0x0c, 0x25,
+ 0x76, 0xa0, 0x6f, 0x41, 0x92, 0xb9, 0x33, 0x99, 0x29, 0xc9, 0x5c, 0x6e, 0x25, 0x25, 0x7e, 0x4a,
+ 0x56, 0xb7, 0x72, 0x86, 0xdf, 0x81, 0xbc, 0x10, 0xe8, 0x8e, 0x65, 0x7b, 0x53, 0xb3, 0xc5, 0x74,
+ 0x81, 0x2e, 0xc3, 0x11, 0x19, 0x09, 0xa7, 0x6f, 0x2e, 0x4e, 0xdb, 0x5c, 0xd4, 0x9b, 0x5f, 0x83,
+ 0xf4, 0xea, 0xf6, 0xd0, 0xd9, 0xe1, 0x5b, 0xda, 0x16, 0xb3, 0xf4, 0x16, 0x3e, 0xc6, 0xc7, 0x60,
+ 0x91, 0xdf, 0x41, 0xea, 0xf9, 0xab, 0xee, 0xd0, 0x61, 0xba, 0xbb, 0x38, 0x07, 0xa5, 0x38, 0x58,
+ 0x79, 0x49, 0x09, 0xd2, 0x2d, 0x0e, 0x10, 0x34, 0xe6, 0x88, 0x9c, 0xe0, 0xdf, 0x1a, 0x80, 0xae,
+ 0x51, 0x26, 0x4e, 0x59, 0x5f, 0x0b, 0xae, 0x47, 0x05, 0x72, 0x7d, 0x8b, 0xb5, 0xb6, 0xa9, 0xe7,
+ 0xeb, 0x1a, 0x44, 0xcf, 0xff, 0x1f, 0xd5, 0x1e, 0x3e, 0x0f, 0x8b, 0x31, 0x2e, 0x95, 0x4c, 0x15,
+ 0xc8, 0xb5, 0x14, 0x4c, 0xe5, 0xbb, 0x60, 0x8e, 0xff, 0x98, 0x84, 0x9c, 0xd8, 0x40, 0x68, 0x07,
+ 0x9d, 0x87, 0x42, 0xc7, 0x76, 0xba, 0xd4, 0x1b, 0x78, 0xb6, 0x52, 0x81, 0xd9, 0x3c, 0xb2, 0x37,
+ 0xae, 0x45, 0xc1, 0x24, 0x3a, 0x41, 0x6f, 0x41, 0x76, 0xe8, 0x53, 0xef, 0x81, 0x2d, 0x6f, 0x7a,
+ 0xbe, 0x59, 0xda, 0x1d, 0xd7, 0x32, 0x3f, 0xf0, 0xa9, 0xb7, 0xbe, 0xc6, 0x33, 0xcf, 0x50, 0x8c,
+ 0x88, 0xfc, 0xb6, 0xd1, 0x0d, 0xe5, 0xa6, 0xa2, 0x08, 0x6b, 0x7e, 0x9b, 0xb3, 0x3f, 0x11, 0xea,
+ 0x06, 0x9e, 0xdb, 0xa7, 0x6c, 0x9b, 0x0e, 0xfd, 0x46, 0xcb, 0xed, 0xf7, 0x5d, 0xa7, 0x21, 0x7a,
+ 0x49, 0x21, 0x34, 0x4f, 0x9f, 0x7c, 0xbb, 0xf2, 0xdc, 0xbb, 0x90, 0x65, 0xdb, 0x9e, 0x3b, 0xec,
+ 0x6e, 0x8b, 0xac, 0x90, 0x6a, 0x5e, 0x9a, 0x9d, 0x9e, 0xa6, 0x40, 0xf4, 0x00, 0x9d, 0xe6, 0xda,
+ 0xa2, 0xad, 0x1d, 0x7f, 0xd8, 0x97, 0x1d, 0x5a, 0x33, 0xbd, 0x37, 0xae, 0x19, 0x6f, 0x91, 0x00,
+ 0x8c, 0x3f, 0x49, 0x42, 0x4d, 0x38, 0xea, 0x3d, 0x51, 0x36, 0x5c, 0x75, 0xbd, 0x9b, 0x94, 0x79,
+ 0x76, 0xeb, 0x96, 0xd5, 0xa7, 0xda, 0x37, 0x6a, 0x50, 0xe8, 0x0b, 0xe0, 0x83, 0xc8, 0x15, 0x80,
+ 0x7e, 0x80, 0x87, 0x4e, 0x01, 0x88, 0x3b, 0x23, 0xd7, 0xe5, 0x6d, 0xc8, 0x0b, 0x88, 0x58, 0x5e,
+ 0x8d, 0x69, 0xaa, 0x31, 0xa3, 0x64, 0x4a, 0x43, 0xeb, 0x93, 0x1a, 0x9a, 0x99, 0x4e, 0xa0, 0x96,
+ 0xa8, 0xaf, 0xa7, 0xe3, 0xbe, 0x8e, 0x3f, 0x37, 0xa0, 0xba, 0xa1, 0x39, 0x3f, 0xa4, 0x3a, 0xb4,
+ 0xbc, 0xc9, 0x97, 0x24, 0x6f, 0xea, 0xbf, 0x93, 0x17, 0x57, 0x01, 0x36, 0x6c, 0x87, 0x5e, 0xb5,
+ 0x7b, 0x8c, 0x7a, 0x53, 0x3a, 0x91, 0x4f, 0x52, 0x61, 0x48, 0x20, 0xb4, 0xa3, 0xe5, 0x5c, 0x8d,
+ 0xc4, 0xe1, 0x97, 0x21, 0x46, 0xf2, 0x25, 0x9a, 0x2d, 0x35, 0x11, 0xa2, 0x76, 0x20, 0xdb, 0x11,
+ 0xe2, 0xc9, 0x94, 0x1a, 0x7b, 0x46, 0x09, 0x65, 0x6f, 0x5e, 0x56, 0x87, 0x5f, 0x78, 0x51, 0x41,
+ 0x22, 0x5e, 0x7d, 0x1a, 0xfe, 0xc8, 0x61, 0xd6, 0xe3, 0xc8, 0x66, 0xa2, 0x4f, 0x40, 0x3f, 0x51,
+ 0xe5, 0x56, 0x7a, 0x6a, 0xb9, 0xa5, 0x6f, 0xee, 0xe1, 0x7b, 0xc6, 0xf7, 0xc2, 0xd8, 0x27, 0xcc,
+ 0xa1, 0x62, 0xdf, 0x19, 0x30, 0x3d, 0xda, 0xd1, 0x49, 0x1a, 0x85, 0xc7, 0x06, 0x98, 0x62, 0x1d,
+ 0xff, 0xc9, 0x80, 0x85, 0x6b, 0x94, 0xc5, 0xcb, 0x9f, 0x57, 0xc8, 0x98, 0xf8, 0x03, 0x38, 0x1a,
+ 0xe1, 0x5f, 0x49, 0x7f, 0x61, 0xa2, 0xe6, 0x39, 0x16, 0xca, 0xbf, 0xee, 0xb4, 0xe9, 0x63, 0xd5,
+ 0x2b, 0xc6, 0xcb, 0x9d, 0x3b, 0x50, 0x88, 0x2c, 0xa2, 0x2b, 0x13, 0x85, 0x4e, 0xe4, 0x65, 0x27,
+ 0x48, 0xd6, 0xcd, 0x92, 0x92, 0x49, 0x76, 0x8b, 0xaa, 0x8c, 0x0d, 0x8a, 0x82, 0x4d, 0x40, 0xc2,
+ 0x5c, 0x82, 0x6c, 0x34, 0x2d, 0x09, 0xe8, 0x8d, 0xa0, 0xe2, 0x09, 0xe6, 0xe8, 0x34, 0x98, 0x9e,
+ 0xfb, 0x48, 0x57, 0xb0, 0x73, 0xe1, 0x91, 0xc4, 0x7d, 0x44, 0xc4, 0x12, 0xbe, 0x0c, 0x29, 0xe2,
+ 0x3e, 0x42, 0x55, 0x00, 0xcf, 0x72, 0xba, 0xf4, 0x5e, 0xd0, 0x38, 0x15, 0x49, 0x04, 0x72, 0x40,
+ 0xc9, 0xb0, 0x0a, 0x47, 0xa3, 0x1c, 0x49, 0x73, 0xd7, 0x21, 0xfb, 0xe1, 0x30, 0xaa, 0xae, 0xd2,
+ 0x84, 0xba, 0x64, 0x0f, 0xae, 0x91, 0xb8, 0xcf, 0x40, 0x08, 0x47, 0x27, 0x21, 0xcf, 0xac, 0xad,
+ 0x1e, 0xbd, 0x15, 0x06, 0xb8, 0x10, 0xc0, 0x57, 0x79, 0xcf, 0x77, 0x2f, 0x52, 0xfb, 0x84, 0x00,
+ 0x74, 0x16, 0x16, 0x42, 0x9e, 0xef, 0x78, 0xb4, 0x63, 0x3f, 0x16, 0x16, 0x2e, 0x92, 0x7d, 0x70,
+ 0xb4, 0x0c, 0x47, 0x42, 0xd8, 0xa6, 0xa8, 0x31, 0x4c, 0x81, 0x3a, 0x09, 0xe6, 0xba, 0x11, 0xe2,
+ 0xbe, 0xff, 0x70, 0x68, 0xf5, 0xc4, 0xcd, 0x2b, 0x92, 0x08, 0x04, 0xff, 0xd9, 0x80, 0xa3, 0xd2,
+ 0xd4, 0xbc, 0xdb, 0x7f, 0x15, 0xbd, 0xfe, 0x33, 0x03, 0x50, 0x54, 0x02, 0xe5, 0x5a, 0xdf, 0x88,
+ 0x3e, 0xe3, 0xf0, 0x22, 0xa6, 0x20, 0x5a, 0x59, 0x09, 0x0a, 0x5f, 0x62, 0x30, 0x64, 0x44, 0x21,
+ 0x24, 0x7b, 0x6a, 0x53, 0xf6, 0xca, 0x12, 0x42, 0xd4, 0x97, 0xb7, 0xf8, 0x5b, 0x23, 0x46, 0x7d,
+ 0xd5, 0xe9, 0x8a, 0x16, 0x5f, 0x00, 0x88, 0xfc, 0xf0, 0xb3, 0xa8, 0xc3, 0x84, 0xd7, 0x98, 0xe1,
+ 0x59, 0x0a, 0x44, 0xf4, 0x00, 0xff, 0x3e, 0x09, 0x73, 0xf7, 0xdc, 0xde, 0x30, 0x4c, 0x89, 0xaf,
+ 0x52, 0xaa, 0x88, 0xb5, 0xdf, 0x69, 0xdd, 0x7e, 0x23, 0x30, 0x7d, 0x46, 0x07, 0xc2, 0xb3, 0x52,
+ 0x44, 0x8c, 0x11, 0x86, 0x22, 0xb3, 0xbc, 0x2e, 0x65, 0xb2, 0xaf, 0x29, 0x67, 0x44, 0xc1, 0x19,
+ 0x83, 0xa1, 0x25, 0x28, 0x58, 0xdd, 0xae, 0x47, 0xbb, 0x16, 0xa3, 0xcd, 0x51, 0x39, 0x2b, 0x0e,
+ 0x8b, 0x82, 0xf0, 0x47, 0x30, 0xaf, 0x95, 0xa5, 0x4c, 0xfa, 0x36, 0x64, 0x3f, 0x16, 0x90, 0x29,
+ 0x4f, 0x5e, 0x12, 0x55, 0x85, 0x31, 0x8d, 0x16, 0x7f, 0x1f, 0xd7, 0x3c, 0xe3, 0xeb, 0x90, 0x91,
+ 0xe8, 0xe8, 0x64, 0xb4, 0x3b, 0x91, 0x6f, 0x33, 0x7c, 0xae, 0x5a, 0x0d, 0x0c, 0x19, 0x49, 0x48,
+ 0x19, 0x5e, 0xf8, 0x86, 0x84, 0x10, 0xf5, 0xc5, 0xbf, 0x31, 0xe0, 0xd8, 0x1a, 0x65, 0xb4, 0xc5,
+ 0x68, 0xfb, 0xaa, 0x4d, 0x7b, 0xed, 0xc3, 0x36, 0xce, 0xc6, 0xa1, 0x1b, 0xe7, 0x69, 0x6f, 0x5f,
+ 0xa9, 0xe8, 0xdb, 0xd7, 0x3a, 0x1c, 0x9f, 0x64, 0x51, 0x69, 0xb4, 0x01, 0x99, 0x8e, 0x80, 0xec,
+ 0x7f, 0xea, 0x8c, 0xed, 0x20, 0x0a, 0x0d, 0x7b, 0x30, 0x17, 0x5b, 0x10, 0x1a, 0xe6, 0x16, 0x55,
+ 0xd1, 0x4e, 0x4e, 0xd0, 0x37, 0xc1, 0x64, 0xa3, 0x81, 0x0a, 0x72, 0xcd, 0x63, 0xff, 0x1a, 0xd7,
+ 0x8e, 0xc6, 0xb6, 0xdd, 0x1d, 0x0d, 0x28, 0x11, 0x28, 0xdc, 0x11, 0x5a, 0x96, 0xd7, 0xb6, 0x1d,
+ 0xab, 0x67, 0x33, 0xc9, 0xb8, 0x49, 0xa2, 0xa0, 0xb3, 0x67, 0x20, 0x1f, 0xfc, 0x7f, 0x80, 0x0a,
+ 0x90, 0xbd, 0x7a, 0x9b, 0xfc, 0xf0, 0x0a, 0x59, 0x5b, 0x48, 0xa0, 0x22, 0xe4, 0x9a, 0x57, 0x56,
+ 0x6f, 0x88, 0x99, 0xb1, 0xf2, 0x79, 0x5a, 0x07, 0x6f, 0x0f, 0x7d, 0x17, 0xd2, 0x32, 0x22, 0x1f,
+ 0x0f, 0x25, 0x8a, 0xbe, 0xd4, 0x57, 0x4e, 0xec, 0x83, 0x4b, 0x95, 0xe0, 0xc4, 0xdb, 0x06, 0xba,
+ 0x05, 0x05, 0x01, 0x54, 0x6f, 0x72, 0x27, 0x27, 0x9f, 0xc6, 0x62, 0x94, 0x4e, 0x1d, 0xb0, 0x1a,
+ 0xa1, 0x77, 0x09, 0xd2, 0xc2, 0xed, 0xa3, 0xdc, 0x44, 0xdf, 0x54, 0xa3, 0xdc, 0xc4, 0x5e, 0x29,
+ 0x71, 0x02, 0x7d, 0x07, 0x4c, 0xde, 0xa5, 0xa2, 0x48, 0xde, 0x8e, 0x3c, 0xa5, 0x55, 0x8e, 0x4f,
+ 0x82, 0x23, 0xc7, 0xbe, 0x17, 0xbc, 0x08, 0x9e, 0x98, 0x7c, 0x99, 0xd0, 0xdb, 0xcb, 0xfb, 0x17,
+ 0x82, 0x93, 0x6f, 0xcb, 0xa7, 0x2b, 0xdd, 0x1f, 0xa3, 0x53, 0xf1, 0xa3, 0x26, 0xda, 0xe9, 0x4a,
+ 0xf5, 0xa0, 0xe5, 0x80, 0xe0, 0x06, 0x14, 0x22, 0xbd, 0x69, 0x54, 0xad, 0xfb, 0x1b, 0xeb, 0xa8,
+ 0x5a, 0xa7, 0x34, 0xb4, 0x38, 0x81, 0xae, 0x41, 0x8e, 0x57, 0x3b, 0x3c, 0xe8, 0xa3, 0xd7, 0x26,
+ 0x8b, 0x9a, 0x48, 0x32, 0xab, 0x9c, 0x9c, 0xbe, 0x18, 0x10, 0xfa, 0x3e, 0xe4, 0xaf, 0x51, 0xa6,
+ 0x22, 0xc2, 0x89, 0xc9, 0x90, 0x32, 0x45, 0x53, 0xf1, 0xb0, 0x84, 0x13, 0xe8, 0x23, 0x51, 0x78,
+ 0xc5, 0xef, 0x18, 0xaa, 0x1d, 0x70, 0x97, 0x02, 0xbe, 0x96, 0x0e, 0x46, 0xd0, 0x94, 0x57, 0x7e,
+ 0xac, 0xff, 0xb1, 0x5c, 0xb3, 0x98, 0x85, 0x6e, 0xc3, 0xbc, 0x10, 0x39, 0xf8, 0x4b, 0x33, 0xe6,
+ 0x9a, 0xfb, 0xfe, 0x3f, 0x8d, 0xb9, 0xe6, 0xfe, 0xff, 0x51, 0x71, 0xa2, 0x79, 0xff, 0xe9, 0xb3,
+ 0x6a, 0xe2, 0x8b, 0x67, 0xd5, 0xc4, 0x57, 0xcf, 0xaa, 0xc6, 0xcf, 0x76, 0xab, 0xc6, 0xef, 0x76,
+ 0xab, 0xc6, 0x93, 0xdd, 0xaa, 0xf1, 0x74, 0xb7, 0x6a, 0xfc, 0x7d, 0xb7, 0x6a, 0xfc, 0x63, 0xb7,
+ 0x9a, 0xf8, 0x6a, 0xb7, 0x6a, 0x7c, 0xfa, 0xbc, 0x9a, 0x78, 0xfa, 0xbc, 0x9a, 0xf8, 0xe2, 0x79,
+ 0x35, 0x71, 0xff, 0xf5, 0xff, 0xd0, 0x05, 0xc8, 0x38, 0x95, 0x11, 0x9f, 0x0b, 0xff, 0x0e, 0x00,
+ 0x00, 0xff, 0xff, 0x46, 0xc6, 0x73, 0x88, 0x70, 0x1e, 0x00, 0x00,
}
func (x Direction) String() string {
@@ -2927,13 +2889,6 @@ func (x Direction) String() string {
}
return strconv.Itoa(int(x))
}
-func (x DetectedFieldType) String() string {
- s, ok := DetectedFieldType_name[int32(x)]
- if ok {
- return s
- }
- return strconv.Itoa(int(x))
-}
func (this *StreamRatesRequest) Equal(that interface{}) bool {
if that == nil {
return this == nil
@@ -7823,10 +7778,12 @@ func (m *DetectedField) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i--
dAtA[i] = 0x18
}
- if m.Type != 0 {
- i = encodeVarintLogproto(dAtA, i, uint64(m.Type))
+ if len(m.Type) > 0 {
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintLogproto(dAtA, i, uint64(len(m.Type)))
i--
- dAtA[i] = 0x10
+ dAtA[i] = 0x12
}
if len(m.Label) > 0 {
i -= len(m.Label)
@@ -8777,8 +8734,9 @@ func (m *DetectedField) Size() (n int) {
if l > 0 {
n += 1 + l + sovLogproto(uint64(l))
}
- if m.Type != 0 {
- n += 1 + sovLogproto(uint64(m.Type))
+ l = len(m.Type)
+ if l > 0 {
+ n += 1 + l + sovLogproto(uint64(l))
}
if m.Cardinality != 0 {
n += 1 + sovLogproto(uint64(m.Cardinality))
@@ -15645,10 +15603,10 @@ func (m *DetectedField) Unmarshal(dAtA []byte) error {
m.Label = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
- if wireType != 0 {
+ if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
}
- m.Type = 0
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowLogproto
@@ -15658,11 +15616,24 @@ func (m *DetectedField) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.Type |= DetectedFieldType(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthLogproto
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = DetectedFieldType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Cardinality", wireType)
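
The marshaler change above also moves the tag byte for field 2 from 0x10 to 0x12, which follows directly from the protobuf key encoding (field_number << 3 | wire_type): the old enum used wire type 0 (varint), while the new string uses wire type 2 (length-delimited). A quick check of that arithmetic:

```go
package main

import "fmt"

func main() {
	const fieldNumber = 2
	const wireVarint = 0          // old: enum encoded as a varint
	const wireLengthDelimited = 2 // new: string encoded as length-delimited bytes

	fmt.Printf("old key: %#x\n", fieldNumber<<3|wireVarint)          // 0x10
	fmt.Printf("new key: %#x\n", fieldNumber<<3|wireLengthDelimited) // 0x12
}
```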
diff --git a/pkg/logproto/logproto.proto b/pkg/logproto/logproto.proto
index 48799a85dce1a..136400a555f2e 100644
--- a/pkg/logproto/logproto.proto
+++ b/pkg/logproto/logproto.proto
@@ -439,17 +439,8 @@ message DetectedFieldsResponse {
repeated DetectedField fields = 1;
}
-enum DetectedFieldType {
- STRING = 0;
- INT = 1;
- FLOAT = 2;
- BOOL = 3;
- DURATION = 4;
- BYTES = 5;
-}
-
message DetectedField {
string label = 1;
- DetectedFieldType type = 2;
+ string type = 2 [(gogoproto.casttype) = "DetectedFieldType"];
uint64 cardinality = 3;
}
diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go
index e9fc10bfc09e8..2b438ad158ec1 100644
--- a/pkg/loki/modules.go
+++ b/pkg/loki/modules.go
@@ -1039,6 +1039,7 @@ func (t *Loki) initQueryFrontend() (_ services.Service, err error) {
t.Server.HTTP.Path("/loki/api/v1/labels").Methods("GET", "POST").Handler(frontendHandler)
t.Server.HTTP.Path("/loki/api/v1/label/{name}/values").Methods("GET", "POST").Handler(frontendHandler)
t.Server.HTTP.Path("/loki/api/v1/series").Methods("GET", "POST").Handler(frontendHandler)
+ t.Server.HTTP.Path("/loki/api/v1/detected_fields").Methods("GET", "POST").Handler(frontendHandler)
t.Server.HTTP.Path("/loki/api/v1/index/stats").Methods("GET", "POST").Handler(frontendHandler)
t.Server.HTTP.Path("/loki/api/v1/index/shards").Methods("GET", "POST").Handler(frontendHandler)
t.Server.HTTP.Path("/loki/api/v1/index/volume").Methods("GET", "POST").Handler(frontendHandler)
@@ -1056,10 +1057,6 @@ func (t *Loki) initQueryFrontend() (_ services.Service, err error) {
t.Server.HTTP.Path("/api/prom/tail").Methods("GET", "POST").Handler(defaultHandler)
}
- if t.Cfg.Frontend.ExperimentalAPIsEnabled {
- t.Server.HTTP.Path("/loki/api/experimental/detected_fields").Methods("GET", "POST").Handler(frontendHandler)
- }
-
if t.frontend == nil {
return services.NewIdleService(nil, func(_ error) error {
if t.stopper != nil {
diff --git a/pkg/lokifrontend/config.go b/pkg/lokifrontend/config.go
index 648a049c74812..30ab5cd29fecc 100644
--- a/pkg/lokifrontend/config.go
+++ b/pkg/lokifrontend/config.go
@@ -20,8 +20,6 @@ type Config struct {
TailProxyURL string `yaml:"tail_proxy_url"`
TLS tls.ClientConfig `yaml:"tail_tls_config"`
-
- ExperimentalAPIsEnabled bool `yaml:"experimental_apis_enabled"`
}
// RegisterFlags adds the flags required to config this to the given FlagSet.
@@ -34,5 +32,4 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
f.BoolVar(&cfg.CompressResponses, "querier.compress-http-responses", true, "Compress HTTP responses.")
f.StringVar(&cfg.DownstreamURL, "frontend.downstream-url", "", "URL of downstream Loki.")
f.StringVar(&cfg.TailProxyURL, "frontend.tail-proxy-url", "", "URL of querier for tail proxy.")
- f.BoolVar(&cfg.ExperimentalAPIsEnabled, "frontend.experimental-apis-enabled", false, "Whether to enable experimental APIs in the frontend.")
}
diff --git a/pkg/querier/queryrange/codec.go b/pkg/querier/queryrange/codec.go
index 397546cfaf98f..820931e97efd3 100644
--- a/pkg/querier/queryrange/codec.go
+++ b/pkg/querier/queryrange/codec.go
@@ -866,7 +866,7 @@ func (c Codec) EncodeRequest(ctx context.Context, r queryrangebase.Request) (*ht
}
u := &url.URL{
- Path: "/loki/api/experimental/detected_fields",
+ Path: "/loki/api/v1/detected_fields",
RawQuery: params.Encode(),
}
req := &http.Request{
@@ -904,7 +904,7 @@ func (c Codec) Path(r queryrangebase.Request) string {
case *logproto.VolumeRequest:
return "/loki/api/v1/index/volume_range"
case *DetectedFieldsRequest:
- return "/loki/api/experimental/detected_fields"
+ return "/loki/api/v1/detected_fields"
}
return "other"
diff --git a/pkg/querier/queryrange/roundtrip.go b/pkg/querier/queryrange/roundtrip.go
index ee896639f8a8c..3d1a5daf1afb4 100644
--- a/pkg/querier/queryrange/roundtrip.go
+++ b/pkg/querier/queryrange/roundtrip.go
@@ -432,7 +432,7 @@ func getOperation(path string) string {
return VolumeRangeOp
case path == "/loki/api/v1/index/shards":
return IndexShardsOp
- case path == "/loki/api/experimental/detected_fields":
+ case path == "/loki/api/v1/detected_fields":
return DetectedFieldsOp
default:
return ""
|
fix
|
fix issues with frontend integration (#12406)
|
c9b2907f3c97cf0a14837c0b27cad7a06d84f447
|
2024-10-23 09:09:38
|
Robert Jacob
|
fix: Update renovate ignore for operator API with new module path (#14581)
| false
|
diff --git a/.github/renovate.json b/.github/renovate.json
index e0a417fddf132..903f53254e27c 100644
--- a/.github/renovate.json
+++ b/.github/renovate.json
@@ -16,7 +16,7 @@
"matchFileNames": [ "operator/go.mod" ],
"matchPackageNames": [
"github.com/grafana/loki",
- "github.com/grafana/loki/operator/apis/loki"
+ "github.com/grafana/loki/operator/api/loki"
],
"enabled": false
}
|
fix
|
Update renovate ignore for operator API with new module path (#14581)
|
1331dc5bdfb7f0d6830031721d8286418bc5ac47
|
2024-03-14 20:29:31
|
Travis Patterson
|
fix: add metadata resource and source attrs to stats (#12201)
| false
|
diff --git a/pkg/loghttp/push/otlp.go b/pkg/loghttp/push/otlp.go
index 58a594b01221c..47a5959b1a333 100644
--- a/pkg/loghttp/push/otlp.go
+++ b/pkg/loghttp/push/otlp.go
@@ -30,8 +30,9 @@ const (
func newPushStats() *Stats {
return &Stats{
- LogLinesBytes: map[time.Duration]int64{},
- StructuredMetadataBytes: map[time.Duration]int64{},
+ LogLinesBytes: map[time.Duration]int64{},
+ StructuredMetadataBytes: map[time.Duration]int64{},
+ ResourceAndSourceMetadataLabels: map[time.Duration]push.LabelsAdapter{},
}
}
@@ -146,7 +147,10 @@ func otlpToLokiPushRequest(ld plog.Logs, userID string, tenantsRetention Tenants
}
resourceAttributesAsStructuredMetadataSize := labelsSize(resourceAttributesAsStructuredMetadata)
- stats.StructuredMetadataBytes[tenantsRetention.RetentionPeriodFor(userID, lbs)] += int64(resourceAttributesAsStructuredMetadataSize)
+ retentionPeriodForUser := tenantsRetention.RetentionPeriodFor(userID, lbs)
+
+ stats.StructuredMetadataBytes[retentionPeriodForUser] += int64(resourceAttributesAsStructuredMetadataSize)
+ stats.ResourceAndSourceMetadataLabels[retentionPeriodForUser] = append(stats.ResourceAndSourceMetadataLabels[retentionPeriodForUser], resourceAttributesAsStructuredMetadata...)
for j := 0; j < sls.Len(); j++ {
scope := sls.At(j).Scope()
@@ -196,7 +200,8 @@ func otlpToLokiPushRequest(ld plog.Logs, userID string, tenantsRetention Tenants
}
scopeAttributesAsStructuredMetadataSize := labelsSize(scopeAttributesAsStructuredMetadata)
- stats.StructuredMetadataBytes[tenantsRetention.RetentionPeriodFor(userID, lbs)] += int64(scopeAttributesAsStructuredMetadataSize)
+ stats.StructuredMetadataBytes[retentionPeriodForUser] += int64(scopeAttributesAsStructuredMetadataSize)
+ stats.ResourceAndSourceMetadataLabels[retentionPeriodForUser] = append(stats.ResourceAndSourceMetadataLabels[retentionPeriodForUser], scopeAttributesAsStructuredMetadata...)
for k := 0; k < logs.Len(); k++ {
log := logs.At(k)
@@ -217,12 +222,12 @@ func otlpToLokiPushRequest(ld plog.Logs, userID string, tenantsRetention Tenants
pushRequestsByStream[labelsStr] = stream
metadataSize := int64(labelsSize(entry.StructuredMetadata) - resourceAttributesAsStructuredMetadataSize - scopeAttributesAsStructuredMetadataSize)
- stats.StructuredMetadataBytes[tenantsRetention.RetentionPeriodFor(userID, lbs)] += metadataSize
- stats.LogLinesBytes[tenantsRetention.RetentionPeriodFor(userID, lbs)] += int64(len(entry.Line))
+ stats.StructuredMetadataBytes[retentionPeriodForUser] += metadataSize
+ stats.LogLinesBytes[retentionPeriodForUser] += int64(len(entry.Line))
if tracker != nil {
- tracker.ReceivedBytesAdd(userID, tenantsRetention.RetentionPeriodFor(userID, lbs), lbs, float64(len(entry.Line)))
- tracker.ReceivedBytesAdd(userID, tenantsRetention.RetentionPeriodFor(userID, lbs), lbs, float64(metadataSize))
+ tracker.ReceivedBytesAdd(userID, retentionPeriodForUser, lbs, float64(len(entry.Line)))
+ tracker.ReceivedBytesAdd(userID, retentionPeriodForUser, lbs, float64(metadataSize))
}
stats.NumLines++
diff --git a/pkg/loghttp/push/otlp_test.go b/pkg/loghttp/push/otlp_test.go
index 3f8e3e8a92ca7..6cd0bacada00f 100644
--- a/pkg/loghttp/push/otlp_test.go
+++ b/pkg/loghttp/push/otlp_test.go
@@ -79,6 +79,9 @@ func TestOTLPToLokiPushRequest(t *testing.T) {
StructuredMetadataBytes: map[time.Duration]int64{
time.Hour: 0,
},
+ ResourceAndSourceMetadataLabels: map[time.Duration]push.LabelsAdapter{
+ time.Hour: nil,
+ },
StreamLabelsSize: 21,
MostRecentEntryTimestamp: now,
},
@@ -115,6 +118,9 @@ func TestOTLPToLokiPushRequest(t *testing.T) {
StructuredMetadataBytes: map[time.Duration]int64{
time.Hour: 0,
},
+ ResourceAndSourceMetadataLabels: map[time.Duration]push.LabelsAdapter{
+ time.Hour: nil,
+ },
StreamLabelsSize: 27,
MostRecentEntryTimestamp: now,
},
@@ -152,6 +158,9 @@ func TestOTLPToLokiPushRequest(t *testing.T) {
StructuredMetadataBytes: map[time.Duration]int64{
time.Hour: 0,
},
+ ResourceAndSourceMetadataLabels: map[time.Duration]push.LabelsAdapter{
+ time.Hour: nil,
+ },
StreamLabelsSize: 47,
MostRecentEntryTimestamp: now,
/*
@@ -252,6 +261,13 @@ func TestOTLPToLokiPushRequest(t *testing.T) {
StructuredMetadataBytes: map[time.Duration]int64{
time.Hour: 37,
},
+ ResourceAndSourceMetadataLabels: map[time.Duration]push.LabelsAdapter{
+ time.Hour: []push.LabelAdapter{
+ {Name: "service_image", Value: "loki"},
+ {Name: "op", Value: "buzz"},
+ {Name: "scope_name", Value: "fizz"},
+ },
+ },
StreamLabelsSize: 21,
MostRecentEntryTimestamp: now,
},
@@ -336,6 +352,13 @@ func TestOTLPToLokiPushRequest(t *testing.T) {
StructuredMetadataBytes: map[time.Duration]int64{
time.Hour: 97,
},
+ ResourceAndSourceMetadataLabels: map[time.Duration]push.LabelsAdapter{
+ time.Hour: []push.LabelAdapter{
+ {Name: "resource_nested_foo", Value: "bar"},
+ {Name: "scope_nested_foo", Value: "bar"},
+ {Name: "scope_name", Value: "fizz"},
+ },
+ },
StreamLabelsSize: 21,
MostRecentEntryTimestamp: now,
},
@@ -479,6 +502,14 @@ func TestOTLPToLokiPushRequest(t *testing.T) {
StructuredMetadataBytes: map[time.Duration]int64{
time.Hour: 113,
},
+ ResourceAndSourceMetadataLabels: map[time.Duration]push.LabelsAdapter{
+ time.Hour: []push.LabelAdapter{
+ {Name: "pod_ip", Value: "10.200.200.200"},
+ {Name: "resource_nested_foo", Value: "bar"},
+ {Name: "scope_nested_foo", Value: "bar"},
+ {Name: "scope_name", Value: "fizz"},
+ },
+ },
StreamLabelsSize: 42,
MostRecentEntryTimestamp: now,
},
diff --git a/pkg/loghttp/push/push.go b/pkg/loghttp/push/push.go
index f2acc8717539a..cb49b287d9411 100644
--- a/pkg/loghttp/push/push.go
+++ b/pkg/loghttp/push/push.go
@@ -5,6 +5,7 @@ import (
"compress/gzip"
"fmt"
"github.com/go-kit/log/level"
+ "github.com/grafana/loki/pkg/push"
"io"
"math"
"mime"
@@ -73,16 +74,17 @@ type RequestParser func(userID string, r *http.Request, tenantsRetention Tenants
type RequestParserWrapper func(inner RequestParser) RequestParser
type Stats struct {
- Errs []error
- NumLines int64
- LogLinesBytes map[time.Duration]int64
- StructuredMetadataBytes map[time.Duration]int64
- StreamLabelsSize int64
- MostRecentEntryTimestamp time.Time
- ContentType string
- ContentEncoding string
- BodySize int64
-
+ Errs []error
+ NumLines int64
+ LogLinesBytes map[time.Duration]int64
+ StructuredMetadataBytes map[time.Duration]int64
+ ResourceAndSourceMetadataLabels map[time.Duration]push.LabelsAdapter
+ StreamLabelsSize int64
+ MostRecentEntryTimestamp time.Time
+ ContentType string
+ ContentEncoding string
+
+ BodySize int64
 	// Extra is a place for a wrapped parser to record any interesting stats as key-value pairs to be logged
Extra []any
}
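
With the new `ResourceAndSourceMetadataLabels` field, a wrapped parser can see exactly which resource and scope attributes were stored as structured metadata, bucketed by retention period. A hedged usage fragment (it assumes a populated `stats *Stats` and `fmt` in scope; field and type names come from the diff above):

```go
// Illustrative fragment, not code from the commit.
for retention, lbls := range stats.ResourceAndSourceMetadataLabels {
	for _, l := range lbls {
		// l is a push.LabelAdapter with Name and Value fields.
		fmt.Printf("retention=%s metadata %s=%s\n", retention, l.Name, l.Value)
	}
}
```
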
|
fix
|
add metadata resource and source attrs to stats (#12201)
|
1f3c467b412dabf7f330dc71befcdf50596ba517
|
2024-08-13 19:12:42
|
renovate[bot]
|
fix(deps): update module github.com/datadog/sketches-go to v1.4.6 (#13865)
| false
|
diff --git a/go.mod b/go.mod
index 98f96b72004b5..822c126a6651e 100644
--- a/go.mod
+++ b/go.mod
@@ -114,7 +114,7 @@ require (
require (
github.com/Azure/go-autorest/autorest v0.11.29
- github.com/DataDog/sketches-go v1.4.4
+ github.com/DataDog/sketches-go v1.4.6
github.com/DmitriyVTitov/size v1.5.0
github.com/IBM/go-sdk-core/v5 v5.13.1
github.com/IBM/ibm-cos-sdk-go v1.10.0
diff --git a/go.sum b/go.sum
index 39d0ec92e4640..07561f75760ec 100644
--- a/go.sum
+++ b/go.sum
@@ -244,8 +244,8 @@ github.com/Code-Hex/go-generics-cache v1.5.1 h1:6vhZGc5M7Y/YD8cIUcY8kcuQLB4cHR7U
github.com/Code-Hex/go-generics-cache v1.5.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4=
github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
-github.com/DataDog/sketches-go v1.4.4 h1:dF52vzXRFSPOj2IjXSWLvXq3jubL4CI69kwYjJ1w5Z8=
-github.com/DataDog/sketches-go v1.4.4/go.mod h1:XR0ns2RtEEF09mDKXiKZiQg+nfZStrq1ZuL1eezeZe0=
+github.com/DataDog/sketches-go v1.4.6 h1:acd5fb+QdUzGrosfNLwrIhqyrbMORpvBy7mE+vHlT3I=
+github.com/DataDog/sketches-go v1.4.6/go.mod h1:7Y8GN8Jf66DLyDhc94zuWA3uHEt/7ttt8jHOBWWrSOg=
github.com/DataDog/zstd v1.3.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
github.com/DmitriyVTitov/size v1.5.0 h1:/PzqxYrOyOUX1BXj6J9OuVRVGe+66VL4D9FlUaW515g=
github.com/DmitriyVTitov/size v1.5.0/go.mod h1:le6rNI4CoLQV1b9gzp1+3d7hMAD/uu2QcJ+aYbNgiU0=
diff --git a/vendor/github.com/DataDog/sketches-go/ddsketch/ddsketch.go b/vendor/github.com/DataDog/sketches-go/ddsketch/ddsketch.go
index 33a0ea5b2bd66..27bd0e7aef303 100644
--- a/vendor/github.com/DataDog/sketches-go/ddsketch/ddsketch.go
+++ b/vendor/github.com/DataDog/sketches-go/ddsketch/ddsketch.go
@@ -173,7 +173,13 @@ func (s *DDSketch) GetValueAtQuantile(quantile float64) (float64, error) {
return math.NaN(), errEmptySketch
}
- rank := quantile * (count - 1)
+ // Use an explicit floating point conversion (as per Go specification) to make sure that no
+ // "fused multiply and add" (FMA) operation is used in the following code subtracting values
+ // from `rank`. Not doing so can lead to inconsistent rounding and return value for this
+ // function, depending on the architecture and whether FMA operations are used or not by the
+ // compiler.
+ rank := float64(quantile * (count - 1))
+
negativeValueCount := s.negativeValueStore.TotalCount()
if rank < negativeValueCount {
return -s.Value(s.negativeValueStore.KeyAtRank(negativeValueCount - 1 - rank)), nil
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 79399e154d408..bc4cba78abd02 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -186,8 +186,8 @@ github.com/Code-Hex/go-generics-cache/policy/lfu
github.com/Code-Hex/go-generics-cache/policy/lru
github.com/Code-Hex/go-generics-cache/policy/mru
github.com/Code-Hex/go-generics-cache/policy/simple
-# github.com/DataDog/sketches-go v1.4.4
-## explicit; go 1.15
+# github.com/DataDog/sketches-go v1.4.6
+## explicit; go 1.18
github.com/DataDog/sketches-go/ddsketch
github.com/DataDog/sketches-go/ddsketch/encoding
github.com/DataDog/sketches-go/ddsketch/mapping
|
fix
|
update module github.com/datadog/sketches-go to v1.4.6 (#13865)
|
d43ee3f94e8456ef307a6aa8103661968f65c5ce
|
2025-03-01 06:42:07
|
renovate[bot]
|
chore(deps): update dependency @types/node to v22.13.7 (main) (#16512)
| false
|
diff --git a/pkg/ui/frontend/package-lock.json b/pkg/ui/frontend/package-lock.json
index f3ebe9e1cb0ce..9f68f7fba0f67 100644
--- a/pkg/ui/frontend/package-lock.json
+++ b/pkg/ui/frontend/package-lock.json
@@ -2738,9 +2738,9 @@
"license": "MIT"
},
"node_modules/@types/node": {
- "version": "22.13.6",
- "resolved": "https://registry.npmjs.org/@types/node/-/node-22.13.6.tgz",
- "integrity": "sha512-GYmF65GI7417CpZXsEXMjT8goQQDnpRnJnDw6jIYa+le3V/lMazPZ4vZmK1B/9R17fh2VLr2zuy9d/h5xgrLAg==",
+ "version": "22.13.7",
+ "resolved": "https://registry.npmjs.org/@types/node/-/node-22.13.7.tgz",
+ "integrity": "sha512-oU2q+BsQldB9lYxHNp/5aZO+/Bs0Usa74Abo9mAKulz4ahQyXRHK6UVKYIN8KSC8HXwhWSi7b49JnX+txuac0w==",
"dev": true,
"license": "MIT",
"dependencies": {
|
chore
|
update dependency @types/node to v22.13.7 (main) (#16512)
|
52c8b5c481441b86585e980a0ce1cb8d8890c7f2
|
2024-11-15 21:38:07
|
Paul Rogers
|
chore: Configure renovate to be more in line to the end state (#14943)
| false
|
diff --git a/.github/renovate.json b/.github/renovate.json
deleted file mode 100644
index 99063a284c585..0000000000000
--- a/.github/renovate.json
+++ /dev/null
@@ -1,63 +0,0 @@
-{
- "$schema": "https://docs.renovatebot.com/renovate-schema.json",
- "extends": [
- "config:recommended"
- ],
- "labels": [
- "dependencies"
- ],
- "prHourlyLimit": 4,
- "baseBranches": [
- "main"
- ],
- "packageRules": [
- {
- "matchBaseBranches": [
- "release-2.9.x",
- "release-2.8.x"
- ],
- "enabled": false,
- "matchPackageNames": [
- "*"
- ]
- },
- {
- "matchFileNames": [
- "operator/go.mod"
- ],
- "matchPackageNames": [
- "github.com/grafana/loki",
- "github.com/grafana/loki/operator/api/loki"
- ],
- "enabled": false
- },
- {
- "matchManagers": ["gomod"],
- "matchPackageNames": ["go"],
- "enabled": false
- },
- {
- "matchManagers": ["dockerfile"],
- "matchPackageNames": ["golang", "grafana/loki-build-image"],
- "enabled": false
- }
- ],
- "digest": {
- "enabled": false
- },
- "vulnerabilityAlerts": {
- "enabled": true,
- "addLabels": [
- "area/security"
- ]
- },
- "osvVulnerabilityAlerts": true,
- "prConcurrentLimit": 10,
- "rebaseWhen": "conflicted",
- "branchPrefix": "deps-update/",
- "postUpdateOptions": [
- "gomodTidy"
- ],
- "semanticCommitType": "fix",
- "semanticCommitScope": "deps"
-}
diff --git a/.github/renovate.json5 b/.github/renovate.json5
new file mode 100644
index 0000000000000..f1ae4d4a87c52
--- /dev/null
+++ b/.github/renovate.json5
@@ -0,0 +1,97 @@
+{
+ "$schema": "https://docs.renovatebot.com/renovate-schema.json",
+ "extends": [
+ "config:recommended"
+ ],
+ "labels": [
+ "dependencies"
+ ],
+ "prHourlyLimit": 4,
+ "baseBranches": [
+ "main"
+ ],
+ "packageRules": [
+ {
+ "matchBaseBranches": [
+ "release-2.9.x",
+ "release-2.8.x"
+ ],
+ "enabled": false,
+ "matchPackageNames": [
+ "*"
+ ]
+ },
+ {
+ // Disable Go version updates
+ "matchManagers": ["gomod"],
+ "matchPackageNames": ["go"],
+ "enabled": false
+ },
+ {
+ // Disable Go and loki-build-image updates for Dockerfiles
+ "matchManagers": ["dockerfile"],
+ "matchPackageNames": ["golang", "grafana/loki-build-image"],
+ "enabled": false
+ },
+ {
+ // Don't automatically merge GitHub Actions updates
+ "matchManagers": ["github-actions"],
+ "enabled": true,
+ "autoApprove": false,
+ "automerge": false
+ },
+ {
+ // Separate out Helm updates from other dependencies
+ // Don't automatically merge Helm updates
+ // Updates to this require the docs to be updated
+ "matchManagers": ["helm-requirements", "helm-values", "helmv3"],
+ "groupName": "helm-{{packageName}}",
+ "autoApprove": false,
+ "automerge": false
+ },
+ {
+ // Separate out lambda-promtail updates from other dependencies
+ // Don't automatically merge lambda-promtail updates
+ // Updates to this require the nix SHA to be updated
+ "matchFileNames": ["tools/lambda-promtail/go.mod"],
+ "groupName": "lambdapromtail-{{packageName}}",
+ "enabled": true,
+ "autoApprove": false,
+ "automerge": false
+ },
+ {
+ // Disable operator updates
+ "matchFileNames": ["operator/go.mod"],
+ "enabled": false,
+ "autoApprove": false,
+ "automerge": false
+ },
+ {
+ // Enable all other updates
+ "matchFileNames": ["!tools/lambda-promtail/go.mod", "!operator/go.mod"],
+ "groupName": "{{packageName}}",
+ "enabled": true,
+ // After we have tested the above configuration, we can enable the following
+ "automerge": false,
+ "autoApprove": false
+ }
+ ],
+ "digest": {
+ "enabled": false
+ },
+ "vulnerabilityAlerts": {
+ "enabled": true,
+ "addLabels": [
+ "area/security"
+ ]
+ },
+ "osvVulnerabilityAlerts": true,
+ "prConcurrentLimit": 10,
+ "rebaseWhen": "conflicted",
+ "branchPrefix": "deps-update/",
+ "postUpdateOptions": [
+ "gomodTidy"
+ ],
+ "semanticCommitType": "fix",
+ "semanticCommitScope": "deps"
+}
|
chore
|
Configure renovate to be more in line to the end state (#14943)
|
6d09865911d6cfbb1840ff3a6c33e52bed3d97d1
|
2019-11-20 19:40:00
|
Robert Fratto
|
docs: update limits_config to new structure from #948 (#1278)
| false
|
diff --git a/docs/configuration/README.md b/docs/configuration/README.md
index 3e8f5aed5d4f4..3ce31f652d94b 100644
--- a/docs/configuration/README.md
+++ b/docs/configuration/README.md
@@ -8,6 +8,7 @@ Configuration examples can be found in the [Configuration Examples](examples.md)
* [Configuration File Reference](#configuration-file-reference)
* [server_config](#server_config)
+* [distributor_config](#distributor_config)
* [querier_config](#querier_config)
* [ingester_client_config](#ingester_client_config)
* [grpc_client_config](#grpc_client_config)
@@ -58,6 +59,9 @@ Supported contents and default values of `loki.yaml`:
# Configures the server of the launched module(s).
[server: <server_config>]
+# Configures the distributor.
+[distributor: <distributor_config>]
+
# Configures the querier. Only appropriate when running all modules or
# just the querier.
[querier: <querier_config>]
@@ -135,6 +139,15 @@ The `server_config` block configures Promtail's behavior as an HTTP server:
[http_path_prefix: <string>]
```
+## distributor_config
+
+The `distributor_config` block configures the Loki Distributor.
+
+```yaml
+# Period at which to reload user ingestion limits.
+[limiter_reload_period: <duration> | default = 5m]
+```
+
## querier_config
The `querier_config` block configures the Loki Querier.
@@ -663,23 +676,13 @@ The `limits_config` block configures global and per-tenant limits for ingesting
logs in Loki.
```yaml
-# Per-user ingestion rate limit in samples per second.
-[ingestion_rate: <float> | default = 25000]
+# Per-user ingestion rate limit in sample size per second. Units in MB.
+[ingestion_rate_mb: <float> | default = 4]
-# Per-user allowed ingestion burst size (in number of samples).
-[ingestion_burst_size: <int> | default = 50000]
-
-# Whether or not, for all users, samples with external labels
-# identifying replicas in an HA Prometheus setup will be handled.
-[accept_ha_samples: <boolean> | default = false]
-
-# Prometheus label to look for in samples to identify a
-# Prometheus HA cluster.
-[ha_cluster_label: <string> | default = "cluster"]
-
-# Prometheus label to look for in samples to identify a Prometheus HA
-# replica.
-[ha_replica_label: <string> | default = "__replica__"]
+# Per-user allowed ingestion burst size (in sample size). Units in MB. Warning,
+# very high limits will be reset every limiter_reload_period defined in
+# distributor_config.
+[ingestion_burst_size_mb: <int> | default = 6]
# Maximum length of a label name.
[max_label_name_length: <int> | default = 1024]
@@ -703,14 +706,8 @@ logs in Loki.
# Enforce every sample has a metric name.
[enforce_metric_name: <boolean> | default = true]
-# Maximum number of samples that a query can return.
-[max_samples_per_query: <int> | default = 1000000]
-
-# Maximum number of active series per user.
-[max_series_per_user: <int> | default = 5000000]
-
-# Maximum number of active series per metric name.
-[max_series_per_metric: <int> | default = 50000]
+# Maximum number of active streams per user.
+[max_streams_per_user: <int> | default = 10e3]
# Maximum number of chunks that can be fetched by a single query.
[max_chunks_per_query: <int> | default = 2000000]
@@ -725,6 +722,9 @@ logs in Loki.
# Cardinality limit for index queries
[cardinality_limit: <int> | default = 100000]
+# Maximum number of stream matchers per query.
+[max_streams_matchers_per_query: <int> | default = 1000]
+
# Filename of per-user overrides file
[per_tenant_override_config: <string>]
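
Taken together, the updated reference describes MB-based ingestion limits plus a distributor-level reload period. A hedged example snippet of `loki.yaml` using only options documented above (the values shown are the listed defaults, repeated here purely for illustration):

```yaml
distributor:
  # Period at which to reload user ingestion limits.
  limiter_reload_period: 5m

limits_config:
  ingestion_rate_mb: 4
  ingestion_burst_size_mb: 6
  max_streams_per_user: 10000
  max_streams_matchers_per_query: 1000
```
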
|
docs
|
update limits_config to new structure from #948 (#1278)
|
3a5bed438a22d39170b04ca49b0c1467c140772a
|
2024-12-02 15:59:02
|
Owen Diehl
|
refactor: kafka offset signals (#15201)
| false
|
diff --git a/pkg/kafka/partition/reader_service.go b/pkg/kafka/partition/reader_service.go
index b9d92fc7c5d6c..40c1b64b3b790 100644
--- a/pkg/kafka/partition/reader_service.go
+++ b/pkg/kafka/partition/reader_service.go
@@ -17,9 +17,6 @@ import (
)
const (
- kafkaStartOffset = -2
- kafkaEndOffset = -1
-
phaseStarting = "starting"
phaseRunning = "running"
)
@@ -111,7 +108,7 @@ func newReaderService(
consumerFactory: consumerFactory,
logger: log.With(logger, "partition", reader.Partition(), "consumer_group", reader.ConsumerGroup()),
metrics: newServiceMetrics(reg),
- lastProcessedOffset: kafkaEndOffset,
+ lastProcessedOffset: int64(KafkaEndOffset),
}
// Create the committer
@@ -135,12 +132,12 @@ func (s *ReaderService) starting(ctx context.Context) error {
}
if lastCommittedOffset == int64(KafkaEndOffset) {
- level.Warn(logger).Log("msg", fmt.Sprintf("no committed offset found, starting from %d", kafkaStartOffset))
+ level.Warn(logger).Log("msg", fmt.Sprintf("no committed offset found, starting from %d", KafkaStartOffset))
} else {
level.Debug(logger).Log("msg", "last committed offset", "offset", lastCommittedOffset)
}
- consumeOffset := int64(kafkaStartOffset)
+ consumeOffset := int64(KafkaStartOffset)
if lastCommittedOffset >= 0 {
// Read from the next offset.
consumeOffset = lastCommittedOffset + 1
@@ -222,7 +219,7 @@ func (s *ReaderService) fetchUntilLagSatisfied(
for b.Ongoing() {
// Send a direct request to the Kafka backend to fetch the partition start offset.
- partitionStartOffset, err := s.reader.FetchPartitionOffset(ctx, kafkaStartOffset)
+ partitionStartOffset, err := s.reader.FetchPartitionOffset(ctx, KafkaStartOffset)
if err != nil {
level.Warn(logger).Log("msg", "partition reader failed to fetch partition start offset", "err", err)
b.Wait()
@@ -240,7 +237,7 @@ func (s *ReaderService) fetchUntilLagSatisfied(
// We intentionally don't use WaitNextFetchLastProducedOffset() to not introduce further
// latency.
lastProducedOffsetRequestedAt := time.Now()
- lastProducedOffset, err := s.reader.FetchPartitionOffset(ctx, kafkaEndOffset)
+ lastProducedOffset, err := s.reader.FetchPartitionOffset(ctx, KafkaEndOffset)
if err != nil {
level.Warn(logger).Log("msg", "partition reader failed to fetch last produced offset", "err", err)
b.Wait()
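
The refactor replaces the package-private `kafkaStartOffset`/`kafkaEndOffset` constants with exported `KafkaStartOffset`/`KafkaEndOffset` so other packages can reuse the same sentinels. A hedged guess at the exported declarations; the values match the removed constants and Kafka's conventional special offsets, but the actual type name and location are assumptions:

```go
// Assumed declaration; the real one (elsewhere in pkg/kafka/partition) may differ.
type SpecialOffset int64

const (
	KafkaStartOffset SpecialOffset = -2 // consume from the start of the partition
	KafkaEndOffset   SpecialOffset = -1 // consume only newly produced records
)
```
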
|
refactor
|
kafka offset signals (#15201)
|
74327e53747867a40fa02601cfaea889109468ab
|
2022-06-27 15:38:40
|
Periklis Tsirakidis
|
operator: Disable report on overlay openshift (#6504)
| false
|
diff --git a/operator/CHANGELOG.md b/operator/CHANGELOG.md
index 0edd722d7deba..e89dc12eaeabe 100644
--- a/operator/CHANGELOG.md
+++ b/operator/CHANGELOG.md
@@ -1,5 +1,6 @@
## Main
+- [6504](https://github.com/grafana/loki/pull/6504) **periklis**: Disable usage report on OpenShift
- [6411](https://github.com/grafana/loki/pull/6411) **Red-GV**: Extend schema validation in LokiStack webhook
- [6334](https://github.com/grafana/loki/pull/6433) **periklis**: Move operator cli flags to component config
- [6224](https://github.com/grafana/loki/pull/6224) **periklis**: Add support for GRPC over TLS for Loki components
diff --git a/operator/bundle/manifests/loki-operator-manager-config_v1_configmap.yaml b/operator/bundle/manifests/loki-operator-manager-config_v1_configmap.yaml
index ca683adf12fb2..73421c639953d 100644
--- a/operator/bundle/manifests/loki-operator-manager-config_v1_configmap.yaml
+++ b/operator/bundle/manifests/loki-operator-manager-config_v1_configmap.yaml
@@ -22,7 +22,7 @@ data:
enableLokiStackAlerts: true
enableLokiStackGateway: true
enableLokiStackGatewayRoute: true
- enableGrafanaLabsStats: true
+ enableGrafanaLabsStats: false
enableLokiStackWebhook: true
enableAlertingRuleWebhook: true
enableRecordingRuleWebhook: true
diff --git a/operator/config/overlays/openshift/controller_manager_config.yaml b/operator/config/overlays/openshift/controller_manager_config.yaml
index 3be8c04b8df1b..62f0adc083d8d 100644
--- a/operator/config/overlays/openshift/controller_manager_config.yaml
+++ b/operator/config/overlays/openshift/controller_manager_config.yaml
@@ -19,7 +19,7 @@ featureFlags:
enableLokiStackAlerts: true
enableLokiStackGateway: true
enableLokiStackGatewayRoute: true
- enableGrafanaLabsStats: true
+ enableGrafanaLabsStats: false
enableLokiStackWebhook: true
enableAlertingRuleWebhook: true
enableRecordingRuleWebhook: true
|
operator
|
Disable report on overlay openshift (#6504)
|
9934132b0fdbbf38b87761f5341d1342cf01b528
|
2023-07-24 20:23:40
|
Vitaliy
|
docs: fixed typos in caching.md (#10043)
| false
|
diff --git a/docs/sources/operations/caching.md b/docs/sources/operations/caching.md
index 35aa68e0b9ca4..fed9bd80d122e 100644
--- a/docs/sources/operations/caching.md
+++ b/docs/sources/operations/caching.md
@@ -39,7 +39,7 @@ To enable and configure Memcached:
1. Configure Loki to use the cache.
1. If the Helm chart is used
- Set `memcached.chunk_cache.host` to the Memecache address for the chunk cache, `memcached.results_cache.host` to the Memecache address for the query result cache, `memcached.chunk_cache.enabled=true` and `memcached.results_cache.enabled=true`.
+ Set `memcached.chunk_cache.host` to the Memcached address for the chunk cache, `memcached.results_cache.host` to the Memcached address for the query result cache, `memcached.chunk_cache.enabled=true` and `memcached.results_cache.enabled=true`.
Ensure that the connection limit of Memcached is at least `number_of_clients * max_idle_conns`.
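
A hedged example of the Helm values the corrected sentence refers to; only the keys named above are real, and the hostnames are placeholders:

```yaml
memcached:
  chunk_cache:
    enabled: true
    host: memcached-chunks.example.svc   # placeholder address
  results_cache:
    enabled: true
    host: memcached-results.example.svc  # placeholder address
```
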
|
docs
|
fixed typos in caching.md (#10043)
|
5f98214b4296bf44853a65d29aac8adf8ff60abb
|
2024-12-20 19:50:26
|
renovate[bot]
|
fix(deps): update module github.com/axiomhq/hyperloglog to v0.2.2 (#15524)
| false
|
diff --git a/go.mod b/go.mod
index d75196d19592e..73ac252b4c7da 100644
--- a/go.mod
+++ b/go.mod
@@ -116,7 +116,7 @@ require (
github.com/DmitriyVTitov/size v1.5.0
github.com/IBM/go-sdk-core/v5 v5.18.3
github.com/IBM/ibm-cos-sdk-go v1.12.0
- github.com/axiomhq/hyperloglog v0.2.0
+ github.com/axiomhq/hyperloglog v0.2.2
github.com/buger/jsonparser v1.1.1
github.com/d4l3k/messagediff v1.2.1
github.com/dolthub/swiss v0.2.1
@@ -174,6 +174,7 @@ require (
github.com/gorilla/handlers v1.5.2 // indirect
github.com/hashicorp/golang-lru v1.0.2 // indirect
github.com/imdario/mergo v0.3.16 // indirect
+ github.com/kamstrup/intmap v0.5.1 // indirect
github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect
github.com/mattn/go-runewidth v0.0.16 // indirect
github.com/mdlayher/socket v0.5.1 // indirect
diff --git a/go.sum b/go.sum
index b07aecb1bb477..e30c04121a1f0 100644
--- a/go.sum
+++ b/go.sum
@@ -222,8 +222,8 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.32.4 h1:yDxvkz3/uOKfxnv8YhzOi9m+2OGI
github.com/aws/aws-sdk-go-v2/service/sts v1.32.4/go.mod h1:9XEUty5v5UAsMiFOBJrNibZgwCeOma73jgGwwhgffa8=
github.com/aws/smithy-go v1.22.1 h1:/HPHZQ0g7f4eUeK6HKglFz8uwVfZKgoI25rb/J+dnro=
github.com/aws/smithy-go v1.22.1/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
-github.com/axiomhq/hyperloglog v0.2.0 h1:u1XT3yyY1rjzlWuP6NQIrV4bRYHOaqZaovqjcBEvZJo=
-github.com/axiomhq/hyperloglog v0.2.0/go.mod h1:GcgMjz9gaDKZ3G0UMS6Fq/VkZ4l7uGgcJyxA7M+omIM=
+github.com/axiomhq/hyperloglog v0.2.2 h1:9X9rOdYx82zXKgd1aMsDZNUw3d7DKAHhd2J305HZPA8=
+github.com/axiomhq/hyperloglog v0.2.2/go.mod h1:DLUK9yIzpU5B6YFLjxTIcbHu1g4Y1WQb1m5RH3radaM=
github.com/baidubce/bce-sdk-go v0.9.208 h1:tbtfU0Oawmd422UpUucv5HLNXmHxw9BcLFFbTtkXcDI=
github.com/baidubce/bce-sdk-go v0.9.208/go.mod h1:zbYJMQwE4IZuyrJiFO8tO8NbtYiKTFTbwh4eIsqjVdg=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
@@ -757,6 +757,8 @@ github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfV
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
+github.com/kamstrup/intmap v0.5.1 h1:ENGAowczZA+PJPYYlreoqJvWgQVtAmX1l899WfYFVK0=
+github.com/kamstrup/intmap v0.5.1/go.mod h1:gWUVWHKzWj8xpJVFf5GC0O26bWmv3GqdnIX/LMT6Aq4=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
diff --git a/vendor/github.com/axiomhq/hyperloglog/hyperloglog.go b/vendor/github.com/axiomhq/hyperloglog/hyperloglog.go
index 638b291cd23a9..24b39e43562aa 100644
--- a/vendor/github.com/axiomhq/hyperloglog/hyperloglog.go
+++ b/vendor/github.com/axiomhq/hyperloglog/hyperloglog.go
@@ -18,7 +18,7 @@ type Sketch struct {
p uint8
m uint32
alpha float64
- tmpSet set
+ tmpSet *set
sparseList *compressedList
regs []uint8
}
@@ -45,7 +45,7 @@ func NewSketch(precision uint8, sparse bool) (*Sketch, error) {
alpha: alpha(float64(m)),
}
if sparse {
- s.tmpSet = set{}
+ s.tmpSet = newSet(0)
s.sparseList = newCompressedList(0)
} else {
s.regs = make([]uint8, m)
@@ -65,7 +65,7 @@ func (sk *Sketch) Clone() *Sketch {
}
func (sk *Sketch) maybeToNormal() {
- if uint32(len(sk.tmpSet))*100 > sk.m {
+ if uint32(sk.tmpSet.Len())*100 > sk.m {
sk.mergeSparse()
if uint32(sk.sparseList.Len()) > sk.m {
sk.toNormal()
@@ -90,9 +90,7 @@ func (sk *Sketch) Merge(other *Sketch) error {
}
func (sk *Sketch) mergeSparseSketch(other *Sketch) {
- for k := range other.tmpSet {
- sk.tmpSet.add(k)
- }
+ sk.tmpSet.Merge(other.tmpSet)
for iter := other.sparseList.Iter(); iter.HasNext(); {
sk.tmpSet.add(iter.Next())
}
@@ -105,10 +103,10 @@ func (sk *Sketch) mergeDenseSketch(other *Sketch) {
}
if other.sparse() {
- for k := range other.tmpSet {
+ other.tmpSet.ForEach(func(k uint32) {
i, r := decodeHash(k, other.p, pp)
sk.insert(i, r)
- }
+ })
for iter := other.sparseList.Iter(); iter.HasNext(); {
i, r := decodeHash(iter.Next(), other.p, pp)
sk.insert(i, r)
@@ -123,7 +121,7 @@ func (sk *Sketch) mergeDenseSketch(other *Sketch) {
}
func (sk *Sketch) toNormal() {
- if len(sk.tmpSet) > 0 {
+ if sk.tmpSet.Len() > 0 {
sk.mergeSparse()
}
@@ -165,17 +163,17 @@ func (sk *Sketch) Estimate() uint64 {
}
func (sk *Sketch) mergeSparse() {
- if len(sk.tmpSet) == 0 {
+ if sk.tmpSet.Len() == 0 {
return
}
- keys := make(uint64Slice, 0, len(sk.tmpSet))
- for k := range sk.tmpSet {
+ keys := make(uint64Slice, 0, sk.tmpSet.Len())
+ sk.tmpSet.ForEach(func(k uint32) {
keys = append(keys, k)
- }
+ })
sort.Sort(keys)
- newList := newCompressedList(4*len(sk.tmpSet) + len(sk.sparseList.b))
+ newList := newCompressedList(4*sk.tmpSet.Len() + sk.sparseList.Len())
for iter, i := sk.sparseList.Iter(), 0; iter.HasNext() || i < len(keys); {
if !iter.HasNext() {
newList.Append(keys[i])
@@ -201,7 +199,7 @@ func (sk *Sketch) mergeSparse() {
}
sk.sparseList = newList
- sk.tmpSet = set{}
+ sk.tmpSet = newSet(0)
}
// MarshalBinary implements the encoding.BinaryMarshaler interface.
@@ -277,7 +275,7 @@ func (sk *Sketch) UnmarshalBinary(data []byte) error {
sparse := data[3] == byte(1)
// Make a newSketch Sketch if the precision doesn't match or if the Sketch was used
- if sk.p != p || sk.regs != nil || len(sk.tmpSet) > 0 || (sk.sparseList != nil && sk.sparseList.Len() > 0) {
+ if sk.p != p || sk.regs != nil || sk.tmpSet.Len() > 0 || (sk.sparseList != nil && sk.sparseList.Len() > 0) {
newh, err := NewSketch(p, sparse)
if err != nil {
return err
@@ -292,14 +290,14 @@ func (sk *Sketch) UnmarshalBinary(data []byte) error {
// Unmarshal the tmp_set.
tssz := binary.BigEndian.Uint32(data[4:8])
- sk.tmpSet = make(map[uint32]struct{}, tssz)
+ sk.tmpSet = newSet(int(tssz))
// We need to unmarshal tssz values in total, and each value requires us
// to read 4 bytes.
tsLastByte := int((tssz * 4) + 8)
for i := 8; i < tsLastByte; i += 4 {
k := binary.BigEndian.Uint32(data[i : i+4])
- sk.tmpSet[k] = struct{}{}
+ sk.tmpSet.add(k)
}
// Unmarshal the sparse Sketch.
diff --git a/vendor/github.com/axiomhq/hyperloglog/sparse.go b/vendor/github.com/axiomhq/hyperloglog/sparse.go
index 8c457d3278224..0151740df9859 100644
--- a/vendor/github.com/axiomhq/hyperloglog/sparse.go
+++ b/vendor/github.com/axiomhq/hyperloglog/sparse.go
@@ -2,6 +2,8 @@ package hyperloglog
import (
"math/bits"
+
+ "github.com/kamstrup/intmap"
)
func getIndex(k uint32, p, pp uint8) uint32 {
@@ -34,37 +36,61 @@ func decodeHash(k uint32, p, pp uint8) (uint32, uint8) {
return getIndex(k, p, pp), r
}
-type set map[uint32]struct{}
+type set struct {
+ m *intmap.Set[uint32]
+}
+
+func newSet(size int) *set {
+ return &set{m: intmap.NewSet[uint32](size)}
+}
+
+func (s *set) ForEach(fn func(v uint32)) {
+ s.m.ForEach(func(v uint32) bool {
+ fn(v)
+ return true
+ })
+}
+
+func (s *set) Merge(other *set) {
+ other.m.ForEach(func(v uint32) bool {
+ s.m.Add(v)
+ return true
+ })
+}
+
+func (s *set) Len() int {
+ return s.m.Len()
+}
-func (s set) add(v uint32) bool {
- _, ok := s[v]
- if ok {
+func (s *set) add(v uint32) bool {
+ if s.m.Has(v) {
return false
}
- s[v] = struct{}{}
+ s.m.Add(v)
return true
}
-func (s set) Clone() set {
+func (s *set) Clone() *set {
if s == nil {
return nil
}
- newS := make(map[uint32]struct{}, len(s))
- for k, v := range s {
- newS[k] = v
- }
- return newS
+ newS := intmap.NewSet[uint32](s.m.Len())
+ s.m.ForEach(func(v uint32) bool {
+ newS.Add(v)
+ return true
+ })
+ return &set{m: newS}
}
-func (s set) MarshalBinary() (data []byte, err error) {
+func (s *set) MarshalBinary() (data []byte, err error) {
// 4 bytes for the size of the set, and 4 bytes for each key.
// list.
- data = make([]byte, 0, 4+(4*len(s)))
+ data = make([]byte, 0, 4+(4*s.m.Len()))
// Length of the set. We only need 32 bits because the size of the set
// couldn't exceed that on 32 bit architectures.
- sl := len(s)
+ sl := s.m.Len()
data = append(data, []byte{
byte(sl >> 24),
byte(sl >> 16),
@@ -73,14 +99,15 @@ func (s set) MarshalBinary() (data []byte, err error) {
}...)
// Marshal each element in the set.
- for k := range s {
+ s.m.ForEach(func(k uint32) bool {
data = append(data, []byte{
byte(k >> 24),
byte(k >> 16),
byte(k >> 8),
byte(k),
}...)
- }
+ return true
+ })
return data, nil
}
diff --git a/vendor/github.com/kamstrup/intmap/.gitignore b/vendor/github.com/kamstrup/intmap/.gitignore
new file mode 100644
index 0000000000000..1377554ebea6f
--- /dev/null
+++ b/vendor/github.com/kamstrup/intmap/.gitignore
@@ -0,0 +1 @@
+*.swp
diff --git a/vendor/github.com/kamstrup/intmap/LICENSE b/vendor/github.com/kamstrup/intmap/LICENSE
new file mode 100644
index 0000000000000..1eac633b0cd30
--- /dev/null
+++ b/vendor/github.com/kamstrup/intmap/LICENSE
@@ -0,0 +1,23 @@
+Copyright (c) 2016, Brent Pedersen - Bioinformatics
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/kamstrup/intmap/README.md b/vendor/github.com/kamstrup/intmap/README.md
new file mode 100644
index 0000000000000..e1a1e7003aff8
--- /dev/null
+++ b/vendor/github.com/kamstrup/intmap/README.md
@@ -0,0 +1,52 @@
+Fast hashmap with integer keys for Golang
+
+[](https://godoc.org/github.com/kamstrup/intmap)
+[](https://goreportcard.com/report/github.com/kamstrup/intmap)
+
+# intmap
+
+ import "github.com/kamstrup/intmap"
+
+Package intmap is a fast hashmap implementation for Golang, specialized for maps with integer type keys.
+The values can be of any type.
+
+It is a full port of https://github.com/brentp/intintmap to use type parameters (aka generics).
+
+It interleaves keys and values in the same underlying array to improve locality.
+This is also known as open addressing with linear probing.
+
+It is up to 3X faster than the builtin map:
+```
+name time/op
+Map64Fill-8 201ms ± 5%
+IntIntMapFill-8 207ms ±31%
+StdMapFill-8 371ms ±11%
+Map64Get10PercentHitRate-8 148µs ±40%
+IntIntMapGet10PercentHitRate-8 171µs ±50%
+StdMapGet10PercentHitRate-8 171µs ±33%
+Map64Get100PercentHitRate-8 4.50ms ± 5%
+IntIntMapGet100PercentHitRate-8 4.82ms ± 6%
+StdMapGet100PercentHitRate-8 15.5ms ±32%
+```
+
+## Usage
+
+```go
+m := intmap.New[int64,int64](32768)
+m.Put(int64(1234), int64(-222))
+m.Put(int64(123), int64(33))
+
+v, ok := m.Get(int64(222))
+v, ok := m.Get(int64(333))
+
+m.Del(int64(222))
+m.Del(int64(333))
+
+fmt.Println(m.Len())
+
+m.ForEach(func(k int64, v int64) {
+ fmt.Printf("key: %d, value: %d\n", k, v)
+})
+
+m.Clear() // all gone, but buffers kept
+```
diff --git a/vendor/github.com/kamstrup/intmap/map64.go b/vendor/github.com/kamstrup/intmap/map64.go
new file mode 100644
index 0000000000000..20926b95fbc8b
--- /dev/null
+++ b/vendor/github.com/kamstrup/intmap/map64.go
@@ -0,0 +1,442 @@
+// Package intmap contains a fast hashmap implementation for maps with keys of any integer type
+package intmap
+
+import (
+ "iter"
+ "math"
+)
+
+// IntKey is a type constraint for values that can be used as keys in Map
+type IntKey interface {
+ ~int | ~uint | ~int64 | ~uint64 | ~int32 | ~uint32 | ~int16 | ~uint16 | ~int8 | ~uint8 | ~uintptr
+}
+
+type pair[K IntKey, V any] struct {
+ K K
+ V V
+}
+
+const fillFactor64 = 0.7
+
+func phiMix64(x int) int {
+ h := int64(x) * int64(0x9E3779B9)
+ return int(h ^ (h >> 16))
+}
+
+// Map is a hashmap where the keys are some any integer type.
+// It is valid to call methods that read a nil map, similar to a standard Go map.
+// Methods valid on a nil map are Has, Get, Len, and ForEach.
+type Map[K IntKey, V any] struct {
+ data []pair[K, V] // key-value pairs
+ size int
+
+ zeroVal V // value of 'zero' key
+ hasZeroKey bool // do we have 'zero' key in the map?
+}
+
+// New creates a new map with keys being any integer subtype.
+// The map can store up to the given capacity before reallocation and rehashing occurs.
+func New[K IntKey, V any](capacity int) *Map[K, V] {
+ return &Map[K, V]{
+ data: make([]pair[K, V], arraySize(capacity, fillFactor64)),
+ }
+}
+
+// Has checks if the given key exists in the map.
+// Calling this method on a nil map will return false.
+func (m *Map[K, V]) Has(key K) bool {
+ if m == nil {
+ return false
+ }
+
+ if key == K(0) {
+ return m.hasZeroKey
+ }
+
+ idx := m.startIndex(key)
+ p := m.data[idx]
+
+ if p.K == K(0) { // end of chain already
+ return false
+ }
+ if p.K == key { // we check zero prior to this call
+ return true
+ }
+
+ // hash collision, seek next hash match, bailing on first empty
+ for {
+ idx = m.nextIndex(idx)
+ p = m.data[idx]
+ if p.K == K(0) {
+ return false
+ }
+ if p.K == key {
+ return true
+ }
+ }
+}
+
+// Get returns the value if the key is found.
+// If you just need to check for existence it is easier to use Has.
+// Calling this method on a nil map will return the zero value for V and false.
+func (m *Map[K, V]) Get(key K) (V, bool) {
+ if m == nil {
+ var zero V
+ return zero, false
+ }
+
+ if key == K(0) {
+ if m.hasZeroKey {
+ return m.zeroVal, true
+ }
+ var zero V
+ return zero, false
+ }
+
+ idx := m.startIndex(key)
+ p := m.data[idx]
+
+ if p.K == K(0) { // end of chain already
+ var zero V
+ return zero, false
+ }
+ if p.K == key { // we check zero prior to this call
+ return p.V, true
+ }
+
+ // hash collision, seek next hash match, bailing on first empty
+ for {
+ idx = m.nextIndex(idx)
+ p = m.data[idx]
+ if p.K == K(0) {
+ var zero V
+ return zero, false
+ }
+ if p.K == key {
+ return p.V, true
+ }
+ }
+}
+
+// Put adds or updates key with value val.
+func (m *Map[K, V]) Put(key K, val V) {
+ if key == K(0) {
+ if !m.hasZeroKey {
+ m.size++
+ }
+ m.zeroVal = val
+ m.hasZeroKey = true
+ return
+ }
+
+ idx := m.startIndex(key)
+ p := &m.data[idx]
+
+ if p.K == K(0) { // end of chain already
+ p.K = key
+ p.V = val
+ if m.size >= m.sizeThreshold() {
+ m.rehash()
+ } else {
+ m.size++
+ }
+ return
+ } else if p.K == key { // overwrite existing value
+ p.V = val
+ return
+ }
+
+ // hash collision, seek next empty or key match
+ for {
+ idx = m.nextIndex(idx)
+ p = &m.data[idx]
+
+ if p.K == K(0) {
+ p.K = key
+ p.V = val
+ if m.size >= m.sizeThreshold() {
+ m.rehash()
+ } else {
+ m.size++
+ }
+ return
+ } else if p.K == key {
+ p.V = val
+ return
+ }
+ }
+}
+
+// PutIfNotExists adds the key-value pair only if the key does not already exist
+// in the map, and returns the current value associated with the key and a boolean
+// indicating whether the value was newly added or not.
+func (m *Map[K, V]) PutIfNotExists(key K, val V) (V, bool) {
+ if key == K(0) {
+ if m.hasZeroKey {
+ return m.zeroVal, false
+ }
+ m.zeroVal = val
+ m.hasZeroKey = true
+ m.size++
+ return val, true
+ }
+
+ idx := m.startIndex(key)
+ p := &m.data[idx]
+
+ if p.K == K(0) { // end of chain already
+ p.K = key
+ p.V = val
+ m.size++
+ if m.size >= m.sizeThreshold() {
+ m.rehash()
+ }
+ return val, true
+ } else if p.K == key {
+ return p.V, false
+ }
+
+ // hash collision, seek next hash match, bailing on first empty
+ for {
+ idx = m.nextIndex(idx)
+ p = &m.data[idx]
+
+ if p.K == K(0) {
+ p.K = key
+ p.V = val
+ m.size++
+ if m.size >= m.sizeThreshold() {
+ m.rehash()
+ }
+ return val, true
+ } else if p.K == key {
+ return p.V, false
+ }
+ }
+}
+
+// ForEach iterates through key-value pairs in the map while the function f returns true.
+// This method returns immediately if invoked on a nil map.
+//
+// The iteration order of a Map is not defined, so please avoid relying on it.
+func (m *Map[K, V]) ForEach(f func(K, V) bool) {
+ if m == nil {
+ return
+ }
+
+ if m.hasZeroKey && !f(K(0), m.zeroVal) {
+ return
+ }
+ forEach64(m.data, f)
+}
+
+// All returns an iterator over key-value pairs from m.
+// The iterator returns immediately if invoked on a nil map.
+//
+// The iteration order of a Map is not defined, so please avoid relying on it.
+func (m *Map[K, V]) All() iter.Seq2[K, V] {
+ return m.ForEach
+}
+
+// Keys returns an iterator over keys in m.
+// The iterator returns immediately if invoked on a nil map.
+//
+// The iteration order of a Map is not defined, so please avoid relying on it.
+func (m *Map[K, V]) Keys() iter.Seq[K] {
+ return func(yield func(k K) bool) {
+ if m == nil {
+ return
+ }
+
+ if m.hasZeroKey && !yield(K(0)) {
+ return
+ }
+
+ for _, p := range m.data {
+ if p.K != K(0) && !yield(p.K) {
+ return
+ }
+ }
+ }
+}
+
+// Values returns an iterator over values in m.
+// The iterator returns immediately if invoked on a nil map.
+//
+// The iteration order of a Map is not defined, so please avoid relying on it.
+func (m *Map[K, V]) Values() iter.Seq[V] {
+ return func(yield func(v V) bool) {
+ if m == nil {
+ return
+ }
+
+ if m.hasZeroKey && !yield(m.zeroVal) {
+ return
+ }
+
+ for _, p := range m.data {
+ if p.K != K(0) && !yield(p.V) {
+ return
+ }
+ }
+ }
+}
+
+// Clear removes all items from the map, but keeps the internal buffers for reuse.
+func (m *Map[K, V]) Clear() {
+ var zero V
+ m.hasZeroKey = false
+ m.zeroVal = zero
+
+ // compiles down to runtime.memclr()
+ for i := range m.data {
+ m.data[i] = pair[K, V]{}
+ }
+
+ m.size = 0
+}
+
+func (m *Map[K, V]) rehash() {
+ oldData := m.data
+ m.data = make([]pair[K, V], 2*len(m.data))
+
+ // reset size
+ if m.hasZeroKey {
+ m.size = 1
+ } else {
+ m.size = 0
+ }
+
+ forEach64(oldData, func(k K, v V) bool {
+ m.Put(k, v)
+ return true
+ })
+}
+
+// Len returns the number of elements in the map.
+// The length of a nil map is defined to be zero.
+func (m *Map[K, V]) Len() int {
+ if m == nil {
+ return 0
+ }
+
+ return m.size
+}
+
+func (m *Map[K, V]) sizeThreshold() int {
+ return int(math.Floor(float64(len(m.data)) * fillFactor64))
+}
+
+func (m *Map[K, V]) startIndex(key K) int {
+ return phiMix64(int(key)) & (len(m.data) - 1)
+}
+
+func (m *Map[K, V]) nextIndex(idx int) int {
+ return (idx + 1) & (len(m.data) - 1)
+}
+
+func forEach64[K IntKey, V any](pairs []pair[K, V], f func(k K, v V) bool) {
+ for _, p := range pairs {
+ if p.K != K(0) && !f(p.K, p.V) {
+ return
+ }
+ }
+}
+
+// Del deletes a key and its value, returning true iff the key was found
+func (m *Map[K, V]) Del(key K) bool {
+ if key == K(0) {
+ if m.hasZeroKey {
+ m.hasZeroKey = false
+ m.size--
+ return true
+ }
+ return false
+ }
+
+ idx := m.startIndex(key)
+ p := m.data[idx]
+
+ if p.K == key {
+		// any keys that were pushed back need to be shifted back into the empty slot
+ // to avoid breaking the chain
+ m.shiftKeys(idx)
+ m.size--
+ return true
+ } else if p.K == K(0) { // end of chain already
+ return false
+ }
+
+ for {
+ idx = m.nextIndex(idx)
+ p = m.data[idx]
+
+ if p.K == key {
+			// any keys that were pushed back need to be shifted back into the empty slot
+ // to avoid breaking the chain
+ m.shiftKeys(idx)
+ m.size--
+ return true
+ } else if p.K == K(0) {
+ return false
+ }
+
+ }
+}
+
+func (m *Map[K, V]) shiftKeys(idx int) int {
+ // Shift entries with the same hash.
+ // We need to do this on deletion to ensure we don't have zeroes in the hash chain
+ for {
+ var p pair[K, V]
+ lastIdx := idx
+ idx = m.nextIndex(idx)
+ for {
+ p = m.data[idx]
+ if p.K == K(0) {
+ m.data[lastIdx] = pair[K, V]{}
+ return lastIdx
+ }
+
+ slot := m.startIndex(p.K)
+ if lastIdx <= idx {
+ if lastIdx >= slot || slot > idx {
+ break
+ }
+ } else {
+ if lastIdx >= slot && slot > idx {
+ break
+ }
+ }
+ idx = m.nextIndex(idx)
+ }
+ m.data[lastIdx] = p
+ }
+}
+
+func nextPowerOf2(x uint32) uint32 {
+ if x == math.MaxUint32 {
+ return x
+ }
+
+ if x == 0 {
+ return 1
+ }
+
+ x--
+ x |= x >> 1
+ x |= x >> 2
+ x |= x >> 4
+ x |= x >> 8
+ x |= x >> 16
+
+ return x + 1
+}
+
+func arraySize(exp int, fill float64) int {
+ s := nextPowerOf2(uint32(math.Ceil(float64(exp) / fill)))
+ if s < 2 {
+ s = 2
+ }
+ return int(s)
+}
diff --git a/vendor/github.com/kamstrup/intmap/set.go b/vendor/github.com/kamstrup/intmap/set.go
new file mode 100644
index 0000000000000..b81ce224b6036
--- /dev/null
+++ b/vendor/github.com/kamstrup/intmap/set.go
@@ -0,0 +1,59 @@
+package intmap
+
+import "iter"
+
+// Set is a specialization of Map modelling a set of integers.
+// Like Map, methods that read from the set are valid on the nil Set.
+// This include Has, Len, and ForEach.
+type Set[K IntKey] Map[K, struct{}]
+
+// NewSet creates a new Set with a given initial capacity.
+func NewSet[K IntKey](capacity int) *Set[K] {
+ return (*Set[K])(New[K, struct{}](capacity))
+}
+
+// Add an element to the set. Returns true if the element was not already present.
+func (s *Set[K]) Add(k K) bool {
+ _, found := (*Map[K, struct{}])(s).PutIfNotExists(k, struct{}{})
+ return found
+}
+
+// Del deletes a key, returning true iff the key was found
+func (s *Set[K]) Del(k K) bool {
+ return (*Map[K, struct{}])(s).Del(k)
+}
+
+// Clear removes all items from the Set, but keeps the internal buffers for reuse.
+func (s *Set[K]) Clear() {
+ (*Map[K, struct{}])(s).Clear()
+}
+
+// Has returns true if the key is in the set.
+// If the set is nil this method always returns false.
+func (s *Set[K]) Has(k K) bool {
+ return (*Map[K, struct{}])(s).Has(k)
+}
+
+// Len returns the number of elements in the set.
+// If the set is nil this method returns 0.
+func (s *Set[K]) Len() int {
+ return (*Map[K, struct{}])(s).Len()
+}
+
+// ForEach iterates over the elements in the set while the visit function returns true.
+// This method returns immediately if the set is nil.
+//
+// The iteration order of a Set is not defined, so please avoid relying on it.
+func (s *Set[K]) ForEach(visit func(k K) bool) {
+ (*Map[K, struct{}])(s).ForEach(func(k K, _ struct{}) bool {
+ return visit(k)
+ })
+}
+
+// All returns an iterator over keys from the set.
+// The iterator returns immediately if the set is nil.
+//
+// The iteration order of a Set is not defined, so please avoid relying on it.
+func (s *Set[K]) All() iter.Seq[K] {
+ return s.ForEach
+}
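
For reference, a short usage sketch of the vendored `intmap.Set` API defined above, which is what the hyperloglog `set` wrapper delegates to (illustrative only; assumes `intmap` and `fmt` are imported):

```go
s := intmap.NewSet[uint32](16)
s.Add(42)
fmt.Println(s.Has(42), s.Len()) // true 1
s.ForEach(func(k uint32) bool {
	fmt.Println("key:", k)
	return true // returning false would stop the iteration
})
s.Del(42)
```
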
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 83013676f3460..553a386090a40 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -487,8 +487,8 @@ github.com/aws/smithy-go/time
github.com/aws/smithy-go/tracing
github.com/aws/smithy-go/transport/http
github.com/aws/smithy-go/transport/http/internal/io
-# github.com/axiomhq/hyperloglog v0.2.0
-## explicit; go 1.21
+# github.com/axiomhq/hyperloglog v0.2.2
+## explicit; go 1.23
github.com/axiomhq/hyperloglog
# github.com/baidubce/bce-sdk-go v0.9.208
## explicit; go 1.11
@@ -1194,6 +1194,9 @@ github.com/json-iterator/go
# github.com/julienschmidt/httprouter v1.3.0
## explicit; go 1.7
github.com/julienschmidt/httprouter
+# github.com/kamstrup/intmap v0.5.1
+## explicit; go 1.23
+github.com/kamstrup/intmap
# github.com/klauspost/compress v1.17.11
## explicit; go 1.21
github.com/klauspost/compress
|
fix
|
update module github.com/axiomhq/hyperloglog to v0.2.2 (#15524)
|
b3a2cec7c010bf1f5341a515e64c287ab3297e0e
|
2022-03-24 18:34:46
|
Sashank Agarwal
|
operator: Fix immediate reset of degraded condition (#5691)
| false
|
diff --git a/operator/CHANGELOG.md b/operator/CHANGELOG.md
index 0fd911525f622..ba0661bf9b32e 100644
--- a/operator/CHANGELOG.md
+++ b/operator/CHANGELOG.md
@@ -1,5 +1,6 @@
## Main
+- [5691](https://github.com/grafana/loki/pull/5691) **sasagarw**: Fix immediate reset of degraded condition
- [5704](https://github.com/grafana/loki/pull/5704) **xperimental**: Update operator-sdk to 1.18.1
- [5693](https://github.com/grafana/loki/pull/5693) **periklis**: Replace frontend_worker parallelism with match_max_concurrent
- [5699](https://github.com/grafana/loki/pull/5699) **Red-GV**: Configure boltdb_shipper and schema to use Azure, GCS, and Swift storage
diff --git a/operator/controllers/lokistack_controller.go b/operator/controllers/lokistack_controller.go
index 2ce1191acc7fd..da66c6e26f0c4 100644
--- a/operator/controllers/lokistack_controller.go
+++ b/operator/controllers/lokistack_controller.go
@@ -2,6 +2,7 @@ package controllers
import (
"context"
+ "errors"
"time"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
@@ -106,6 +107,23 @@ func (r *LokiStackReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
}
err = handlers.CreateOrUpdateLokiStack(ctx, r.Log, req, r.Client, r.Scheme, r.Flags)
+
+ var degraded *status.DegradedError
+	if errors.As(err, &degraded) {
+ err = status.SetDegradedCondition(ctx, r.Client, req, degraded.Message, degraded.Reason)
+ if err != nil {
+ return ctrl.Result{
+ Requeue: true,
+ RequeueAfter: time.Second,
+ }, err
+ }
+
+ return ctrl.Result{
+ Requeue: degraded.Requeue,
+ RequeueAfter: time.Second,
+ }, nil
+ }
+
if err != nil {
return ctrl.Result{
Requeue: true,
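
The pattern introduced here is a typed sentinel error: handlers return a `*status.DegradedError` instead of setting the condition themselves, and the reconciler unwraps it with `errors.As`, sets the degraded condition once, and requeues according to the `Requeue` field. A hedged sketch of the error type implied by its call sites (the real definition in `operator/internal/status` may differ):

```go
// DegradedError carries everything the reconciler needs to publish a degraded
// condition. Field names are taken from the diff; the Reason type and the
// Error message format are assumptions.
type DegradedError struct {
	Message string
	Reason  lokiv1beta1.LokiStackConditionReason // assumed type name
	Requeue bool
}

func (e *DegradedError) Error() string {
	return "cluster degraded: " + e.Message
}
```
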
diff --git a/operator/internal/handlers/internal/gateway/base_domain.go b/operator/internal/handlers/internal/gateway/base_domain.go
index b2735dd1a5ba2..f5c84fc6871dd 100644
--- a/operator/internal/handlers/internal/gateway/base_domain.go
+++ b/operator/internal/handlers/internal/gateway/base_domain.go
@@ -23,15 +23,11 @@ func GetOpenShiftBaseDomain(ctx context.Context, k k8s.Client, req ctrl.Request)
if err := k.Get(ctx, key, &cluster); err != nil {
if apierrors.IsNotFound(err) {
- statusErr := status.SetDegradedCondition(ctx, k, req,
- "Missing cluster DNS configuration to read base domain",
- lokiv1beta1.ReasonMissingGatewayOpenShiftBaseDomain,
- )
- if statusErr != nil {
- return "", statusErr
+ return "", &status.DegradedError{
+ Message: "Missing cluster DNS configuration to read base domain",
+ Reason: lokiv1beta1.ReasonMissingGatewayOpenShiftBaseDomain,
+ Requeue: true,
}
-
- return "", kverrors.Wrap(err, "Missing cluster DNS configuration to read base domain")
}
return "", kverrors.Wrap(err, "failed to lookup lokistack gateway base domain",
"name", key)
diff --git a/operator/internal/handlers/internal/gateway/tenant_secrets.go b/operator/internal/handlers/internal/gateway/tenant_secrets.go
index 0ca3c9bbbb8ae..45273a7ec55f8 100644
--- a/operator/internal/handlers/internal/gateway/tenant_secrets.go
+++ b/operator/internal/handlers/internal/gateway/tenant_secrets.go
@@ -37,15 +37,11 @@ func GetTenantSecrets(
key := client.ObjectKey{Name: tenant.OIDC.Secret.Name, Namespace: req.Namespace}
if err := k.Get(ctx, key, &gatewaySecret); err != nil {
if apierrors.IsNotFound(err) {
- statusErr := status.SetDegradedCondition(ctx, k, req,
- fmt.Sprintf("Missing secrets for tenant %s", tenant.TenantName),
- lokiv1beta1.ReasonMissingGatewayTenantSecret,
- )
- if statusErr != nil {
- return nil, statusErr
+ return nil, &status.DegradedError{
+ Message: fmt.Sprintf("Missing secrets for tenant %s", tenant.TenantName),
+ Reason: lokiv1beta1.ReasonMissingGatewayTenantSecret,
+ Requeue: true,
}
-
- return nil, kverrors.Wrap(err, "Missing gateway secrets")
}
return nil, kverrors.Wrap(err, "failed to lookup lokistack gateway tenant secret",
"name", key)
@@ -54,15 +50,11 @@ func GetTenantSecrets(
var ts *manifests.TenantSecrets
ts, err := secrets.ExtractGatewaySecret(&gatewaySecret, tenant.TenantName)
if err != nil {
- statusErr := status.SetDegradedCondition(ctx, k, req,
- "Invalid gateway tenant secret contents",
- lokiv1beta1.ReasonInvalidGatewayTenantSecret,
- )
- if statusErr != nil {
- return nil, statusErr
+ return nil, &status.DegradedError{
+ Message: "Invalid gateway tenant secret contents",
+ Reason: lokiv1beta1.ReasonInvalidGatewayTenantSecret,
+ Requeue: true,
}
-
- return nil, kverrors.Wrap(err, "Invalid gateway tenant secret")
}
tenantSecrets = append(tenantSecrets, ts)
}
diff --git a/operator/internal/handlers/lokistack_create_or_update.go b/operator/internal/handlers/lokistack_create_or_update.go
index 09f1ba30c562a..b5eb20f447022 100644
--- a/operator/internal/handlers/lokistack_create_or_update.go
+++ b/operator/internal/handlers/lokistack_create_or_update.go
@@ -25,7 +25,14 @@ import (
)
// CreateOrUpdateLokiStack handles LokiStack create and update events.
-func CreateOrUpdateLokiStack(ctx context.Context, log logr.Logger, req ctrl.Request, k k8s.Client, s *runtime.Scheme, flags manifests.FeatureFlags) error {
+func CreateOrUpdateLokiStack(
+ ctx context.Context,
+ log logr.Logger,
+ req ctrl.Request,
+ k k8s.Client,
+ s *runtime.Scheme,
+ flags manifests.FeatureFlags,
+) error {
ll := log.WithValues("lokistack", req.NamespacedName, "event", "createOrUpdate")
var stack lokiv1beta1.LokiStack
@@ -52,20 +59,22 @@ func CreateOrUpdateLokiStack(ctx context.Context, log logr.Logger, req ctrl.Requ
key := client.ObjectKey{Name: stack.Spec.Storage.Secret.Name, Namespace: stack.Namespace}
if err := k.Get(ctx, key, &storageSecret); err != nil {
if apierrors.IsNotFound(err) {
- return status.SetDegradedCondition(ctx, k, req,
- "Missing object storage secret",
- lokiv1beta1.ReasonMissingObjectStorageSecret,
- )
+ return &status.DegradedError{
+ Message: "Missing object storage secret",
+ Reason: lokiv1beta1.ReasonMissingObjectStorageSecret,
+ Requeue: false,
+ }
}
return kverrors.Wrap(err, "failed to lookup lokistack storage secret", "name", key)
}
storage, err := secrets.ExtractStorageSecret(&storageSecret, stack.Spec.Storage.Secret.Type)
if err != nil {
- return status.SetDegradedCondition(ctx, k, req,
- "Invalid object storage secret contents",
- lokiv1beta1.ReasonInvalidObjectStorageSecret,
- )
+ return &status.DegradedError{
+ Message: "Invalid object storage secret contents",
+ Reason: lokiv1beta1.ReasonInvalidObjectStorageSecret,
+ Requeue: false,
+ }
}
var (
@@ -74,16 +83,18 @@ func CreateOrUpdateLokiStack(ctx context.Context, log logr.Logger, req ctrl.Requ
tenantConfigMap map[string]openshift.TenantData
)
if flags.EnableGateway && stack.Spec.Tenants == nil {
- return status.SetDegradedCondition(ctx, k, req,
- "Invalid tenants configuration - TenantsSpec cannot be nil when gateway flag is enabled",
- lokiv1beta1.ReasonInvalidTenantsConfiguration,
- )
+ return &status.DegradedError{
+ Message: "Invalid tenants configuration - TenantsSpec cannot be nil when gateway flag is enabled",
+ Reason: lokiv1beta1.ReasonInvalidTenantsConfiguration,
+ Requeue: false,
+ }
} else if flags.EnableGateway && stack.Spec.Tenants != nil {
if err = gateway.ValidateModes(stack); err != nil {
- return status.SetDegradedCondition(ctx, k, req,
- fmt.Sprintf("Invalid tenants configuration: %s", err),
- lokiv1beta1.ReasonInvalidTenantsConfiguration,
- )
+ return &status.DegradedError{
+ Message: fmt.Sprintf("Invalid tenants configuration: %s", err),
+ Reason: lokiv1beta1.ReasonInvalidTenantsConfiguration,
+ Requeue: false,
+ }
}
if stack.Spec.Tenants.Mode != lokiv1beta1.OpenshiftLogging {
@@ -96,7 +107,7 @@ func CreateOrUpdateLokiStack(ctx context.Context, log logr.Logger, req ctrl.Requ
if stack.Spec.Tenants.Mode == lokiv1beta1.OpenshiftLogging {
baseDomain, err = gateway.GetOpenShiftBaseDomain(ctx, k, req)
if err != nil {
- return nil
+ return err
}
// extract the existing tenant's id, cookieSecret if exists, otherwise create new.
diff --git a/operator/internal/handlers/lokistack_create_or_update_test.go b/operator/internal/handlers/lokistack_create_or_update_test.go
index 37173824b81af..43ae8cee17884 100644
--- a/operator/internal/handlers/lokistack_create_or_update_test.go
+++ b/operator/internal/handlers/lokistack_create_or_update_test.go
@@ -12,6 +12,7 @@ import (
"github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
"github.com/grafana/loki/operator/internal/handlers"
"github.com/grafana/loki/operator/internal/manifests"
+ "github.com/grafana/loki/operator/internal/status"
"github.com/ViaQ/logerr/log"
routev1 "github.com/openshift/api/route/v1"
@@ -620,6 +621,12 @@ func TestCreateOrUpdateLokiStack_WhenMissingSecret_SetDegraded(t *testing.T) {
},
}
+ degradedErr := &status.DegradedError{
+ Message: "Missing object storage secret",
+ Reason: lokiv1beta1.ReasonMissingObjectStorageSecret,
+ Requeue: false,
+ }
+
stack := &lokiv1beta1.LokiStack{
TypeMeta: metav1.TypeMeta{
Kind: "LokiStack",
@@ -654,12 +661,9 @@ func TestCreateOrUpdateLokiStack_WhenMissingSecret_SetDegraded(t *testing.T) {
err := handlers.CreateOrUpdateLokiStack(context.TODO(), logger, r, k, scheme, flags)
- // make sure error is returned to re-trigger reconciliation
- require.NoError(t, err)
-
- // make sure status and status-update calls
- require.NotZero(t, k.StatusCallCount())
- require.NotZero(t, sw.UpdateCallCount())
+ // make sure error is returned
+ require.Error(t, err)
+ require.Equal(t, degradedErr, err)
}
func TestCreateOrUpdateLokiStack_WhenInvalidSecret_SetDegraded(t *testing.T) {
@@ -672,6 +676,12 @@ func TestCreateOrUpdateLokiStack_WhenInvalidSecret_SetDegraded(t *testing.T) {
},
}
+ degradedErr := &status.DegradedError{
+ Message: "Invalid object storage secret contents",
+ Reason: lokiv1beta1.ReasonInvalidObjectStorageSecret,
+ Requeue: false,
+ }
+
stack := &lokiv1beta1.LokiStack{
TypeMeta: metav1.TypeMeta{
Kind: "LokiStack",
@@ -710,12 +720,9 @@ func TestCreateOrUpdateLokiStack_WhenInvalidSecret_SetDegraded(t *testing.T) {
err := handlers.CreateOrUpdateLokiStack(context.TODO(), logger, r, k, scheme, flags)
- // make sure error is returned to re-trigger reconciliation
- require.NoError(t, err)
-
- // make sure status and status-update calls
- require.NotZero(t, k.StatusCallCount())
- require.NotZero(t, sw.UpdateCallCount())
+ // make sure error is returned
+ require.Error(t, err)
+ require.Equal(t, degradedErr, err)
}
func TestCreateOrUpdateLokiStack_WhenInvalidTenantsConfiguration_SetDegraded(t *testing.T) {
@@ -728,6 +735,12 @@ func TestCreateOrUpdateLokiStack_WhenInvalidTenantsConfiguration_SetDegraded(t *
},
}
+ degradedErr := &status.DegradedError{
+ Message: "Invalid tenants configuration: mandatory configuration - missing OPA Url",
+ Reason: lokiv1beta1.ReasonInvalidTenantsConfiguration,
+ Requeue: false,
+ }
+
ff := manifests.FeatureFlags{
EnableGateway: true,
}
@@ -785,12 +798,9 @@ func TestCreateOrUpdateLokiStack_WhenInvalidTenantsConfiguration_SetDegraded(t *
err := handlers.CreateOrUpdateLokiStack(context.TODO(), logger, r, k, scheme, ff)
- // make sure error is returned to re-trigger reconciliation
- require.NoError(t, err)
-
- // make sure status and status-update calls
- require.NotZero(t, k.StatusCallCount())
- require.NotZero(t, sw.UpdateCallCount())
+ // make sure error is returned
+ require.Error(t, err)
+ require.Equal(t, degradedErr, err)
}
func TestCreateOrUpdateLokiStack_WhenMissingGatewaySecret_SetDegraded(t *testing.T) {
@@ -803,6 +813,12 @@ func TestCreateOrUpdateLokiStack_WhenMissingGatewaySecret_SetDegraded(t *testing
},
}
+ degradedErr := &status.DegradedError{
+ Message: "Missing secrets for tenant test",
+ Reason: lokiv1beta1.ReasonMissingGatewayTenantSecret,
+ Requeue: true,
+ }
+
ff := manifests.FeatureFlags{
EnableGateway: true,
}
@@ -867,10 +883,7 @@ func TestCreateOrUpdateLokiStack_WhenMissingGatewaySecret_SetDegraded(t *testing
// make sure error is returned to re-trigger reconciliation
require.Error(t, err)
-
- // make sure status and status-update calls
- require.NotZero(t, k.StatusCallCount())
- require.NotZero(t, sw.UpdateCallCount())
+ require.Equal(t, degradedErr, err)
}
func TestCreateOrUpdateLokiStack_WhenInvalidGatewaySecret_SetDegraded(t *testing.T) {
@@ -883,6 +896,12 @@ func TestCreateOrUpdateLokiStack_WhenInvalidGatewaySecret_SetDegraded(t *testing
},
}
+ degradedErr := &status.DegradedError{
+ Message: "Invalid gateway tenant secret contents",
+ Reason: lokiv1beta1.ReasonInvalidGatewayTenantSecret,
+ Requeue: true,
+ }
+
ff := manifests.FeatureFlags{
EnableGateway: true,
}
@@ -951,10 +970,7 @@ func TestCreateOrUpdateLokiStack_WhenInvalidGatewaySecret_SetDegraded(t *testing
// make sure error is returned to re-trigger reconciliation
require.Error(t, err)
-
- // make sure status and status-update calls
- require.NotZero(t, k.StatusCallCount())
- require.NotZero(t, sw.UpdateCallCount())
+ require.Equal(t, degradedErr, err)
}
func TestCreateOrUpdateLokiStack_MissingTenantsSpec_SetDegraded(t *testing.T) {
@@ -967,6 +983,12 @@ func TestCreateOrUpdateLokiStack_MissingTenantsSpec_SetDegraded(t *testing.T) {
},
}
+ degradedErr := &status.DegradedError{
+ Message: "Invalid tenants configuration - TenantsSpec cannot be nil when gateway flag is enabled",
+ Reason: lokiv1beta1.ReasonInvalidTenantsConfiguration,
+ Requeue: false,
+ }
+
ff := manifests.FeatureFlags{
EnableGateway: true,
}
@@ -1011,10 +1033,7 @@ func TestCreateOrUpdateLokiStack_MissingTenantsSpec_SetDegraded(t *testing.T) {
err := handlers.CreateOrUpdateLokiStack(context.TODO(), logger, r, k, scheme, ff)
- // make sure no error is returned
- require.NoError(t, err)
-
- // make sure status and status-update calls
- require.NotZero(t, k.StatusCallCount())
- require.NotZero(t, sw.UpdateCallCount())
+ // make sure error is returned
+ require.Error(t, err)
+ require.Equal(t, degradedErr, err)
}
diff --git a/operator/internal/status/lokistack.go b/operator/internal/status/lokistack.go
index a5bf802ba5758..b1e8bf46a2b48 100644
--- a/operator/internal/status/lokistack.go
+++ b/operator/internal/status/lokistack.go
@@ -2,6 +2,7 @@ package status
import (
"context"
+ "fmt"
"github.com/ViaQ/logerr/kverrors"
lokiv1beta1 "github.com/grafana/loki/operator/api/v1beta1"
@@ -13,6 +14,17 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
)
+// DegradedError contains information about why the managed LokiStack has an invalid configuration.
+type DegradedError struct {
+ Message string
+ Reason lokiv1beta1.LokiStackConditionReason
+ Requeue bool
+}
+
+func (e *DegradedError) Error() string {
+ return fmt.Sprintf("cluster degraded: %s", e.Message)
+}
+
// SetReadyCondition updates or appends the condition Ready to the lokistack status conditions.
// In addition it resets all other Status conditions to false.
func SetReadyCondition(ctx context.Context, k k8s.Client, req ctrl.Request) error {
diff --git a/operator/internal/status/status.go b/operator/internal/status/status.go
index d0db7731505a7..52bc2d8910f90 100644
--- a/operator/internal/status/status.go
+++ b/operator/internal/status/status.go
@@ -35,13 +35,17 @@ func Refresh(ctx context.Context, k k8s.Client, req ctrl.Request) error {
len(cs.Distributor[corev1.PodFailed]) +
len(cs.Ingester[corev1.PodFailed]) +
len(cs.Querier[corev1.PodFailed]) +
- len(cs.QueryFrontend[corev1.PodFailed])
+ len(cs.QueryFrontend[corev1.PodFailed]) +
+ len(cs.Gateway[corev1.PodFailed]) +
+ len(cs.IndexGateway[corev1.PodFailed])
unknown := len(cs.Compactor[corev1.PodUnknown]) +
len(cs.Distributor[corev1.PodUnknown]) +
len(cs.Ingester[corev1.PodUnknown]) +
len(cs.Querier[corev1.PodUnknown]) +
- len(cs.QueryFrontend[corev1.PodUnknown])
+ len(cs.QueryFrontend[corev1.PodUnknown]) +
+ len(cs.Gateway[corev1.PodUnknown]) +
+ len(cs.IndexGateway[corev1.PodUnknown])
if failed != 0 || unknown != 0 {
return SetFailedCondition(ctx, k, req)
@@ -52,7 +56,9 @@ func Refresh(ctx context.Context, k k8s.Client, req ctrl.Request) error {
len(cs.Distributor[corev1.PodPending]) +
len(cs.Ingester[corev1.PodPending]) +
len(cs.Querier[corev1.PodPending]) +
- len(cs.QueryFrontend[corev1.PodPending])
+ len(cs.QueryFrontend[corev1.PodPending]) +
+ len(cs.Gateway[corev1.PodPending]) +
+ len(cs.IndexGateway[corev1.PodPending])
if pending != 0 {
return SetPendingCondition(ctx, k, req)
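
A minimal Go sketch, not taken from the commit above, of how a caller can consume a sentinel error type shaped like the DegradedError introduced in this diff: unwrap it with errors.As, surface Message and Reason as a status condition, and let Requeue decide whether to retry. The handleReconcileError helper and the plain string Reason are illustrative assumptions; the operator's real reconciler and status code are not part of this diff.

package main

import (
	"errors"
	"fmt"
)

// DegradedError mirrors the shape of the type added above; Reason is
// simplified to a plain string instead of the operator's condition type.
type DegradedError struct {
	Message string
	Reason  string
	Requeue bool
}

func (e *DegradedError) Error() string {
	return fmt.Sprintf("cluster degraded: %s", e.Message)
}

// handleReconcileError sketches the consuming pattern: unwrap the degraded
// error, report its Message/Reason, and requeue only when the error asks for it.
func handleReconcileError(err error) bool {
	var degraded *DegradedError
	if errors.As(err, °raded) {
		fmt.Printf("would set Degraded condition: reason=%s message=%q\n", degraded.Reason, degraded.Message)
		return degraded.Requeue
	}
	// Any other non-nil error is treated as transient and retried.
	return err != nil
}

func main() {
	err := &DegradedError{
		Message: "Missing object storage secret",
		Reason:  "MissingObjectStorageSecret",
		Requeue: false,
	}
	fmt.Println("requeue:", handleReconcileError(err))
}
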
|
operator
|
Fix immediate reset of degraded condition (#5691)
|
d073b25e43663585cd261bd11094a91e8e18f71d
|
2023-01-25 10:35:46
|
Gerard Vanloo
|
operator: Use gRPC instead of http for compactor communications (#8265)
| false
|
diff --git a/operator/CHANGELOG.md b/operator/CHANGELOG.md
index b32c6972dea87..b3d3329c3ae53 100644
--- a/operator/CHANGELOG.md
+++ b/operator/CHANGELOG.md
@@ -1,5 +1,6 @@
## Main
+- [8265](https://github.com/grafana/loki/pull/8265) **Red-GV**: Use gRPC compactor service instead of http for retention
- [8038](https://github.com/grafana/loki/pull/8038) **aminesnow**: Add watch on the Alertmanager in OCP's user-workload-monitoring namespace
- [8173](https://github.com/grafana/loki/pull/8173) **periklis**: Remove custom webhook cert mounts for OLM-based deployment (OpenShift)
- [8001](https://github.com/grafana/loki/pull/8001) **aminesnow**: Add API validation to Alertmanager header auth config
diff --git a/operator/internal/manifests/config.go b/operator/internal/manifests/config.go
index 75e2925caa539..48cb82f21db67 100644
--- a/operator/internal/manifests/config.go
+++ b/operator/internal/manifests/config.go
@@ -137,9 +137,8 @@ func ConfigOptions(opt Options) config.Options {
Namespace: opt.Namespace,
Name: opt.Name,
Compactor: config.Address{
- FQDN: fqdn(NewCompactorHTTPService(opt).GetName(), opt.Namespace),
- Port: httpPort,
- Protocol: protocol,
+ FQDN: fqdn(NewCompactorGRPCService(opt).GetName(), opt.Namespace),
+ Port: grpcPort,
},
FrontendWorker: config.Address{
FQDN: fqdn(NewQueryFrontendGRPCService(opt).GetName(), opt.Namespace),
diff --git a/operator/internal/manifests/internal/config/build_test.go b/operator/internal/manifests/internal/config/build_test.go
index 9bee78247f0eb..3a6991e629723 100644
--- a/operator/internal/manifests/internal/config/build_test.go
+++ b/operator/internal/manifests/internal/config/build_test.go
@@ -28,7 +28,7 @@ common:
access_key_id: test
secret_access_key: test123
s3forcepathstyle: true
- compactor_address: http://loki-compactor-http-lokistack-dev.default.svc.cluster.local:3100
+ compactor_grpc_address: loki-compactor-grpc-lokistack-dev.default.svc.cluster.local:9095
compactor:
compaction_interval: 2h
working_directory: /tmp/loki/compactor
@@ -194,9 +194,8 @@ overrides:
Namespace: "test-ns",
Name: "test",
Compactor: Address{
- FQDN: "loki-compactor-http-lokistack-dev.default.svc.cluster.local",
- Port: 3100,
- Protocol: "http",
+ FQDN: "loki-compactor-grpc-lokistack-dev.default.svc.cluster.local",
+ Port: 9095,
},
FrontendWorker: Address{
FQDN: "loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local",
@@ -265,7 +264,7 @@ common:
access_key_id: test
secret_access_key: test123
s3forcepathstyle: true
- compactor_address: http://loki-compactor-http-lokistack-dev.default.svc.cluster.local:3100
+ compactor_grpc_address: loki-compactor-grpc-lokistack-dev.default.svc.cluster.local:9095
compactor:
compaction_interval: 2h
working_directory: /tmp/loki/compactor
@@ -462,9 +461,8 @@ overrides:
Namespace: "test-ns",
Name: "test",
Compactor: Address{
- FQDN: "loki-compactor-http-lokistack-dev.default.svc.cluster.local",
- Port: 3100,
- Protocol: "http",
+ FQDN: "loki-compactor-grpc-lokistack-dev.default.svc.cluster.local",
+ Port: 9095,
},
FrontendWorker: Address{
FQDN: "loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local",
@@ -537,9 +535,8 @@ func TestBuild_ConfigAndRuntimeConfig_CreateLokiConfigFailed(t *testing.T) {
Namespace: "test-ns",
Name: "test",
Compactor: Address{
- FQDN: "loki-compactor-http-lokistack-dev.default.svc.cluster.local",
- Port: 3100,
- Protocol: "http",
+ FQDN: "loki-compactor-grpc-lokistack-dev.default.svc.cluster.local",
+ Port: 9095,
},
FrontendWorker: Address{
FQDN: "loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local",
@@ -607,7 +604,7 @@ common:
access_key_id: test
secret_access_key: test123
s3forcepathstyle: true
- compactor_address: http://loki-compactor-http-lokistack-dev.default.svc.cluster.local:3100
+ compactor_grpc_address: loki-compactor-grpc-lokistack-dev.default.svc.cluster.local:9095
compactor:
compaction_interval: 2h
working_directory: /tmp/loki/compactor
@@ -827,9 +824,8 @@ overrides:
Namespace: "test-ns",
Name: "test",
Compactor: Address{
- FQDN: "loki-compactor-http-lokistack-dev.default.svc.cluster.local",
- Port: 3100,
- Protocol: "http",
+ FQDN: "loki-compactor-grpc-lokistack-dev.default.svc.cluster.local",
+ Port: 9095,
},
FrontendWorker: Address{
FQDN: "loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local",
@@ -945,7 +941,7 @@ common:
access_key_id: test
secret_access_key: test123
s3forcepathstyle: true
- compactor_address: http://loki-compactor-http-lokistack-dev.default.svc.cluster.local:3100
+ compactor_grpc_address: loki-compactor-grpc-lokistack-dev.default.svc.cluster.local:9095
compactor:
compaction_interval: 2h
working_directory: /tmp/loki/compactor
@@ -1165,9 +1161,8 @@ overrides:
Namespace: "test-ns",
Name: "test",
Compactor: Address{
- FQDN: "loki-compactor-http-lokistack-dev.default.svc.cluster.local",
- Port: 3100,
- Protocol: "http",
+ FQDN: "loki-compactor-grpc-lokistack-dev.default.svc.cluster.local",
+ Port: 9095,
},
FrontendWorker: Address{
FQDN: "loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local",
@@ -1284,7 +1279,7 @@ common:
access_key_id: test
secret_access_key: test123
s3forcepathstyle: true
- compactor_address: http://loki-compactor-http-lokistack-dev.default.svc.cluster.local:3100
+ compactor_grpc_address: loki-compactor-grpc-lokistack-dev.default.svc.cluster.local:9095
compactor:
compaction_interval: 2h
working_directory: /tmp/loki/compactor
@@ -1517,9 +1512,8 @@ overrides:
Namespace: "test-ns",
Name: "test",
Compactor: Address{
- FQDN: "loki-compactor-http-lokistack-dev.default.svc.cluster.local",
- Port: 3100,
- Protocol: "http",
+ FQDN: "loki-compactor-grpc-lokistack-dev.default.svc.cluster.local",
+ Port: 9095,
},
FrontendWorker: Address{
FQDN: "loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local",
@@ -1653,7 +1647,7 @@ common:
access_key_id: test
secret_access_key: test123
s3forcepathstyle: true
- compactor_address: http://loki-compactor-http-lokistack-dev.default.svc.cluster.local:3100
+ compactor_grpc_address: loki-compactor-grpc-lokistack-dev.default.svc.cluster.local:9095
compactor:
compaction_interval: 2h
working_directory: /tmp/loki/compactor
@@ -1910,9 +1904,8 @@ overrides:
Port: 9095,
},
Compactor: Address{
- FQDN: "loki-compactor-http-lokistack-dev.default.svc.cluster.local",
- Port: 3100,
- Protocol: "http",
+ FQDN: "loki-compactor-grpc-lokistack-dev.default.svc.cluster.local",
+ Port: 9095,
},
StorageDirectory: "/tmp/loki",
MaxConcurrent: MaxConcurrent{
@@ -1967,7 +1960,7 @@ common:
access_key_id: test
secret_access_key: test123
s3forcepathstyle: true
- compactor_address: http://loki-compactor-http-lokistack-dev.default.svc.cluster.local:3100
+ compactor_grpc_address: loki-compactor-grpc-lokistack-dev.default.svc.cluster.local:9095
compactor:
compaction_interval: 2h
working_directory: /tmp/loki/compactor
@@ -2213,9 +2206,8 @@ overrides:
Namespace: "test-ns",
Name: "test",
Compactor: Address{
- FQDN: "loki-compactor-http-lokistack-dev.default.svc.cluster.local",
- Port: 3100,
- Protocol: "http",
+ FQDN: "loki-compactor-grpc-lokistack-dev.default.svc.cluster.local",
+ Port: 9095,
},
FrontendWorker: Address{
FQDN: "loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local",
@@ -2366,7 +2358,7 @@ common:
access_key_id: test
secret_access_key: test123
s3forcepathstyle: true
- compactor_address: http://loki-compactor-http-lokistack-dev.default.svc.cluster.local:3100
+ compactor_grpc_address: loki-compactor-grpc-lokistack-dev.default.svc.cluster.local:9095
compactor:
compaction_interval: 2h
working_directory: /tmp/loki/compactor
@@ -2621,9 +2613,8 @@ overrides:
Namespace: "test-ns",
Name: "test",
Compactor: Address{
- FQDN: "loki-compactor-http-lokistack-dev.default.svc.cluster.local",
- Port: 3100,
- Protocol: "http",
+ FQDN: "loki-compactor-grpc-lokistack-dev.default.svc.cluster.local",
+ Port: 9095,
},
FrontendWorker: Address{
FQDN: "loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local",
@@ -2692,7 +2683,7 @@ common:
access_key_id: test
secret_access_key: test123
s3forcepathstyle: true
- compactor_address: http://loki-compactor-http-lokistack-dev.default.svc.cluster.local:3100
+ compactor_grpc_address: loki-compactor-grpc-lokistack-dev.default.svc.cluster.local:9095
compactor:
compaction_interval: 2h
working_directory: /tmp/loki/compactor
@@ -2966,9 +2957,8 @@ overrides:
Namespace: "test-ns",
Name: "test",
Compactor: Address{
- FQDN: "loki-compactor-http-lokistack-dev.default.svc.cluster.local",
- Port: 3100,
- Protocol: "http",
+ FQDN: "loki-compactor-grpc-lokistack-dev.default.svc.cluster.local",
+ Port: 9095,
},
FrontendWorker: Address{
FQDN: "loki-query-frontend-grpc-lokistack-dev.default.svc.cluster.local",
diff --git a/operator/internal/manifests/internal/config/loki-config.yaml b/operator/internal/manifests/internal/config/loki-config.yaml
index 0b497bdcd0d5b..0bd703a0754aa 100644
--- a/operator/internal/manifests/internal/config/loki-config.yaml
+++ b/operator/internal/manifests/internal/config/loki-config.yaml
@@ -45,7 +45,7 @@ common:
region_name: {{ .Region }}
container_name: {{ .Container }}
{{- end }}
- compactor_address: {{ .Compactor.Protocol }}://{{ .Compactor.FQDN }}:{{ .Compactor.Port }}
+ compactor_grpc_address: {{ .Compactor.FQDN }}:{{ .Compactor.Port }}
compactor:
compaction_interval: 2h
working_directory: {{ .StorageDirectory }}/compactor
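
A standalone Go sketch, assuming nothing beyond the text/template standard library, of how the changed loki-config.yaml line renders once the Protocol field is gone. The Address struct and commonBlock constant are simplified stand-ins for the operator's config types; the FQDN and port values follow the expectations in build_test.go above.

package main

import (
	"os"
	"text/template"
)

// Address mirrors the trimmed config.Address from the diff: after the change
// only FQDN and Port are needed, the Protocol field is dropped.
type Address struct {
	FQDN string
	Port int
}

// The template line matches the updated loki-config.yaml fragment, which now
// emits compactor_grpc_address instead of an http compactor_address URL.
const commonBlock = `common:
  compactor_grpc_address: {{ .Compactor.FQDN }}:{{ .Compactor.Port }}
`

func main() {
	data := struct{ Compactor Address }{
		Compactor: Address{
			// Values follow the loki-compactor-grpc-<stack>.<namespace> naming
			// seen in the test expectations above.
			FQDN: "loki-compactor-grpc-lokistack-dev.default.svc.cluster.local",
			Port: 9095,
		},
	}
	tmpl := template.Must(template.New("common").Parse(commonBlock))
	if err := tmpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}
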
|
operator
|
Use gRPC instead of http for compactor communications (#8265)
|
48087e0e1af855b67c0ce1285bf3b8acbed245a5
|
2023-10-12 17:31:55
|
Periklis Tsirakidis
|
operator: Add missing marker/sweeper panels in retention dashboard (#10854)
| false
|
diff --git a/operator/CHANGELOG.md b/operator/CHANGELOG.md
index 014a544f114b8..a699e7c4b4811 100644
--- a/operator/CHANGELOG.md
+++ b/operator/CHANGELOG.md
@@ -1,5 +1,6 @@
## Main
+- [10854](https://github.com/grafana/loki/pull/10854) **periklis**: Add missing marker/sweeper panels in retention dashboard
- [10717](https://github.com/grafana/loki/pull/10717) **periklis**: Allow SSE settings in AWS S3 object storage secret
- [10715](https://github.com/grafana/loki/pull/10715) **periklis**: Allow endpoint_suffix in azure object storage secret
- [10562](https://github.com/grafana/loki/pull/10562) **periklis**: Add memberlist IPv6 support
diff --git a/operator/internal/manifests/openshift/internal/dashboards/static/grafana-dashboard-lokistack-retention.json b/operator/internal/manifests/openshift/internal/dashboards/static/grafana-dashboard-lokistack-retention.json
index 2023f0623fb29..39f847e9b7588 100644
--- a/operator/internal/manifests/openshift/internal/dashboards/static/grafana-dashboard-lokistack-retention.json
+++ b/operator/internal/manifests/openshift/internal/dashboards/static/grafana-dashboard-lokistack-retention.json
@@ -395,7 +395,7 @@
"sort": 2,
"value_type": "individual"
},
- "type": "stat",
+ "type": "singlestat",
"xaxis": {
"buckets": null,
"mode": "time",
@@ -581,6 +581,846 @@
"showTitle": true,
"title": "Compact and Mark",
"titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": { },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 10,
+ "id": 7,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 0,
+ "links": [ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [ ],
+ "spaceLength": 10,
+ "span": 4,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "count by(action)(loki_boltdb_shipper_retention_marker_table_processed_total{ namespace=~\"$namespace\"})",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{action}}",
+ "legendLink": null,
+ "step": 10
+ }
+ ],
+ "thresholds": [ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Processed Tables Per Action",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [ ]
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ },
+ {
+ "aliasColors": { },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 10,
+ "id": 8,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 0,
+ "links": [ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [ ],
+ "spaceLength": 10,
+ "span": 4,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "count by(table,action)(loki_boltdb_shipper_retention_marker_table_processed_total{ namespace=~\"$namespace\" , action=~\"modified|deleted\"})",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{table}}-{{action}}",
+ "legendLink": null,
+ "step": 10
+ }
+ ],
+ "thresholds": [ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Modified Tables",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [ ]
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ },
+ {
+ "aliasColors": { },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 10,
+ "id": 9,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 0,
+ "links": [ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [ ],
+ "spaceLength": 10,
+ "span": 4,
+ "stack": true,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum by (table)(rate(loki_boltdb_shipper_retention_marker_count_total{ namespace=~\"$namespace\"}[$__rate_interval])) >0",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{table}}",
+ "legendLink": null,
+ "step": 10
+ }
+ ],
+ "thresholds": [ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Marks Creation Rate Per Table",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [ ]
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Per Table Marker",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": { },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "format": "short",
+ "id": 10,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [ ],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum (increase(loki_boltdb_shipper_retention_marker_count_total{ namespace=~\"$namespace\"}[24h]))",
+ "format": "time_series",
+ "instant": true,
+ "intervalFactor": 2,
+ "refId": "A"
+ }
+ ],
+ "thresholds": "70,80",
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Marked Chunks (24h)",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "singlestat",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [ ]
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ },
+ {
+ "aliasColors": { },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "id": 11,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [ ],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "histogram_quantile(0.99, sum(rate(loki_boltdb_shipper_retention_marker_table_processed_duration_seconds_bucket{ namespace=~\"$namespace\"}[$__rate_interval])) by (le)) * 1e3",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "99th Percentile",
+ "refId": "A",
+ "step": 10
+ },
+ {
+ "expr": "histogram_quantile(0.50, sum(rate(loki_boltdb_shipper_retention_marker_table_processed_duration_seconds_bucket{ namespace=~\"$namespace\"}[$__rate_interval])) by (le)) * 1e3",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "50th Percentile",
+ "refId": "B",
+ "step": 10
+ },
+ {
+ "expr": "sum(rate(loki_boltdb_shipper_retention_marker_table_processed_duration_seconds_sum{ namespace=~\"$namespace\"}[$__rate_interval])) * 1e3 / sum(rate(loki_boltdb_shipper_retention_marker_table_processed_duration_seconds_count{ namespace=~\"$namespace\"}[$__rate_interval]))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "Average",
+ "refId": "C",
+ "step": 10
+ }
+ ],
+ "thresholds": [ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Mark Table Latency",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [ ]
+ },
+ "yaxes": [
+ {
+ "format": "ms",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": { },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "format": "short",
+ "id": 12,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [ ],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum (increase(loki_boltdb_shipper_retention_sweeper_chunk_deleted_duration_seconds_count{ namespace=~\"$namespace\"}[24h]))",
+ "format": "time_series",
+ "instant": true,
+ "intervalFactor": 2,
+ "refId": "A"
+ }
+ ],
+ "thresholds": "70,80",
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Delete Chunks (24h)",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "singlestat",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [ ]
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ },
+ {
+ "aliasColors": { },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "id": 13,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [ ],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "histogram_quantile(0.99, sum(rate(loki_boltdb_shipper_retention_sweeper_chunk_deleted_duration_seconds_bucket{ namespace=~\"$namespace\"}[$__rate_interval])) by (le)) * 1e3",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "99th Percentile",
+ "refId": "A",
+ "step": 10
+ },
+ {
+ "expr": "histogram_quantile(0.50, sum(rate(loki_boltdb_shipper_retention_sweeper_chunk_deleted_duration_seconds_bucket{ namespace=~\"$namespace\"}[$__rate_interval])) by (le)) * 1e3",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "50th Percentile",
+ "refId": "B",
+ "step": 10
+ },
+ {
+ "expr": "sum(rate(loki_boltdb_shipper_retention_sweeper_chunk_deleted_duration_seconds_sum{ namespace=~\"$namespace\"}[$__rate_interval])) * 1e3 / sum(rate(loki_boltdb_shipper_retention_sweeper_chunk_deleted_duration_seconds_count{ namespace=~\"$namespace\"}[$__rate_interval]))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "Average",
+ "refId": "C",
+ "step": 10
+ }
+ ],
+ "thresholds": [ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Delete Latency",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [ ]
+ },
+ "yaxes": [
+ {
+ "format": "ms",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Sweeper",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": { },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "id": 14,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [ ],
+ "spaceLength": 10,
+ "span": 4,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "time() - (loki_boltdb_shipper_retention_sweeper_marker_file_processing_current_time{ namespace=~\"$namespace\"} > 0)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "lag",
+ "legendLink": null,
+ "step": 10
+ }
+ ],
+ "thresholds": [ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Sweeper Lag",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [ ]
+ },
+ "yaxes": [
+ {
+ "format": "s",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ },
+ {
+ "aliasColors": { },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "id": 15,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [ ],
+ "spaceLength": 10,
+ "span": 4,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(loki_boltdb_shipper_retention_sweeper_marker_files_current{ namespace=~\"$namespace\"})",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "count",
+ "legendLink": null,
+ "step": 10
+ }
+ ],
+ "thresholds": [ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Marks Files to Process",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [ ]
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ },
+ {
+ "aliasColors": { },
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fill": 1,
+ "id": 16,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [ ],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [ ],
+ "spaceLength": 10,
+ "span": 4,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum by (status)(rate(loki_boltdb_shipper_retention_sweeper_chunk_deleted_duration_seconds_count{ namespace=~\"$namespace\"}[$__rate_interval]))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{status}}",
+ "legendLink": null,
+ "step": 10
+ }
+ ],
+ "thresholds": [ ],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Delete Rate Per Status",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": [ ]
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": 0,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "",
+ "titleSize": "h6"
}
],
"schemaVersion": 14,
diff --git a/operator/jsonnet/config.libsonnet b/operator/jsonnet/config.libsonnet
index be75d44e11d39..ec50b795a1de2 100644
--- a/operator/jsonnet/config.libsonnet
+++ b/operator/jsonnet/config.libsonnet
@@ -56,6 +56,15 @@ local utils = (import 'github.com/grafana/jsonnet-libs/mixin-utils/utils.libsonn
],
},
+ // replaceType updates the type of a panel. Used to
+ // transform "stat" graphs in "singlestat" to ensure OCP Console compatibility
+ local replaceType = function(type, replacement)
+ function(p) p + (
+ if p.type == type then {
+ type: replacement,
+ } else {}
+ ),
+
// dropPanels removes unnecessary panels from the loki dashboards
// that are of obsolete usage on our AWS-based deployment environment.
local dropPanels = function(panels, dropList, fn)
@@ -135,7 +144,7 @@ local utils = (import 'github.com/grafana/jsonnet-libs/mixin-utils/utils.libsonn
grafanaDashboards+: {
'loki-retention.json'+: {
- local dropList = ['Logs', 'Per Table Marker', 'Sweeper', ''],
+ local dropList = ['Logs'],
local replacements = [
{ from: 'cluster=~"$cluster",', to: '' },
{ from: 'container="compactor"', to: 'container=~".+-compactor"' },
@@ -146,7 +155,7 @@ local utils = (import 'github.com/grafana/jsonnet-libs/mixin-utils/utils.libsonn
tags: defaultLokiTags(super.tags),
rows: [
r {
- panels: mapPanels([replaceMatchers(replacements)], r.panels),
+ panels: mapPanels([replaceMatchers(replacements), replaceType('stat', 'singlestat')], r.panels),
}
for r in dropPanels(super.rows, dropList, function(p) true)
],
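
For illustration only, a hypothetical Go equivalent of the jsonnet replaceType helper added above, applied to decoded dashboard panels; it sketches the stat-to-singlestat rewrite for OCP console compatibility and is not code used by the operator.

package main

import (
	"encoding/json"
	"fmt"
)

// replaceType mimics the jsonnet helper: any panel whose "type" equals from is
// rewritten to to, which is how "stat" panels become "singlestat".
func replaceType(panels []map[string]any, from, to string) {
	for _, p := range panels {
		if t, ok := p["type"].(string); ok && t == from {
			p["type"] = to
		}
	}
}

func main() {
	raw := `[{"id": 10, "type": "stat"}, {"id": 11, "type": "graph"}]`
	var panels []map[string]any
	if err := json.Unmarshal([]byte(raw), &panels); err != nil {
		panic(err)
	}
	replaceType(panels, "stat", "singlestat")
	out, _ := json.Marshal(panels)
	fmt.Println(string(out)) // [{"id":10,"type":"singlestat"},{"id":11,"type":"graph"}]
}
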
|
operator
|
Add missing marker/sweeper panels in retention dashboard (#10854)
|
2e62abbf47c47041027baf240722b3d76e7bd9a3
|
2024-10-29 22:43:21
|
andriikushch
|
fix: promtail parser for azureeventhubs message without time field (#14218)
| false
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index dfc370a4ea44b..9b4503d14a7e1 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,11 @@
# Changelog
+## Unreleased
+
+### Bug Fixes
+
+* **promtail:** fix parser for azureeventhubs message without time field ([#14218](https://github.com/grafana/loki/pull/14218))
+
## [3.1.1](https://github.com/grafana/loki/compare/v3.1.0...v3.1.1) (2024-08-08)
@@ -12,7 +18,6 @@
* **deps:** bumped dependencies versions to resolve CVEs ([#13789](https://github.com/grafana/loki/issues/13789)) ([34206cd](https://github.com/grafana/loki/commit/34206cd2d6290566034710ae6c2d08af8804bc91))
-
## [3.1.0](https://github.com/grafana/loki/compare/v3.0.0...v3.1.0) (2024-07-02)
diff --git a/clients/pkg/promtail/targets/azureeventhubs/parser.go b/clients/pkg/promtail/targets/azureeventhubs/parser.go
index 0001dc525019e..659f1a2e7a643 100644
--- a/clients/pkg/promtail/targets/azureeventhubs/parser.go
+++ b/clients/pkg/promtail/targets/azureeventhubs/parser.go
@@ -13,7 +13,6 @@ import (
"github.com/prometheus/prometheus/model/relabel"
"github.com/grafana/loki/v3/clients/pkg/promtail/api"
-
"github.com/grafana/loki/v3/pkg/logproto"
)
@@ -33,7 +32,9 @@ func (l azureMonitorResourceLogs) validate() error {
// azureMonitorResourceLog used to unmarshal common schema for Azure resource logs
// https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/resource-logs-schema
type azureMonitorResourceLog struct {
- Time string `json:"time"`
+ Time string `json:"time"`
+ // Some logs have `time` field, some have `timeStamp` field : https://github.com/grafana/loki/issues/14176
+ TimeStamp string `json:"timeStamp"`
Category string `json:"category"`
ResourceID string `json:"resourceId"`
OperationName string `json:"operationName"`
@@ -41,7 +42,7 @@ type azureMonitorResourceLog struct {
// validate check if fields marked as required by schema for Azure resource log are not empty
func (l azureMonitorResourceLog) validate() error {
- valid := len(l.Time) != 0 &&
+ valid := l.isTimeOrTimeStampFieldSet() &&
len(l.Category) != 0 &&
len(l.ResourceID) != 0 &&
len(l.OperationName) != 0
@@ -53,6 +54,34 @@ func (l azureMonitorResourceLog) validate() error {
return nil
}
+func (l azureMonitorResourceLog) isTimeOrTimeStampFieldSet() bool {
+ return len(l.Time) != 0 || len(l.TimeStamp) != 0
+}
+
+// getTime returns time from `time` or `timeStamp` field. If both fields are set, `time` is used. If both fields are empty, error is returned.
+func (l azureMonitorResourceLog) getTime() (time.Time, error) {
+ if len(l.Time) == 0 && len(l.TimeStamp) == 0 {
+ var t time.Time
+ return t, errors.New("time and timeStamp fields are empty")
+ }
+
+ if len(l.Time) != 0 {
+ t, err := time.Parse(time.RFC3339, l.Time)
+ if err != nil {
+ return t, err
+ }
+
+ return t.UTC(), nil
+ }
+
+ t, err := time.Parse(time.RFC3339, l.TimeStamp)
+ if err != nil {
+ return t, err
+ }
+
+ return t.UTC(), nil
+}
+
type messageParser struct {
disallowCustomMessages bool
}
@@ -153,11 +182,11 @@ func (e *messageParser) parseRecord(record []byte, labelSet model.LabelSet, rela
}
func (e *messageParser) getTime(messageTime time.Time, useIncomingTimestamp bool, logRecord *azureMonitorResourceLog) time.Time {
- if !useIncomingTimestamp || logRecord.Time == "" {
+ if !useIncomingTimestamp || !logRecord.isTimeOrTimeStampFieldSet() {
return messageTime
}
- recordTime, err := time.Parse(time.RFC3339, logRecord.Time)
+ recordTime, err := logRecord.getTime()
if err != nil {
return messageTime
}
diff --git a/clients/pkg/promtail/targets/azureeventhubs/parser_test.go b/clients/pkg/promtail/targets/azureeventhubs/parser_test.go
index 156dc48d961c1..662dce4358790 100644
--- a/clients/pkg/promtail/targets/azureeventhubs/parser_test.go
+++ b/clients/pkg/promtail/targets/azureeventhubs/parser_test.go
@@ -253,3 +253,37 @@ func readFile(t *testing.T, filename string) []byte {
assert.NoError(t, err)
return data
}
+
+func Test_parseMessage_message_without_time_with_time_stamp(t *testing.T) {
+ messageParser := &messageParser{
+ disallowCustomMessages: true,
+ }
+
+ message := &sarama.ConsumerMessage{
+ Value: readFile(t, "testdata/message_without_time_with_time_stamp.json"),
+ Timestamp: time.Date(2023, time.March, 17, 8, 44, 02, 0, time.UTC),
+ }
+
+ entries, err := messageParser.Parse(message, nil, nil, true)
+ assert.NoError(t, err)
+ assert.Len(t, entries, 1)
+
+ expectedLine1 := "{\n \"timeStamp\": \"2024-09-18T00:45:09+00:00\",\n \"resourceId\": \"/RESOURCE_ID\",\n \"operationName\": \"ApplicationGatewayAccess\",\n \"category\": \"ApplicationGatewayAccessLog\"\n }"
+ assert.Equal(t, expectedLine1, entries[0].Line)
+
+ assert.Equal(t, time.Date(2024, time.September, 18, 00, 45, 9, 0, time.UTC), entries[0].Timestamp)
+}
+
+func Test_parseMessage_message_without_time_and_time_stamp(t *testing.T) {
+ messageParser := &messageParser{
+ disallowCustomMessages: true,
+ }
+
+ message := &sarama.ConsumerMessage{
+ Value: readFile(t, "testdata/message_without_time_and_time_stamp.json"),
+ Timestamp: time.Date(2023, time.March, 17, 8, 44, 02, 0, time.UTC),
+ }
+
+ _, err := messageParser.Parse(message, nil, nil, true)
+ assert.EqualError(t, err, "required field or fields is empty")
+}
diff --git a/clients/pkg/promtail/targets/azureeventhubs/testdata/message_without_time_and_time_stamp.json b/clients/pkg/promtail/targets/azureeventhubs/testdata/message_without_time_and_time_stamp.json
new file mode 100644
index 0000000000000..f9fc41ad02aea
--- /dev/null
+++ b/clients/pkg/promtail/targets/azureeventhubs/testdata/message_without_time_and_time_stamp.json
@@ -0,0 +1,9 @@
+{
+ "records": [
+ {
+ "resourceId": "/RESOURCE_ID",
+ "operationName": "ApplicationGatewayAccess",
+ "category": "ApplicationGatewayAccessLog"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/clients/pkg/promtail/targets/azureeventhubs/testdata/message_without_time_with_time_stamp.json b/clients/pkg/promtail/targets/azureeventhubs/testdata/message_without_time_with_time_stamp.json
new file mode 100644
index 0000000000000..8579fc489761a
--- /dev/null
+++ b/clients/pkg/promtail/targets/azureeventhubs/testdata/message_without_time_with_time_stamp.json
@@ -0,0 +1,10 @@
+{
+ "records": [
+ {
+ "timeStamp": "2024-09-18T00:45:09+00:00",
+ "resourceId": "/RESOURCE_ID",
+ "operationName": "ApplicationGatewayAccess",
+ "category": "ApplicationGatewayAccessLog"
+ }
+ ]
+}
\ No newline at end of file
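
A condensed, runnable Go restatement of the timestamp fallback this patch introduces: prefer time, fall back to timeStamp, and fail when both are missing. The resourceLog type keeps only the two timestamp fields; the real parser also validates category, resourceId and operationName as shown in the diff.

package main

import (
	"errors"
	"fmt"
	"time"
)

// resourceLog keeps only the timestamp fields of azureMonitorResourceLog that
// matter here: Azure resource logs carry either "time" or "timeStamp".
type resourceLog struct {
	Time      string `json:"time"`
	TimeStamp string `json:"timeStamp"`
}

// getTime follows the same precedence as the patched parser: prefer Time,
// fall back to TimeStamp, and return an error when both are empty.
func (l resourceLog) getTime() (time.Time, error) {
	value := l.Time
	if value == "" {
		value = l.TimeStamp
	}
	if value == "" {
		return time.Time{}, errors.New("time and timeStamp fields are empty")
	}
	t, err := time.Parse(time.RFC3339, value)
	if err != nil {
		return time.Time{}, err
	}
	return t.UTC(), nil
}

func main() {
	l := resourceLog{TimeStamp: "2024-09-18T00:45:09+00:00"}
	ts, err := l.getTime()
	fmt.Println(ts, err) // 2024-09-18 00:45:09 +0000 UTC <nil>
}
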
|
fix
|
promtail parser for azureeventhubs message without time field (#14218)
|
0bc23db95d64c04b910f5ede8b3131bb132933c7
|
2025-01-29 18:23:31
|
Dylan Guedes
|
refactor: Add retention hours to `discarded` metrics (#15875)
| false
|
diff --git a/pkg/compactor/deletion/tenant_request_handler_test.go b/pkg/compactor/deletion/tenant_request_handler_test.go
index cca06f4c18cfe..7178a31bcc0f0 100644
--- a/pkg/compactor/deletion/tenant_request_handler_test.go
+++ b/pkg/compactor/deletion/tenant_request_handler_test.go
@@ -7,8 +7,10 @@ import (
"time"
"github.com/grafana/dskit/user"
+ "github.com/prometheus/prometheus/model/labels"
"github.com/stretchr/testify/require"
+ "github.com/grafana/loki/v3/pkg/util"
"github.com/grafana/loki/v3/pkg/validation"
)
@@ -81,3 +83,7 @@ func (f *fakeLimits) RetentionPeriod(userID string) time.Duration {
func (f *fakeLimits) StreamRetention(userID string) []validation.StreamRetention {
return f.getLimitForUser(userID).streamRetention
}
+
+func (f *fakeLimits) RetentionHours(userID string, _ labels.Labels) string {
+ return util.RetentionHours(f.getLimitForUser(userID).retentionPeriod)
+}
diff --git a/pkg/compactor/retention/expiration.go b/pkg/compactor/retention/expiration.go
index 45029f9652c5a..3ab412d2306dc 100644
--- a/pkg/compactor/retention/expiration.go
+++ b/pkg/compactor/retention/expiration.go
@@ -8,6 +8,7 @@ import (
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
+ "github.com/grafana/loki/v3/pkg/util"
"github.com/grafana/loki/v3/pkg/util/filter"
util_log "github.com/grafana/loki/v3/pkg/util/log"
"github.com/grafana/loki/v3/pkg/validation"
@@ -131,6 +132,11 @@ func NewTenantsRetention(l Limits) *TenantsRetention {
}
}
+func (tr *TenantsRetention) RetentionHoursFor(userID string, lbs labels.Labels) string {
+ period := tr.RetentionPeriodFor(userID, lbs)
+ return util.RetentionHours(period)
+}
+
func (tr *TenantsRetention) RetentionPeriodFor(userID string, lbs labels.Labels) time.Duration {
streamRetentions := tr.limits.StreamRetention(userID)
globalRetention := tr.limits.RetentionPeriod(userID)
diff --git a/pkg/compactor/retention/expiration_test.go b/pkg/compactor/retention/expiration_test.go
index 3cc69f88ae613..8824919c4298e 100644
--- a/pkg/compactor/retention/expiration_test.go
+++ b/pkg/compactor/retention/expiration_test.go
@@ -123,6 +123,43 @@ func Test_expirationChecker_Expired(t *testing.T) {
}
}
+func TestTenantsRetention_RetentionPeriodFor(t *testing.T) {
+ sevenDays, err := model.ParseDuration("720h")
+ require.NoError(t, err)
+ oneDay, err := model.ParseDuration("24h")
+ require.NoError(t, err)
+
+ tr := NewTenantsRetention(fakeLimits{
+ defaultLimit: retentionLimit{
+ retentionPeriod: time.Duration(sevenDays),
+ streamRetention: []validation.StreamRetention{
+ {
+ Period: oneDay,
+ Matchers: []*labels.Matcher{
+ labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"),
+ },
+ },
+ },
+ },
+ perTenant: map[string]retentionLimit{
+ "1": {
+ retentionPeriod: time.Duration(sevenDays),
+ streamRetention: []validation.StreamRetention{
+ {
+ Period: oneDay,
+ Matchers: []*labels.Matcher{
+ labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"),
+ },
+ },
+ },
+ },
+ },
+ })
+
+ require.Equal(t, time.Duration(sevenDays), tr.RetentionPeriodFor("1", nil))
+ require.Equal(t, time.Duration(oneDay), tr.RetentionPeriodFor("1", labels.Labels{labels.Label{Name: "foo", Value: "bar"}}))
+}
+
func Test_expirationChecker_Expired_zeroValue(t *testing.T) {
// Default retention should be zero
diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go
index 6ede42aab1c20..8643b3d0726f0 100644
--- a/pkg/distributor/distributor.go
+++ b/pkg/distributor/distributor.go
@@ -453,8 +453,6 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log
// We use the heuristic of 1 sample per TS to size the array.
// We also work out the hash value at the same time.
streams := make([]KeyedStream, 0, len(req.Streams))
- validatedLineSize := 0
- validatedLineCount := 0
var validationErrors util.GroupedErrors
@@ -513,13 +511,14 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log
d.truncateLines(validationContext, &stream)
var lbs labels.Labels
- lbs, stream.Labels, stream.Hash, err = d.parseStreamLabels(validationContext, stream.Labels, stream)
+ var retentionHours string
+ lbs, stream.Labels, stream.Hash, retentionHours, err = d.parseStreamLabels(validationContext, stream.Labels, stream)
if err != nil {
d.writeFailuresManager.Log(tenantID, err)
validationErrors.Add(err)
- validation.DiscardedSamples.WithLabelValues(validation.InvalidLabels, tenantID).Add(float64(len(stream.Entries)))
+ validation.DiscardedSamples.WithLabelValues(validation.InvalidLabels, tenantID, retentionHours).Add(float64(len(stream.Entries)))
discardedBytes := util.EntriesTotalSize(stream.Entries)
- validation.DiscardedBytes.WithLabelValues(validation.InvalidLabels, tenantID).Add(float64(discardedBytes))
+ validation.DiscardedBytes.WithLabelValues(validation.InvalidLabels, tenantID, retentionHours).Add(float64(discardedBytes))
continue
}
@@ -527,9 +526,9 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log
err := fmt.Errorf(validation.MissingEnforcedLabelsErrorMsg, strings.Join(lbsMissing, ","), tenantID)
d.writeFailuresManager.Log(tenantID, err)
validationErrors.Add(err)
- validation.DiscardedSamples.WithLabelValues(validation.MissingEnforcedLabels, tenantID).Add(float64(len(stream.Entries)))
+ validation.DiscardedSamples.WithLabelValues(validation.MissingEnforcedLabels, tenantID, retentionHours).Add(float64(len(stream.Entries)))
discardedBytes := util.EntriesTotalSize(stream.Entries)
- validation.DiscardedBytes.WithLabelValues(validation.MissingEnforcedLabels, tenantID).Add(float64(discardedBytes))
+ validation.DiscardedBytes.WithLabelValues(validation.MissingEnforcedLabels, tenantID, retentionHours).Add(float64(discardedBytes))
continue
}
@@ -538,7 +537,7 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log
prevTs := stream.Entries[0].Timestamp
for _, entry := range stream.Entries {
- if err := d.validator.ValidateEntry(ctx, validationContext, lbs, entry); err != nil {
+ if err := d.validator.ValidateEntry(ctx, validationContext, lbs, entry, retentionHours); err != nil {
d.writeFailuresManager.Log(tenantID, err)
validationErrors.Add(err)
continue
@@ -593,8 +592,7 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log
}
n++
- validatedLineSize += util.EntryTotalSize(&entry)
- validatedLineCount++
+ validationContext.validationMetrics.compute(entry, retentionHours)
pushSize += len(entry.Line)
}
stream.Entries = stream.Entries[:n]
@@ -618,7 +616,7 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log
}
if block, until, retStatusCode := d.validator.ShouldBlockIngestion(validationContext, now); block {
- d.trackDiscardedData(ctx, req, validationContext, tenantID, validatedLineCount, validatedLineSize, validation.BlockedIngestion)
+ d.trackDiscardedData(ctx, req, validationContext, tenantID, validationContext.validationMetrics, validation.BlockedIngestion)
err = fmt.Errorf(validation.BlockedIngestionErrorMsg, tenantID, until.Format(time.RFC3339), retStatusCode)
d.writeFailuresManager.Log(tenantID, err)
@@ -632,10 +630,10 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log
return nil, httpgrpc.Errorf(retStatusCode, "%s", err.Error())
}
- if !d.ingestionRateLimiter.AllowN(now, tenantID, validatedLineSize) {
- d.trackDiscardedData(ctx, req, validationContext, tenantID, validatedLineCount, validatedLineSize, validation.RateLimited)
+ if !d.ingestionRateLimiter.AllowN(now, tenantID, validationContext.validationMetrics.lineSize) {
+ d.trackDiscardedData(ctx, req, validationContext, tenantID, validationContext.validationMetrics, validation.RateLimited)
- err = fmt.Errorf(validation.RateLimitedErrorMsg, tenantID, int(d.ingestionRateLimiter.Limit(now, tenantID)), validatedLineCount, validatedLineSize)
+ err = fmt.Errorf(validation.RateLimitedErrorMsg, tenantID, int(d.ingestionRateLimiter.Limit(now, tenantID)), validationContext.validationMetrics.lineCount, validationContext.validationMetrics.lineSize)
d.writeFailuresManager.Log(tenantID, err)
// Return a 429 to indicate to the client they are being rate limited
return nil, httpgrpc.Errorf(http.StatusTooManyRequests, "%s", err.Error())
@@ -769,16 +767,17 @@ func (d *Distributor) trackDiscardedData(
req *logproto.PushRequest,
validationContext validationContext,
tenantID string,
- validatedLineCount int,
- validatedLineSize int,
+ validationMetrics validationMetrics,
reason string,
) {
- validation.DiscardedSamples.WithLabelValues(reason, tenantID).Add(float64(validatedLineCount))
- validation.DiscardedBytes.WithLabelValues(reason, tenantID).Add(float64(validatedLineSize))
+ for retentionHours, count := range validationMetrics.lineCountPerRetentionHours {
+ validation.DiscardedSamples.WithLabelValues(reason, tenantID, retentionHours).Add(float64(count))
+ validation.DiscardedBytes.WithLabelValues(reason, tenantID, retentionHours).Add(float64(validationMetrics.lineSizePerRetentionHours[retentionHours]))
+ }
if d.usageTracker != nil {
for _, stream := range req.Streams {
- lbs, _, _, err := d.parseStreamLabels(validationContext, stream.Labels, stream)
+ lbs, _, _, _, err := d.parseStreamLabels(validationContext, stream.Labels, stream)
if err != nil {
continue
}
@@ -1157,24 +1156,28 @@ type labelData struct {
hash uint64
}
-func (d *Distributor) parseStreamLabels(vContext validationContext, key string, stream logproto.Stream) (labels.Labels, string, uint64, error) {
+func (d *Distributor) parseStreamLabels(vContext validationContext, key string, stream logproto.Stream) (labels.Labels, string, uint64, string, error) {
if val, ok := d.labelCache.Get(key); ok {
- return val.ls, val.ls.String(), val.hash, nil
+ retentionHours := d.tenantsRetention.RetentionHoursFor(vContext.userID, val.ls)
+ return val.ls, val.ls.String(), val.hash, retentionHours, nil
}
ls, err := syntax.ParseLabels(key)
if err != nil {
- return nil, "", 0, fmt.Errorf(validation.InvalidLabelsErrorMsg, key, err)
+ tenantRetentionHours := d.tenantsRetention.RetentionHoursFor(vContext.userID, nil)
+ return nil, "", 0, tenantRetentionHours, fmt.Errorf(validation.InvalidLabelsErrorMsg, key, err)
}
- if err := d.validator.ValidateLabels(vContext, ls, stream); err != nil {
- return nil, "", 0, err
+ retentionHours := d.tenantsRetention.RetentionHoursFor(vContext.userID, ls)
+
+ if err := d.validator.ValidateLabels(vContext, ls, stream, retentionHours); err != nil {
+ return nil, "", 0, retentionHours, err
}
lsHash := ls.Hash()
d.labelCache.Add(key, labelData{ls, lsHash})
- return ls, ls.String(), lsHash, nil
+ return ls, ls.String(), lsHash, retentionHours, nil
}
// shardCountFor returns the right number of shards to be used by the given stream.
diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go
index 91d3fcdf1367b..66aa653a20276 100644
--- a/pkg/distributor/distributor_test.go
+++ b/pkg/distributor/distributor_test.go
@@ -1233,7 +1233,7 @@ func Benchmark_SortLabelsOnPush(b *testing.B) {
for n := 0; n < b.N; n++ {
stream := request.Streams[0]
stream.Labels = `{buzz="f", a="b"}`
- _, _, _, err := d.parseStreamLabels(vCtx, stream.Labels, stream)
+ _, _, _, _, err := d.parseStreamLabels(vCtx, stream.Labels, stream)
if err != nil {
panic("parseStreamLabels fail,err:" + err.Error())
}
@@ -1279,7 +1279,7 @@ func TestParseStreamLabels(t *testing.T) {
vCtx := d.validator.getValidationContextForTime(testTime, "123")
t.Run(tc.name, func(t *testing.T) {
- lbs, lbsString, hash, err := d.parseStreamLabels(vCtx, tc.origLabels, logproto.Stream{
+ lbs, lbsString, hash, _, err := d.parseStreamLabels(vCtx, tc.origLabels, logproto.Stream{
Labels: tc.origLabels,
})
if tc.expectedErr != nil {
diff --git a/pkg/distributor/validation_metrics.go b/pkg/distributor/validation_metrics.go
new file mode 100644
index 0000000000000..9f22a65062ba4
--- /dev/null
+++ b/pkg/distributor/validation_metrics.go
@@ -0,0 +1,30 @@
+package distributor
+
+import (
+ "github.com/grafana/loki/v3/pkg/logproto"
+ "github.com/grafana/loki/v3/pkg/util"
+)
+
+type validationMetrics struct {
+ lineSizePerRetentionHours map[string]int
+ lineCountPerRetentionHours map[string]int
+ lineSize int
+ lineCount int
+ tenantRetentionHours string
+}
+
+func newValidationMetrics(tenantRetentionHours string) validationMetrics {
+ return validationMetrics{
+ lineSizePerRetentionHours: make(map[string]int),
+ lineCountPerRetentionHours: make(map[string]int),
+ tenantRetentionHours: tenantRetentionHours,
+ }
+}
+
+func (v *validationMetrics) compute(entry logproto.Entry, retentionHours string) {
+ totalEntrySize := util.EntryTotalSize(&entry)
+ v.lineSizePerRetentionHours[retentionHours] += totalEntrySize
+ v.lineCountPerRetentionHours[retentionHours]++
+ v.lineSize += totalEntrySize
+ v.lineCount++
+}
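
For illustration only (not part of the recorded patch): a minimal, self-contained Go sketch of the aggregation pattern the new `validationMetrics` type introduces — per-retention-hours buckets alongside running totals. The `entry` type and `len(line)` are stand-ins for `logproto.Entry` and `util.EntryTotalSize`, and the sample retention values are made up.

```go
package main

import "fmt"

// entry stands in for logproto.Entry; only its byte size matters here.
type entry struct {
	line string
}

// validationTotals mirrors the shape of the new validationMetrics type:
// running totals plus per-retention-hours buckets keyed by a string label.
type validationTotals struct {
	lineSizePerRetentionHours  map[string]int
	lineCountPerRetentionHours map[string]int
	lineSize                   int
	lineCount                  int
}

func newValidationTotals() validationTotals {
	return validationTotals{
		lineSizePerRetentionHours:  map[string]int{},
		lineCountPerRetentionHours: map[string]int{},
	}
}

// compute adds one entry to the bucket for its retention period. The real
// code sizes entries with util.EntryTotalSize (which also counts structured
// metadata); len(e.line) is a simplification for this sketch.
func (v *validationTotals) compute(e entry, retentionHours string) {
	size := len(e.line)
	v.lineSizePerRetentionHours[retentionHours] += size
	v.lineCountPerRetentionHours[retentionHours]++
	v.lineSize += size
	v.lineCount++
}

func main() {
	totals := newValidationTotals()
	totals.compute(entry{line: "short line"}, "744")              // stream with ~31d retention
	totals.compute(entry{line: "another line"}, "744")            // same bucket
	totals.compute(entry{line: "different tenant stream"}, "168") // 7d retention

	fmt.Println(totals.lineCountPerRetentionHours) // map[168:1 744:2]
	fmt.Println(totals.lineSizePerRetentionHours)
	fmt.Println(totals.lineCount, totals.lineSize) // overall totals still available
}
```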
diff --git a/pkg/distributor/validator.go b/pkg/distributor/validator.go
index 5aea652225a56..6684c28e066e0 100644
--- a/pkg/distributor/validator.go
+++ b/pkg/distributor/validator.go
@@ -58,9 +58,13 @@ type validationContext struct {
enforcedLabels []string
userID string
+
+ validationMetrics validationMetrics
}
func (v Validator) getValidationContextForTime(now time.Time, userID string) validationContext {
+ retentionHours := util.RetentionHours(v.RetentionPeriod(userID))
+
return validationContext{
userID: userID,
rejectOldSample: v.RejectOldSamples(userID),
@@ -82,11 +86,12 @@ func (v Validator) getValidationContextForTime(now time.Time, userID string) val
blockIngestionUntil: v.BlockIngestionUntil(userID),
blockIngestionStatusCode: v.BlockIngestionStatusCode(userID),
enforcedLabels: v.EnforcedLabels(userID),
+ validationMetrics: newValidationMetrics(retentionHours),
}
}
// ValidateEntry returns an error if the entry is invalid and report metrics for invalid entries accordingly.
-func (v Validator) ValidateEntry(ctx context.Context, vCtx validationContext, labels labels.Labels, entry logproto.Entry) error {
+func (v Validator) ValidateEntry(ctx context.Context, vCtx validationContext, labels labels.Labels, entry logproto.Entry, retentionHours string) error {
ts := entry.Timestamp.UnixNano()
validation.LineLengthHist.Observe(float64(len(entry.Line)))
structuredMetadataCount := len(entry.StructuredMetadata)
@@ -97,8 +102,8 @@ func (v Validator) ValidateEntry(ctx context.Context, vCtx validationContext, la
// Makes time string on the error message formatted consistently.
formatedEntryTime := entry.Timestamp.Format(timeFormat)
formatedRejectMaxAgeTime := time.Unix(0, vCtx.rejectOldSampleMaxAge).Format(timeFormat)
- validation.DiscardedSamples.WithLabelValues(validation.GreaterThanMaxSampleAge, vCtx.userID).Inc()
- validation.DiscardedBytes.WithLabelValues(validation.GreaterThanMaxSampleAge, vCtx.userID).Add(entrySize)
+ validation.DiscardedSamples.WithLabelValues(validation.GreaterThanMaxSampleAge, vCtx.userID, retentionHours).Inc()
+ validation.DiscardedBytes.WithLabelValues(validation.GreaterThanMaxSampleAge, vCtx.userID, retentionHours).Add(entrySize)
if v.usageTracker != nil {
v.usageTracker.DiscardedBytesAdd(ctx, vCtx.userID, validation.GreaterThanMaxSampleAge, labels, entrySize)
}
@@ -107,8 +112,8 @@ func (v Validator) ValidateEntry(ctx context.Context, vCtx validationContext, la
if ts > vCtx.creationGracePeriod {
formatedEntryTime := entry.Timestamp.Format(timeFormat)
- validation.DiscardedSamples.WithLabelValues(validation.TooFarInFuture, vCtx.userID).Inc()
- validation.DiscardedBytes.WithLabelValues(validation.TooFarInFuture, vCtx.userID).Add(entrySize)
+ validation.DiscardedSamples.WithLabelValues(validation.TooFarInFuture, vCtx.userID, retentionHours).Inc()
+ validation.DiscardedBytes.WithLabelValues(validation.TooFarInFuture, vCtx.userID, retentionHours).Add(entrySize)
if v.usageTracker != nil {
v.usageTracker.DiscardedBytesAdd(ctx, vCtx.userID, validation.TooFarInFuture, labels, entrySize)
}
@@ -120,8 +125,8 @@ func (v Validator) ValidateEntry(ctx context.Context, vCtx validationContext, la
// an orthogonal concept (we need not use ValidateLabels in this context)
// but the upstream cortex_validation pkg uses it, so we keep this
// for parity.
- validation.DiscardedSamples.WithLabelValues(validation.LineTooLong, vCtx.userID).Inc()
- validation.DiscardedBytes.WithLabelValues(validation.LineTooLong, vCtx.userID).Add(entrySize)
+ validation.DiscardedSamples.WithLabelValues(validation.LineTooLong, vCtx.userID, retentionHours).Inc()
+ validation.DiscardedBytes.WithLabelValues(validation.LineTooLong, vCtx.userID, retentionHours).Add(entrySize)
if v.usageTracker != nil {
v.usageTracker.DiscardedBytesAdd(ctx, vCtx.userID, validation.LineTooLong, labels, entrySize)
}
@@ -130,8 +135,8 @@ func (v Validator) ValidateEntry(ctx context.Context, vCtx validationContext, la
if structuredMetadataCount > 0 {
if !vCtx.allowStructuredMetadata {
- validation.DiscardedSamples.WithLabelValues(validation.DisallowedStructuredMetadata, vCtx.userID).Inc()
- validation.DiscardedBytes.WithLabelValues(validation.DisallowedStructuredMetadata, vCtx.userID).Add(entrySize)
+ validation.DiscardedSamples.WithLabelValues(validation.DisallowedStructuredMetadata, vCtx.userID, retentionHours).Inc()
+ validation.DiscardedBytes.WithLabelValues(validation.DisallowedStructuredMetadata, vCtx.userID, retentionHours).Add(entrySize)
if v.usageTracker != nil {
v.usageTracker.DiscardedBytesAdd(ctx, vCtx.userID, validation.DisallowedStructuredMetadata, labels, entrySize)
}
@@ -139,8 +144,8 @@ func (v Validator) ValidateEntry(ctx context.Context, vCtx validationContext, la
}
if maxSize := vCtx.maxStructuredMetadataSize; maxSize != 0 && structuredMetadataSizeBytes > maxSize {
- validation.DiscardedSamples.WithLabelValues(validation.StructuredMetadataTooLarge, vCtx.userID).Inc()
- validation.DiscardedBytes.WithLabelValues(validation.StructuredMetadataTooLarge, vCtx.userID).Add(entrySize)
+ validation.DiscardedSamples.WithLabelValues(validation.StructuredMetadataTooLarge, vCtx.userID, retentionHours).Inc()
+ validation.DiscardedBytes.WithLabelValues(validation.StructuredMetadataTooLarge, vCtx.userID, retentionHours).Add(entrySize)
if v.usageTracker != nil {
v.usageTracker.DiscardedBytesAdd(ctx, vCtx.userID, validation.StructuredMetadataTooLarge, labels, entrySize)
}
@@ -148,8 +153,8 @@ func (v Validator) ValidateEntry(ctx context.Context, vCtx validationContext, la
}
if maxCount := vCtx.maxStructuredMetadataCount; maxCount != 0 && structuredMetadataCount > maxCount {
- validation.DiscardedSamples.WithLabelValues(validation.StructuredMetadataTooMany, vCtx.userID).Inc()
- validation.DiscardedBytes.WithLabelValues(validation.StructuredMetadataTooMany, vCtx.userID).Add(entrySize)
+ validation.DiscardedSamples.WithLabelValues(validation.StructuredMetadataTooMany, vCtx.userID, retentionHours).Inc()
+ validation.DiscardedBytes.WithLabelValues(validation.StructuredMetadataTooMany, vCtx.userID, retentionHours).Add(entrySize)
if v.usageTracker != nil {
v.usageTracker.DiscardedBytesAdd(ctx, vCtx.userID, validation.StructuredMetadataTooMany, labels, entrySize)
}
@@ -161,9 +166,9 @@ func (v Validator) ValidateEntry(ctx context.Context, vCtx validationContext, la
}
// Validate labels returns an error if the labels are invalid
-func (v Validator) ValidateLabels(ctx validationContext, ls labels.Labels, stream logproto.Stream) error {
+func (v Validator) ValidateLabels(ctx validationContext, ls labels.Labels, stream logproto.Stream, retentionHours string) error {
if len(ls) == 0 {
- validation.DiscardedSamples.WithLabelValues(validation.MissingLabels, ctx.userID).Inc()
+ validation.DiscardedSamples.WithLabelValues(validation.MissingLabels, ctx.userID, retentionHours).Inc()
return fmt.Errorf(validation.MissingLabelsErrorMsg)
}
@@ -180,20 +185,20 @@ func (v Validator) ValidateLabels(ctx validationContext, ls labels.Labels, strea
}
if numLabelNames > ctx.maxLabelNamesPerSeries {
- updateMetrics(validation.MaxLabelNamesPerSeries, ctx.userID, stream)
+ updateMetrics(validation.MaxLabelNamesPerSeries, ctx.userID, stream, retentionHours)
return fmt.Errorf(validation.MaxLabelNamesPerSeriesErrorMsg, stream.Labels, numLabelNames, ctx.maxLabelNamesPerSeries)
}
lastLabelName := ""
for _, l := range ls {
if len(l.Name) > ctx.maxLabelNameLength {
- updateMetrics(validation.LabelNameTooLong, ctx.userID, stream)
+ updateMetrics(validation.LabelNameTooLong, ctx.userID, stream, retentionHours)
return fmt.Errorf(validation.LabelNameTooLongErrorMsg, stream.Labels, l.Name)
} else if len(l.Value) > ctx.maxLabelValueLength {
- updateMetrics(validation.LabelValueTooLong, ctx.userID, stream)
+ updateMetrics(validation.LabelValueTooLong, ctx.userID, stream, retentionHours)
return fmt.Errorf(validation.LabelValueTooLongErrorMsg, stream.Labels, l.Value)
} else if cmp := strings.Compare(lastLabelName, l.Name); cmp == 0 {
- updateMetrics(validation.DuplicateLabelNames, ctx.userID, stream)
+ updateMetrics(validation.DuplicateLabelNames, ctx.userID, stream, retentionHours)
return fmt.Errorf(validation.DuplicateLabelNamesErrorMsg, stream.Labels, l.Name)
}
lastLabelName = l.Name
@@ -210,8 +215,8 @@ func (v Validator) ShouldBlockIngestion(ctx validationContext, now time.Time) (b
return now.Before(ctx.blockIngestionUntil), ctx.blockIngestionUntil, ctx.blockIngestionStatusCode
}
-func updateMetrics(reason, userID string, stream logproto.Stream) {
- validation.DiscardedSamples.WithLabelValues(reason, userID).Add(float64(len(stream.Entries)))
+func updateMetrics(reason, userID string, stream logproto.Stream, retentionHours string) {
+ validation.DiscardedSamples.WithLabelValues(reason, userID, retentionHours).Add(float64(len(stream.Entries)))
bytes := util.EntriesTotalSize(stream.Entries)
- validation.DiscardedBytes.WithLabelValues(reason, userID).Add(float64(bytes))
+ validation.DiscardedBytes.WithLabelValues(reason, userID, retentionHours).Add(float64(bytes))
}
diff --git a/pkg/distributor/validator_test.go b/pkg/distributor/validator_test.go
index 9e51099dfad38..0881bd1a06214 100644
--- a/pkg/distributor/validator_test.go
+++ b/pkg/distributor/validator_test.go
@@ -15,6 +15,7 @@ import (
"github.com/grafana/loki/v3/pkg/logproto"
"github.com/grafana/loki/v3/pkg/logql/syntax"
+ "github.com/grafana/loki/v3/pkg/util"
"github.com/grafana/loki/v3/pkg/validation"
)
@@ -130,8 +131,9 @@ func TestValidator_ValidateEntry(t *testing.T) {
assert.NoError(t, err)
v, err := NewValidator(o, nil)
assert.NoError(t, err)
+ retentionHours := util.RetentionHours(v.RetentionPeriod(tt.userID))
- err = v.ValidateEntry(ctx, v.getValidationContextForTime(testTime, tt.userID), testStreamLabels, tt.entry)
+ err = v.ValidateEntry(ctx, v.getValidationContextForTime(testTime, tt.userID), testStreamLabels, tt.entry, retentionHours)
assert.Equal(t, tt.expected, err)
})
}
@@ -224,12 +226,13 @@ func TestValidator_ValidateLabels(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
l := &validation.Limits{}
flagext.DefaultValues(l)
+ retentionHours := util.RetentionHours(time.Duration(l.RetentionPeriod))
o, err := validation.NewOverrides(*l, tt.overrides)
assert.NoError(t, err)
v, err := NewValidator(o, nil)
assert.NoError(t, err)
- err = v.ValidateLabels(v.getValidationContextForTime(testTime, tt.userID), mustParseLabels(tt.labels), logproto.Stream{Labels: tt.labels})
+ err = v.ValidateLabels(v.getValidationContextForTime(testTime, tt.userID), mustParseLabels(tt.labels), logproto.Stream{Labels: tt.labels}, retentionHours)
assert.Equal(t, tt.expected, err)
})
}
diff --git a/pkg/ingester/checkpoint_test.go b/pkg/ingester/checkpoint_test.go
index 7f30898bd7b7c..317bcb7ce4f50 100644
--- a/pkg/ingester/checkpoint_test.go
+++ b/pkg/ingester/checkpoint_test.go
@@ -16,6 +16,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/grafana/loki/v3/pkg/chunkenc"
+ "github.com/grafana/loki/v3/pkg/compactor/retention"
"github.com/grafana/loki/v3/pkg/compression"
"github.com/grafana/loki/v3/pkg/distributor/writefailures"
"github.com/grafana/loki/v3/pkg/ingester/client"
@@ -460,9 +461,10 @@ func Test_SeriesIterator(t *testing.T) {
require.NoError(t, err)
limiter := NewLimiter(limits, NilMetrics, newIngesterRingLimiterStrategy(&ringCountMock{count: 1}, 1), &TenantBasedStrategy{limits: limits})
+ tenantsRetention := retention.NewTenantsRetention(limits)
for i := 0; i < 3; i++ {
- inst, err := newInstance(defaultConfig(), defaultPeriodConfigs, fmt.Sprintf("%d", i), limiter, runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, nil, nil, nil, nil, NewStreamRateCalculator(), nil, nil)
+ inst, err := newInstance(defaultConfig(), defaultPeriodConfigs, fmt.Sprintf("%d", i), limiter, runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, nil, nil, nil, nil, NewStreamRateCalculator(), nil, nil, tenantsRetention)
require.Nil(t, err)
require.NoError(t, inst.Push(context.Background(), &logproto.PushRequest{Streams: []logproto.Stream{stream1}}))
require.NoError(t, inst.Push(context.Background(), &logproto.PushRequest{Streams: []logproto.Stream{stream2}}))
@@ -508,9 +510,10 @@ func Benchmark_SeriesIterator(b *testing.B) {
require.NoError(b, err)
limiter := NewLimiter(limits, NilMetrics, newIngesterRingLimiterStrategy(&ringCountMock{count: 1}, 1), &TenantBasedStrategy{limits: limits})
+ tenantsRetention := retention.NewTenantsRetention(limits)
for i := range instances {
- inst, _ := newInstance(defaultConfig(), defaultPeriodConfigs, fmt.Sprintf("instance %d", i), limiter, runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, nil, nil, nil, nil, NewStreamRateCalculator(), nil, nil)
+ inst, _ := newInstance(defaultConfig(), defaultPeriodConfigs, fmt.Sprintf("instance %d", i), limiter, runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, nil, nil, nil, nil, NewStreamRateCalculator(), nil, nil, tenantsRetention)
require.NoError(b,
inst.Push(context.Background(), &logproto.PushRequest{
diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go
index d0da49d359ddf..f36d048801589 100644
--- a/pkg/ingester/ingester.go
+++ b/pkg/ingester/ingester.go
@@ -32,6 +32,7 @@ import (
"google.golang.org/grpc/health/grpc_health_v1"
"github.com/grafana/loki/v3/pkg/analytics"
+ "github.com/grafana/loki/v3/pkg/compactor/retention"
"github.com/grafana/loki/v3/pkg/compression"
"github.com/grafana/loki/v3/pkg/distributor/writefailures"
"github.com/grafana/loki/v3/pkg/ingester/client"
@@ -265,6 +266,8 @@ type Ingester struct {
limiter *Limiter
+ tenantsRetention *retention.TenantsRetention
+
// Denotes whether the ingester should flush on shutdown.
// Currently only used by the WAL to signal when the disk is full.
flushOnShutdownSwitch *OnceSwitch
@@ -426,6 +429,8 @@ func New(cfg Config, clientConfig client.Config, store Store, limits Limits, con
// Now that the lifecycler has been created, we can create the limiter
// which depends on it.
i.limiter = NewLimiter(limits, metrics, streamCountLimiter, streamRateLimiter)
+
+ i.tenantsRetention = retention.NewTenantsRetention(i.limiter.limits)
i.recalculateOwnedStreams = newRecalculateOwnedStreamsSvc(i.getInstances, ownedStreamsStrategy, cfg.OwnedStreamsCheckInterval, util_log.Logger)
return i, nil
@@ -1038,7 +1043,7 @@ func (i *Ingester) GetOrCreateInstance(instanceID string) (*instance, error) { /
inst, ok = i.instances[instanceID]
if !ok {
var err error
- inst, err = newInstance(&i.cfg, i.periodicConfigs, instanceID, i.limiter, i.tenantConfigs, i.wal, i.metrics, i.flushOnShutdownSwitch, i.chunkFilter, i.pipelineWrapper, i.extractorWrapper, i.streamRateCalculator, i.writeLogManager, i.customStreamsTracker)
+ inst, err = newInstance(&i.cfg, i.periodicConfigs, instanceID, i.limiter, i.tenantConfigs, i.wal, i.metrics, i.flushOnShutdownSwitch, i.chunkFilter, i.pipelineWrapper, i.extractorWrapper, i.streamRateCalculator, i.writeLogManager, i.customStreamsTracker, i.tenantsRetention)
if err != nil {
return nil, err
}
diff --git a/pkg/ingester/instance.go b/pkg/ingester/instance.go
index 80905bff23505..72a02df159103 100644
--- a/pkg/ingester/instance.go
+++ b/pkg/ingester/instance.go
@@ -25,6 +25,7 @@ import (
"github.com/grafana/loki/v3/pkg/analytics"
"github.com/grafana/loki/v3/pkg/chunkenc"
+ "github.com/grafana/loki/v3/pkg/compactor/retention"
"github.com/grafana/loki/v3/pkg/distributor/writefailures"
"github.com/grafana/loki/v3/pkg/ingester/index"
"github.com/grafana/loki/v3/pkg/ingester/wal"
@@ -126,6 +127,8 @@ type instance struct {
schemaconfig *config.SchemaConfig
customStreamsTracker push.UsageTracker
+
+ tenantsRetention *retention.TenantsRetention
}
func newInstance(
@@ -143,6 +146,7 @@ func newInstance(
streamRateCalculator *StreamRateCalculator,
writeFailures *writefailures.Manager,
customStreamsTracker push.UsageTracker,
+ tenantsRetention *retention.TenantsRetention,
) (*instance, error) {
invertedIndex, err := index.NewMultiInvertedIndex(periodConfigs, uint32(cfg.IndexShards))
if err != nil {
@@ -181,6 +185,8 @@ func newInstance(
schemaconfig: &c,
customStreamsTracker: customStreamsTracker,
+
+ tenantsRetention: tenantsRetention,
}
i.mapper = NewFPMapper(i.getLabelsFromFingerprint)
@@ -290,12 +296,14 @@ func (i *instance) createStream(ctx context.Context, pushReqStream logproto.Stre
return nil, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error())
}
+ retentionHours := util.RetentionHours(i.tenantsRetention.RetentionPeriodFor(i.instanceID, labels))
+
if record != nil {
err = i.streamCountLimiter.AssertNewStreamAllowed(i.instanceID)
}
if err != nil {
- return i.onStreamCreationError(ctx, pushReqStream, err, labels)
+ return i.onStreamCreationError(ctx, pushReqStream, err, labels, retentionHours)
}
fp := i.getHashForLabels(labels)
@@ -307,7 +315,7 @@ func (i *instance) createStream(ctx context.Context, pushReqStream logproto.Stre
return nil, fmt.Errorf("failed to create stream: %w", err)
}
- s := newStream(chunkfmt, headfmt, i.cfg, i.limiter.rateLimitStrategy, i.instanceID, fp, sortedLabels, i.limiter.UnorderedWrites(i.instanceID), i.streamRateCalculator, i.metrics, i.writeFailures, i.configs)
+ s := newStream(chunkfmt, headfmt, i.cfg, i.limiter.rateLimitStrategy, i.instanceID, fp, sortedLabels, i.limiter.UnorderedWrites(i.instanceID), i.streamRateCalculator, i.metrics, i.writeFailures, i.configs, retentionHours)
// record will be nil when replaying the wal (we don't want to rewrite wal entries as we replay them).
if record != nil {
@@ -325,7 +333,7 @@ func (i *instance) createStream(ctx context.Context, pushReqStream logproto.Stre
return s, nil
}
-func (i *instance) onStreamCreationError(ctx context.Context, pushReqStream logproto.Stream, err error, labels labels.Labels) (*stream, error) {
+func (i *instance) onStreamCreationError(ctx context.Context, pushReqStream logproto.Stream, err error, labels labels.Labels, retentionHours string) (*stream, error) {
if i.configs.LogStreamCreation(i.instanceID) || i.cfg.KafkaIngestion.Enabled {
l := level.Debug(util_log.Logger)
@@ -341,9 +349,9 @@ func (i *instance) onStreamCreationError(ctx context.Context, pushReqStream logp
)
}
- validation.DiscardedSamples.WithLabelValues(validation.StreamLimit, i.instanceID).Add(float64(len(pushReqStream.Entries)))
+ validation.DiscardedSamples.WithLabelValues(validation.StreamLimit, i.instanceID, retentionHours).Add(float64(len(pushReqStream.Entries)))
bytes := util.EntriesTotalSize(pushReqStream.Entries)
- validation.DiscardedBytes.WithLabelValues(validation.StreamLimit, i.instanceID).Add(float64(bytes))
+ validation.DiscardedBytes.WithLabelValues(validation.StreamLimit, i.instanceID, retentionHours).Add(float64(bytes))
if i.customStreamsTracker != nil {
i.customStreamsTracker.DiscardedBytesAdd(ctx, i.instanceID, validation.StreamLimit, labels, float64(bytes))
}
@@ -375,7 +383,8 @@ func (i *instance) createStreamByFP(ls labels.Labels, fp model.Fingerprint) (*st
return nil, fmt.Errorf("failed to create stream for fingerprint: %w", err)
}
- s := newStream(chunkfmt, headfmt, i.cfg, i.limiter.rateLimitStrategy, i.instanceID, fp, sortedLabels, i.limiter.UnorderedWrites(i.instanceID), i.streamRateCalculator, i.metrics, i.writeFailures, i.configs)
+ retentionHours := util.RetentionHours(i.tenantsRetention.RetentionPeriodFor(i.instanceID, ls))
+ s := newStream(chunkfmt, headfmt, i.cfg, i.limiter.rateLimitStrategy, i.instanceID, fp, sortedLabels, i.limiter.UnorderedWrites(i.instanceID), i.streamRateCalculator, i.metrics, i.writeFailures, i.configs, retentionHours)
i.onStreamCreated(s)
diff --git a/pkg/ingester/instance_test.go b/pkg/ingester/instance_test.go
index 369d0ab2d7469..65c813a4a8d46 100644
--- a/pkg/ingester/instance_test.go
+++ b/pkg/ingester/instance_test.go
@@ -10,7 +10,9 @@ import (
"testing"
"time"
+ "github.com/grafana/loki/v3/pkg/compactor/retention"
"github.com/grafana/loki/v3/pkg/storage/types"
+ "github.com/grafana/loki/v3/pkg/util"
"github.com/grafana/loki/v3/pkg/util/httpreq"
"github.com/grafana/dskit/tenant"
@@ -79,8 +81,9 @@ func TestLabelsCollisions(t *testing.T) {
limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil)
require.NoError(t, err)
limiter := NewLimiter(limits, NilMetrics, newIngesterRingLimiterStrategy(&ringCountMock{count: 1}, 1), &TenantBasedStrategy{limits: limits})
+ tenantsRetention := retention.NewTenantsRetention(limits)
- i, err := newInstance(defaultConfig(), defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil, nil)
+ i, err := newInstance(defaultConfig(), defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil, nil, tenantsRetention)
require.Nil(t, err)
// avoid entries from the future.
@@ -107,8 +110,8 @@ func TestConcurrentPushes(t *testing.T) {
limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil)
require.NoError(t, err)
limiter := NewLimiter(limits, NilMetrics, newIngesterRingLimiterStrategy(&ringCountMock{count: 1}, 1), &TenantBasedStrategy{limits: limits})
-
- inst, err := newInstance(defaultConfig(), defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil, nil)
+ tenantsRetention := retention.NewTenantsRetention(limits)
+ inst, err := newInstance(defaultConfig(), defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil, nil, tenantsRetention)
require.Nil(t, err)
const (
@@ -159,8 +162,9 @@ func TestGetStreamRates(t *testing.T) {
limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil)
require.NoError(t, err)
limiter := NewLimiter(limits, NilMetrics, newIngesterRingLimiterStrategy(&ringCountMock{count: 1}, 1), &TenantBasedStrategy{limits: limits})
+ tenantsRetention := retention.NewTenantsRetention(limits)
- inst, err := newInstance(defaultConfig(), defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil, nil)
+ inst, err := newInstance(defaultConfig(), defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil, nil, tenantsRetention)
require.NoError(t, err)
const (
@@ -254,7 +258,8 @@ func TestSyncPeriod(t *testing.T) {
minUtil = 0.20
)
- inst, err := newInstance(defaultConfig(), defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil, nil)
+ tenantsRetention := retention.NewTenantsRetention(limits)
+ inst, err := newInstance(defaultConfig(), defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil, nil, tenantsRetention)
require.Nil(t, err)
lbls := makeRandomLabels()
@@ -299,7 +304,8 @@ func setupTestStreams(t *testing.T) (*instance, time.Time, int) {
cfg.SyncMinUtilization = 0.20
cfg.IndexShards = indexShards
- instance, err := newInstance(cfg, defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil, nil)
+ tenantsRetention := retention.NewTenantsRetention(limits)
+ instance, err := newInstance(cfg, defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil, nil, tenantsRetention)
require.Nil(t, err)
currentTime := time.Now()
@@ -310,12 +316,13 @@ func setupTestStreams(t *testing.T) (*instance, time.Time, int) {
{Labels: "{app=\"test\",job=\"varlogs2\"}", Entries: entries(5, currentTime.Add(12*time.Nanosecond))},
}
+ retentionHours := util.RetentionHours(tenantsRetention.RetentionPeriodFor("test", nil))
for _, testStream := range testStreams {
stream, err := instance.getOrCreateStream(context.Background(), testStream, recordPool.GetRecord())
require.NoError(t, err)
chunkfmt, headfmt, err := instance.chunkFormatAt(minTs(&testStream))
require.NoError(t, err)
- chunk := newStream(chunkfmt, headfmt, cfg, limiter.rateLimitStrategy, "fake", 0, nil, true, NewStreamRateCalculator(), NilMetrics, nil, nil).NewChunk()
+ chunk := newStream(chunkfmt, headfmt, cfg, limiter.rateLimitStrategy, "fake", 0, nil, true, NewStreamRateCalculator(), NilMetrics, nil, nil, retentionHours).NewChunk()
for _, entry := range testStream.Entries {
dup, err := chunk.Append(&entry)
require.False(t, dup)
@@ -508,8 +515,9 @@ func Benchmark_PushInstance(b *testing.B) {
limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil)
require.NoError(b, err)
limiter := NewLimiter(limits, NilMetrics, newIngesterRingLimiterStrategy(&ringCountMock{count: 1}, 1), &TenantBasedStrategy{limits: limits})
+ tenantsRetention := retention.NewTenantsRetention(limits)
- i, _ := newInstance(&Config{IndexShards: 1}, defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil, nil)
+ i, _ := newInstance(&Config{IndexShards: 1}, defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil, nil, tenantsRetention)
ctx := context.Background()
for n := 0; n < b.N; n++ {
@@ -550,10 +558,10 @@ func Benchmark_instance_addNewTailer(b *testing.B) {
limits, err := validation.NewOverrides(l, nil)
require.NoError(b, err)
limiter := NewLimiter(limits, NilMetrics, newIngesterRingLimiterStrategy(&ringCountMock{count: 1}, 1), &TenantBasedStrategy{limits: limits})
-
+ tenantsRetention := retention.NewTenantsRetention(limits)
ctx := context.Background()
- inst, _ := newInstance(&Config{}, defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil, nil)
+ inst, _ := newInstance(&Config{}, defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil, nil, tenantsRetention)
expr, err := syntax.ParseLogSelector(`{namespace="foo",pod="bar",instance=~"10.*"}`, true)
require.NoError(b, err)
t, err := newTailer("foo", expr, nil, 10)
@@ -572,10 +580,11 @@ func Benchmark_instance_addNewTailer(b *testing.B) {
chunkfmt, headfmt, err := inst.chunkFormatAt(model.Now())
require.NoError(b, err)
+ retentionHours := util.RetentionHours(tenantsRetention.RetentionPeriodFor("test", lbs))
b.Run("addTailersToNewStream", func(b *testing.B) {
for n := 0; n < b.N; n++ {
- inst.addTailersToNewStream(newStream(chunkfmt, headfmt, nil, limiter.rateLimitStrategy, "fake", 0, lbs, true, NewStreamRateCalculator(), NilMetrics, nil, nil))
+ inst.addTailersToNewStream(newStream(chunkfmt, headfmt, nil, limiter.rateLimitStrategy, "fake", 0, lbs, true, NewStreamRateCalculator(), NilMetrics, nil, nil, retentionHours))
}
})
}
@@ -1094,6 +1103,8 @@ func TestStreamShardingUsage(t *testing.T) {
defaultShardStreamsCfg := limiter.limits.ShardStreams("fake")
tenantShardStreamsCfg := limiter.limits.ShardStreams(customTenant1)
+ tenantsRetention := retention.NewTenantsRetention(limits)
+
t.Run("test default configuration", func(t *testing.T) {
require.Equal(t, true, defaultShardStreamsCfg.Enabled)
require.Equal(t, "1536KB", defaultShardStreamsCfg.DesiredRate.String())
@@ -1109,7 +1120,7 @@ func TestStreamShardingUsage(t *testing.T) {
t.Run("invalid push returns error", func(t *testing.T) {
tracker := &mockUsageTracker{}
- i, _ := newInstance(&Config{IndexShards: 1, OwnedStreamsCheckInterval: 1 * time.Second}, defaultPeriodConfigs, customTenant1, limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil, tracker)
+ i, _ := newInstance(&Config{IndexShards: 1, OwnedStreamsCheckInterval: 1 * time.Second}, defaultPeriodConfigs, customTenant1, limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil, tracker, tenantsRetention)
ctx := context.Background()
err = i.Push(ctx, &logproto.PushRequest{
@@ -1129,7 +1140,8 @@ func TestStreamShardingUsage(t *testing.T) {
})
t.Run("valid push returns no error", func(t *testing.T) {
- i, _ := newInstance(&Config{IndexShards: 1, OwnedStreamsCheckInterval: 1 * time.Second}, defaultPeriodConfigs, customTenant2, limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil, nil)
+ tenantsRetention := retention.NewTenantsRetention(limits)
+ i, _ := newInstance(&Config{IndexShards: 1, OwnedStreamsCheckInterval: 1 * time.Second}, defaultPeriodConfigs, customTenant2, limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil, nil, tenantsRetention)
ctx := context.Background()
err = i.Push(ctx, &logproto.PushRequest{
@@ -1450,6 +1462,7 @@ func defaultInstance(t *testing.T) *instance {
defaultLimits := defaultLimitsTestConfig()
overrides, err := validation.NewOverrides(defaultLimits, nil)
require.NoError(t, err)
+ tenantsRetention := retention.NewTenantsRetention(overrides)
instance, err := newInstance(
&ingesterConfig,
defaultPeriodConfigs,
@@ -1465,6 +1478,7 @@ func defaultInstance(t *testing.T) *instance {
NewStreamRateCalculator(),
nil,
nil,
+ tenantsRetention,
)
require.Nil(t, err)
insertData(t, instance)
diff --git a/pkg/ingester/limiter.go b/pkg/ingester/limiter.go
index c4e64149f1658..f935d5b926c3f 100644
--- a/pkg/ingester/limiter.go
+++ b/pkg/ingester/limiter.go
@@ -9,6 +9,7 @@ import (
"github.com/grafana/dskit/ring"
"golang.org/x/time/rate"
+ "github.com/grafana/loki/v3/pkg/compactor/retention"
"github.com/grafana/loki/v3/pkg/distributor/shardstreams"
"github.com/grafana/loki/v3/pkg/validation"
)
@@ -33,6 +34,8 @@ type Limits interface {
PerStreamRateLimit(userID string) validation.RateLimit
ShardStreams(userID string) shardstreams.Config
IngestionPartitionsTenantShardSize(userID string) int
+
+ retention.Limits
}
// Limiter implements primitives to get the maximum number of streams
diff --git a/pkg/ingester/recalculate_owned_streams_test.go b/pkg/ingester/recalculate_owned_streams_test.go
index f3bea57f69bae..551b1e47aa797 100644
--- a/pkg/ingester/recalculate_owned_streams_test.go
+++ b/pkg/ingester/recalculate_owned_streams_test.go
@@ -13,6 +13,7 @@ import (
"github.com/prometheus/prometheus/model/labels"
"github.com/stretchr/testify/require"
+ "github.com/grafana/loki/v3/pkg/compactor/retention"
"github.com/grafana/loki/v3/pkg/runtime"
lokiring "github.com/grafana/loki/v3/pkg/util/ring"
"github.com/grafana/loki/v3/pkg/validation"
@@ -71,6 +72,7 @@ func Test_recalculateOwnedStreams_recalculateWithIngesterStrategy(t *testing.T)
}, nil)
require.NoError(t, err)
limiter := NewLimiter(limits, NilMetrics, newIngesterRingLimiterStrategy(mockRing, 1), &TenantBasedStrategy{limits: limits})
+ tenantsRetention := retention.NewTenantsRetention(limits)
tenant, err := newInstance(
defaultConfig(),
@@ -87,6 +89,7 @@ func Test_recalculateOwnedStreams_recalculateWithIngesterStrategy(t *testing.T)
NewStreamRateCalculator(),
nil,
nil,
+ tenantsRetention,
)
require.NoError(t, err)
require.Equal(t, 100, tenant.ownedStreamsSvc.getFixedLimit(), "MaxGlobalStreamsPerUser is 100 at this moment")
diff --git a/pkg/ingester/stream.go b/pkg/ingester/stream.go
index b36cbb290db7e..3d2a7bf1d0319 100644
--- a/pkg/ingester/stream.go
+++ b/pkg/ingester/stream.go
@@ -82,6 +82,8 @@ type stream struct {
chunkHeadBlockFormat chunkenc.HeadBlockFmt
configs *runtime.TenantConfigs
+
+ retentionHours string
}
type chunkDesc struct {
@@ -112,6 +114,7 @@ func newStream(
metrics *ingesterMetrics,
writeFailures *writefailures.Manager,
configs *runtime.TenantConfigs,
+ retentionHours string,
) *stream {
hashNoShard, _ := labels.HashWithoutLabels(make([]byte, 0, 1024), ShardLbName)
return &stream{
@@ -132,7 +135,8 @@ func newStream(
chunkFormat: chunkFormat,
chunkHeadBlockFormat: headBlockFmt,
- configs: configs,
+ configs: configs,
+ retentionHours: retentionHours,
}
}
@@ -477,15 +481,15 @@ func (s *stream) reportMetrics(ctx context.Context, outOfOrderSamples, outOfOrde
if s.unorderedWrites {
name = validation.TooFarBehind
}
- validation.DiscardedSamples.WithLabelValues(name, s.tenant).Add(float64(outOfOrderSamples))
- validation.DiscardedBytes.WithLabelValues(name, s.tenant).Add(float64(outOfOrderBytes))
+ validation.DiscardedSamples.WithLabelValues(name, s.tenant, s.retentionHours).Add(float64(outOfOrderSamples))
+ validation.DiscardedBytes.WithLabelValues(name, s.tenant, s.retentionHours).Add(float64(outOfOrderBytes))
if usageTracker != nil {
usageTracker.DiscardedBytesAdd(ctx, s.tenant, name, s.labels, float64(outOfOrderBytes))
}
}
if rateLimitedSamples > 0 {
- validation.DiscardedSamples.WithLabelValues(validation.StreamRateLimit, s.tenant).Add(float64(rateLimitedSamples))
- validation.DiscardedBytes.WithLabelValues(validation.StreamRateLimit, s.tenant).Add(float64(rateLimitedBytes))
+ validation.DiscardedSamples.WithLabelValues(validation.StreamRateLimit, s.tenant, s.retentionHours).Add(float64(rateLimitedSamples))
+ validation.DiscardedBytes.WithLabelValues(validation.StreamRateLimit, s.tenant, s.retentionHours).Add(float64(rateLimitedBytes))
if usageTracker != nil {
usageTracker.DiscardedBytesAdd(ctx, s.tenant, validation.StreamRateLimit, s.labels, float64(rateLimitedBytes))
}
diff --git a/pkg/ingester/stream_test.go b/pkg/ingester/stream_test.go
index 8bf7bfaf4ce98..d255f9a3e5f04 100644
--- a/pkg/ingester/stream_test.go
+++ b/pkg/ingester/stream_test.go
@@ -16,6 +16,7 @@ import (
"github.com/grafana/loki/v3/pkg/compression"
"github.com/grafana/loki/v3/pkg/runtime"
+ "github.com/grafana/loki/v3/pkg/util"
"github.com/grafana/dskit/httpgrpc"
"github.com/prometheus/common/model"
@@ -57,6 +58,7 @@ func TestMaxReturnedStreamsErrors(t *testing.T) {
limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil)
require.NoError(t, err)
limiter := NewLimiter(limits, NilMetrics, newIngesterRingLimiterStrategy(&ringCountMock{count: 1}, 1), &TenantBasedStrategy{limits: limits})
+ retentionHours := util.RetentionHours(limiter.limits.RetentionPeriod("fake"))
for _, tc := range tt {
t.Run(tc.name, func(t *testing.T) {
@@ -79,6 +81,7 @@ func TestMaxReturnedStreamsErrors(t *testing.T) {
NilMetrics,
nil,
nil,
+ retentionHours,
)
_, err := s.Push(context.Background(), []logproto.Entry{
@@ -115,7 +118,7 @@ func TestPushDeduplication(t *testing.T) {
limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil)
require.NoError(t, err)
limiter := NewLimiter(limits, NilMetrics, newIngesterRingLimiterStrategy(&ringCountMock{count: 1}, 1), &TenantBasedStrategy{limits: limits})
-
+ retentionHours := util.RetentionHours(limiter.limits.RetentionPeriod("fake"))
chunkfmt, headfmt := defaultChunkFormat(t)
s := newStream(
@@ -133,6 +136,7 @@ func TestPushDeduplication(t *testing.T) {
NilMetrics,
nil,
nil,
+ retentionHours,
)
written, err := s.Push(context.Background(), []logproto.Entry{
@@ -151,7 +155,7 @@ func TestPushDeduplicationExtraMetrics(t *testing.T) {
limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil)
require.NoError(t, err)
limiter := NewLimiter(limits, NilMetrics, newIngesterRingLimiterStrategy(&ringCountMock{count: 1}, 1), &TenantBasedStrategy{limits: limits})
-
+ retentionHours := util.RetentionHours(limiter.limits.RetentionPeriod("fake"))
chunkfmt, headfmt := defaultChunkFormat(t)
buf := bytes.NewBuffer(nil)
@@ -193,6 +197,7 @@ func TestPushDeduplicationExtraMetrics(t *testing.T) {
metrics,
manager,
runtimeCfg,
+ retentionHours,
)
_, err = s.Push(context.Background(), []logproto.Entry{
@@ -221,7 +226,7 @@ func TestPushRejectOldCounter(t *testing.T) {
limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil)
require.NoError(t, err)
limiter := NewLimiter(limits, NilMetrics, newIngesterRingLimiterStrategy(&ringCountMock{count: 1}, 1), &TenantBasedStrategy{limits: limits})
-
+ retentionHours := util.RetentionHours(limiter.limits.RetentionPeriod("fake"))
chunkfmt, headfmt := defaultChunkFormat(t)
s := newStream(
@@ -239,6 +244,7 @@ func TestPushRejectOldCounter(t *testing.T) {
NilMetrics,
nil,
nil,
+ retentionHours,
)
// counter should be 2 now since the first line will be deduped
@@ -329,7 +335,7 @@ func TestEntryErrorCorrectlyReported(t *testing.T) {
limits, err := validation.NewOverrides(l, nil)
require.NoError(t, err)
limiter := NewLimiter(limits, NilMetrics, newIngesterRingLimiterStrategy(&ringCountMock{count: 1}, 1), &TenantBasedStrategy{limits: limits})
-
+ retentionHours := util.RetentionHours(limiter.limits.RetentionPeriod("fake"))
chunkfmt, headfmt := defaultChunkFormat(t)
s := newStream(
@@ -347,6 +353,7 @@ func TestEntryErrorCorrectlyReported(t *testing.T) {
NilMetrics,
nil,
nil,
+ retentionHours,
)
s.highestTs = time.Now()
@@ -368,7 +375,7 @@ func TestUnorderedPush(t *testing.T) {
limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil)
require.NoError(t, err)
limiter := NewLimiter(limits, NilMetrics, newIngesterRingLimiterStrategy(&ringCountMock{count: 1}, 1), &TenantBasedStrategy{limits: limits})
-
+ retentionHours := util.RetentionHours(limiter.limits.RetentionPeriod("fake"))
chunkfmt, headfmt := defaultChunkFormat(t)
s := newStream(
@@ -386,6 +393,7 @@ func TestUnorderedPush(t *testing.T) {
NilMetrics,
nil,
nil,
+ retentionHours,
)
for _, x := range []struct {
@@ -471,7 +479,7 @@ func TestPushRateLimit(t *testing.T) {
limits, err := validation.NewOverrides(l, nil)
require.NoError(t, err)
limiter := NewLimiter(limits, NilMetrics, newIngesterRingLimiterStrategy(&ringCountMock{count: 1}, 1), &TenantBasedStrategy{limits: limits})
-
+ retentionHours := util.RetentionHours(limiter.limits.RetentionPeriod("fake"))
chunkfmt, headfmt := defaultChunkFormat(t)
s := newStream(
@@ -489,6 +497,7 @@ func TestPushRateLimit(t *testing.T) {
NilMetrics,
nil,
nil,
+ retentionHours,
)
entries := []logproto.Entry{
@@ -511,7 +520,7 @@ func TestPushRateLimitAllOrNothing(t *testing.T) {
limits, err := validation.NewOverrides(l, nil)
require.NoError(t, err)
limiter := NewLimiter(limits, NilMetrics, newIngesterRingLimiterStrategy(&ringCountMock{count: 1}, 1), &TenantBasedStrategy{limits: limits})
-
+ retentionHours := util.RetentionHours(limiter.limits.RetentionPeriod("fake"))
cfg := defaultConfig()
chunkfmt, headfmt := defaultChunkFormat(t)
@@ -530,6 +539,7 @@ func TestPushRateLimitAllOrNothing(t *testing.T) {
NilMetrics,
nil,
nil,
+ retentionHours,
)
entries := []logproto.Entry{
@@ -550,7 +560,7 @@ func TestReplayAppendIgnoresValidityWindow(t *testing.T) {
limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil)
require.NoError(t, err)
limiter := NewLimiter(limits, NilMetrics, newIngesterRingLimiterStrategy(&ringCountMock{count: 1}, 1), &TenantBasedStrategy{limits: limits})
-
+ retentionHours := util.RetentionHours(limiter.limits.RetentionPeriod("fake"))
cfg := defaultConfig()
cfg.MaxChunkAge = time.Minute
chunkfmt, headfmt := defaultChunkFormat(t)
@@ -570,6 +580,7 @@ func TestReplayAppendIgnoresValidityWindow(t *testing.T) {
NilMetrics,
nil,
nil,
+ retentionHours,
)
base := time.Now()
@@ -619,8 +630,8 @@ func Benchmark_PushStream(b *testing.B) {
require.NoError(b, err)
limiter := NewLimiter(limits, NilMetrics, newIngesterRingLimiterStrategy(&ringCountMock{count: 1}, 1), &TenantBasedStrategy{limits: limits})
chunkfmt, headfmt := defaultChunkFormat(b)
-
- s := newStream(chunkfmt, headfmt, &Config{MaxChunkAge: 24 * time.Hour}, limiter.rateLimitStrategy, "fake", model.Fingerprint(0), ls, true, NewStreamRateCalculator(), NilMetrics, nil, nil)
+ retentionHours := util.RetentionHours(limiter.limits.RetentionPeriod("fake"))
+ s := newStream(chunkfmt, headfmt, &Config{MaxChunkAge: 24 * time.Hour}, limiter.rateLimitStrategy, "fake", model.Fingerprint(0), ls, true, NewStreamRateCalculator(), NilMetrics, nil, nil, retentionHours)
expr, err := syntax.ParseLogSelector(`{namespace="loki-dev"}`, true)
require.NoError(b, err)
t, err := newTailer("foo", expr, &fakeTailServer{}, 10)
diff --git a/pkg/ingester/streams_map_test.go b/pkg/ingester/streams_map_test.go
index 273c489d34d4a..2051dfce3ae9c 100644
--- a/pkg/ingester/streams_map_test.go
+++ b/pkg/ingester/streams_map_test.go
@@ -7,6 +7,7 @@ import (
"github.com/prometheus/prometheus/model/labels"
"github.com/stretchr/testify/require"
+ "github.com/grafana/loki/v3/pkg/util"
"github.com/grafana/loki/v3/pkg/validation"
)
@@ -14,6 +15,7 @@ func TestStreamsMap(t *testing.T) {
limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil)
require.NoError(t, err)
limiter := NewLimiter(limits, NilMetrics, newIngesterRingLimiterStrategy(&ringCountMock{count: 1}, 1), &TenantBasedStrategy{limits: limits})
+ retentionHours := util.RetentionHours(limiter.limits.RetentionPeriod("fake"))
chunkfmt, headfmt := defaultChunkFormat(t)
ss := []*stream{
@@ -32,6 +34,7 @@ func TestStreamsMap(t *testing.T) {
NilMetrics,
nil,
nil,
+ retentionHours,
),
newStream(
chunkfmt,
@@ -48,6 +51,7 @@ func TestStreamsMap(t *testing.T) {
NilMetrics,
nil,
nil,
+ retentionHours,
),
}
var s *stream
diff --git a/pkg/util/time.go b/pkg/util/time.go
index 5b620a73d0a86..450579f74739c 100644
--- a/pkg/util/time.go
+++ b/pkg/util/time.go
@@ -154,3 +154,7 @@ func GetFactorOfTime(from, through int64, minTime, maxTime int64) (factor float6
return factor
}
+
+func RetentionHours(retention time.Duration) string {
+ return strconv.FormatInt(int64(math.Floor(retention.Hours())), 10)
+}
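
For illustration only (not part of the recorded patch): a self-contained sketch of how the new `util.RetentionHours` helper behaves — it renders a retention period as a floored whole number of hours, suitable for use as a metric label value. The function is re-declared locally so the example runs on its own; the sample durations are arbitrary.

```go
package main

import (
	"fmt"
	"math"
	"strconv"
	"time"
)

// retentionHours re-declares the new util.RetentionHours helper locally:
// a retention period rendered as floored whole hours.
func retentionHours(retention time.Duration) string {
	return strconv.FormatInt(int64(math.Floor(retention.Hours())), 10)
}

func main() {
	fmt.Println(retentionHours(24 * time.Hour))     // "24"
	fmt.Println(retentionHours(7 * 24 * time.Hour)) // "168", matching the 7d test case
	fmt.Println(retentionHours(90 * time.Minute))   // "1" — partial hours are floored
	fmt.Println(retentionHours(0))                  // "0", e.g. retention not configured
}
```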
diff --git a/pkg/util/time_test.go b/pkg/util/time_test.go
index f77530a5da866..a2ab02dbecfc4 100644
--- a/pkg/util/time_test.go
+++ b/pkg/util/time_test.go
@@ -5,6 +5,7 @@ import (
"testing"
"time"
+ "github.com/prometheus/common/model"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -304,3 +305,10 @@ func TestGetFactorOfTime(t *testing.T) {
})
}
}
+
+func TestRetentionHours(t *testing.T) {
+ require.Equal(t, "24", RetentionHours(24*time.Hour))
+ sevenDays, err := model.ParseDuration("7d")
+ require.NoError(t, err)
+ require.Equal(t, "168", RetentionHours(time.Duration(sevenDays)))
+}
diff --git a/pkg/validation/validate.go b/pkg/validation/validate.go
index ff681ac8d0936..31e1729e264cc 100644
--- a/pkg/validation/validate.go
+++ b/pkg/validation/validate.go
@@ -115,7 +115,7 @@ var DiscardedBytes = promauto.NewCounterVec(
Name: "discarded_bytes_total",
Help: "The total number of bytes that were discarded.",
},
- []string{ReasonLabel, "tenant"},
+ []string{ReasonLabel, "tenant", "retention_hours"},
)
// DiscardedSamples is a metric of the number of discarded samples, by reason.
@@ -125,7 +125,7 @@ var DiscardedSamples = promauto.NewCounterVec(
Name: "discarded_samples_total",
Help: "The total number of samples that were discarded.",
},
- []string{ReasonLabel, "tenant"},
+ []string{ReasonLabel, "tenant", "retention_hours"},
)
var LineLengthHist = promauto.NewHistogram(prometheus.HistogramOpts{
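
For illustration only (not part of the recorded patch): a stand-alone sketch of the widened counters — `discarded_samples_total` and `discarded_bytes_total` are now keyed by `retention_hours` in addition to reason and tenant, so every call site must supply a third label value. The reason strings and tenant names below are invented for the example, and a private registry replaces promauto's default registerer so the snippet is self-contained.

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

// A sketch of the widened metric: reason, tenant, and the new retention_hours label.
var discardedSamples = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Namespace: "loki",
		Name:      "discarded_samples_total",
		Help:      "The total number of samples that were discarded.",
	},
	[]string{"reason", "tenant", "retention_hours"},
)

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(discardedSamples)

	// Call sites that used to pass (reason, tenant) now also pass the stream's
	// retention bucket; the reasons and tenants here are illustrative.
	discardedSamples.WithLabelValues("rate_limited", "tenant-a", "168").Inc()
	discardedSamples.WithLabelValues("line_too_long", "tenant-a", "744").Add(3)

	families, err := reg.Gather()
	if err != nil {
		panic(err)
	}
	for _, mf := range families {
		fmt.Println(mf.GetName(), "-", len(mf.GetMetric()), "label combinations")
	}
}
```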
|
refactor
|
Add retention hours to `discarded` metrics (#15875)
|
c48281c7b77d0a2d98bb5f5d360879500b8acd37
|
2025-01-17 20:33:58
|
renovate[bot]
|
fix(deps): update module github.com/aws/aws-sdk-go-v2/service/s3 to v1.73.1 (#15804)
| false
|
diff --git a/tools/lambda-promtail/go.mod b/tools/lambda-promtail/go.mod
index 21cd8a9948c1b..2bfdde52595f7 100644
--- a/tools/lambda-promtail/go.mod
+++ b/tools/lambda-promtail/go.mod
@@ -6,7 +6,7 @@ require (
github.com/aws/aws-lambda-go v1.47.0
github.com/aws/aws-sdk-go-v2 v1.33.0
github.com/aws/aws-sdk-go-v2/config v1.29.0
- github.com/aws/aws-sdk-go-v2/service/s3 v1.73.0
+ github.com/aws/aws-sdk-go-v2/service/s3 v1.73.1
github.com/go-kit/log v0.2.1
github.com/gogo/protobuf v1.3.2
github.com/golang/snappy v0.0.4
@@ -32,7 +32,7 @@ require (
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.28 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 // indirect
- github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.0 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.1 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.9 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.9 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.24.10 // indirect
diff --git a/tools/lambda-promtail/go.sum b/tools/lambda-promtail/go.sum
index 15fcb444e3ef6..72367bf3c71e1 100644
--- a/tools/lambda-promtail/go.sum
+++ b/tools/lambda-promtail/go.sum
@@ -68,14 +68,14 @@ github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.28 h1:7kpeALOUeThs2kEjlAxlADAVfxK
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.28/go.mod h1:pyaOYEdp1MJWgtXLy6q80r3DhsVdOIOZNB9hdTcJIvI=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 h1:iXtILhvDxB6kPvEXgsDhGaZCSC6LQET5ZHSdJozeI0Y=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1/go.mod h1:9nu0fVANtYiAePIBh2/pFUSwtJ402hLnp854CNoDOeE=
-github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.0 h1:pC19SLXdHsfXTvCwy3sHfiACXaSjRkKlOQYnaTk8loI=
-github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.0/go.mod h1:dIW8puxSbYLSPv/ju0d9A3CpwXdtqvJtYKDMVmPLOWE=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.1 h1:mJ9FRktB8v1Ihpqwfk0AWvYEd0FgQtLsshc2Qb2TVc8=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.1/go.mod h1:dIW8puxSbYLSPv/ju0d9A3CpwXdtqvJtYKDMVmPLOWE=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.9 h1:TQmKDyETFGiXVhZfQ/I0cCFziqqX58pi4tKJGYGFSz0=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.9/go.mod h1:HVLPK2iHQBUx7HfZeOQSEu3v2ubZaAY2YPbAm5/WUyY=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.9 h1:2aInXbh02XsbO0KobPGMNXyv2QP73VDKsWPNJARj/+4=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.9/go.mod h1:dgXS1i+HgWnYkPXqNoPIPKeUsUUYHaUbThC90aDnNiE=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.73.0 h1:sHF4brL/726nbTldh8GGDKFS5LsQ8FwOTKEyvKp9DB4=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.73.0/go.mod h1:rGHXqEgGFrz7j58tIGKKAfD1fJzYXeKkN/Jn3eIRZYE=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.73.1 h1:OzmyfYGiMCOIAq5pa0KWcaZoA9F8FqajOJevh+hhFdY=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.73.1/go.mod h1:K+0a0kWDHAUXBH8GvYGS3cQRwIuRjO9bMWUz6vpNCaU=
github.com/aws/aws-sdk-go-v2/service/sso v1.24.10 h1:DyZUj3xSw3FR3TXSwDhPhuZkkT14QHBiacdbUVcD0Dg=
github.com/aws/aws-sdk-go-v2/service/sso v1.24.10/go.mod h1:Ro744S4fKiCCuZECXgOi760TiYylUM8ZBf6OGiZzJtY=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.9 h1:I1TsPEs34vbpOnR81GIcAq4/3Ud+jRHVGwx6qLQUHLs=
|
fix
|
update module github.com/aws/aws-sdk-go-v2/service/s3 to v1.73.1 (#15804)
|
b44517a9f187bbe17a7b6fdc7fa66e38ce058547
|
2024-07-16 16:25:08
|
Salva Corts
|
test: Add logging for empty blooms (#13537)
| false
|
diff --git a/pkg/bloombuild/builder/spec.go b/pkg/bloombuild/builder/spec.go
index 3feca8f49a3b0..abb6cef1447f2 100644
--- a/pkg/bloombuild/builder/spec.go
+++ b/pkg/bloombuild/builder/spec.go
@@ -94,6 +94,11 @@ func NewSimpleBloomGenerator(
opts.Schema.NGramSkip(),
int(opts.UnencodedBlockOptions.MaxBloomSizeBytes),
metrics,
+ log.With(
+ logger,
+ "component", "bloom_tokenizer",
+ "org_id", userID,
+ ),
),
}
}
diff --git a/pkg/bloomcompactor/spec.go b/pkg/bloomcompactor/spec.go
index 7a1d000dde709..61cd8f1d06a44 100644
--- a/pkg/bloomcompactor/spec.go
+++ b/pkg/bloomcompactor/spec.go
@@ -94,6 +94,11 @@ func NewSimpleBloomGenerator(
opts.Schema.NGramSkip(),
int(opts.UnencodedBlockOptions.MaxBloomSizeBytes),
metrics.bloomMetrics,
+ log.With(
+ logger,
+ "component", "bloom_tokenizer",
+ "org_id", userID,
+ ),
),
}
}
diff --git a/pkg/storage/bloom/v1/bloom_tokenizer.go b/pkg/storage/bloom/v1/bloom_tokenizer.go
index af5060e2c0c42..274f4c37f25dc 100644
--- a/pkg/storage/bloom/v1/bloom_tokenizer.go
+++ b/pkg/storage/bloom/v1/bloom_tokenizer.go
@@ -4,16 +4,15 @@ import (
"math"
"unsafe"
+ "github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/grafana/loki/pkg/push"
-
"github.com/grafana/loki/v3/pkg/iter"
v2iter "github.com/grafana/loki/v3/pkg/iter/v2"
"github.com/grafana/loki/v3/pkg/logproto"
"github.com/grafana/loki/v3/pkg/storage/bloom/v1/filter"
"github.com/grafana/loki/v3/pkg/util/encoding"
- util_log "github.com/grafana/loki/v3/pkg/util/log"
)
/*
@@ -24,6 +23,7 @@ Bloom filters are utilized for faster lookups of log lines.
*/
type BloomTokenizer struct {
metrics *Metrics
+ logger log.Logger
maxBloomSize int // size in bytes
lineTokenizer *NGramTokenizer
@@ -39,11 +39,11 @@ const eightBits = 8
// 1) The token slices generated must not be mutated externally
// 2) The token slice must not be used after the next call to `Tokens()` as it will repopulate the slice.
// 2) This is not thread safe.
-func NewBloomTokenizer(nGramLen, nGramSkip int, maxBloomSize int, metrics *Metrics) *BloomTokenizer {
- // TODO(chaudum): Replace logger
- level.Info(util_log.Logger).Log("msg", "create new bloom tokenizer", "ngram length", nGramLen, "ngram skip", nGramSkip)
+func NewBloomTokenizer(nGramLen, nGramSkip int, maxBloomSize int, metrics *Metrics, logger log.Logger) *BloomTokenizer {
+ level.Info(logger).Log("msg", "create new bloom tokenizer", "ngram length", nGramLen, "ngram skip", nGramSkip)
return &BloomTokenizer{
metrics: metrics,
+ logger: logger,
cache: make(map[string]interface{}, cacheSize),
lineTokenizer: NewNGramTokenizer(nGramLen, nGramSkip),
maxBloomSize: maxBloomSize,
@@ -120,6 +120,16 @@ func (bt *BloomTokenizer) Populate(
if next {
// The last bloom has been made available via the `Next()` call above
bloom = blooms.At()
+
+ // TODO(salvacorts): Delete this once we solve the correctness bug
+ // We noticed some blooms are empty on the resulting blocks.
+ // We have the feeling that the empty blooms may be reused from old blocks.
+ // Here we log an error if we find an empty bloom.
+ if bloom.Count() == 0 {
+ level.Warn(bt.logger).Log(
+ "msg", "found existing empty bloom",
+ )
+ }
} else {
bloom = bt.newBloom()
}
@@ -155,7 +165,13 @@ func (bt *BloomTokenizer) Populate(
break
}
+ }
+ // TODO(salvacorts): Delete this once we solve the correctness bug
+ if bloom.Count() == 0 {
+ level.Warn(bt.logger).Log(
+ "msg", "resulting bloom is empty",
+ )
}
// Send the last bloom
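
For illustration only (not part of the recorded patch): a compact sketch of the logger-injection pattern this change adopts — the tokenizer receives a go-kit logger at construction instead of using the global `util_log.Logger`, callers attach `component`/`org_id` context once with `log.With`, and tests pass `log.NewNopLogger()`. The `tokenizer` type below is a stand-in, not the real `BloomTokenizer`.

```go
package main

import (
	"os"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
)

// tokenizer is a stand-in for BloomTokenizer: it now receives its logger at
// construction time instead of reaching for a global logger.
type tokenizer struct {
	logger log.Logger
}

func newTokenizer(logger log.Logger) *tokenizer {
	level.Info(logger).Log("msg", "create new tokenizer")
	return &tokenizer{logger: logger}
}

func (t *tokenizer) populate(bloomCount int) {
	if bloomCount == 0 {
		level.Warn(t.logger).Log("msg", "resulting bloom is empty")
	}
}

func main() {
	// Callers attach context once, so every later warning carries it.
	base := log.NewLogfmtLogger(os.Stderr)
	logger := log.With(base, "component", "bloom_tokenizer", "org_id", "tenant-a")
	newTokenizer(logger).populate(0)

	// Tests silence the output entirely, as the updated tests do.
	newTokenizer(log.NewNopLogger()).populate(0)
}
```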
diff --git a/pkg/storage/bloom/v1/bloom_tokenizer_test.go b/pkg/storage/bloom/v1/bloom_tokenizer_test.go
index 7023958eca112..8f3e4f473e930 100644
--- a/pkg/storage/bloom/v1/bloom_tokenizer_test.go
+++ b/pkg/storage/bloom/v1/bloom_tokenizer_test.go
@@ -8,10 +8,10 @@ import (
"testing"
"time"
+ logger "github.com/go-kit/log"
"github.com/grafana/dskit/multierror"
"github.com/grafana/loki/pkg/push"
-
"github.com/grafana/loki/v3/pkg/chunkenc"
"github.com/grafana/loki/v3/pkg/iter"
v2 "github.com/grafana/loki/v3/pkg/iter/v2"
@@ -82,7 +82,7 @@ func TestPrefixedKeyCreation(t *testing.T) {
func TestSetLineTokenizer(t *testing.T) {
t.Parallel()
- bt := NewBloomTokenizer(DefaultNGramLength, DefaultNGramSkip, 0, metrics)
+ bt := NewBloomTokenizer(DefaultNGramLength, DefaultNGramSkip, 0, metrics, logger.NewNopLogger())
// Validate defaults
require.Equal(t, bt.lineTokenizer.N(), DefaultNGramLength)
@@ -97,7 +97,7 @@ func TestSetLineTokenizer(t *testing.T) {
func TestTokenizerPopulate(t *testing.T) {
t.Parallel()
var testLine = "this is a log line"
- bt := NewBloomTokenizer(DefaultNGramLength, DefaultNGramSkip, 0, metrics)
+ bt := NewBloomTokenizer(DefaultNGramLength, DefaultNGramSkip, 0, metrics, logger.NewNopLogger())
sbf := filter.NewScalableBloomFilter(1024, 0.01, 0.8)
@@ -138,7 +138,7 @@ func TestTokenizerPopulate(t *testing.T) {
func TestBloomTokenizerPopulateWithoutPreexistingBloom(t *testing.T) {
var testLine = "this is a log line"
- bt := NewBloomTokenizer(DefaultNGramLength, DefaultNGramSkip, 0, metrics)
+ bt := NewBloomTokenizer(DefaultNGramLength, DefaultNGramSkip, 0, metrics, logger.NewNopLogger())
memChunk := chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, chunkenc.EncSnappy, chunkenc.ChunkHeadFormatFor(chunkenc.ChunkFormatV4), 256000, 1500000)
_, _ = memChunk.Append(&push.Entry{
@@ -206,7 +206,7 @@ func randomStr(ln int) string {
func TestTokenizerPopulateWontExceedMaxSize(t *testing.T) {
maxSize := 2048
- bt := NewBloomTokenizer(DefaultNGramLength, DefaultNGramSkip, maxSize, NewMetrics(nil))
+ bt := NewBloomTokenizer(DefaultNGramLength, DefaultNGramSkip, maxSize, NewMetrics(nil), logger.NewNopLogger())
ch := make(chan *BloomCreation)
line := randomStr(10e3)
itr, err := chunkRefItrFromLines(line)
@@ -257,7 +257,7 @@ func populateAndConsumeBloom(
func BenchmarkPopulateSeriesWithBloom(b *testing.B) {
for i := 0; i < b.N; i++ {
var testLine = lorem + lorem + lorem
- bt := NewBloomTokenizer(DefaultNGramLength, DefaultNGramSkip, 0, metrics)
+ bt := NewBloomTokenizer(DefaultNGramLength, DefaultNGramSkip, 0, metrics, logger.NewNopLogger())
sbf := filter.NewScalableBloomFilter(1024, 0.01, 0.8)
@@ -290,7 +290,7 @@ func BenchmarkPopulateSeriesWithBloom(b *testing.B) {
}
func TestTokenizerClearsCacheBetweenPopulateCalls(t *testing.T) {
- bt := NewBloomTokenizer(DefaultNGramLength, DefaultNGramSkip, 0, NewMetrics(nil))
+ bt := NewBloomTokenizer(DefaultNGramLength, DefaultNGramSkip, 0, NewMetrics(nil), logger.NewNopLogger())
line := "foobarbazz"
var blooms []*Bloom
@@ -329,7 +329,7 @@ func TestTokenizerClearsCacheBetweenPopulateCalls(t *testing.T) {
}
func BenchmarkMapClear(b *testing.B) {
- bt := NewBloomTokenizer(DefaultNGramLength, DefaultNGramSkip, 0, metrics)
+ bt := NewBloomTokenizer(DefaultNGramLength, DefaultNGramSkip, 0, metrics, logger.NewNopLogger())
for i := 0; i < b.N; i++ {
for k := 0; k < cacheSize; k++ {
bt.cache[fmt.Sprint(k)] = k
@@ -340,7 +340,7 @@ func BenchmarkMapClear(b *testing.B) {
}
func BenchmarkNewMap(b *testing.B) {
- bt := NewBloomTokenizer(DefaultNGramLength, DefaultNGramSkip, 0, metrics)
+ bt := NewBloomTokenizer(DefaultNGramLength, DefaultNGramSkip, 0, metrics, logger.NewNopLogger())
for i := 0; i < b.N; i++ {
for k := 0; k < cacheSize; k++ {
bt.cache[fmt.Sprint(k)] = k
|
test
|
Add logging for empty blooms (#13537)
|
469b54e3d58807d4bc24e4d247a2a205036b0954
|
2024-11-20 21:40:39
|
J Stickler
|
docs: update topics for Blooms (#15028)
| false
|
diff --git a/docs/sources/get-started/labels/structured-metadata.md b/docs/sources/get-started/labels/structured-metadata.md
index f1877139fef70..d76d1129471f9 100644
--- a/docs/sources/get-started/labels/structured-metadata.md
+++ b/docs/sources/get-started/labels/structured-metadata.md
@@ -23,8 +23,10 @@ You should only use structured metadata in the following situations:
- If you are ingesting data in OpenTelemetry format, using Grafana Alloy or an OpenTelemetry Collector. Structured metadata was designed to support native ingestion of OpenTelemetry data.
- If you have high cardinality metadata that should not be used as a label and does not exist in the log line. Some examples might include `process_id` or `thread_id` or Kubernetes pod names.
-
-It is an antipattern to extract information that already exists in your log lines and put it into structured metadata.
+- If you are using [Explore Logs](https://grafana.com/docs/grafana-cloud/visualizations/simplified-exploration/logs/) to visualize and explore your Loki logs.
+- If you are a large-scale customer, who is ingesting more than 75TB of logs a month and are using [Bloom filters](https://grafana.com/docs/loki/<LOKI_VERSION>/operations/bloom-filters/)
+
+We do not recommend extracting information that already exists in your log lines and putting it into structured metadata.
## Attaching structured metadata to log lines
@@ -37,9 +39,10 @@ See the [Promtail: Structured metadata stage](https://grafana.com/docs/loki/<LOK
With Loki version 1.2.0, support for structured metadata has been added to the Logstash output plugin. For more information, see [logstash](https://grafana.com/docs/loki/<LOKI_VERSION>/send-data/logstash/).
{{< admonition type="warning" >}}
-Structured metadata size is taken into account while asserting ingestion rate limiting.
+Structured metadata size is taken into account while asserting ingestion rate limiting.
Along with that, there are separate limits on how much structured metadata can be attached per log line.
-```
+
+```yaml
# Maximum size accepted for structured metadata per log line.
# CLI flag: -limits.max-structured-metadata-size
[max_structured_metadata_size: <int> | default = 64KB]
@@ -48,6 +51,7 @@ Along with that, there are separate limits on how much structured metadata can b
# CLI flag: -limits.max-structured-metadata-entries-count
[max_structured_metadata_entries_count: <int> | default = 128]
```
+
{{< /admonition >}}
## Querying structured metadata
diff --git a/docs/sources/query/query_accceleration.md b/docs/sources/query/query_accceleration.md
index 9117ecb209f23..a760b9e66faa2 100644
--- a/docs/sources/query/query_accceleration.md
+++ b/docs/sources/query/query_accceleration.md
@@ -1,5 +1,5 @@
---
-title: Query acceleration (Experimental)
+title: Query acceleration
menuTitle: Query acceleration
description: Provides instructions on how to write LogQL queries to benefit from query acceleration.
weight: 900
@@ -8,10 +8,11 @@ keywords:
- query acceleration
---
-# Query acceleration (Experimental)
+# Query acceleration
{{< admonition type="warning" >}}
-Query acceleration using blooms is an [experimental feature](/docs/release-life-cycle/). Engineering and on-call support is not available. No SLA is provided.
+In Loki and Grafana Enterprise Logs (GEL), Query acceleration using blooms is an [experimental feature](/docs/release-life-cycle/). Engineering and on-call support is not available. No SLA is provided.
+In Grafana Cloud, Query acceleration using blooms is enabled for large-scale customers that send more than 75TB of logs a month as a [public preview](/docs/release-life-cycle/) with limited support and no SLA.
{{< /admonition >}}
If [bloom filters][] are enabled, you can write LogQL queries using [structured metadata][] to benefit from query acceleration.
@@ -26,19 +27,26 @@ If [bloom filters][] are enabled, you can write LogQL queries using [structured
Queries will be accelerated for any [label filter expression][] that satisfies _all_ of the following criteria:
* The label filter expression using **string equality**, such as `| key="value"`.
- * `or` and `and` operators can be used to match multiple values, such as `| detected_level="error" or detected_level="warn"`.
- * _Basic_ regular expressions are automatically simplified into a supported expression:
- * `| key=~"value"` is converted to `| key="value"`.
- * `| key=~"value1|value2"` is converted to `| key="value1" or key="value2"`.
- * `| key=~".+"` checks for existence of `key`. `.*` is not supported.
+ * `or` and `and` operators can be used to match multiple values, such as `| detected_level="error" or detected_level="warn"`.
+ * _Basic_ regular expressions are automatically simplified into a supported expression:
+ * `| key=~"value"` is converted to `| key="value"`.
+ * `| key=~"value1|value2"` is converted to `| key="value1" or key="value2"`.
+ * `| key=~".+"` checks for existence of `key`. `.*` is not supported.
* The label filter expression is querying for structured metadata and not a stream label.
* The label filter expression is placed before any [parser expression][], [labels format expression][], [drop labels expression][], or [keep labels expression][].
-To take full advantage of query acceleration with blooms, ensure that filtering structured metadata is done before any parse expression:
+To take full advantage of query acceleration with blooms, ensure that filtering structured metadata is done before any parser expression:
+
+In the following example, the query is not accelerated because the structured metadata filter, `detected_level="error"`, is after a parser stage, `json`.
+
+```logql
+{cluster="prod"} | logfmt | json | detected_level="error"
+```
+
+In the following example, the query is accelerated because the structured metadata filter is before any parser stage.
```logql
-{cluster="prod"} | logfmt | json | detected_level="error" # NOT ACCELERATED: structured metadata filter is after a parse stage
-{cluster="prod"} | detected_level="error" | logfmt | json # ACCELERATED: structured metadata filter is before any parse stage
+{cluster="prod"} | detected_level="error" | logfmt | json
```
[bloom filters]: https://grafana.com/docs/loki/<LOKI_VERSION>/operations/bloom-filters/
|
docs
|
update topics for Blooms (#15028)
|
c4ac168a8c7885847fee505a6b63708f8c30aca4
|
2024-02-07 02:28:19
|
Christian Haudum
|
bloomstore: Change signature of `FetchBlocks()` to return `[]*CloseableBlockQuerier` (#11884)
| false
|
diff --git a/pkg/bloomcompactor/controller.go b/pkg/bloomcompactor/controller.go
index f6487d5f61d11..2002d8ce2a8bc 100644
--- a/pkg/bloomcompactor/controller.go
+++ b/pkg/bloomcompactor/controller.go
@@ -184,7 +184,7 @@ func (s *SimpleBloomController) do(ctx context.Context) error {
}
-func (s *SimpleBloomController) loadWorkForGap(ctx context.Context, id tsdb.Identifier, gap gapWithBlocks) (v1.CloseableIterator[*v1.Series], []*bloomshipper.ClosableBlockQuerier, error) {
+func (s *SimpleBloomController) loadWorkForGap(ctx context.Context, id tsdb.Identifier, gap gapWithBlocks) (v1.CloseableIterator[*v1.Series], []*bloomshipper.CloseableBlockQuerier, error) {
// load a series iterator for the gap
seriesItr, err := s.tsdbStore.LoadTSDB(id, gap.bounds)
if err != nil {
@@ -195,12 +195,8 @@ func (s *SimpleBloomController) loadWorkForGap(ctx context.Context, id tsdb.Iden
if err != nil {
return nil, nil, errors.Wrap(err, "failed to get blocks")
}
- results := make([]*bloomshipper.ClosableBlockQuerier, 0, len(blocks))
- for _, block := range blocks {
- results = append(results, block.BlockQuerier())
- }
- return seriesItr, results, nil
+ return seriesItr, blocks, nil
}
type gapWithBlocks struct {
diff --git a/pkg/bloomcompactor/spec.go b/pkg/bloomcompactor/spec.go
index 341e3977502c3..bf9a0a02387b4 100644
--- a/pkg/bloomcompactor/spec.go
+++ b/pkg/bloomcompactor/spec.go
@@ -74,7 +74,7 @@ type SimpleBloomGenerator struct {
chunkLoader ChunkLoader
// TODO(owen-d): blocks need not be all downloaded prior. Consider implementing
// as an iterator of iterators, where each iterator is a batch of overlapping blocks.
- blocks []*bloomshipper.ClosableBlockQuerier
+ blocks []*bloomshipper.CloseableBlockQuerier
// options to build blocks with
opts v1.BlockOptions
@@ -95,7 +95,7 @@ func NewSimpleBloomGenerator(
opts v1.BlockOptions,
store v1.Iterator[*v1.Series],
chunkLoader ChunkLoader,
- blocks []*bloomshipper.ClosableBlockQuerier,
+ blocks []*bloomshipper.CloseableBlockQuerier,
readWriterFn func() (v1.BlockWriter, v1.BlockReader),
metrics *Metrics,
logger log.Logger,
@@ -136,7 +136,7 @@ func (s *SimpleBloomGenerator) Generate(ctx context.Context) (skippedBlocks []v1
var closeErrors multierror.MultiError
blocksMatchingSchema := make([]v1.PeekingIterator[*v1.SeriesWithBloom], 0, len(s.blocks))
- toClose := make([]*bloomshipper.ClosableBlockQuerier, 0, len(s.blocks))
+ toClose := make([]*bloomshipper.CloseableBlockQuerier, 0, len(s.blocks))
// Close all remaining blocks on exit
defer func() {
for _, block := range toClose {
diff --git a/pkg/bloomcompactor/spec_test.go b/pkg/bloomcompactor/spec_test.go
index efc0d70f2020a..c43a4b715a1e7 100644
--- a/pkg/bloomcompactor/spec_test.go
+++ b/pkg/bloomcompactor/spec_test.go
@@ -64,9 +64,9 @@ func (dummyChunkLoader) Load(_ context.Context, series *v1.Series) (*ChunkItersB
}
func dummyBloomGen(opts v1.BlockOptions, store v1.Iterator[*v1.Series], blocks []*v1.Block) *SimpleBloomGenerator {
- bqs := make([]*bloomshipper.ClosableBlockQuerier, 0, len(blocks))
+ bqs := make([]*bloomshipper.CloseableBlockQuerier, 0, len(blocks))
for _, b := range blocks {
- bqs = append(bqs, &bloomshipper.ClosableBlockQuerier{
+ bqs = append(bqs, &bloomshipper.CloseableBlockQuerier{
BlockQuerier: v1.NewBlockQuerier(b),
})
}
diff --git a/pkg/bloomgateway/processor.go b/pkg/bloomgateway/processor.go
index 117e736e4f54f..26895bc43eda5 100644
--- a/pkg/bloomgateway/processor.go
+++ b/pkg/bloomgateway/processor.go
@@ -17,17 +17,8 @@ type tasksForBlock struct {
tasks []Task
}
-type blockLoader interface {
- LoadBlocks(context.Context, []bloomshipper.BlockRef) (v1.Iterator[bloomshipper.BlockQuerierWithFingerprintRange], error)
-}
-
-type store interface {
- blockLoader
- bloomshipper.Store
-}
-
type processor struct {
- store store
+ store bloomshipper.Store
logger log.Logger
}
@@ -70,17 +61,20 @@ func (p *processor) processBlocks(ctx context.Context, data []tasksForBlock) err
refs = append(refs, block.blockRef)
}
- blockIter, err := p.store.LoadBlocks(ctx, refs)
+ bqs, err := p.store.FetchBlocks(ctx, refs)
if err != nil {
return err
}
+ blockIter := v1.NewSliceIter(bqs)
+
outer:
for blockIter.Next() {
bq := blockIter.At()
for i, block := range data {
- if block.blockRef.Bounds.Equal(bq.FingerprintBounds) {
+ if block.blockRef.Bounds.Equal(bq.Bounds) {
err := p.processBlock(ctx, bq.BlockQuerier, block.tasks)
+ bq.Close()
if err != nil {
return err
}
@@ -88,6 +82,8 @@ outer:
continue outer
}
}
+ // should not happen, but close anyway
+ bq.Close()
}
return nil
}
diff --git a/pkg/bloomgateway/processor_test.go b/pkg/bloomgateway/processor_test.go
index 7cb37d97a0278..d39ba61a89613 100644
--- a/pkg/bloomgateway/processor_test.go
+++ b/pkg/bloomgateway/processor_test.go
@@ -12,16 +12,15 @@ import (
"go.uber.org/atomic"
"github.com/grafana/loki/pkg/logql/syntax"
- v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
"github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper"
)
-var _ store = &dummyStore{}
+var _ bloomshipper.Store = &dummyStore{}
type dummyStore struct {
metas []bloomshipper.Meta
blocks []bloomshipper.BlockRef
- querieres []bloomshipper.BlockQuerierWithFingerprintRange
+ querieres []*bloomshipper.CloseableBlockQuerier
}
func (s *dummyStore) ResolveMetas(_ context.Context, _ bloomshipper.MetaSearchParams) ([][]bloomshipper.MetaRef, []*bloomshipper.Fetcher, error) {
@@ -38,10 +37,6 @@ func (s *dummyStore) FetchMetas(_ context.Context, _ bloomshipper.MetaSearchPara
return s.metas, nil
}
-func (s *dummyStore) FetchBlocks(_ context.Context, _ []bloomshipper.BlockRef) ([]bloomshipper.BlockDirectory, error) {
- panic("don't call me")
-}
-
func (s *dummyStore) Fetcher(_ model.Time) (*bloomshipper.Fetcher, error) {
return nil, nil
}
@@ -53,12 +48,12 @@ func (s *dummyStore) Client(_ model.Time) (bloomshipper.Client, error) {
func (s *dummyStore) Stop() {
}
-func (s *dummyStore) LoadBlocks(_ context.Context, refs []bloomshipper.BlockRef) (v1.Iterator[bloomshipper.BlockQuerierWithFingerprintRange], error) {
- result := make([]bloomshipper.BlockQuerierWithFingerprintRange, len(s.querieres))
+func (s *dummyStore) FetchBlocks(_ context.Context, refs []bloomshipper.BlockRef) ([]*bloomshipper.CloseableBlockQuerier, error) {
+ result := make([]*bloomshipper.CloseableBlockQuerier, 0, len(s.querieres))
for _, ref := range refs {
for _, bq := range s.querieres {
- if ref.Bounds.Equal(bq.FingerprintBounds) {
+ if ref.Bounds.Equal(bq.Bounds) {
result = append(result, bq)
}
}
@@ -68,7 +63,7 @@ func (s *dummyStore) LoadBlocks(_ context.Context, refs []bloomshipper.BlockRef)
result[i], result[j] = result[j], result[i]
})
- return v1.NewSliceIter(result), nil
+ return result, nil
}
func TestProcessor(t *testing.T) {
diff --git a/pkg/bloomgateway/util_test.go b/pkg/bloomgateway/util_test.go
index b77d2fe68f47a..f19564b43ef59 100644
--- a/pkg/bloomgateway/util_test.go
+++ b/pkg/bloomgateway/util_test.go
@@ -295,10 +295,10 @@ func TestPartitionRequest(t *testing.T) {
}
-func createBlockQueriers(t *testing.T, numBlocks int, from, through model.Time, minFp, maxFp model.Fingerprint) ([]bloomshipper.BlockQuerierWithFingerprintRange, [][]v1.SeriesWithBloom) {
+func createBlockQueriers(t *testing.T, numBlocks int, from, through model.Time, minFp, maxFp model.Fingerprint) ([]*bloomshipper.CloseableBlockQuerier, [][]v1.SeriesWithBloom) {
t.Helper()
step := (maxFp - minFp) / model.Fingerprint(numBlocks)
- bqs := make([]bloomshipper.BlockQuerierWithFingerprintRange, 0, numBlocks)
+ bqs := make([]*bloomshipper.CloseableBlockQuerier, 0, numBlocks)
series := make([][]v1.SeriesWithBloom, 0, numBlocks)
for i := 0; i < numBlocks; i++ {
fromFp := minFp + (step * model.Fingerprint(i))
@@ -308,9 +308,15 @@ func createBlockQueriers(t *testing.T, numBlocks int, from, through model.Time,
throughFp = maxFp
}
blockQuerier, data := v1.MakeBlockQuerier(t, fromFp, throughFp, from, through)
- bq := bloomshipper.BlockQuerierWithFingerprintRange{
- BlockQuerier: blockQuerier,
- FingerprintBounds: v1.NewBounds(fromFp, throughFp),
+ bq := &bloomshipper.CloseableBlockQuerier{
+ BlockQuerier: blockQuerier,
+ BlockRef: bloomshipper.BlockRef{
+ Ref: bloomshipper.Ref{
+ Bounds: v1.NewBounds(fromFp, throughFp),
+ StartTimestamp: from,
+ EndTimestamp: through,
+ },
+ },
}
bqs = append(bqs, bq)
series = append(series, data)
@@ -318,12 +324,12 @@ func createBlockQueriers(t *testing.T, numBlocks int, from, through model.Time,
return bqs, series
}
-func createBlocks(t *testing.T, tenant string, n int, from, through model.Time, minFp, maxFp model.Fingerprint) ([]bloomshipper.BlockRef, []bloomshipper.Meta, []bloomshipper.BlockQuerierWithFingerprintRange, [][]v1.SeriesWithBloom) {
+func createBlocks(t *testing.T, tenant string, n int, from, through model.Time, minFp, maxFp model.Fingerprint) ([]bloomshipper.BlockRef, []bloomshipper.Meta, []*bloomshipper.CloseableBlockQuerier, [][]v1.SeriesWithBloom) {
t.Helper()
blocks := make([]bloomshipper.BlockRef, 0, n)
metas := make([]bloomshipper.Meta, 0, n)
- queriers := make([]bloomshipper.BlockQuerierWithFingerprintRange, 0, n)
+ queriers := make([]*bloomshipper.CloseableBlockQuerier, 0, n)
series := make([][]v1.SeriesWithBloom, 0, n)
step := (maxFp - minFp) / model.Fingerprint(n)
@@ -352,9 +358,9 @@ func createBlocks(t *testing.T, tenant string, n int, from, through model.Time,
Blocks: []bloomshipper.BlockRef{block},
}
blockQuerier, data := v1.MakeBlockQuerier(t, fromFp, throughFp, from, through)
- querier := bloomshipper.BlockQuerierWithFingerprintRange{
- BlockQuerier: blockQuerier,
- FingerprintBounds: v1.NewBounds(fromFp, throughFp),
+ querier := &bloomshipper.CloseableBlockQuerier{
+ BlockQuerier: blockQuerier,
+ BlockRef: block,
}
queriers = append(queriers, querier)
metas = append(metas, meta)
@@ -364,12 +370,12 @@ func createBlocks(t *testing.T, tenant string, n int, from, through model.Time,
return blocks, metas, queriers, series
}
-func newMockBloomStore(bqs []bloomshipper.BlockQuerierWithFingerprintRange) *mockBloomStore {
+func newMockBloomStore(bqs []*bloomshipper.CloseableBlockQuerier) *mockBloomStore {
return &mockBloomStore{bqs: bqs}
}
type mockBloomStore struct {
- bqs []bloomshipper.BlockQuerierWithFingerprintRange
+ bqs []*bloomshipper.CloseableBlockQuerier
// mock how long it takes to serve block queriers
delay time.Duration
// mock response error when serving block queriers in ForEach
@@ -379,16 +385,11 @@ type mockBloomStore struct {
var _ bloomshipper.Interface = &mockBloomStore{}
// GetBlockRefs implements bloomshipper.Interface
-func (s *mockBloomStore) GetBlockRefs(_ context.Context, tenant string, _ bloomshipper.Interval) ([]bloomshipper.BlockRef, error) {
+func (s *mockBloomStore) GetBlockRefs(_ context.Context, _ string, _ bloomshipper.Interval) ([]bloomshipper.BlockRef, error) {
time.Sleep(s.delay)
blocks := make([]bloomshipper.BlockRef, 0, len(s.bqs))
for i := range s.bqs {
- blocks = append(blocks, bloomshipper.BlockRef{
- Ref: bloomshipper.Ref{
- Bounds: v1.NewBounds(s.bqs[i].Min, s.bqs[i].Max),
- TenantID: tenant,
- },
- })
+ blocks = append(blocks, s.bqs[i].BlockRef)
}
return blocks, nil
}
@@ -403,7 +404,7 @@ func (s *mockBloomStore) ForEach(_ context.Context, _ string, _ []bloomshipper.B
return s.err
}
- shuffled := make([]bloomshipper.BlockQuerierWithFingerprintRange, len(s.bqs))
+ shuffled := make([]*bloomshipper.CloseableBlockQuerier, len(s.bqs))
_ = copy(shuffled, s.bqs)
rand.Shuffle(len(shuffled), func(i, j int) {
@@ -413,7 +414,7 @@ func (s *mockBloomStore) ForEach(_ context.Context, _ string, _ []bloomshipper.B
for _, bq := range shuffled {
// ignore errors in the mock
time.Sleep(s.delay)
- err := callback(bq.BlockQuerier, bq.FingerprintBounds)
+ err := callback(bq.BlockQuerier, bq.Bounds)
if err != nil {
return err
}
@@ -443,7 +444,7 @@ func createQueryInputFromBlockData(t *testing.T, tenant string, data [][]v1.Seri
return res
}
-func createBlockRefsFromBlockData(t *testing.T, tenant string, data []bloomshipper.BlockQuerierWithFingerprintRange) []bloomshipper.BlockRef {
+func createBlockRefsFromBlockData(t *testing.T, tenant string, data []*bloomshipper.CloseableBlockQuerier) []bloomshipper.BlockRef {
t.Helper()
res := make([]bloomshipper.BlockRef, 0)
for i := range data {
@@ -451,7 +452,7 @@ func createBlockRefsFromBlockData(t *testing.T, tenant string, data []bloomshipp
Ref: bloomshipper.Ref{
TenantID: tenant,
TableName: "",
- Bounds: v1.NewBounds(data[i].Min, data[i].Max),
+ Bounds: v1.NewBounds(data[i].Bounds.Min, data[i].Bounds.Max),
StartTimestamp: 0,
EndTimestamp: 0,
Checksum: 0,
diff --git a/pkg/storage/stores/shipper/bloomshipper/cache.go b/pkg/storage/stores/shipper/bloomshipper/cache.go
index 2f9e98f89d9c0..52899a03fea0a 100644
--- a/pkg/storage/stores/shipper/bloomshipper/cache.go
+++ b/pkg/storage/stores/shipper/bloomshipper/cache.go
@@ -17,12 +17,13 @@ import (
"github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper/config"
)
-type ClosableBlockQuerier struct {
+type CloseableBlockQuerier struct {
+ BlockRef
*v1.BlockQuerier
close func() error
}
-func (c *ClosableBlockQuerier) Close() error {
+func (c *CloseableBlockQuerier) Close() error {
if c.close != nil {
return c.close()
}
@@ -53,7 +54,7 @@ func NewBlockDirectory(ref BlockRef, path string, logger log.Logger) BlockDirect
return BlockDirectory{
BlockRef: ref,
Path: path,
- activeQueriers: atomic.NewInt32(0),
+ refCount: atomic.NewInt32(0),
removeDirectoryTimeout: time.Minute,
logger: logger,
activeQueriersCheckInterval: defaultActiveQueriersCheckInterval,
@@ -66,7 +67,7 @@ type BlockDirectory struct {
BlockRef
Path string
removeDirectoryTimeout time.Duration
- activeQueriers *atomic.Int32
+ refCount *atomic.Int32
logger log.Logger
activeQueriersCheckInterval time.Duration
}
@@ -75,17 +76,24 @@ func (b BlockDirectory) Block() *v1.Block {
return v1.NewBlock(v1.NewDirectoryBlockReader(b.Path))
}
+func (b BlockDirectory) Acquire() {
+ _ = b.refCount.Inc()
+}
+
+func (b BlockDirectory) Release() error {
+ _ = b.refCount.Dec()
+ return nil
+}
+
// BlockQuerier returns a new block querier from the directory.
// It increments the counter of active queriers for this directory.
// The counter is decreased when the returned querier is closed.
-func (b BlockDirectory) BlockQuerier() *ClosableBlockQuerier {
- b.activeQueriers.Inc()
- return &ClosableBlockQuerier{
+func (b BlockDirectory) BlockQuerier() *CloseableBlockQuerier {
+ b.Acquire()
+ return &CloseableBlockQuerier{
BlockQuerier: v1.NewBlockQuerier(b.Block()),
- close: func() error {
- _ = b.activeQueriers.Dec()
- return nil
- },
+ BlockRef: b.BlockRef,
+ close: b.Release,
}
}
@@ -99,7 +107,7 @@ func (b *BlockDirectory) removeDirectoryAsync() {
for {
select {
case <-ticker.C:
- if b.activeQueriers.Load() == 0 {
+ if b.refCount.Load() == 0 {
err := deleteFolder(b.Path)
if err == nil {
return
diff --git a/pkg/storage/stores/shipper/bloomshipper/cache_test.go b/pkg/storage/stores/shipper/bloomshipper/cache_test.go
index de916377a3f7b..c85f0382bafdd 100644
--- a/pkg/storage/stores/shipper/bloomshipper/cache_test.go
+++ b/pkg/storage/stores/shipper/bloomshipper/cache_test.go
@@ -35,21 +35,21 @@ func TestBlockDirectory_Cleanup(t *testing.T) {
require.NoError(t, err)
require.DirExists(t, extractedBlockDirectory)
- cached := BlockDirectory{
+ blockDir := BlockDirectory{
Path: extractedBlockDirectory,
removeDirectoryTimeout: timeout,
activeQueriersCheckInterval: checkInterval,
logger: log.NewNopLogger(),
- activeQueriers: atomic.NewInt32(0),
+ refCount: atomic.NewInt32(0),
}
// acquire directory
- cached.activeQueriers.Inc()
+ blockDir.refCount.Inc()
// start cleanup goroutine
- cached.removeDirectoryAsync()
+ blockDir.removeDirectoryAsync()
if tc.releaseQuerier {
// release directory
- cached.activeQueriers.Dec()
+ blockDir.refCount.Dec()
}
// ensure directory does not exist any more
@@ -66,15 +66,15 @@ func Test_ClosableBlockQuerier(t *testing.T) {
err := extractArchive(blockFilePath, extractedBlockDirectory)
require.NoError(t, err)
- cached := BlockDirectory{
+ blockDir := BlockDirectory{
Path: extractedBlockDirectory,
removeDirectoryTimeout: 100 * time.Millisecond,
- activeQueriers: atomic.NewInt32(0),
+ refCount: atomic.NewInt32(0),
}
- querier := cached.BlockQuerier()
- require.Equal(t, int32(1), cached.activeQueriers.Load())
+ querier := blockDir.BlockQuerier()
+ require.Equal(t, int32(1), blockDir.refCount.Load())
require.NoError(t, querier.Close())
- require.Equal(t, int32(0), cached.activeQueriers.Load())
+ require.Equal(t, int32(0), blockDir.refCount.Load())
}
diff --git a/pkg/storage/stores/shipper/bloomshipper/fetcher.go b/pkg/storage/stores/shipper/bloomshipper/fetcher.go
index 6b477c1a0aa3b..bb9a70644f5e6 100644
--- a/pkg/storage/stores/shipper/bloomshipper/fetcher.go
+++ b/pkg/storage/stores/shipper/bloomshipper/fetcher.go
@@ -21,7 +21,7 @@ type metrics struct{}
type fetcher interface {
FetchMetas(ctx context.Context, refs []MetaRef) ([]Meta, error)
- FetchBlocks(ctx context.Context, refs []BlockRef) ([]BlockDirectory, error)
+ FetchBlocks(ctx context.Context, refs []BlockRef) ([]*CloseableBlockQuerier, error)
Close()
}
@@ -124,7 +124,7 @@ func (f *Fetcher) writeBackMetas(ctx context.Context, metas []Meta) error {
return f.metasCache.Store(ctx, keys, data)
}
-func (f *Fetcher) FetchBlocks(ctx context.Context, refs []BlockRef) ([]BlockDirectory, error) {
+func (f *Fetcher) FetchBlocks(ctx context.Context, refs []BlockRef) ([]*CloseableBlockQuerier, error) {
n := len(refs)
responses := make(chan downloadResponse[BlockDirectory], n)
@@ -140,13 +140,13 @@ func (f *Fetcher) FetchBlocks(ctx context.Context, refs []BlockRef) ([]BlockDire
})
}
- results := make([]BlockDirectory, len(refs))
+ results := make([]*CloseableBlockQuerier, n)
for i := 0; i < n; i++ {
select {
case err := <-errors:
return results, err
case res := <-responses:
- results[res.idx] = res.item
+ results[res.idx] = res.item.BlockQuerier()
}
}
diff --git a/pkg/storage/stores/shipper/bloomshipper/shipper.go b/pkg/storage/stores/shipper/bloomshipper/shipper.go
index cff17296de9c0..6d6322c91382b 100644
--- a/pkg/storage/stores/shipper/bloomshipper/shipper.go
+++ b/pkg/storage/stores/shipper/bloomshipper/shipper.go
@@ -14,11 +14,6 @@ import (
"github.com/grafana/loki/pkg/storage/stores/shipper/bloomshipper/config"
)
-type BlockQuerierWithFingerprintRange struct {
- *v1.BlockQuerier
- v1.FingerprintBounds
-}
-
type ForEachBlockCallback func(bq *v1.BlockQuerier, bounds v1.FingerprintBounds) error
type Interface interface {
@@ -58,21 +53,21 @@ func (s *Shipper) GetBlockRefs(ctx context.Context, tenantID string, interval In
return blockRefs, nil
}
-func (s *Shipper) ForEach(ctx context.Context, _ string, blocks []BlockRef, callback ForEachBlockCallback) error {
- blockDirs, err := s.store.FetchBlocks(ctx, blocks)
+func (s *Shipper) ForEach(ctx context.Context, _ string, refs []BlockRef, callback ForEachBlockCallback) error {
+ bqs, err := s.store.FetchBlocks(ctx, refs)
+
if err != nil {
return err
}
- if len(blockDirs) != len(blocks) {
- return fmt.Errorf("number of responses (%d) does not match number of requests (%d)", len(blockDirs), len(blocks))
+ if len(bqs) != len(refs) {
+		return fmt.Errorf("number of responses (%d) does not match number of requests (%d)", len(bqs), len(refs))
}
- for i := range blocks {
- if blockDirs[i].BlockRef != blocks[i] {
- return fmt.Errorf("invalid order of responses: expected: %v, got: %v", blocks[i], blockDirs[i].BlockRef)
- }
- err := runCallback(callback, blockDirs[i].BlockQuerier(), blockDirs[i].BlockRef.Bounds)
+ for i := range bqs {
+ err := callback(bqs[i].BlockQuerier, bqs[i].Bounds)
+ // close querier to decrement ref count
+ bqs[i].Close()
if err != nil {
return err
}
@@ -80,14 +75,6 @@ func (s *Shipper) ForEach(ctx context.Context, _ string, blocks []BlockRef, call
return nil
}
-func runCallback(callback ForEachBlockCallback, bq *ClosableBlockQuerier, bounds v1.FingerprintBounds) error {
- defer func(b *ClosableBlockQuerier) {
- _ = b.Close()
- }(bq)
-
- return callback(bq.BlockQuerier, bounds)
-}
-
func (s *Shipper) Stop() {
s.store.Stop()
}
diff --git a/pkg/storage/stores/shipper/bloomshipper/store.go b/pkg/storage/stores/shipper/bloomshipper/store.go
index 5f1b7bf8d1494..c95d04122117f 100644
--- a/pkg/storage/stores/shipper/bloomshipper/store.go
+++ b/pkg/storage/stores/shipper/bloomshipper/store.go
@@ -25,7 +25,7 @@ var (
type Store interface {
ResolveMetas(ctx context.Context, params MetaSearchParams) ([][]MetaRef, []*Fetcher, error)
FetchMetas(ctx context.Context, params MetaSearchParams) ([]Meta, error)
- FetchBlocks(ctx context.Context, refs []BlockRef) ([]BlockDirectory, error)
+ FetchBlocks(ctx context.Context, refs []BlockRef) ([]*CloseableBlockQuerier, error)
Fetcher(ts model.Time) (*Fetcher, error)
Client(ts model.Time) (Client, error)
Stop()
@@ -112,7 +112,7 @@ func (b *bloomStoreEntry) FetchMetas(ctx context.Context, params MetaSearchParam
}
// FetchBlocks implements Store.
-func (b *bloomStoreEntry) FetchBlocks(ctx context.Context, refs []BlockRef) ([]BlockDirectory, error) {
+func (b *bloomStoreEntry) FetchBlocks(ctx context.Context, refs []BlockRef) ([]*CloseableBlockQuerier, error) {
return b.fetcher.FetchBlocks(ctx, refs)
}
@@ -291,7 +291,7 @@ func (b *BloomStore) FetchMetas(ctx context.Context, params MetaSearchParams) ([
}
// FetchBlocks implements Store.
-func (b *BloomStore) FetchBlocks(ctx context.Context, blocks []BlockRef) ([]BlockDirectory, error) {
+func (b *BloomStore) FetchBlocks(ctx context.Context, blocks []BlockRef) ([]*CloseableBlockQuerier, error) {
var refs [][]BlockRef
var fetchers []*Fetcher
@@ -316,7 +316,7 @@ func (b *BloomStore) FetchBlocks(ctx context.Context, blocks []BlockRef) ([]Bloc
}
}
- results := make([]BlockDirectory, 0, len(blocks))
+ results := make([]*CloseableBlockQuerier, 0, len(blocks))
for i := range fetchers {
res, err := fetchers[i].FetchBlocks(ctx, refs[i])
results = append(results, res...)
@@ -325,8 +325,8 @@ func (b *BloomStore) FetchBlocks(ctx context.Context, blocks []BlockRef) ([]Bloc
}
}
- // sort responses (results []BlockDirectory) based on requests (blocks []BlockRef)
- slices.SortFunc(results, func(a, b BlockDirectory) int {
+ // sort responses (results []*CloseableBlockQuerier) based on requests (blocks []BlockRef)
+ slices.SortFunc(results, func(a, b *CloseableBlockQuerier) int {
ia, ib := slices.Index(blocks, a.BlockRef), slices.Index(blocks, b.BlockRef)
if ia < ib {
return -1
diff --git a/pkg/storage/stores/shipper/bloomshipper/store_test.go b/pkg/storage/stores/shipper/bloomshipper/store_test.go
index aa0ca46e0660b..42b35111688b2 100644
--- a/pkg/storage/stores/shipper/bloomshipper/store_test.go
+++ b/pkg/storage/stores/shipper/bloomshipper/store_test.go
@@ -248,19 +248,19 @@ func TestBloomStore_FetchBlocks(t *testing.T) {
ctx := context.Background()
// first call fetches two blocks from cache
- blockDirs, err := store.FetchBlocks(ctx, []BlockRef{b1.BlockRef, b3.BlockRef})
+ bqs, err := store.FetchBlocks(ctx, []BlockRef{b1.BlockRef, b3.BlockRef})
require.NoError(t, err)
- require.Len(t, blockDirs, 2)
+ require.Len(t, bqs, 2)
- require.ElementsMatch(t, []BlockRef{b1.BlockRef, b3.BlockRef}, []BlockRef{blockDirs[0].BlockRef, blockDirs[1].BlockRef})
+ require.Equal(t, []BlockRef{b1.BlockRef, b3.BlockRef}, []BlockRef{bqs[0].BlockRef, bqs[1].BlockRef})
// second call fetches two blocks from cache and two from storage
- blockDirs, err = store.FetchBlocks(ctx, []BlockRef{b1.BlockRef, b2.BlockRef, b3.BlockRef, b4.BlockRef})
+ bqs, err = store.FetchBlocks(ctx, []BlockRef{b1.BlockRef, b2.BlockRef, b3.BlockRef, b4.BlockRef})
require.NoError(t, err)
- require.Len(t, blockDirs, 4)
+ require.Len(t, bqs, 4)
require.Equal(t,
[]BlockRef{b1.BlockRef, b2.BlockRef, b3.BlockRef, b4.BlockRef},
- []BlockRef{blockDirs[0].BlockRef, blockDirs[1].BlockRef, blockDirs[2].BlockRef, blockDirs[3].BlockRef},
+ []BlockRef{bqs[0].BlockRef, bqs[1].BlockRef, bqs[2].BlockRef, bqs[3].BlockRef},
)
}
|
bloomstore
|
Change signature of `FetchBlocks()` to return `[]*CloseableBlockQuerier` (#11884)
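The change above replaces `BlockQuerierWithFingerprintRange` with `CloseableBlockQuerier` and moves reference counting into `BlockDirectory`: `Acquire` bumps the count when a querier is handed out, and `Close` releases it so the async cleanup can eventually delete the directory. The following is a simplified, hedged Go sketch of that acquire-on-handout, release-on-close contract; `blockDirectory` and `closeableQuerier` are stand-ins for the real `bloomshipper` types, not their actual definitions.

```go
package main

import (
	"fmt"

	"go.uber.org/atomic"
)

// blockDirectory stands in for bloomshipper.BlockDirectory: it owns a
// reference count that guards asynchronous cleanup of the extracted block.
type blockDirectory struct {
	path     string
	refCount *atomic.Int32
}

func (b *blockDirectory) Acquire()       { b.refCount.Inc() }
func (b *blockDirectory) Release() error { b.refCount.Dec(); return nil }

// closeableQuerier mirrors CloseableBlockQuerier: it carries a close func
// that releases the underlying directory when the caller is done with it.
type closeableQuerier struct {
	path  string
	close func() error
}

func (c *closeableQuerier) Close() error {
	if c.close != nil {
		return c.close()
	}
	return nil
}

// Querier increments the ref count and hands out a closer that decrements it,
// which is the contract FetchBlocks callers now rely on in the diff above.
func (b *blockDirectory) Querier() *closeableQuerier {
	b.Acquire()
	return &closeableQuerier{path: b.path, close: b.Release}
}

func main() {
	dir := &blockDirectory{path: "/tmp/block", refCount: atomic.NewInt32(0)}
	q := dir.Querier()
	fmt.Println("refs after acquire:", dir.refCount.Load()) // 1
	_ = q.Close()
	fmt.Println("refs after close:", dir.refCount.Load()) // 0
}
```

The cleanup goroutine only removes the directory once the count returns to zero, which is why every fetched querier must be closed, as the processor and shipper changes above now do explicitly.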
|
dec695f1126c75512ac73e4f7cb2b43f73cb9cd8
|
2022-03-23 14:40:16
|
Sashank Agarwal
|
operator: make replicationFactor optional (#5701)
| false
|
diff --git a/operator/CHANGELOG.md b/operator/CHANGELOG.md
index 74fc2a19ea3e6..dcca4586c7e52 100644
--- a/operator/CHANGELOG.md
+++ b/operator/CHANGELOG.md
@@ -1,5 +1,6 @@
## Main
+- [5701](https://github.com/grafana/loki/pull/5701) **sasagarw**: Make ReplicationFactor optional in LokiStack API
- [5695](https://github.com/grafana/loki/pull/5695) **xperimental**: Update Go to 1.17
- [5615](https://github.com/grafana/loki/pull/5615) **sasagarw**: Document how to connect to LokiStack gateway component
- [5655](https://github.com/grafana/loki/pull/5655) **xperimental**: Update Loki operand to 2.4.2
diff --git a/operator/api/v1beta1/lokistack_types.go b/operator/api/v1beta1/lokistack_types.go
index 02b53cafc7468..c71597f74a814 100644
--- a/operator/api/v1beta1/lokistack_types.go
+++ b/operator/api/v1beta1/lokistack_types.go
@@ -510,8 +510,8 @@ type LokiStackSpec struct {
// ReplicationFactor defines the policy for log stream replication.
//
- // +required
- // +kubebuilder:validation:Required
+ // +optional
+ // +kubebuilder:validation:Optional
// +kubebuilder:validation:Minimum:=1
// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors="urn:alm:descriptor:com.tectonic.ui:number",displayName="Replication Factor"
ReplicationFactor int32 `json:"replicationFactor"`
diff --git a/operator/bundle/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/manifests/loki-operator.clusterserviceversion.yaml
index fb3a8e1fa251e..75461dec629c1 100644
--- a/operator/bundle/manifests/loki-operator.clusterserviceversion.yaml
+++ b/operator/bundle/manifests/loki-operator.clusterserviceversion.yaml
@@ -11,7 +11,6 @@ metadata:
"name": "lokistack-sample"
},
"spec": {
- "replicationFactor": 2,
"size": "1x.small",
"storage": {
"secret": {
diff --git a/operator/bundle/manifests/loki.grafana.com_lokistacks.yaml b/operator/bundle/manifests/loki.grafana.com_lokistacks.yaml
index 852bb493de1ca..291b116135767 100644
--- a/operator/bundle/manifests/loki.grafana.com_lokistacks.yaml
+++ b/operator/bundle/manifests/loki.grafana.com_lokistacks.yaml
@@ -807,7 +807,6 @@ spec:
- mode
type: object
required:
- - replicationFactor
- size
- storage
- storageClassName
diff --git a/operator/config/crd/bases/loki.grafana.com_lokistacks.yaml b/operator/config/crd/bases/loki.grafana.com_lokistacks.yaml
index ae454466ee07f..c8bce14cc304c 100644
--- a/operator/config/crd/bases/loki.grafana.com_lokistacks.yaml
+++ b/operator/config/crd/bases/loki.grafana.com_lokistacks.yaml
@@ -585,7 +585,6 @@ spec:
- mode
type: object
required:
- - replicationFactor
- size
- storage
- storageClassName
diff --git a/operator/config/samples/loki_v1beta1_lokistack.yaml b/operator/config/samples/loki_v1beta1_lokistack.yaml
index c32f01681db86..5839d2e80b00a 100644
--- a/operator/config/samples/loki_v1beta1_lokistack.yaml
+++ b/operator/config/samples/loki_v1beta1_lokistack.yaml
@@ -4,7 +4,6 @@ metadata:
name: lokistack-sample
spec:
size: 1x.small
- replicationFactor: 2
storage:
secret:
name: test
diff --git a/operator/hack/lokistack_dev.yaml b/operator/hack/lokistack_dev.yaml
index 568c7d3889675..0bcabe5a645ff 100644
--- a/operator/hack/lokistack_dev.yaml
+++ b/operator/hack/lokistack_dev.yaml
@@ -4,7 +4,6 @@ metadata:
name: lokistack-dev
spec:
size: 1x.extra-small
- replicationFactor: 1
storage:
secret:
name: test
diff --git a/operator/hack/lokistack_gateway_dev.yaml b/operator/hack/lokistack_gateway_dev.yaml
index 7f818196ab05d..56037666b265c 100644
--- a/operator/hack/lokistack_gateway_dev.yaml
+++ b/operator/hack/lokistack_gateway_dev.yaml
@@ -4,7 +4,6 @@ metadata:
name: lokistack-dev
spec:
size: 1x.extra-small
- replicationFactor: 1
storage:
secret:
name: test
diff --git a/operator/hack/lokistack_gateway_ocp.yaml b/operator/hack/lokistack_gateway_ocp.yaml
index 495980c1405d4..296bc010c8861 100644
--- a/operator/hack/lokistack_gateway_ocp.yaml
+++ b/operator/hack/lokistack_gateway_ocp.yaml
@@ -4,7 +4,6 @@ metadata:
name: lokistack-dev
spec:
size: 1x.extra-small
- replicationFactor: 1
storage:
secret:
name: test
|
operator
|
make replicationFactor optional (#5701)
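The operator change above flips the kubebuilder markers on `ReplicationFactor` from `Required` to `Optional`, which is why the generated CRDs and sample manifests drop the field from their `required` lists. As a hedged illustration of that convention, the sketch below shows how such marker comments sit on a CRD spec field; `ExampleStackSpec` is a hypothetical type, only the marker comments mirror the diff.

```go
package v1beta1

// ExampleStackSpec is an illustrative CRD spec type; only the marker comments
// matter here. With the Optional markers (as in the diff above), controller-gen
// no longer lists the field under "required" in the generated CRD schema, so
// manifests may omit it entirely.
type ExampleStackSpec struct {
	// ReplicationFactor defines the policy for log stream replication.
	//
	// +optional
	// +kubebuilder:validation:Optional
	// +kubebuilder:validation:Minimum:=1
	ReplicationFactor int32 `json:"replicationFactor"`
}
```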
|
5f183393f9d09fccd1eb25def12301332a4d5a45
|
2025-01-28 23:43:23
|
renovate[bot]
|
chore(deps): update fluent/fluent-bit docker tag to v3.2.5 (main) (#15986)
| false
|
diff --git a/clients/cmd/fluent-bit/Dockerfile b/clients/cmd/fluent-bit/Dockerfile
index d75304df1cf1f..049c014ba8757 100644
--- a/clients/cmd/fluent-bit/Dockerfile
+++ b/clients/cmd/fluent-bit/Dockerfile
@@ -14,7 +14,7 @@ RUN go build \
-o clients/cmd/fluent-bit/out_grafana_loki.so \
/src/clients/cmd/fluent-bit
-FROM fluent/fluent-bit:3.2.4@sha256:a185ac0516e1f35568ff0662f12c4ada0ea38c4300ed223d0fde485599dff5b5
+FROM fluent/fluent-bit:3.2.5@sha256:e278039fe91dd3c71765e85f3e7ac8de40ff2fa707728c52be90574f3dd9fb29
COPY --from=builder /src/clients/cmd/fluent-bit/out_grafana_loki.so /fluent-bit/bin
COPY clients/cmd/fluent-bit/fluent-bit.conf /fluent-bit/etc/fluent-bit.conf
|
chore
|
update fluent/fluent-bit docker tag to v3.2.5 (main) (#15986)
|
30afd21ce75ac87dcb4c10bac40bcc390a5da7be
|
2024-09-23 21:37:35
|
J Stickler
|
docs: Replace shortcodes on Storage topic (#13748)
| false
|
diff --git a/docs/sources/configure/storage.md b/docs/sources/configure/storage.md
index 27466dbc6e50c..0b98cfc18d848 100644
--- a/docs/sources/configure/storage.md
+++ b/docs/sources/configure/storage.md
@@ -85,9 +85,9 @@ You may use any substitutable services, such as those that implement the S3 API
Cassandra is a popular database and one of the possible chunk stores for Loki and is production safe.
-{{< collapse title="Title of hidden content" >}}
+{{< admonition type="note" >}}
This storage type for chunks is deprecated and may be removed in future major versions of Loki.
-{{< /collapse >}}
+{{< /admonition >}}
## Index storage
@@ -95,25 +95,25 @@ This storage type for chunks is deprecated and may be removed in future major ve
Cassandra can also be utilized for the index store and aside from the [boltdb-shipper](https://grafana.com/docs/loki/<LOKI_VERSION>/operations/storage/boltdb-shipper/), it's the only non-cloud offering that can be used for the index that's horizontally scalable and has configurable replication. It's a good candidate when you already run Cassandra, are running on-prem, or do not wish to use a managed cloud offering.
-{{< collapse title="Title of hidden content" >}}
+{{< admonition type="note" >}}
This storage type for indexes is deprecated and may be removed in future major versions of Loki.
-{{< /collapse >}}
+{{< /admonition >}}
### BigTable (deprecated)
Bigtable is a cloud database offered by Google. It is a good candidate for a managed index store if you're already using it (due to its heavy fixed costs) or wish to run in GCP.
-{{< collapse title="Title of hidden content" >}}
+{{< admonition type="note" >}}
This storage type for indexes is deprecated and may be removed in future major versions of Loki.
-{{< /collapse >}}
+{{< /admonition >}}
### DynamoDB (deprecated)
DynamoDB is a cloud database offered by AWS. It is a good candidate for a managed index store, especially if you're already running in AWS.
-{{< collapse title="Title of hidden content" >}}
+{{< admonition type="note" >}}
This storage type for indexes is deprecated and may be removed in future major versions of Loki.
-{{< /collapse >}}
+{{< /admonition >}}
#### Rate limiting
@@ -123,9 +123,9 @@ DynamoDB is susceptible to rate limiting, particularly due to overconsuming what
BoltDB is an embedded database on disk. It is not replicated and thus cannot be used for high availability or clustered Loki deployments, but is commonly paired with a `filesystem` chunk store for proof of concept deployments, trying out Loki, and development. The [boltdb-shipper](https://grafana.com/docs/loki/<LOKI_VERSION>/operations/storage/boltdb-shipper/) aims to support clustered deployments using `boltdb` as an index.
-{{< collapse title="Title of hidden content" >}}
+{{< admonition type="note" >}}
This storage type for indexes is deprecated and may be removed in future major versions of Loki.
-{{< /collapse >}}
+{{< /admonition >}}
## Schema Config
@@ -440,9 +440,9 @@ storage_config:
### On premise deployment (Cassandra+Cassandra)
-{{< collapse title="Title of hidden content" >}}
+{{< admonition type="note" >}}
Cassandra as storage backend for chunks and indexes is deprecated.
-{{< /collapse >}}
+{{< /admonition >}}
**Keeping this for posterity, but this is likely not a common config. Cassandra should work and could be faster in some situations but is likely much more expensive.**
|
docs
|
Replace shortcodes on Storage topic (#13748)
|
ad85cc2276a42d899fe27b27f5940550f47bcc21
|
2024-12-18 21:18:22
|
J Stickler
|
docs: fix admonition, line spacing (#15462)
| false
|
diff --git a/docs/sources/send-data/docker-driver/_index.md b/docs/sources/send-data/docker-driver/_index.md
index 5fdb9c208eecd..08d3a5f8b4378 100644
--- a/docs/sources/send-data/docker-driver/_index.md
+++ b/docs/sources/send-data/docker-driver/_index.md
@@ -17,9 +17,7 @@ Docker plugins are not supported on Windows; see the [Docker Engine managed plug
{{< /admonition >}}
Documentation on configuring the Loki Docker Driver can be found on the
-[configuration page]({{< relref "./configuration" >}}).
-
-If you have any questions or issues using the Docker plugin, open an issue in
+If you have any questions or issues using the Docker plugin, open an issue in
the [Loki repository](https://github.com/grafana/loki/issues).
## Install the Docker driver client
@@ -31,16 +29,18 @@ Run the following command to install the plugin, updating the release version if
```bash
docker plugin install grafana/loki-docker-driver:2.9.2 --alias loki --grant-all-permissions
```
-{{% admonition type="note" %}}
+
+{{< admonition type="note" >}}
Add `-arm64` to the image tag for ARM64 hosts.
-{{% /admonition %}}
+{{< /admonition >}}
-To check installed plugins, use the `docker plugin ls` command.
+To check installed plugins, use the `docker plugin ls` command.
Plugins that have started successfully are listed as enabled:
```bash
-$ docker plugin ls
+docker plugin ls
```
+
You should see output similar to the following:
```bash
@@ -48,7 +48,7 @@ ID NAME DESCRIPTION ENABLED
ac720b8fcfdb loki Loki Logging Driver true
```
-Once you have successfully installed the plugin you can [configure]({{< relref "./configuration" >}}) it.
+Once you have successfully installed the plugin you can [configure](https://grafana.com/docs/loki/<LOKI_VERSION>/configure/) it.
## Upgrade the Docker driver client
@@ -61,6 +61,7 @@ docker plugin upgrade loki grafana/loki-docker-driver:2.9.2 --grant-all-permissi
docker plugin enable loki
systemctl restart docker
```
+
{{< admonition type="note" >}}
Update the version number to the appropriate version.
{{< /admonition >}}
@@ -80,4 +81,4 @@ The driver keeps all logs in memory and will drop log entries if Loki is not rea
The wait time can be lowered by setting `loki-retries=2`, `loki-max-backoff=800ms`, `loki-timeout=1s` and `keep-file=true`. This way the daemon will be locked only for a short time and the logs will be persisted locally when the Loki client is unable to re-connect.
-To avoid this issue, use the Promtail [Docker target]({{< relref "../../send-data/promtail/configuration#docker" >}}) or [Docker service discovery]({{< relref "../../send-data/promtail/configuration#docker_sd_configs" >}}).
+To avoid this issue, use the Promtail [Docker target](https://grafana.com/docs/loki/<LOKI_VERSION>/send-data/promtail/configuration/#docker) or [Docker service discovery](https://grafana.com/docs/loki/<LOKI_VERSION>/send-data/promtail/configuration/#docker_sd_configs).
|
docs
|
fix admonition, line spacing (#15462)
|
0695424f7dd62435df3a9981276b40f3c5ef5641
|
2023-10-26 14:00:31
|
renovate[bot]
|
fix(deps): update module google.golang.org/grpc [security] (main) (#11031)
| false
|
diff --git a/go.mod b/go.mod
index 77228a1eaa12d..ddf8b367742f5 100644
--- a/go.mod
+++ b/go.mod
@@ -104,7 +104,7 @@ require (
golang.org/x/sys v0.13.0
golang.org/x/time v0.3.0
google.golang.org/api v0.132.0
- google.golang.org/grpc v1.58.2
+ google.golang.org/grpc v1.58.3
gopkg.in/alecthomas/kingpin.v2 v2.2.6
gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1
diff --git a/go.sum b/go.sum
index 74f9d98ad4fc1..a141a9213dd6a 100644
--- a/go.sum
+++ b/go.sum
@@ -2518,8 +2518,8 @@ google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACu
google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
-google.golang.org/grpc v1.58.2 h1:SXUpjxeVF3FKrTYQI4f4KvbGD5u2xccdYdurwowix5I=
-google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0=
+google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ=
+google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
diff --git a/pkg/push/go.mod b/pkg/push/go.mod
index 202c37781699e..f0eaf486ab343 100644
--- a/pkg/push/go.mod
+++ b/pkg/push/go.mod
@@ -6,7 +6,7 @@ require (
github.com/gogo/protobuf v1.3.2
github.com/stretchr/testify v1.8.2
golang.org/x/exp v0.0.0-20230801115018-d63ba01acd4b
- google.golang.org/grpc v1.53.0
+ google.golang.org/grpc v1.56.3
)
require (
@@ -17,8 +17,8 @@ require (
golang.org/x/net v0.17.0 // indirect
golang.org/x/sys v0.13.0 // indirect
golang.org/x/text v0.13.0 // indirect
- google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect
- google.golang.org/protobuf v1.29.1 // indirect
+ google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
+ google.golang.org/protobuf v1.30.0 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)
diff --git a/pkg/push/go.sum b/pkg/push/go.sum
index 2abbf99450f81..d2d95b4bd9320 100644
--- a/pkg/push/go.sum
+++ b/pkg/push/go.sum
@@ -39,8 +39,6 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ=
-golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -49,14 +47,10 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
-golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
-golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -67,14 +61,14 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA=
-google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s=
-google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc=
-google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw=
+google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A=
+google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU=
+google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc=
+google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.29.1 h1:7QBf+IK2gx70Ap/hDsOmam3GE0v9HicjfEdAxE62UoM=
-google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
+google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
index 8d3a353c1d581..c06db679d89cc 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
@@ -171,15 +171,10 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
ID: http2.SettingMaxFrameSize,
Val: http2MaxFrameLen,
}}
- // TODO(zhaoq): Have a better way to signal "no limit" because 0 is
- // permitted in the HTTP2 spec.
- maxStreams := config.MaxStreams
- if maxStreams == 0 {
- maxStreams = math.MaxUint32
- } else {
+ if config.MaxStreams != math.MaxUint32 {
isettings = append(isettings, http2.Setting{
ID: http2.SettingMaxConcurrentStreams,
- Val: maxStreams,
+ Val: config.MaxStreams,
})
}
dynamicWindow := true
@@ -258,7 +253,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
framer: framer,
readerDone: make(chan struct{}),
writerDone: make(chan struct{}),
- maxStreams: maxStreams,
+ maxStreams: config.MaxStreams,
inTapHandle: config.InTapHandle,
fc: &trInFlow{limit: uint32(icwz)},
state: reachable,
diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go
index 244123c6c5a89..eeae92fbe0204 100644
--- a/vendor/google.golang.org/grpc/server.go
+++ b/vendor/google.golang.org/grpc/server.go
@@ -115,12 +115,6 @@ type serviceInfo struct {
mdata any
}
-type serverWorkerData struct {
- st transport.ServerTransport
- wg *sync.WaitGroup
- stream *transport.Stream
-}
-
// Server is a gRPC server to serve RPC requests.
type Server struct {
opts serverOptions
@@ -145,7 +139,7 @@ type Server struct {
channelzID *channelz.Identifier
czData *channelzData
- serverWorkerChannel chan *serverWorkerData
+ serverWorkerChannel chan func()
}
type serverOptions struct {
@@ -179,6 +173,7 @@ type serverOptions struct {
}
var defaultServerOptions = serverOptions{
+ maxConcurrentStreams: math.MaxUint32,
maxReceiveMessageSize: defaultServerMaxReceiveMessageSize,
maxSendMessageSize: defaultServerMaxSendMessageSize,
connectionTimeout: 120 * time.Second,
@@ -404,6 +399,9 @@ func MaxSendMsgSize(m int) ServerOption {
// MaxConcurrentStreams returns a ServerOption that will apply a limit on the number
// of concurrent streams to each ServerTransport.
func MaxConcurrentStreams(n uint32) ServerOption {
+ if n == 0 {
+ n = math.MaxUint32
+ }
return newFuncServerOption(func(o *serverOptions) {
o.maxConcurrentStreams = n
})
@@ -605,24 +603,19 @@ const serverWorkerResetThreshold = 1 << 16
// [1] https://github.com/golang/go/issues/18138
func (s *Server) serverWorker() {
for completed := 0; completed < serverWorkerResetThreshold; completed++ {
- data, ok := <-s.serverWorkerChannel
+ f, ok := <-s.serverWorkerChannel
if !ok {
return
}
- s.handleSingleStream(data)
+ f()
}
go s.serverWorker()
}
-func (s *Server) handleSingleStream(data *serverWorkerData) {
- defer data.wg.Done()
- s.handleStream(data.st, data.stream, s.traceInfo(data.st, data.stream))
-}
-
// initServerWorkers creates worker goroutines and a channel to process incoming
// connections to reduce the time spent overall on runtime.morestack.
func (s *Server) initServerWorkers() {
- s.serverWorkerChannel = make(chan *serverWorkerData)
+ s.serverWorkerChannel = make(chan func())
for i := uint32(0); i < s.opts.numServerWorkers; i++ {
go s.serverWorker()
}
@@ -982,21 +975,26 @@ func (s *Server) serveStreams(st transport.ServerTransport) {
defer st.Close(errors.New("finished serving streams for the server transport"))
var wg sync.WaitGroup
+ streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams)
st.HandleStreams(func(stream *transport.Stream) {
wg.Add(1)
+
+ streamQuota.acquire()
+ f := func() {
+ defer streamQuota.release()
+ defer wg.Done()
+ s.handleStream(st, stream, s.traceInfo(st, stream))
+ }
+
if s.opts.numServerWorkers > 0 {
- data := &serverWorkerData{st: st, wg: &wg, stream: stream}
select {
- case s.serverWorkerChannel <- data:
+ case s.serverWorkerChannel <- f:
return
default:
// If all stream workers are busy, fallback to the default code path.
}
}
- go func() {
- defer wg.Done()
- s.handleStream(st, stream, s.traceInfo(st, stream))
- }()
+ go f()
}, func(ctx context.Context, method string) context.Context {
if !EnableTracing {
return ctx
@@ -2091,3 +2089,34 @@ func validateSendCompressor(name, clientCompressors string) error {
}
return fmt.Errorf("client does not support compressor %q", name)
}
+
+// atomicSemaphore implements a blocking, counting semaphore. acquire should be
+// called synchronously; release may be called asynchronously.
+type atomicSemaphore struct {
+ n atomic.Int64
+ wait chan struct{}
+}
+
+func (q *atomicSemaphore) acquire() {
+ if q.n.Add(-1) < 0 {
+ // We ran out of quota. Block until a release happens.
+ <-q.wait
+ }
+}
+
+func (q *atomicSemaphore) release() {
+ // N.B. the "<= 0" check below should allow for this to work with multiple
+ // concurrent calls to acquire, but also note that with synchronous calls to
+ // acquire, as our system does, n will never be less than -1. There are
+ // fairness issues (queuing) to consider if this was to be generalized.
+ if q.n.Add(1) <= 0 {
+ // An acquire was waiting on us. Unblock it.
+ q.wait <- struct{}{}
+ }
+}
+
+func newHandlerQuota(n uint32) *atomicSemaphore {
+ a := &atomicSemaphore{wait: make(chan struct{}, 1)}
+ a.n.Store(int64(n))
+ return a
+}
diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go
index d3f5bcbfcef8b..724ad21021300 100644
--- a/vendor/google.golang.org/grpc/version.go
+++ b/vendor/google.golang.org/grpc/version.go
@@ -19,4 +19,4 @@
package grpc
// Version is the current grpc version.
-const Version = "1.58.2"
+const Version = "1.58.3"
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 24ea181a4b1b9..8e76eb21adbf8 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1747,7 +1747,7 @@ google.golang.org/genproto/googleapis/api/expr/v1alpha1
google.golang.org/genproto/googleapis/rpc/code
google.golang.org/genproto/googleapis/rpc/errdetails
google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.58.2
+# google.golang.org/grpc v1.58.3
## explicit; go 1.19
google.golang.org/grpc
google.golang.org/grpc/attributes
|
fix
|
update module google.golang.org/grpc [security] (main) (#11031)
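The gRPC security patch above caps concurrent streams by acquiring a blocking counting semaphore (`atomicSemaphore`) before each stream handler runs and releasing it when the handler finishes. Below is a simplified, hedged Go sketch of that acquire/release pattern around handlers; `semaphore` and `newSemaphore` are stand-ins for the patch's `atomicSemaphore`/`newHandlerQuota`, not the actual grpc-go internals.

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// semaphore is a simplified counting semaphore in the spirit of the
// atomicSemaphore added by the patch: acquire blocks once the quota is
// exhausted, release unblocks exactly one waiter.
type semaphore struct {
	n    atomic.Int64
	wait chan struct{}
}

func newSemaphore(quota int64) *semaphore {
	s := &semaphore{wait: make(chan struct{}, 1)}
	s.n.Store(quota)
	return s
}

func (s *semaphore) acquire() {
	if s.n.Add(-1) < 0 {
		<-s.wait // out of quota: block until a release happens
	}
}

func (s *semaphore) release() {
	if s.n.Add(1) <= 0 {
		s.wait <- struct{}{} // an acquire was waiting on us, unblock it
	}
}

func main() {
	// Cap "stream handlers" at 2 in flight, mirroring MaxConcurrentStreams.
	quota := newSemaphore(2)
	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		quota.acquire()
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			defer quota.release()
			fmt.Println("handling stream", id)
		}(i)
	}
	wg.Wait()
}
```

As in the patch, acquire is called synchronously (per incoming stream) while release may happen asynchronously when the handler goroutine returns, so the counter never drops below -1 and a single-slot wait channel is sufficient.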
|
a3feacea347a636e733df36c362ac15667008534
|
2023-08-23 18:58:58
|
dependabot[bot]
|
build(deps): bump github.com/google/uuid from 1.3.0 to 1.3.1 in /operator (#10305)
| false
|
diff --git a/operator/go.mod b/operator/go.mod
index c51d3a4daaa04..1690f35c7287c 100644
--- a/operator/go.mod
+++ b/operator/go.mod
@@ -6,7 +6,7 @@ require (
github.com/ViaQ/logerr/v2 v2.1.0
github.com/go-logr/logr v1.2.3
github.com/google/go-cmp v0.5.9
- github.com/google/uuid v1.3.0
+ github.com/google/uuid v1.3.1
github.com/grafana/loki v1.6.2-0.20230403212622-90888a0cc737
github.com/grafana/loki/operator/apis/loki v0.0.0-00010101000000-000000000000
github.com/imdario/mergo v0.3.13
diff --git a/operator/go.sum b/operator/go.sum
index 4c8b9a98ed3e3..de21537800333 100644
--- a/operator/go.sum
+++ b/operator/go.sum
@@ -403,8 +403,8 @@ github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm4
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4=
+github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
build
|
bump github.com/google/uuid from 1.3.0 to 1.3.1 in /operator (#10305)
|
1fa952dbf29554fba3da3b88b1292fbb8c7a27e3
|
2025-02-07 22:16:22
|
Cyril Tovena
|
refactor(dataobj): Consolidate configuration and add querier support (#16144)
| false
|
diff --git a/docs/sources/shared/configuration.md b/docs/sources/shared/configuration.md
index 4b7bbd3c48c52..7d64bfe9a753e 100644
--- a/docs/sources/shared/configuration.md
+++ b/docs/sources/shared/configuration.md
@@ -786,38 +786,43 @@ kafka_config:
# CLI flag: -kafka.max-consumer-lag-at-startup
[max_consumer_lag_at_startup: <duration> | default = 15s]
-dataobj_consumer:
- builderconfig:
- # The size of the target page to use for the data object builder.
- # CLI flag: -dataobj-consumer.target-page-size
- [target_page_size: <int> | default = 2MiB]
-
- # The size of the target object to use for the data object builder.
- # CLI flag: -dataobj-consumer.target-object-size
- [target_object_size: <int> | default = 1GiB]
-
- # Configures a maximum size for sections, for sections that support it.
- # CLI flag: -dataobj-consumer.target-section-size
- [target_section_size: <int> | default = 128MiB]
-
- # The size of the buffer to use for sorting logs.
- # CLI flag: -dataobj-consumer.buffer-size
- [buffer_size: <int> | default = 16MiB]
-
- uploader:
- # The size of the SHA prefix to use for generating object storage keys for
- # data objects.
- # CLI flag: -dataobj-consumer.sha-prefix-size
- [shaprefixsize: <int> | default = 2]
+dataobj:
+ consumer:
+ builderconfig:
+ # The size of the target page to use for the data object builder.
+ # CLI flag: -dataobj-consumer.target-page-size
+ [target_page_size: <int> | default = 2MiB]
+
+ # The size of the target object to use for the data object builder.
+ # CLI flag: -dataobj-consumer.target-object-size
+ [target_object_size: <int> | default = 1GiB]
+
+ # Configures a maximum size for sections, for sections that support it.
+ # CLI flag: -dataobj-consumer.target-section-size
+ [target_section_size: <int> | default = 128MiB]
+
+ # The size of the buffer to use for sorting logs.
+ # CLI flag: -dataobj-consumer.buffer-size
+ [buffer_size: <int> | default = 16MiB]
+
+ uploader:
+ # The size of the SHA prefix to use for generating object storage keys for
+ # data objects.
+ # CLI flag: -dataobj-consumer.sha-prefix-size
+ [shaprefixsize: <int> | default = 2]
+
+ querier:
+ # Enable the dataobj querier.
+ # CLI flag: -dataobj-querier-enabled
+ [enabled: <boolean> | default = false]
- # The prefix to use for the storage bucket.
- # CLI flag: -dataobj-consumer.storage-bucket-prefix
- [storage_bucket_prefix: <string> | default = "dataobj/"]
+ # The date of the first day of when the dataobj querier should start
+ # querying from. In YYYY-MM-DD format, for example: 2018-04-15.
+ # CLI flag: -dataobj-querier-from
+ [from: <daytime> | default = 1970-01-01]
-dataobj_explorer:
- # Prefix to use when exploring the bucket. If set, only objects under this
- # prefix will be visible.
- # CLI flag: -dataobj-explorer.storage-bucket-prefix
+ # The prefix to use for the storage bucket.
+ # CLI flag: -dataobj-storage-bucket-prefix
[storage_bucket_prefix: <string> | default = "dataobj/"]
# Configuration for 'runtime config' module, responsible for reloading runtime
diff --git a/pkg/dataobj/config/config.go b/pkg/dataobj/config/config.go
new file mode 100644
index 0000000000000..ea28e5713968a
--- /dev/null
+++ b/pkg/dataobj/config/config.go
@@ -0,0 +1,31 @@
+package config
+
+import (
+ "flag"
+
+ "github.com/grafana/loki/v3/pkg/dataobj/consumer"
+ "github.com/grafana/loki/v3/pkg/dataobj/querier"
+)
+
+type Config struct {
+ Consumer consumer.Config `yaml:"consumer"`
+ Querier querier.Config `yaml:"querier"`
+ // StorageBucketPrefix is the prefix to use for the storage bucket.
+ StorageBucketPrefix string `yaml:"storage_bucket_prefix"`
+}
+
+func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
+ cfg.Consumer.RegisterFlags(f)
+ cfg.Querier.RegisterFlags(f)
+ f.StringVar(&cfg.StorageBucketPrefix, "dataobj-storage-bucket-prefix", "dataobj/", "The prefix to use for the storage bucket.")
+}
+
+func (cfg *Config) Validate() error {
+ if err := cfg.Consumer.Validate(); err != nil {
+ return err
+ }
+ if err := cfg.Querier.Validate(); err != nil {
+ return err
+ }
+ return nil
+}
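The new pkg/dataobj/config package above consolidates the consumer and querier settings under one parent config that fans out RegisterFlags and Validate to its children. A small self-contained sketch of that composition pattern follows; the childConfig/parentConfig types and the demo-* flag names are made up for illustration and are not the real Loki types.

package main

import (
	"errors"
	"flag"
	"fmt"
)

// childConfig stands in for a component config such as the consumer or
// querier config; its fields and flags are illustrative only.
type childConfig struct {
	Enabled bool
	Target  int
}

func (c *childConfig) RegisterFlags(prefix string, f *flag.FlagSet) {
	f.BoolVar(&c.Enabled, prefix+".enabled", false, "Enable the component.")
	f.IntVar(&c.Target, prefix+".target", 10, "Target size for the component.")
}

func (c *childConfig) Validate() error {
	if c.Enabled && c.Target <= 0 {
		return errors.New("target must be positive when enabled")
	}
	return nil
}

// parentConfig mirrors the shape of the consolidated dataobj config: shared
// settings (the bucket prefix) live here, the rest is delegated to children.
type parentConfig struct {
	Consumer childConfig
	Querier  childConfig
	Prefix   string
}

func (c *parentConfig) RegisterFlags(f *flag.FlagSet) {
	c.Consumer.RegisterFlags("demo-consumer", f)
	c.Querier.RegisterFlags("demo-querier", f)
	f.StringVar(&c.Prefix, "demo-storage-bucket-prefix", "dataobj/", "Prefix for the storage bucket.")
}

func (c *parentConfig) Validate() error {
	if err := c.Consumer.Validate(); err != nil {
		return fmt.Errorf("invalid consumer config: %w", err)
	}
	if err := c.Querier.Validate(); err != nil {
		return fmt.Errorf("invalid querier config: %w", err)
	}
	return nil
}

func main() {
	var cfg parentConfig
	fs := flag.NewFlagSet("demo", flag.ExitOnError)
	cfg.RegisterFlags(fs)
	_ = fs.Parse([]string{"-demo-querier.enabled", "-demo-storage-bucket-prefix", "objects/"})
	if err := cfg.Validate(); err != nil {
		fmt.Println("config error:", err)
		return
	}
	fmt.Printf("prefix=%q querier.enabled=%v\n", cfg.Prefix, cfg.Querier.Enabled)
}

The design choice mirrored here is that settings used by several components (the bucket prefix) move to the parent, while component-specific knobs stay with the component that owns them.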
diff --git a/pkg/dataobj/consumer/config.go b/pkg/dataobj/consumer/config.go
index 04b1ba58bc2ba..3998e28d21d14 100644
--- a/pkg/dataobj/consumer/config.go
+++ b/pkg/dataobj/consumer/config.go
@@ -10,8 +10,6 @@ import (
type Config struct {
dataobj.BuilderConfig
UploaderConfig uploader.Config `yaml:"uploader"`
- // StorageBucketPrefix is the prefix to use for the storage bucket.
- StorageBucketPrefix string `yaml:"storage_bucket_prefix"`
}
func (cfg *Config) Validate() error {
@@ -29,5 +27,4 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
cfg.BuilderConfig.RegisterFlagsWithPrefix(prefix, f)
cfg.UploaderConfig.RegisterFlagsWithPrefix(prefix, f)
- f.StringVar(&cfg.StorageBucketPrefix, prefix+"storage-bucket-prefix", "dataobj/", "The prefix to use for the storage bucket.")
}
diff --git a/pkg/dataobj/consumer/service.go b/pkg/dataobj/consumer/service.go
index 3522d3a9492b1..df7c570fe8eff 100644
--- a/pkg/dataobj/consumer/service.go
+++ b/pkg/dataobj/consumer/service.go
@@ -42,9 +42,6 @@ type Service struct {
}
func New(kafkaCfg kafka.Config, cfg Config, topicPrefix string, bucket objstore.Bucket, instanceID string, partitionRing ring.PartitionRingReader, reg prometheus.Registerer, logger log.Logger) *Service {
- if cfg.StorageBucketPrefix != "" {
- bucket = objstore.NewPrefixedBucket(bucket, cfg.StorageBucketPrefix)
- }
s := &Service{
logger: log.With(logger, "component", groupName),
cfg: cfg,
diff --git a/pkg/dataobj/explorer/config.go b/pkg/dataobj/explorer/config.go
deleted file mode 100644
index d5212bdd7b413..0000000000000
--- a/pkg/dataobj/explorer/config.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package explorer
-
-import "flag"
-
-// Config holds the configuration for the explorer service
-type Config struct {
- // StorageBucketPrefix is the prefix to use when exploring the bucket
- StorageBucketPrefix string `yaml:"storage_bucket_prefix"`
-}
-
-// RegisterFlags registers the flags for the explorer configuration
-func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
- f.StringVar(&cfg.StorageBucketPrefix, "dataobj-explorer.storage-bucket-prefix", "dataobj/", "Prefix to use when exploring the bucket. If set, only objects under this prefix will be visible.")
-}
diff --git a/pkg/dataobj/metastore/metastore.go b/pkg/dataobj/metastore/metastore.go
index be92d636ae0f3..0d6b8de85f20e 100644
--- a/pkg/dataobj/metastore/metastore.go
+++ b/pkg/dataobj/metastore/metastore.go
@@ -23,15 +23,13 @@ const (
metastoreWindowSize = 12 * time.Hour
)
-var (
- // Define our own builder config because metastore objects are significantly smaller.
- metastoreBuilderCfg = dataobj.BuilderConfig{
- TargetObjectSize: 32 * 1024 * 1024,
- TargetPageSize: 4 * 1024 * 1024,
- BufferSize: 32 * 1024 * 1024, // 8x page size
- TargetSectionSize: 4 * 1024 * 1024, // object size / 8
- }
-)
+// Define our own builder config because metastore objects are significantly smaller.
+var metastoreBuilderCfg = dataobj.BuilderConfig{
+ TargetObjectSize: 32 * 1024 * 1024,
+ TargetPageSize: 4 * 1024 * 1024,
+ BufferSize: 32 * 1024 * 1024, // 8x page size
+ TargetSectionSize: 4 * 1024 * 1024, // object size / 8
+}
type Manager struct {
metastoreBuilder *dataobj.Builder
diff --git a/pkg/dataobj/querier/store.go b/pkg/dataobj/querier/store.go
new file mode 100644
index 0000000000000..31fcb96c0b0a0
--- /dev/null
+++ b/pkg/dataobj/querier/store.go
@@ -0,0 +1,96 @@
+package querier
+
+import (
+ "context"
+ "flag"
+ "fmt"
+
+ "github.com/prometheus/common/model"
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/thanos-io/objstore"
+
+ "github.com/grafana/loki/v3/pkg/iter"
+ "github.com/grafana/loki/v3/pkg/logproto"
+ "github.com/grafana/loki/v3/pkg/logql"
+ "github.com/grafana/loki/v3/pkg/querier"
+ "github.com/grafana/loki/v3/pkg/storage/chunk"
+ storageconfig "github.com/grafana/loki/v3/pkg/storage/config"
+ "github.com/grafana/loki/v3/pkg/storage/stores/index/stats"
+)
+
+var _ querier.Store = &Store{}
+
+type Config struct {
+ Enabled bool `yaml:"enabled" doc:"description=Enable the dataobj querier."`
+ From storageconfig.DayTime `yaml:"from" doc:"description=The date of the first day of when the dataobj querier should start querying from. In YYYY-MM-DD format, for example: 2018-04-15."`
+}
+
+func (c *Config) RegisterFlags(f *flag.FlagSet) {
+ f.BoolVar(&c.Enabled, "dataobj-querier-enabled", false, "Enable the dataobj querier.")
+ f.Var(&c.From, "dataobj-querier-from", "The start time to query from.")
+}
+
+func (c *Config) Validate() error {
+ if c.Enabled && c.From.ModelTime().Time().IsZero() {
+ return fmt.Errorf("from is required when dataobj querier is enabled")
+ }
+ return nil
+}
+
+type Store struct {
+ bucket objstore.Bucket
+}
+
+func NewStore(bucket objstore.Bucket) *Store {
+ return &Store{
+ bucket: bucket,
+ }
+}
+
+// SelectLogs implements querier.Store
+func (s *Store) SelectLogs(_ context.Context, _ logql.SelectLogParams) (iter.EntryIterator, error) {
+ // TODO: Implement
+ return iter.NoopEntryIterator, nil
+}
+
+// SelectSamples implements querier.Store
+func (s *Store) SelectSamples(_ context.Context, _ logql.SelectSampleParams) (iter.SampleIterator, error) {
+ // TODO: Implement
+ return iter.NoopSampleIterator, nil
+}
+
+// SelectSeries implements querier.Store
+func (s *Store) SelectSeries(_ context.Context, _ logql.SelectLogParams) ([]logproto.SeriesIdentifier, error) {
+ // TODO: Implement
+ return []logproto.SeriesIdentifier{}, nil
+}
+
+// LabelValuesForMetricName implements querier.Store
+func (s *Store) LabelValuesForMetricName(_ context.Context, _ string, _ model.Time, _ model.Time, _ string, _ string, _ ...*labels.Matcher) ([]string, error) {
+ // TODO: Implement
+ return []string{}, nil
+}
+
+// LabelNamesForMetricName implements querier.Store
+func (s *Store) LabelNamesForMetricName(_ context.Context, _ string, _ model.Time, _ model.Time, _ string, _ ...*labels.Matcher) ([]string, error) {
+ // TODO: Implement
+ return []string{}, nil
+}
+
+// Stats implements querier.Store
+func (s *Store) Stats(_ context.Context, _ string, _ model.Time, _ model.Time, _ ...*labels.Matcher) (*stats.Stats, error) {
+ // TODO: Implement
+ return &stats.Stats{}, nil
+}
+
+// Volume implements querier.Store
+func (s *Store) Volume(_ context.Context, _ string, _ model.Time, _ model.Time, _ int32, _ []string, _ string, _ ...*labels.Matcher) (*logproto.VolumeResponse, error) {
+ // TODO: Implement
+ return &logproto.VolumeResponse{}, nil
+}
+
+// GetShards implements querier.Store
+func (s *Store) GetShards(_ context.Context, _ string, _ model.Time, _ model.Time, _ uint64, _ chunk.Predicate) (*logproto.ShardsResponse, error) {
+ // TODO: Implement
+ return &logproto.ShardsResponse{}, nil
+}
diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go
index 5295be083f900..7e4f5e318fea6 100644
--- a/pkg/loki/loki.go
+++ b/pkg/loki/loki.go
@@ -38,8 +38,8 @@ import (
"github.com/grafana/loki/v3/pkg/compactor"
compactorclient "github.com/grafana/loki/v3/pkg/compactor/client"
"github.com/grafana/loki/v3/pkg/compactor/deletion"
+ dataobjconfig "github.com/grafana/loki/v3/pkg/dataobj/config"
"github.com/grafana/loki/v3/pkg/dataobj/consumer"
- "github.com/grafana/loki/v3/pkg/dataobj/explorer"
"github.com/grafana/loki/v3/pkg/distributor"
"github.com/grafana/loki/v3/pkg/indexgateway"
"github.com/grafana/loki/v3/pkg/ingester"
@@ -110,8 +110,7 @@ type Config struct {
TableManager index.TableManagerConfig `yaml:"table_manager,omitempty"`
MemberlistKV memberlist.KVConfig `yaml:"memberlist"`
KafkaConfig kafka.Config `yaml:"kafka_config,omitempty" category:"experimental"`
- DataObjConsumer consumer.Config `yaml:"dataobj_consumer,omitempty" category:"experimental"`
- DataObjExplorer explorer.Config `yaml:"dataobj_explorer,omitempty" category:"experimental"`
+ DataObj dataobjconfig.Config `yaml:"dataobj,omitempty" category:"experimental"`
RuntimeConfig runtimeconfig.Config `yaml:"runtime_config,omitempty"`
OperationalConfig runtime.Config `yaml:"operational_config,omitempty"`
@@ -193,8 +192,7 @@ func (c *Config) RegisterFlags(f *flag.FlagSet) {
c.KafkaConfig.RegisterFlags(f)
c.BlockBuilder.RegisterFlags(f)
c.BlockScheduler.RegisterFlags(f)
- c.DataObjExplorer.RegisterFlags(f)
- c.DataObjConsumer.RegisterFlags(f)
+ c.DataObj.RegisterFlags(f)
}
func (c *Config) registerServerFlagsWithChangedDefaultValues(fs *flag.FlagSet) {
@@ -310,8 +308,8 @@ func (c *Config) Validate() error {
if err := c.KafkaConfig.Validate(); err != nil {
errs = append(errs, errors.Wrap(err, "CONFIG ERROR: invalid kafka_config config"))
}
- if err := c.DataObjConsumer.Validate(); err != nil {
- errs = append(errs, errors.Wrap(err, "CONFIG ERROR: invalid dataobj_consumer config"))
+ if err := c.DataObj.Validate(); err != nil {
+ errs = append(errs, errors.Wrap(err, "CONFIG ERROR: invalid dataobj config"))
}
}
if err := c.Distributor.Validate(); err != nil {
diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go
index 540b25efe9f59..5a49ea5362188 100644
--- a/pkg/loki/modules.go
+++ b/pkg/loki/modules.go
@@ -53,6 +53,7 @@ import (
"github.com/grafana/loki/v3/pkg/compactor/generationnumber"
"github.com/grafana/loki/v3/pkg/dataobj/consumer"
"github.com/grafana/loki/v3/pkg/dataobj/explorer"
+ dataobjquerier "github.com/grafana/loki/v3/pkg/dataobj/querier"
"github.com/grafana/loki/v3/pkg/distributor"
"github.com/grafana/loki/v3/pkg/indexgateway"
"github.com/grafana/loki/v3/pkg/ingester"
@@ -396,6 +397,36 @@ func (t *Loki) initCodec() (services.Service, error) {
return nil, nil
}
+func (t *Loki) getQuerierStore() (querier.Store, error) {
+ if !t.Cfg.DataObj.Querier.Enabled {
+ return t.Store, nil
+ }
+
+ // verify that there's no schema with a date after the dataobj querier from date
+ for _, schema := range t.Cfg.SchemaConfig.Configs {
+ if schema.From.After(t.Cfg.DataObj.Querier.From) {
+ return nil, fmt.Errorf("dataobj querier From should be after the last schema date")
+ }
+ }
+
+ store, err := t.createDataObjBucket("dataobj-querier")
+ if err != nil {
+ return nil, err
+ }
+
+ storeCombiner := querier.NewStoreCombiner([]querier.StoreConfig{
+ {
+ Store: dataobjquerier.NewStore(store),
+ From: t.Cfg.DataObj.Querier.From.Time,
+ },
+ {
+ Store: t.Store,
+ },
+ })
+
+ return storeCombiner, nil
+}
+
func (t *Loki) initQuerier() (services.Service, error) {
logger := log.With(util_log.Logger, "component", "querier")
if t.Cfg.Ingester.QueryStoreMaxLookBackPeriod != 0 {
@@ -408,7 +439,12 @@ func (t *Loki) initQuerier() (services.Service, error) {
return nil, err
}
- t.Querier, err = querier.New(t.Cfg.Querier, t.Store, t.ingesterQuerier, t.Overrides, deleteStore, logger)
+ querierStore, err := t.getQuerierStore()
+ if err != nil {
+ return nil, err
+ }
+
+ t.Querier, err = querier.New(t.Cfg.Querier, querierStore, t.ingesterQuerier, t.Overrides, deleteStore, logger)
if err != nil {
return nil, err
}
@@ -1882,21 +1918,11 @@ func (t *Loki) initBlockScheduler() (services.Service, error) {
}
func (t *Loki) initDataObjExplorer() (services.Service, error) {
- schema, err := t.Cfg.SchemaConfig.SchemaForTime(model.Now())
- if err != nil {
- return nil, fmt.Errorf("failed to get schema for now: %w", err)
- }
-
- var store objstore.Bucket
- store, err = bucket.NewClient(context.Background(), schema.ObjectType, t.Cfg.StorageConfig.ObjectStore.Config, "dataobj-explorer", util_log.Logger)
+ store, err := t.createDataObjBucket("dataobj-explorer")
if err != nil {
return nil, err
}
- if t.Cfg.DataObjExplorer.StorageBucketPrefix != "" {
- store = objstore.NewPrefixedBucket(store, t.Cfg.DataObjExplorer.StorageBucketPrefix)
- }
-
explorer, err := explorer.New(store, util_log.Logger)
if err != nil {
return nil, err
@@ -1910,19 +1936,15 @@ func (t *Loki) initDataObjConsumer() (services.Service, error) {
if !t.Cfg.Ingester.KafkaIngestion.Enabled {
return nil, nil
}
- schema, err := t.Cfg.SchemaConfig.SchemaForTime(model.Now())
- if err != nil {
- return nil, fmt.Errorf("failed to get schema for now: %w", err)
- }
-
- store, err := bucket.NewClient(context.Background(), schema.ObjectType, t.Cfg.StorageConfig.ObjectStore.Config, "dataobj", util_log.Logger)
+ store, err := t.createDataObjBucket("dataobj-consumer")
if err != nil {
return nil, err
}
+
level.Info(util_log.Logger).Log("msg", "initializing dataobj consumer", "instance", t.Cfg.Ingester.LifecyclerConfig.ID)
t.dataObjConsumer = consumer.New(
t.Cfg.KafkaConfig,
- t.Cfg.DataObjConsumer,
+ t.Cfg.DataObj.Consumer,
t.Cfg.Distributor.TenantTopic.TopicPrefix,
store,
t.Cfg.Ingester.LifecyclerConfig.ID,
@@ -1934,6 +1956,24 @@ func (t *Loki) initDataObjConsumer() (services.Service, error) {
return t.dataObjConsumer, nil
}
+func (t *Loki) createDataObjBucket(name string) (objstore.Bucket, error) {
+ schema, err := t.Cfg.SchemaConfig.SchemaForTime(model.Now())
+ if err != nil {
+ return nil, fmt.Errorf("failed to get schema for now: %w", err)
+ }
+ var objstoreBucket objstore.Bucket
+ objstoreBucket, err = bucket.NewClient(context.Background(), schema.ObjectType, t.Cfg.StorageConfig.ObjectStore.Config, name, util_log.Logger)
+ if err != nil {
+ return nil, err
+ }
+
+ if t.Cfg.DataObj.StorageBucketPrefix != "" {
+ objstoreBucket = objstore.NewPrefixedBucket(objstoreBucket, t.Cfg.DataObj.StorageBucketPrefix)
+ }
+
+ return objstoreBucket, nil
+}
+
func (t *Loki) deleteRequestsClient(clientType string, limits limiter.CombinedLimits) (deletion.DeleteRequestsClient, error) {
if !t.supportIndexDeleteRequest() || !t.Cfg.CompactorConfig.RetentionEnabled {
return deletion.NewNoOpDeleteRequestsStore(), nil
@@ -1967,7 +2007,12 @@ func (t *Loki) deleteRequestsClient(clientType string, limits limiter.CombinedLi
}
func (t *Loki) createRulerQueryEngine(logger log.Logger, deleteStore deletion.DeleteRequestsClient) (eng *logql.Engine, err error) {
- q, err := querier.New(t.Cfg.Querier, t.Store, t.ingesterQuerier, t.Overrides, deleteStore, logger)
+ querierStore, err := t.getQuerierStore()
+ if err != nil {
+ return nil, err
+ }
+
+ q, err := querier.New(t.Cfg.Querier, querierStore, t.ingesterQuerier, t.Overrides, deleteStore, logger)
if err != nil {
return nil, fmt.Errorf("could not create querier: %w", err)
}
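A note on the createDataObjBucket helper above: rather than each component applying its own prefix, the shared dataobj prefix is applied once by wrapping the bucket with objstore.NewPrefixedBucket, so the consumer, explorer and querier all see the same key space. A rough sketch of that wrapping, assuming the thanos-io/objstore in-memory bucket behaves like any other Bucket implementation:

package main

import (
	"bytes"
	"context"
	"fmt"

	"github.com/thanos-io/objstore"
)

func main() {
	ctx := context.Background()

	// Backing bucket; Loki builds this with bucket.NewClient, the in-memory
	// implementation stands in for it here.
	base := objstore.NewInMemBucket()

	// Apply the shared dataobj prefix once, as createDataObjBucket does.
	prefixed := objstore.NewPrefixedBucket(base, "dataobj/")

	if err := prefixed.Upload(ctx, "tenant-1/object-0001", bytes.NewReader([]byte("payload"))); err != nil {
		panic(err)
	}

	// The key should resolve to dataobj/tenant-1/object-0001 in the base bucket.
	ok, err := base.Exists(ctx, "dataobj/tenant-1/object-0001")
	if err != nil {
		panic(err)
	}
	fmt.Println("stored under prefix:", ok)
}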
diff --git a/pkg/querier/store_combiner.go b/pkg/querier/store_combiner.go
new file mode 100644
index 0000000000000..1d4be197d9bf3
--- /dev/null
+++ b/pkg/querier/store_combiner.go
@@ -0,0 +1,344 @@
+package querier
+
+import (
+ "context"
+ "sort"
+
+ "github.com/prometheus/common/model"
+ "github.com/prometheus/prometheus/model/labels"
+
+ "github.com/grafana/loki/v3/pkg/iter"
+ "github.com/grafana/loki/v3/pkg/logproto"
+ "github.com/grafana/loki/v3/pkg/logql"
+ "github.com/grafana/loki/v3/pkg/storage/chunk"
+ "github.com/grafana/loki/v3/pkg/storage/stores/index/seriesvolume"
+ "github.com/grafana/loki/v3/pkg/storage/stores/index/stats"
+)
+
+var _ Store = &StoreCombiner{}
+
+// StoreConfig represents a store and its time range configuration
+type StoreConfig struct {
+ Store Store
+ From model.Time // queries >= From will use this store
+}
+
+// StoreCombiner combines multiple stores and routes queries to the appropriate store based on time range
+type StoreCombiner struct {
+ stores []StoreConfig
+}
+
+// NewStoreCombiner creates a new StoreCombiner with the given store configurations.
+// The stores should be provided in order from newest to oldest time ranges.
+func NewStoreCombiner(stores []StoreConfig) *StoreCombiner {
+ // Sort stores by From time in ascending order to ensure proper time range matching
+ sort.Slice(stores, func(i, j int) bool {
+ return stores[i].From < stores[j].From
+ })
+ return &StoreCombiner{stores: stores}
+}
+
+// findStoresForTimeRange returns the stores that should handle the given time range
+func (sc *StoreCombiner) findStoresForTimeRange(from, through model.Time) []storeWithRange {
+ if len(sc.stores) == 0 {
+ return nil
+ }
+
+ // first, find the schema with the highest start _before or at_ from
+ i := sort.Search(len(sc.stores), func(i int) bool {
+ return sc.stores[i].From > from
+ })
+ if i > 0 {
+ i--
+ } else {
+ // This could happen if we get passed a sample from before 1970.
+ i = 0
+ from = sc.stores[0].From
+ }
+
+ // next, find the schema with the lowest start _after_ through
+ j := sort.Search(len(sc.stores), func(j int) bool {
+ return sc.stores[j].From > through
+ })
+
+ var stores []storeWithRange
+ start := from
+ for ; i < j; i++ {
+ nextSchemaStarts := model.Latest
+ if i+1 < len(sc.stores) {
+ nextSchemaStarts = sc.stores[i+1].From
+ }
+
+ end := min(through, nextSchemaStarts-1)
+ stores = append(stores, storeWithRange{
+ store: sc.stores[i].Store,
+ from: start,
+ through: end,
+ })
+
+ start = nextSchemaStarts
+ }
+
+ return stores
+}
+
+type storeWithRange struct {
+ store Store
+ from, through model.Time
+}
+
+// SelectSamples implements Store
+func (sc *StoreCombiner) SelectSamples(ctx context.Context, req logql.SelectSampleParams) (iter.SampleIterator, error) {
+ stores := sc.findStoresForTimeRange(model.TimeFromUnixNano(req.Start.UnixNano()), model.TimeFromUnixNano(req.End.UnixNano()))
+
+ if len(stores) == 0 {
+ return iter.NoopSampleIterator, nil
+ }
+
+ if len(stores) == 1 {
+ return stores[0].store.SelectSamples(ctx, req)
+ }
+
+ iters := make([]iter.SampleIterator, 0, len(stores))
+ for _, s := range stores {
+ reqCopy := req
+ reqCopy.Start = s.from.Time()
+ reqCopy.End = s.through.Time()
+
+ iter, err := s.store.SelectSamples(ctx, reqCopy)
+ if err != nil {
+ return nil, err
+ }
+ iters = append(iters, iter)
+ }
+
+ return iter.NewMergeSampleIterator(ctx, iters), nil
+}
+
+// SelectLogs implements Store
+func (sc *StoreCombiner) SelectLogs(ctx context.Context, req logql.SelectLogParams) (iter.EntryIterator, error) {
+ stores := sc.findStoresForTimeRange(model.TimeFromUnixNano(req.Start.UnixNano()), model.TimeFromUnixNano(req.End.UnixNano()))
+
+ if len(stores) == 0 {
+ return iter.NoopEntryIterator, nil
+ }
+
+ if len(stores) == 1 {
+ return stores[0].store.SelectLogs(ctx, req)
+ }
+
+ iters := make([]iter.EntryIterator, 0, len(stores))
+ for _, s := range stores {
+ reqCopy := req
+ reqCopy.Start = s.from.Time()
+ reqCopy.End = s.through.Time()
+
+ iter, err := s.store.SelectLogs(ctx, reqCopy)
+ if err != nil {
+ return nil, err
+ }
+ iters = append(iters, iter)
+ }
+
+ return iter.NewMergeEntryIterator(ctx, iters, req.Direction), nil
+}
+
+// SelectSeries implements Store
+func (sc *StoreCombiner) SelectSeries(ctx context.Context, req logql.SelectLogParams) ([]logproto.SeriesIdentifier, error) {
+ stores := sc.findStoresForTimeRange(model.TimeFromUnixNano(req.Start.UnixNano()), model.TimeFromUnixNano(req.End.UnixNano()))
+
+ if len(stores) == 0 {
+ return nil, nil
+ }
+
+ if len(stores) == 1 {
+ return stores[0].store.SelectSeries(ctx, req)
+ }
+
+ // Use a map to deduplicate series across stores
+ uniqueSeries := make(map[uint64]struct{})
+ var result []logproto.SeriesIdentifier
+
+ // The buffers are used by `series.Hash`.
+ b := make([]byte, 0, 1024)
+ var key uint64
+
+ for _, s := range stores {
+ reqCopy := req
+ reqCopy.Start = s.from.Time()
+ reqCopy.End = s.through.Time()
+
+ series, err := s.store.SelectSeries(ctx, reqCopy)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, s := range series {
+ key = s.Hash(b)
+ if _, ok := uniqueSeries[key]; !ok {
+ result = append(result, s)
+ uniqueSeries[key] = struct{}{}
+ }
+ }
+ }
+
+ return result, nil
+}
+
+// LabelValuesForMetricName implements Store
+func (sc *StoreCombiner) LabelValuesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName string, labelName string, matchers ...*labels.Matcher) ([]string, error) {
+ stores := sc.findStoresForTimeRange(from, through)
+
+ if len(stores) == 0 {
+ return nil, nil
+ }
+
+ if len(stores) == 1 {
+ return stores[0].store.LabelValuesForMetricName(ctx, userID, from, through, metricName, labelName, matchers...)
+ }
+
+ // Use a map to deduplicate values across stores
+ valueSet := make(map[string]struct{})
+
+ for _, s := range stores {
+ values, err := s.store.LabelValuesForMetricName(ctx, userID, s.from, s.through, metricName, labelName, matchers...)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, v := range values {
+ valueSet[v] = struct{}{}
+ }
+ }
+
+ result := make([]string, 0, len(valueSet))
+ for v := range valueSet {
+ result = append(result, v)
+ }
+ sort.Strings(result)
+ return result, nil
+}
+
+// LabelNamesForMetricName implements Store
+func (sc *StoreCombiner) LabelNamesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName string, matchers ...*labels.Matcher) ([]string, error) {
+ stores := sc.findStoresForTimeRange(from, through)
+
+ if len(stores) == 0 {
+ return nil, nil
+ }
+
+ if len(stores) == 1 {
+ return stores[0].store.LabelNamesForMetricName(ctx, userID, from, through, metricName, matchers...)
+ }
+
+ // Use a map to deduplicate names across stores
+ nameSet := make(map[string]struct{})
+
+ for _, s := range stores {
+ names, err := s.store.LabelNamesForMetricName(ctx, userID, s.from, s.through, metricName, matchers...)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, n := range names {
+ nameSet[n] = struct{}{}
+ }
+ }
+
+ result := make([]string, 0, len(nameSet))
+ for n := range nameSet {
+ result = append(result, n)
+ }
+ sort.Strings(result)
+ return result, nil
+}
+
+// Stats implements Store
+func (sc *StoreCombiner) Stats(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) (*stats.Stats, error) {
+ stores := sc.findStoresForTimeRange(from, through)
+
+ if len(stores) == 0 {
+ return &stats.Stats{}, nil
+ }
+
+ if len(stores) == 1 {
+ return stores[0].store.Stats(ctx, userID, from, through, matchers...)
+ }
+
+ // Collect stats from all stores
+ statsSlice := make([]*stats.Stats, 0, len(stores))
+ for _, s := range stores {
+ stats, err := s.store.Stats(ctx, userID, s.from, s.through, matchers...)
+ if err != nil {
+ return nil, err
+ }
+ statsSlice = append(statsSlice, stats)
+ }
+
+ // Merge all stats using the MergeStats function
+ mergedStats := stats.MergeStats(statsSlice...)
+ return &mergedStats, nil
+}
+
+// Volume implements Store
+func (sc *StoreCombiner) Volume(ctx context.Context, userID string, from, through model.Time, limit int32, targetLabels []string, aggregateBy string, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) {
+ stores := sc.findStoresForTimeRange(from, through)
+
+ if len(stores) == 0 {
+ return &logproto.VolumeResponse{}, nil
+ }
+
+ if len(stores) == 1 {
+ return stores[0].store.Volume(ctx, userID, from, through, limit, targetLabels, aggregateBy, matchers...)
+ }
+
+ // Combine volumes from all stores
+ volumes := make([]*logproto.VolumeResponse, 0, len(stores))
+
+ for _, s := range stores {
+ vol, err := s.store.Volume(ctx, userID, s.from, s.through, limit, targetLabels, aggregateBy, matchers...)
+ if err != nil {
+ return nil, err
+ }
+ volumes = append(volumes, vol)
+ }
+
+ // Use the seriesvolume package's Merge function to properly merge volume responses
+ return seriesvolume.Merge(volumes, limit), nil
+}
+
+// GetShards implements Store
+func (sc *StoreCombiner) GetShards(ctx context.Context, userID string, from, through model.Time, targetBytesPerShard uint64, predicate chunk.Predicate) (*logproto.ShardsResponse, error) {
+ stores := sc.findStoresForTimeRange(from, through)
+
+ if len(stores) == 0 {
+ return &logproto.ShardsResponse{}, nil
+ }
+
+ if len(stores) == 1 {
+ return stores[0].store.GetShards(ctx, userID, from, through, targetBytesPerShard, predicate)
+ }
+
+ // Combine shards from all stores
+ groups := make([]*logproto.ShardsResponse, 0, len(stores))
+
+ for _, s := range stores {
+ shards, err := s.store.GetShards(ctx, userID, s.from, s.through, targetBytesPerShard, predicate)
+ if err != nil {
+ return nil, err
+ }
+ groups = append(groups, shards)
+ }
+
+ switch {
+ case len(groups) == 1:
+ return groups[0], nil
+ case len(groups) == 0:
+ return nil, nil
+ default:
+ sort.Slice(groups, func(i, j int) bool {
+ return len(groups[i].Shards) > len(groups[j].Shards)
+ })
+ return groups[0], nil
+ }
+}
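The core of the StoreCombiner above is findStoresForTimeRange, which binary-searches the sorted From times and hands each store only the slice of the query range it owns. The sketch below re-implements that splitting logic on plain int64 timestamps to make the behaviour concrete; splitByStores and span are hypothetical stand-ins, not the real types.

package main

import (
	"fmt"
	"sort"
)

// span is a hypothetical stand-in for storeWithRange, using plain int64
// timestamps instead of model.Time.
type span struct{ from, through int64 }

// splitByStores mirrors the routing in findStoresForTimeRange: starts must be
// sorted ascending; the store starting at starts[i] serves up to starts[i+1]-1.
func splitByStores(starts []int64, from, through int64) []span {
	if len(starts) == 0 {
		return nil
	}
	i := sort.Search(len(starts), func(i int) bool { return starts[i] > from })
	if i > 0 {
		i--
	} else {
		from = starts[0] // query begins before the oldest store
	}
	j := sort.Search(len(starts), func(j int) bool { return starts[j] > through })

	var out []span
	start := from
	for ; i < j; i++ {
		next := int64(1 << 62) // effectively "no later store"
		if i+1 < len(starts) {
			next = starts[i+1]
		}
		end := through
		if next-1 < end {
			end = next - 1
		}
		out = append(out, span{from: start, through: end})
		start = next
	}
	return out
}

func main() {
	// Stores starting at t=0 (existing object store) and t=200 (dataobj store).
	// A query over [150, 250] is split so each store only sees its own range.
	for _, s := range splitByStores([]int64{0, 200}, 150, 250) {
		fmt.Printf("query store for [%d, %d]\n", s.from, s.through)
	}
	// Prints:
	// query store for [150, 199]
	// query store for [200, 250]
}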
diff --git a/pkg/querier/store_combiner_test.go b/pkg/querier/store_combiner_test.go
new file mode 100644
index 0000000000000..54a000e64836c
--- /dev/null
+++ b/pkg/querier/store_combiner_test.go
@@ -0,0 +1,487 @@
+package querier
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/prometheus/common/model"
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/stretchr/testify/require"
+
+ "github.com/grafana/loki/v3/pkg/iter"
+ "github.com/grafana/loki/v3/pkg/logproto"
+ "github.com/grafana/loki/v3/pkg/logql"
+ "github.com/grafana/loki/v3/pkg/storage/chunk"
+ "github.com/grafana/loki/v3/pkg/storage/stores/index/stats"
+)
+
+func TestStoreCombiner_findStoresForTimeRange(t *testing.T) {
+ tests := []struct {
+ name string
+ stores []StoreConfig
+ from model.Time
+ through model.Time
+ expected []storeWithRange
+ wantEmpty bool
+ }{
+ {
+ name: "empty stores",
+ stores: nil,
+ from: model.Time(100),
+ through: model.Time(200),
+ wantEmpty: true,
+ },
+ {
+ name: "single store covers entire range",
+ stores: []StoreConfig{
+ {From: model.Time(0)},
+ },
+ from: model.Time(100),
+ through: model.Time(200),
+ expected: []storeWithRange{
+ {from: model.Time(100), through: model.Time(200)},
+ },
+ },
+ {
+ name: "query range before any store",
+ stores: []StoreConfig{
+ {From: model.Time(100)},
+ },
+ from: model.Time(0),
+ through: model.Time(50),
+ wantEmpty: true,
+ },
+ {
+ name: "query range spans multiple stores",
+ stores: []StoreConfig{
+ {From: model.Time(200)},
+ {From: model.Time(100)},
+ {From: model.Time(0)},
+ },
+ from: model.Time(150),
+ through: model.Time(250),
+ expected: []storeWithRange{
+ {from: model.Time(150), through: model.Time(199)},
+ {from: model.Time(200), through: model.Time(250)},
+ },
+ },
+ {
+ name: "query range exactly matches store boundaries",
+ stores: []StoreConfig{
+ {From: model.Time(200)},
+ {From: model.Time(100)},
+ },
+ from: model.Time(100),
+ through: model.Time(200),
+ expected: []storeWithRange{
+ {from: model.Time(100), through: model.Time(199)},
+ {from: model.Time(200), through: model.Time(200)},
+ },
+ },
+ {
+ name: "pre-1970 dates",
+ stores: []StoreConfig{
+ {From: model.Time(100)},
+ {From: model.Time(0)},
+ {From: model.Time(-100)},
+ },
+ from: model.Time(-50),
+ through: model.Time(50),
+ expected: []storeWithRange{
+ {from: model.Time(-50), through: model.Time(-1)},
+ {from: model.Time(0), through: model.Time(50)},
+ },
+ },
+ {
+ name: "query range spans all stores",
+ stores: []StoreConfig{
+ {From: model.Time(300)},
+ {From: model.Time(200)},
+ {From: model.Time(100)},
+ },
+ from: model.Time(50),
+ through: model.Time(350),
+ expected: []storeWithRange{
+ {from: model.Time(100), through: model.Time(199)},
+ {from: model.Time(200), through: model.Time(299)},
+ {from: model.Time(300), through: model.Time(350)},
+ },
+ },
+ {
+ name: "query range in future",
+ stores: []StoreConfig{
+ {From: model.Time(100)},
+ {From: model.Time(0)},
+ },
+ from: model.Time(200),
+ through: model.Time(300),
+ expected: []storeWithRange{
+ {from: model.Time(200), through: model.Time(300)},
+ },
+ },
+ {
+ name: "store with 0 from",
+ stores: []StoreConfig{
+ {From: model.Time(0)},
+ {From: model.Time(100)},
+ },
+ from: model.Time(0),
+ through: model.Time(300),
+ expected: []storeWithRange{
+ {from: model.Time(0), through: model.Time(99)},
+ {from: model.Time(100), through: model.Time(300)},
+ },
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ sc := NewStoreCombiner(tc.stores)
+ got := sc.findStoresForTimeRange(tc.from, tc.through)
+
+ if tc.wantEmpty {
+ require.Empty(t, got)
+ return
+ }
+
+ require.Equal(t, len(tc.expected), len(got), "number of store ranges", tc.expected, got)
+ for i := range tc.expected {
+ require.Equal(t, tc.expected[i].from, got[i].from, "from time for store %d", i)
+ require.Equal(t, tc.expected[i].through, got[i].through, "through time for store %d", i)
+ }
+ })
+ }
+}
+
+func TestStoreCombiner_StoreOrdering(t *testing.T) {
+ unorderedStores := []StoreConfig{
+ {From: model.Time(100)},
+ {From: model.Time(300)},
+ {From: model.Time(200)},
+ }
+
+ sc := NewStoreCombiner(unorderedStores)
+
+ // Verify stores are sorted in ascending order
+ for i := 1; i < len(sc.stores); i++ {
+ require.True(t, sc.stores[i-1].From < sc.stores[i].From,
+ "stores should be sorted in ascending order, but found %v before %v",
+ time.Unix(int64(sc.stores[i-1].From), 0),
+ time.Unix(int64(sc.stores[i].From), 0))
+ }
+}
+
+func TestStoreCombiner_TimeRangeBoundaries(t *testing.T) {
+ stores := []StoreConfig{
+ {From: model.Time(300)},
+ {From: model.Time(200)},
+ {From: model.Time(100)},
+ }
+
+ tests := []struct {
+ name string
+ from, through model.Time
+ expectedRanges [][2]model.Time // pairs of [from, through]
+ }{
+ {
+ name: "exact boundaries",
+ from: model.Time(200),
+ through: model.Time(300),
+ expectedRanges: [][2]model.Time{
+ {model.Time(200), model.Time(299)},
+ {model.Time(300), model.Time(300)},
+ },
+ },
+ {
+ name: "overlapping boundaries",
+ from: model.Time(250),
+ through: model.Time(350),
+ expectedRanges: [][2]model.Time{
+ {model.Time(250), model.Time(299)},
+ {model.Time(300), model.Time(350)},
+ },
+ },
+ {
+ name: "within single store",
+ from: model.Time(210),
+ through: model.Time(290),
+ expectedRanges: [][2]model.Time{
+ {model.Time(210), model.Time(290)},
+ },
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ sc := NewStoreCombiner(stores)
+ ranges := sc.findStoresForTimeRange(tc.from, tc.through)
+
+ require.Equal(t, len(tc.expectedRanges), len(ranges), "number of time ranges")
+ for i, expected := range tc.expectedRanges {
+ require.Equal(t, expected[0], ranges[i].from, "from time for range %d", i)
+ require.Equal(t, expected[1], ranges[i].through, "through time for range %d", i)
+ }
+ })
+ }
+}
+
+type mockStore struct {
+ logs []logproto.Stream
+ series []logproto.SeriesIdentifier
+ stats *stats.Stats
+ shards *logproto.ShardsResponse
+ samples []logproto.Sample
+ labelValues []string
+ labelNames []string
+ volumeResult *logproto.VolumeResponse
+}
+
+func (m *mockStore) SelectLogs(_ context.Context, req logql.SelectLogParams) (iter.EntryIterator, error) {
+ streams := make([]logproto.Stream, len(m.logs))
+ copy(streams, m.logs)
+ return iter.NewStreamsIterator(streams, req.Direction), nil
+}
+
+func (m *mockStore) SelectSeries(_ context.Context, _ logql.SelectLogParams) ([]logproto.SeriesIdentifier, error) {
+ return m.series, nil
+}
+
+func (m *mockStore) Stats(_ context.Context, _ string, _ model.Time, _ model.Time, _ ...*labels.Matcher) (*stats.Stats, error) {
+ return m.stats, nil
+}
+
+func (m *mockStore) GetShards(_ context.Context, _ string, _ model.Time, _ model.Time, _ uint64, _ chunk.Predicate) (*logproto.ShardsResponse, error) {
+ return m.shards, nil
+}
+
+func (m *mockStore) SelectSamples(_ context.Context, _ logql.SelectSampleParams) (iter.SampleIterator, error) {
+ return iter.NewSeriesIterator(logproto.Series{
+ Labels: "",
+ Samples: m.samples,
+ }), nil
+}
+
+func (m *mockStore) LabelValuesForMetricName(_ context.Context, _ string, _ model.Time, _ model.Time, _ string, _ string, _ ...*labels.Matcher) ([]string, error) {
+ return m.labelValues, nil
+}
+
+func (m *mockStore) LabelNamesForMetricName(_ context.Context, _ string, _ model.Time, _ model.Time, _ string, _ ...*labels.Matcher) ([]string, error) {
+ return m.labelNames, nil
+}
+
+func (m *mockStore) Volume(_ context.Context, _ string, _ model.Time, _ model.Time, _ int32, _ []string, _ string, _ ...*labels.Matcher) (*logproto.VolumeResponse, error) {
+ return m.volumeResult, nil
+}
+
+func TestStoreCombiner_Merging(t *testing.T) {
+ t.Run("SelectLogs merges streams", func(t *testing.T) {
+ store1 := &mockStore{
+ logs: []logproto.Stream{
+ {Labels: `{app="app1"}`, Entries: []logproto.Entry{{Timestamp: time.Unix(1, 0), Line: "1"}}},
+ },
+ }
+ store2 := &mockStore{
+ logs: []logproto.Stream{
+ {Labels: `{app="app2"}`, Entries: []logproto.Entry{{Timestamp: time.Unix(2, 0), Line: "2"}}},
+ },
+ }
+
+ sc := NewStoreCombiner([]StoreConfig{
+ {Store: store1, From: model.Time(0)},
+ {Store: store2, From: model.Time(2)},
+ })
+
+ iter, err := sc.SelectLogs(context.Background(), logql.SelectLogParams{
+ QueryRequest: &logproto.QueryRequest{
+ Start: time.Unix(0, 0),
+ End: time.Unix(2, 0),
+ Direction: logproto.FORWARD,
+ },
+ })
+ require.NoError(t, err)
+
+ // Convert iterator to streams for testing
+ var streams []logproto.Stream
+ for iter.Next() {
+ stream := logproto.Stream{
+ Labels: iter.Labels(),
+ Entries: []logproto.Entry{iter.At()},
+ }
+ streams = append(streams, stream)
+ }
+ require.NoError(t, iter.Err())
+ require.Len(t, streams, 2)
+ require.Equal(t, `{app="app1"}`, streams[0].Labels)
+ require.Equal(t, `{app="app2"}`, streams[1].Labels)
+ })
+
+ t.Run("SelectSeries deduplicates series", func(t *testing.T) {
+ store1 := &mockStore{
+ series: []logproto.SeriesIdentifier{
+ {Labels: []logproto.SeriesIdentifier_LabelsEntry{{Key: "app", Value: "app1"}}},
+ },
+ }
+ store2 := &mockStore{
+ series: []logproto.SeriesIdentifier{
+ {Labels: []logproto.SeriesIdentifier_LabelsEntry{{Key: "app", Value: "app1"}}}, // Duplicate
+ {Labels: []logproto.SeriesIdentifier_LabelsEntry{{Key: "app", Value: "app2"}}},
+ },
+ }
+
+ sc := NewStoreCombiner([]StoreConfig{
+ {Store: store1, From: model.Time(0)},
+ {Store: store2, From: model.Time(2)},
+ })
+
+ series, err := sc.SelectSeries(context.Background(), logql.SelectLogParams{
+ QueryRequest: &logproto.QueryRequest{
+ Start: time.Unix(0, 0),
+ End: time.Unix(2, 0),
+ },
+ })
+ require.NoError(t, err)
+ require.Len(t, series, 2) // Should deduplicate app1
+ })
+
+ t.Run("Stats merges stats", func(t *testing.T) {
+ store1 := &mockStore{
+ stats: &stats.Stats{Streams: 1, Chunks: 10, Bytes: 100},
+ }
+ store2 := &mockStore{
+ stats: &stats.Stats{Streams: 2, Chunks: 20, Bytes: 200},
+ }
+
+ sc := NewStoreCombiner([]StoreConfig{
+ {Store: store1, From: model.Time(0)},
+ {Store: store2, From: model.Time(2)},
+ })
+
+ stats, err := sc.Stats(context.Background(), "user", 0, 2)
+ require.NoError(t, err)
+ require.Equal(t, uint64(3), stats.Streams) // 1 + 2
+ require.Equal(t, uint64(30), stats.Chunks) // 10 + 20
+ require.Equal(t, uint64(300), stats.Bytes) // 100 + 200
+ })
+
+ t.Run("GetShards returns largest shard set", func(t *testing.T) {
+ store1 := &mockStore{
+ shards: &logproto.ShardsResponse{
+ Shards: []logproto.Shard{{Bounds: logproto.FPBounds{Min: 2, Max: 4}}},
+ },
+ }
+ store2 := &mockStore{
+ shards: &logproto.ShardsResponse{
+ Shards: []logproto.Shard{{Bounds: logproto.FPBounds{Min: 1, Max: 2}}, {Bounds: logproto.FPBounds{Min: 2, Max: 3}}}, // More shards
+ },
+ }
+
+ sc := NewStoreCombiner([]StoreConfig{
+ {Store: store1, From: model.Time(0)},
+ {Store: store2, From: model.Time(2)},
+ })
+
+ shards, err := sc.GetShards(context.Background(), "user", 0, 2, 1000, chunk.Predicate{})
+ require.NoError(t, err)
+ require.Equal(t, shards.Shards, []logproto.Shard{{Bounds: logproto.FPBounds{Min: 1, Max: 2}}, {Bounds: logproto.FPBounds{Min: 2, Max: 3}}}) // Should pick store2's response
+ })
+
+ t.Run("SelectSamples merges samples", func(t *testing.T) {
+ store1 := &mockStore{
+ samples: []logproto.Sample{
+ {Timestamp: time.Unix(1, 0).UnixNano(), Value: 1.0, Hash: 1},
+ },
+ }
+ store2 := &mockStore{
+ samples: []logproto.Sample{
+ {Timestamp: time.Unix(2, 0).UnixNano(), Value: 2.0, Hash: 2},
+ },
+ }
+
+ sc := NewStoreCombiner([]StoreConfig{
+ {Store: store1, From: model.Time(0)},
+ {Store: store2, From: model.Time(2)},
+ })
+
+ iter, err := sc.SelectSamples(context.Background(), logql.SelectSampleParams{
+ SampleQueryRequest: &logproto.SampleQueryRequest{
+ Start: time.Unix(0, 0),
+ End: time.Unix(2, 0),
+ },
+ })
+ require.NoError(t, err)
+
+ var samples []logproto.Sample
+ for iter.Next() {
+ samples = append(samples, iter.At())
+ }
+ require.NoError(t, iter.Err())
+ require.Len(t, samples, 2)
+ require.Equal(t, float64(1.0), samples[0].Value)
+ require.Equal(t, float64(2.0), samples[1].Value)
+ })
+
+ t.Run("LabelValuesForMetricName deduplicates values", func(t *testing.T) {
+ store1 := &mockStore{
+ labelValues: []string{"value1", "value2"},
+ }
+ store2 := &mockStore{
+ labelValues: []string{"value2", "value3"}, // Note: value2 is duplicate
+ }
+
+ sc := NewStoreCombiner([]StoreConfig{
+ {Store: store1, From: model.Time(0)},
+ {Store: store2, From: model.Time(2)},
+ })
+
+ values, err := sc.LabelValuesForMetricName(context.Background(), "user", 0, 2, "logs", "label")
+ require.NoError(t, err)
+ require.Equal(t, []string{"value1", "value2", "value3"}, values)
+ })
+
+ t.Run("LabelNamesForMetricName deduplicates names", func(t *testing.T) {
+ store1 := &mockStore{
+ labelNames: []string{"name1", "name2"},
+ }
+ store2 := &mockStore{
+ labelNames: []string{"name2", "name3"}, // Note: name2 is duplicate
+ }
+
+ sc := NewStoreCombiner([]StoreConfig{
+ {Store: store1, From: model.Time(0)},
+ {Store: store2, From: model.Time(2)},
+ })
+
+ names, err := sc.LabelNamesForMetricName(context.Background(), "user", 0, 2, "logs")
+ require.NoError(t, err)
+ require.Equal(t, []string{"name1", "name2", "name3"}, names)
+ })
+
+ t.Run("Volume merges responses", func(t *testing.T) {
+ store1 := &mockStore{
+ volumeResult: &logproto.VolumeResponse{
+ Volumes: []logproto.Volume{
+ {Name: "app1", Volume: 100},
+ },
+ },
+ }
+ store2 := &mockStore{
+ volumeResult: &logproto.VolumeResponse{
+ Volumes: []logproto.Volume{
+ {Name: "app2", Volume: 200},
+ },
+ },
+ }
+
+ sc := NewStoreCombiner([]StoreConfig{
+ {Store: store1, From: model.Time(0)},
+ {Store: store2, From: model.Time(2)},
+ })
+
+ result, err := sc.Volume(context.Background(), "user", 0, 2, 10, nil, "")
+ require.NoError(t, err)
+ require.Len(t, result.Volumes, 2)
+ require.Equal(t, uint64(200), result.Volumes[0].Volume)
+ require.Equal(t, uint64(100), result.Volumes[1].Volume)
+ })
+}
diff --git a/pkg/storage/config/schema_config.go b/pkg/storage/config/schema_config.go
index 2c9a6a4605e84..d9e78ccaed253 100644
--- a/pkg/storage/config/schema_config.go
+++ b/pkg/storage/config/schema_config.go
@@ -210,6 +210,15 @@ func (d *DayTime) UnmarshalYAML(unmarshal func(interface{}) error) error {
return nil
}
+func (d *DayTime) Set(value string) error {
+ t, err := time.Parse("2006-01-02", value)
+ if err != nil {
+ return err
+ }
+ d.Time = model.TimeFromUnix(t.Unix())
+ return nil
+}
+
func (d DayTime) String() string {
return d.Time.Time().UTC().Format("2006-01-02")
}
@@ -560,6 +569,7 @@ func (cfg IndexPeriodicTableConfig) MarshalYAML() (interface{}, error) {
return g, nil
}
+
func ValidatePathPrefix(prefix string) error {
if prefix == "" {
return errors.New("prefix must be set")
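The DayTime.Set method added above is what allows -dataobj-querier-from to accept a plain YYYY-MM-DD value as a flag.Value. A minimal sketch of the same idea with a hypothetical dayTime type and a demo flag name (the real code converts the parsed time to model.Time rather than keeping a time.Time):

package main

import (
	"flag"
	"fmt"
	"time"
)

// dayTime is a hypothetical stand-in for the DayTime flag value in the diff:
// it accepts YYYY-MM-DD on the command line and stores midnight UTC.
type dayTime struct{ t time.Time }

func (d *dayTime) Set(value string) error {
	t, err := time.Parse("2006-01-02", value)
	if err != nil {
		return err
	}
	d.t = t
	return nil
}

func (d *dayTime) String() string {
	return d.t.UTC().Format("2006-01-02")
}

func main() {
	var from dayTime
	fs := flag.NewFlagSet("demo", flag.ExitOnError)
	// Mirrors -dataobj-querier-from, which is registered with flag.Var.
	fs.Var(&from, "demo-querier-from", "First day the demo querier should serve.")
	_ = fs.Parse([]string{"-demo-querier-from", "2025-02-01"})
	fmt.Println("querying from:", from.String(), "unix:", from.t.Unix())
}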
|
refactor
|
Consolidate configuration and add querier support (#16144)
|
5d027ae3d0d21752d6f4b5e47d0c1025bf0349e1
|
2025-03-10 23:13:38
|
renovate[bot]
|
chore(deps): update dependency @typescript-eslint/parser to v8.26.1 (main) (#16668)
| false
|
diff --git a/pkg/ui/frontend/package-lock.json b/pkg/ui/frontend/package-lock.json
index f546aeb4fec63..1327771544d61 100644
--- a/pkg/ui/frontend/package-lock.json
+++ b/pkg/ui/frontend/package-lock.json
@@ -2828,16 +2828,16 @@
}
},
"node_modules/@typescript-eslint/parser": {
- "version": "8.26.0",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-8.26.0.tgz",
- "integrity": "sha512-mNtXP9LTVBy14ZF3o7JG69gRPBK/2QWtQd0j0oH26HcY/foyJJau6pNUez7QrM5UHnSvwlQcJXKsk0I99B9pOA==",
+ "version": "8.26.1",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-8.26.1.tgz",
+ "integrity": "sha512-w6HZUV4NWxqd8BdeFf81t07d7/YV9s7TCWrQQbG5uhuvGUAW+fq1usZ1Hmz9UPNLniFnD8GLSsDpjP0hm1S4lQ==",
"dev": true,
"license": "MIT",
"dependencies": {
- "@typescript-eslint/scope-manager": "8.26.0",
- "@typescript-eslint/types": "8.26.0",
- "@typescript-eslint/typescript-estree": "8.26.0",
- "@typescript-eslint/visitor-keys": "8.26.0",
+ "@typescript-eslint/scope-manager": "8.26.1",
+ "@typescript-eslint/types": "8.26.1",
+ "@typescript-eslint/typescript-estree": "8.26.1",
+ "@typescript-eslint/visitor-keys": "8.26.1",
"debug": "^4.3.4"
},
"engines": {
@@ -2852,6 +2852,122 @@
"typescript": ">=4.8.4 <5.9.0"
}
},
+ "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/scope-manager": {
+ "version": "8.26.1",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.26.1.tgz",
+ "integrity": "sha512-6EIvbE5cNER8sqBu6V7+KeMZIC1664d2Yjt+B9EWUXrsyWpxx4lEZrmvxgSKRC6gX+efDL/UY9OpPZ267io3mg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@typescript-eslint/types": "8.26.1",
+ "@typescript-eslint/visitor-keys": "8.26.1"
+ },
+ "engines": {
+ "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/typescript-eslint"
+ }
+ },
+ "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/types": {
+ "version": "8.26.1",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.26.1.tgz",
+ "integrity": "sha512-n4THUQW27VmQMx+3P+B0Yptl7ydfceUj4ON/AQILAASwgYdZ/2dhfymRMh5egRUrvK5lSmaOm77Ry+lmXPOgBQ==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/typescript-eslint"
+ }
+ },
+ "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/typescript-estree": {
+ "version": "8.26.1",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.26.1.tgz",
+ "integrity": "sha512-yUwPpUHDgdrv1QJ7YQal3cMVBGWfnuCdKbXw1yyjArax3353rEJP1ZA+4F8nOlQ3RfS2hUN/wze3nlY+ZOhvoA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@typescript-eslint/types": "8.26.1",
+ "@typescript-eslint/visitor-keys": "8.26.1",
+ "debug": "^4.3.4",
+ "fast-glob": "^3.3.2",
+ "is-glob": "^4.0.3",
+ "minimatch": "^9.0.4",
+ "semver": "^7.6.0",
+ "ts-api-utils": "^2.0.1"
+ },
+ "engines": {
+ "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/typescript-eslint"
+ },
+ "peerDependencies": {
+ "typescript": ">=4.8.4 <5.9.0"
+ }
+ },
+ "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/visitor-keys": {
+ "version": "8.26.1",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.26.1.tgz",
+ "integrity": "sha512-AjOC3zfnxd6S4Eiy3jwktJPclqhFHNyd8L6Gycf9WUPoKZpgM5PjkxY1X7uSy61xVpiJDhhk7XT2NVsN3ALTWg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@typescript-eslint/types": "8.26.1",
+ "eslint-visitor-keys": "^4.2.0"
+ },
+ "engines": {
+ "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/typescript-eslint"
+ }
+ },
+ "node_modules/@typescript-eslint/parser/node_modules/brace-expansion": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz",
+ "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "balanced-match": "^1.0.0"
+ }
+ },
+ "node_modules/@typescript-eslint/parser/node_modules/eslint-visitor-keys": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.0.tgz",
+ "integrity": "sha512-UyLnSehNt62FFhSwjZlHmeokpRK59rcz29j+F1/aDgbkbRTk7wIc9XzdoasMUbRNKDM0qQt/+BJ4BrpFeABemw==",
+ "dev": true,
+ "license": "Apache-2.0",
+ "engines": {
+ "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/eslint"
+ }
+ },
+ "node_modules/@typescript-eslint/parser/node_modules/minimatch": {
+ "version": "9.0.5",
+ "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz",
+ "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==",
+ "dev": true,
+ "license": "ISC",
+ "dependencies": {
+ "brace-expansion": "^2.0.1"
+ },
+ "engines": {
+ "node": ">=16 || 14 >=14.17"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/isaacs"
+ }
+ },
"node_modules/@typescript-eslint/scope-manager": {
"version": "8.26.0",
"resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.26.0.tgz",
|
chore
|
update dependency @typescript-eslint/parser to v8.26.1 (main) (#16668)
|
88c671162f70e075f6aa43599aa560fe7b4b5627
|
2024-05-16 21:07:35
|
Karsten Jeschkies
|
fix: Track bytes discarded by ingester. (#12981)
| false
|
diff --git a/pkg/ingester/checkpoint_test.go b/pkg/ingester/checkpoint_test.go
index e8871e7a13918..d530d937d42fe 100644
--- a/pkg/ingester/checkpoint_test.go
+++ b/pkg/ingester/checkpoint_test.go
@@ -70,7 +70,7 @@ func TestIngesterWAL(t *testing.T) {
}
}
- i, err := New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger())
+ i, err := New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil)
require.NoError(t, err)
require.Nil(t, services.StartAndAwaitRunning(context.Background(), i))
defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
@@ -113,7 +113,7 @@ func TestIngesterWAL(t *testing.T) {
expectCheckpoint(t, walDir, false, time.Second)
// restart the ingester
- i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger())
+ i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil)
require.NoError(t, err)
defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
require.Nil(t, services.StartAndAwaitRunning(context.Background(), i))
@@ -127,7 +127,7 @@ func TestIngesterWAL(t *testing.T) {
require.Nil(t, services.StopAndAwaitTerminated(context.Background(), i))
// restart the ingester
- i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger())
+ i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil)
require.NoError(t, err)
defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
require.Nil(t, services.StartAndAwaitRunning(context.Background(), i))
@@ -150,7 +150,7 @@ func TestIngesterWALIgnoresStreamLimits(t *testing.T) {
}
}
- i, err := New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger())
+ i, err := New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil)
require.NoError(t, err)
require.Nil(t, services.StartAndAwaitRunning(context.Background(), i))
defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
@@ -196,7 +196,7 @@ func TestIngesterWALIgnoresStreamLimits(t *testing.T) {
require.NoError(t, err)
// restart the ingester
- i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger())
+ i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil)
require.NoError(t, err)
defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
require.Nil(t, services.StartAndAwaitRunning(context.Background(), i))
@@ -253,7 +253,7 @@ func TestIngesterWALBackpressureSegments(t *testing.T) {
}
}
- i, err := New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger())
+ i, err := New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil)
require.NoError(t, err)
require.Nil(t, services.StartAndAwaitRunning(context.Background(), i))
defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
@@ -274,7 +274,7 @@ func TestIngesterWALBackpressureSegments(t *testing.T) {
expectCheckpoint(t, walDir, false, time.Second)
// restart the ingester, ensuring we replayed from WAL.
- i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger())
+ i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil)
require.NoError(t, err)
defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
require.Nil(t, services.StartAndAwaitRunning(context.Background(), i))
@@ -295,7 +295,7 @@ func TestIngesterWALBackpressureCheckpoint(t *testing.T) {
}
}
- i, err := New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger())
+ i, err := New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil)
require.NoError(t, err)
require.Nil(t, services.StartAndAwaitRunning(context.Background(), i))
defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
@@ -316,7 +316,7 @@ func TestIngesterWALBackpressureCheckpoint(t *testing.T) {
require.Nil(t, services.StopAndAwaitTerminated(context.Background(), i))
// restart the ingester, ensuring we can replay from the checkpoint as well.
- i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger())
+ i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil)
require.NoError(t, err)
defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
require.Nil(t, services.StartAndAwaitRunning(context.Background(), i))
@@ -452,7 +452,7 @@ func Test_SeriesIterator(t *testing.T) {
limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1)
for i := 0; i < 3; i++ {
- inst, err := newInstance(defaultConfig(), defaultPeriodConfigs, fmt.Sprintf("%d", i), limiter, runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, nil, nil, nil, nil, NewStreamRateCalculator(), nil)
+ inst, err := newInstance(defaultConfig(), defaultPeriodConfigs, fmt.Sprintf("%d", i), limiter, runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, nil, nil, nil, nil, NewStreamRateCalculator(), nil, nil)
require.Nil(t, err)
require.NoError(t, inst.Push(context.Background(), &logproto.PushRequest{Streams: []logproto.Stream{stream1}}))
require.NoError(t, inst.Push(context.Background(), &logproto.PushRequest{Streams: []logproto.Stream{stream2}}))
@@ -499,7 +499,7 @@ func Benchmark_SeriesIterator(b *testing.B) {
limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1)
for i := range instances {
- inst, _ := newInstance(defaultConfig(), defaultPeriodConfigs, fmt.Sprintf("instance %d", i), limiter, runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, nil, nil, nil, nil, NewStreamRateCalculator(), nil)
+ inst, _ := newInstance(defaultConfig(), defaultPeriodConfigs, fmt.Sprintf("instance %d", i), limiter, runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, nil, nil, nil, nil, NewStreamRateCalculator(), nil, nil)
require.NoError(b,
inst.Push(context.Background(), &logproto.PushRequest{
@@ -591,7 +591,7 @@ func TestIngesterWALReplaysUnorderedToOrdered(t *testing.T) {
}
}
- i, err := New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger())
+ i, err := New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil)
require.NoError(t, err)
require.Nil(t, services.StartAndAwaitRunning(context.Background(), i))
defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
@@ -663,7 +663,7 @@ func TestIngesterWALReplaysUnorderedToOrdered(t *testing.T) {
require.NoError(t, err)
// restart the ingester
- i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger())
+ i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil)
require.NoError(t, err)
defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
require.Nil(t, services.StartAndAwaitRunning(context.Background(), i))
diff --git a/pkg/ingester/flush_test.go b/pkg/ingester/flush_test.go
index e4fc748f2560d..6fd52bafa066f 100644
--- a/pkg/ingester/flush_test.go
+++ b/pkg/ingester/flush_test.go
@@ -278,7 +278,7 @@ func newTestStore(t require.TestingT, cfg Config, walOverride WAL) (*testStore,
limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil)
require.NoError(t, err)
- ing, err := New(cfg, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokitlog.NewNopLogger())
+ ing, err := New(cfg, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokitlog.NewNopLogger(), nil)
require.NoError(t, err)
require.NoError(t, services.StartAndAwaitRunning(context.Background(), ing))
diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go
index 640c64eee6b63..6d27d349c93f4 100644
--- a/pkg/ingester/ingester.go
+++ b/pkg/ingester/ingester.go
@@ -12,6 +12,7 @@ import (
"sync"
"time"
+ "github.com/grafana/loki/v3/pkg/loghttp/push"
"github.com/grafana/loki/v3/pkg/logqlmodel/metadata"
"github.com/grafana/loki/v3/pkg/storage/types"
@@ -242,10 +243,12 @@ type Ingester struct {
streamRateCalculator *StreamRateCalculator
writeLogManager *writefailures.Manager
+
+ customStreamsTracker push.UsageTracker
}
// New makes a new Ingester.
-func New(cfg Config, clientConfig client.Config, store Store, limits Limits, configs *runtime.TenantConfigs, registerer prometheus.Registerer, writeFailuresCfg writefailures.Cfg, metricsNamespace string, logger log.Logger) (*Ingester, error) {
+func New(cfg Config, clientConfig client.Config, store Store, limits Limits, configs *runtime.TenantConfigs, registerer prometheus.Registerer, writeFailuresCfg writefailures.Cfg, metricsNamespace string, logger log.Logger, customStreamsTracker push.UsageTracker) (*Ingester, error) {
if cfg.ingesterClientFactory == nil {
cfg.ingesterClientFactory = client.New
}
@@ -273,6 +276,7 @@ func New(cfg Config, clientConfig client.Config, store Store, limits Limits, con
terminateOnShutdown: false,
streamRateCalculator: NewStreamRateCalculator(),
writeLogManager: writefailures.NewManager(logger, registerer, writeFailuresCfg, configs, "ingester"),
+ customStreamsTracker: customStreamsTracker,
}
i.replayController = newReplayController(metrics, cfg.WAL, &replayFlusher{i})
@@ -863,7 +867,7 @@ func (i *Ingester) GetOrCreateInstance(instanceID string) (*instance, error) { /
inst, ok = i.instances[instanceID]
if !ok {
var err error
- inst, err = newInstance(&i.cfg, i.periodicConfigs, instanceID, i.limiter, i.tenantConfigs, i.wal, i.metrics, i.flushOnShutdownSwitch, i.chunkFilter, i.pipelineWrapper, i.extractorWrapper, i.streamRateCalculator, i.writeLogManager)
+ inst, err = newInstance(&i.cfg, i.periodicConfigs, instanceID, i.limiter, i.tenantConfigs, i.wal, i.metrics, i.flushOnShutdownSwitch, i.chunkFilter, i.pipelineWrapper, i.extractorWrapper, i.streamRateCalculator, i.writeLogManager, i.customStreamsTracker)
if err != nil {
return nil, err
}
diff --git a/pkg/ingester/ingester_test.go b/pkg/ingester/ingester_test.go
index b31053a5ded17..035a62e5a6414 100644
--- a/pkg/ingester/ingester_test.go
+++ b/pkg/ingester/ingester_test.go
@@ -57,7 +57,7 @@ func TestPrepareShutdownMarkerPathNotSet(t *testing.T) {
chunks: map[string][]chunk.Chunk{},
}
- i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger())
+ i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil)
require.NoError(t, err)
defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
@@ -80,7 +80,7 @@ func TestPrepareShutdown(t *testing.T) {
chunks: map[string][]chunk.Chunk{},
}
- i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger())
+ i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil)
require.NoError(t, err)
defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
@@ -141,7 +141,7 @@ func TestIngester_GetStreamRates_Correctness(t *testing.T) {
chunks: map[string][]chunk.Chunk{},
}
- i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger())
+ i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil)
require.NoError(t, err)
defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
@@ -173,7 +173,7 @@ func BenchmarkGetStreamRatesAllocs(b *testing.B) {
chunks: map[string][]chunk.Chunk{},
}
- i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger())
+ i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil)
require.NoError(b, err)
defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
@@ -197,7 +197,7 @@ func TestIngester(t *testing.T) {
chunks: map[string][]chunk.Chunk{},
}
- i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger())
+ i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil)
require.NoError(t, err)
defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
@@ -382,7 +382,7 @@ func TestIngesterStreamLimitExceeded(t *testing.T) {
chunks: map[string][]chunk.Chunk{},
}
- i, err := New(ingesterConfig, client.Config{}, store, overrides, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger())
+ i, err := New(ingesterConfig, client.Config{}, store, overrides, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil)
require.NoError(t, err)
defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
@@ -740,7 +740,7 @@ func Test_InMemoryLabels(t *testing.T) {
chunks: map[string][]chunk.Chunk{},
}
- i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger())
+ i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil)
require.NoError(t, err)
defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
@@ -794,7 +794,7 @@ func TestIngester_GetDetectedLabels(t *testing.T) {
chunks: map[string][]chunk.Chunk{},
}
- i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger())
+ i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil)
require.NoError(t, err)
defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
@@ -857,7 +857,7 @@ func TestIngester_GetDetectedLabelsWithQuery(t *testing.T) {
chunks: map[string][]chunk.Chunk{},
}
- i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger())
+ i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil)
require.NoError(t, err)
defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck
@@ -1224,7 +1224,7 @@ func TestStats(t *testing.T) {
limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil)
require.NoError(t, err)
- i, err := New(ingesterConfig, client.Config{}, &mockStore{}, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger())
+ i, err := New(ingesterConfig, client.Config{}, &mockStore{}, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil)
require.NoError(t, err)
i.instances["test"] = defaultInstance(t)
@@ -1251,7 +1251,7 @@ func TestVolume(t *testing.T) {
limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil)
require.NoError(t, err)
- i, err := New(ingesterConfig, client.Config{}, &mockStore{}, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger())
+ i, err := New(ingesterConfig, client.Config{}, &mockStore{}, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil)
require.NoError(t, err)
i.instances["test"] = defaultInstance(t)
@@ -1330,7 +1330,7 @@ func createIngesterServer(t *testing.T, ingesterConfig Config) (ingesterClient,
limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil)
require.NoError(t, err)
- ing, err := New(ingesterConfig, client.Config{}, &mockStore{}, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger())
+ ing, err := New(ingesterConfig, client.Config{}, &mockStore{}, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil)
require.NoError(t, err)
listener := bufconn.Listen(1024 * 1024)
diff --git a/pkg/ingester/instance.go b/pkg/ingester/instance.go
index eb98f8a39b630..a4436b9d41915 100644
--- a/pkg/ingester/instance.go
+++ b/pkg/ingester/instance.go
@@ -141,6 +141,7 @@ func newInstance(
extractorWrapper log.SampleExtractorWrapper,
streamRateCalculator *StreamRateCalculator,
writeFailures *writefailures.Manager,
+ customStreamsTracker push.UsageTracker,
) (*instance, error) {
invertedIndex, err := index.NewMultiInvertedIndex(periodConfigs, uint32(cfg.IndexShards))
if err != nil {
@@ -174,6 +175,8 @@ func newInstance(
writeFailures: writeFailures,
schemaconfig: &c,
+
+ customStreamsTracker: customStreamsTracker,
}
i.mapper = NewFPMapper(i.getLabelsFromFingerprint)
return i, err
@@ -241,7 +244,7 @@ func (i *instance) Push(ctx context.Context, req *logproto.PushRequest) error {
continue
}
- _, appendErr = s.Push(ctx, reqStream.Entries, record, 0, false, rateLimitWholeStream)
+ _, appendErr = s.Push(ctx, reqStream.Entries, record, 0, false, rateLimitWholeStream, i.customStreamsTracker)
s.chunkMtx.Unlock()
}
diff --git a/pkg/ingester/instance_test.go b/pkg/ingester/instance_test.go
index 88b613aa8db2d..7f7dc30361d6a 100644
--- a/pkg/ingester/instance_test.go
+++ b/pkg/ingester/instance_test.go
@@ -73,7 +73,7 @@ func TestLabelsCollisions(t *testing.T) {
require.NoError(t, err)
limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1)
- i, err := newInstance(defaultConfig(), defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil)
+ i, err := newInstance(defaultConfig(), defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil, nil)
require.Nil(t, err)
// avoid entries from the future.
@@ -101,7 +101,7 @@ func TestConcurrentPushes(t *testing.T) {
require.NoError(t, err)
limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1)
- inst, err := newInstance(defaultConfig(), defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil)
+ inst, err := newInstance(defaultConfig(), defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil, nil)
require.Nil(t, err)
const (
@@ -153,7 +153,7 @@ func TestGetStreamRates(t *testing.T) {
require.NoError(t, err)
limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1)
- inst, err := newInstance(defaultConfig(), defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil)
+ inst, err := newInstance(defaultConfig(), defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil, nil)
require.NoError(t, err)
const (
@@ -247,7 +247,7 @@ func TestSyncPeriod(t *testing.T) {
minUtil = 0.20
)
- inst, err := newInstance(defaultConfig(), defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil)
+ inst, err := newInstance(defaultConfig(), defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil, nil)
require.Nil(t, err)
lbls := makeRandomLabels()
@@ -292,7 +292,7 @@ func setupTestStreams(t *testing.T) (*instance, time.Time, int) {
cfg.SyncMinUtilization = 0.20
cfg.IndexShards = indexShards
- instance, err := newInstance(cfg, defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil)
+ instance, err := newInstance(cfg, defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil, nil)
require.Nil(t, err)
currentTime := time.Now()
@@ -501,7 +501,7 @@ func Benchmark_PushInstance(b *testing.B) {
require.NoError(b, err)
limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1)
- i, _ := newInstance(&Config{IndexShards: 1}, defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil)
+ i, _ := newInstance(&Config{IndexShards: 1}, defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil, nil)
ctx := context.Background()
for n := 0; n < b.N; n++ {
@@ -545,7 +545,7 @@ func Benchmark_instance_addNewTailer(b *testing.B) {
ctx := context.Background()
- inst, _ := newInstance(&Config{}, defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil)
+ inst, _ := newInstance(&Config{}, defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil, nil)
expr, err := syntax.ParseLogSelector(`{namespace="foo",pod="bar",instance=~"10.*"}`, true)
require.NoError(b, err)
t, err := newTailer("foo", expr, nil, 10)
@@ -1095,7 +1095,8 @@ func TestStreamShardingUsage(t *testing.T) {
})
t.Run("invalid push returns error", func(t *testing.T) {
- i, _ := newInstance(&Config{IndexShards: 1}, defaultPeriodConfigs, customTenant1, limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil)
+ tracker := &mockUsageTracker{}
+ i, _ := newInstance(&Config{IndexShards: 1}, defaultPeriodConfigs, customTenant1, limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil, tracker)
ctx := context.Background()
err = i.Push(ctx, &logproto.PushRequest{
@@ -1111,10 +1112,11 @@ func TestStreamShardingUsage(t *testing.T) {
},
})
require.Error(t, err)
+ require.Equal(t, 3.0, tracker.discardedBytes)
})
t.Run("valid push returns no error", func(t *testing.T) {
- i, _ := newInstance(&Config{IndexShards: 1}, defaultPeriodConfigs, customTenant2, limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil)
+ i, _ := newInstance(&Config{IndexShards: 1}, defaultPeriodConfigs, customTenant2, limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil, nil)
ctx := context.Background()
err = i.Push(ctx, &logproto.PushRequest{
@@ -1449,6 +1451,7 @@ func defaultInstance(t *testing.T) *instance {
nil,
NewStreamRateCalculator(),
nil,
+ nil,
)
require.Nil(t, err)
insertData(t, instance)
@@ -1535,3 +1538,16 @@ func (f fakeQueryServer) Send(res *logproto.QueryResponse) error {
return f(res)
}
func (f fakeQueryServer) Context() context.Context { return context.TODO() }
+
+type mockUsageTracker struct {
+ discardedBytes float64
+}
+
+// DiscardedBytesAdd implements push.UsageTracker.
+func (m *mockUsageTracker) DiscardedBytesAdd(_ context.Context, _ string, _ string, _ labels.Labels, value float64) {
+ m.discardedBytes += value
+}
+
+// ReceivedBytesAdd implements push.UsageTracker.
+func (*mockUsageTracker) ReceivedBytesAdd(_ context.Context, _ string, _ time.Duration, _ labels.Labels, _ float64) {
+}
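
The mock above exercises the two methods a production usage tracker has to provide. Below is a minimal sketch of such a tracker, assuming push.UsageTracker consists of exactly the two methods the mock implements; the package name, type name, and per-tenant aggregation are illustrative, not part of the commit.

package usagetracker

import (
	"context"
	"sync"
	"time"

	"github.com/prometheus/prometheus/model/labels"
)

// perTenantTracker is a hypothetical push.UsageTracker implementation that
// aggregates received and discarded bytes per tenant in memory.
type perTenantTracker struct {
	mtx       sync.Mutex
	received  map[string]float64
	discarded map[string]float64
}

func newPerTenantTracker() *perTenantTracker {
	return &perTenantTracker{
		received:  map[string]float64{},
		discarded: map[string]float64{},
	}
}

// ReceivedBytesAdd records bytes accepted for a tenant; the retention period
// and stream labels are available here for finer-grained accounting.
func (t *perTenantTracker) ReceivedBytesAdd(_ context.Context, tenant string, _ time.Duration, _ labels.Labels, value float64) {
	t.mtx.Lock()
	defer t.mtx.Unlock()
	t.received[tenant] += value
}

// DiscardedBytesAdd records bytes rejected for a tenant; the reason string and
// stream labels passed elsewhere in this change are ignored in this sketch.
func (t *perTenantTracker) DiscardedBytesAdd(_ context.Context, tenant string, _ string, _ labels.Labels, value float64) {
	t.mtx.Lock()
	defer t.mtx.Unlock()
	t.discarded[tenant] += value
}

Such a tracker would be wired in where the tests above pass nil, i.e. as the trailing push.UsageTracker argument of ingester.New and newInstance.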
diff --git a/pkg/ingester/recovery.go b/pkg/ingester/recovery.go
index a93151e0e6fca..e8b1c244871bb 100644
--- a/pkg/ingester/recovery.go
+++ b/pkg/ingester/recovery.go
@@ -168,7 +168,7 @@ func (r *ingesterRecoverer) Push(userID string, entries wal.RefEntries) error {
}
// ignore out of order errors here (it's possible for a checkpoint to already have data from the wal segments)
- bytesAdded, err := s.(*stream).Push(context.Background(), entries.Entries, nil, entries.Counter, true, false)
+ bytesAdded, err := s.(*stream).Push(context.Background(), entries.Entries, nil, entries.Counter, true, false, r.ing.customStreamsTracker)
r.ing.replayController.Add(int64(bytesAdded))
if err != nil && err == ErrEntriesExist {
r.ing.metrics.duplicateEntriesTotal.Add(float64(len(entries.Entries)))
diff --git a/pkg/ingester/recovery_test.go b/pkg/ingester/recovery_test.go
index fd8f05136d6f5..9176ff3c6ad2f 100644
--- a/pkg/ingester/recovery_test.go
+++ b/pkg/ingester/recovery_test.go
@@ -228,7 +228,7 @@ func TestSeriesRecoveryNoDuplicates(t *testing.T) {
chunks: map[string][]chunk.Chunk{},
}
- i, err := New(ingesterConfig, client.Config{}, store, limits, loki_runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger())
+ i, err := New(ingesterConfig, client.Config{}, store, limits, loki_runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil)
require.NoError(t, err)
mkSample := func(i int) *logproto.PushRequest {
@@ -262,7 +262,7 @@ func TestSeriesRecoveryNoDuplicates(t *testing.T) {
require.Equal(t, false, iter.Next())
// create a new ingester now
- i, err = New(ingesterConfig, client.Config{}, store, limits, loki_runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger())
+ i, err = New(ingesterConfig, client.Config{}, store, limits, loki_runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil)
require.NoError(t, err)
// recover the checkpointed series
diff --git a/pkg/ingester/stream.go b/pkg/ingester/stream.go
index 6bf75dfa1ac54..0aa3c41ea619b 100644
--- a/pkg/ingester/stream.go
+++ b/pkg/ingester/stream.go
@@ -19,6 +19,7 @@ import (
"github.com/grafana/loki/v3/pkg/distributor/writefailures"
"github.com/grafana/loki/v3/pkg/ingester/wal"
"github.com/grafana/loki/v3/pkg/iter"
+ "github.com/grafana/loki/v3/pkg/loghttp/push"
"github.com/grafana/loki/v3/pkg/logproto"
"github.com/grafana/loki/v3/pkg/logql/log"
"github.com/grafana/loki/v3/pkg/logqlmodel/stats"
@@ -181,6 +182,8 @@ func (s *stream) Push(
lockChunk bool,
// Whether nor not to ingest all at once or not. It is a per-tenant configuration.
rateLimitWholeStream bool,
+
+ usageTracker push.UsageTracker,
) (int, error) {
if lockChunk {
s.chunkMtx.Lock()
@@ -199,7 +202,7 @@ func (s *stream) Push(
return 0, ErrEntriesExist
}
- toStore, invalid := s.validateEntries(entries, isReplay, rateLimitWholeStream)
+ toStore, invalid := s.validateEntries(ctx, entries, isReplay, rateLimitWholeStream, usageTracker)
if rateLimitWholeStream && hasRateLimitErr(invalid) {
return 0, errorForFailedEntries(s, invalid, len(entries))
}
@@ -213,7 +216,7 @@ func (s *stream) Push(
s.metrics.chunkCreatedStats.Inc(1)
}
- bytesAdded, storedEntries, entriesWithErr := s.storeEntries(ctx, toStore)
+ bytesAdded, storedEntries, entriesWithErr := s.storeEntries(ctx, toStore, usageTracker)
s.recordAndSendToTailers(record, storedEntries)
if len(s.chunks) != prevNumChunks {
@@ -313,7 +316,7 @@ func (s *stream) recordAndSendToTailers(record *wal.Record, entries []logproto.E
}
}
-func (s *stream) storeEntries(ctx context.Context, entries []logproto.Entry) (int, []logproto.Entry, []entryWithError) {
+func (s *stream) storeEntries(ctx context.Context, entries []logproto.Entry, usageTracker push.UsageTracker) (int, []logproto.Entry, []entryWithError) {
if sp := opentracing.SpanFromContext(ctx); sp != nil {
sp.LogKV("event", "stream started to store entries", "labels", s.labelsString)
defer sp.LogKV("event", "stream finished to store entries")
@@ -350,11 +353,12 @@ func (s *stream) storeEntries(ctx context.Context, entries []logproto.Entry) (in
bytesAdded += len(entries[i].Line)
storedEntries = append(storedEntries, entries[i])
}
- s.reportMetrics(outOfOrderSamples, outOfOrderBytes, 0, 0)
+ s.reportMetrics(ctx, outOfOrderSamples, outOfOrderBytes, 0, 0, usageTracker)
return bytesAdded, storedEntries, invalid
}
-func (s *stream) validateEntries(entries []logproto.Entry, isReplay, rateLimitWholeStream bool) ([]logproto.Entry, []entryWithError) {
+func (s *stream) validateEntries(ctx context.Context, entries []logproto.Entry, isReplay, rateLimitWholeStream bool, usageTracker push.UsageTracker) ([]logproto.Entry, []entryWithError) {
+
var (
outOfOrderSamples, outOfOrderBytes int
rateLimitedSamples, rateLimitedBytes int
@@ -427,11 +431,11 @@ func (s *stream) validateEntries(entries []logproto.Entry, isReplay, rateLimitWh
}
s.streamRateCalculator.Record(s.tenant, s.labelHash, s.labelHashNoShard, totalBytes)
- s.reportMetrics(outOfOrderSamples, outOfOrderBytes, rateLimitedSamples, rateLimitedBytes)
+ s.reportMetrics(ctx, outOfOrderSamples, outOfOrderBytes, rateLimitedSamples, rateLimitedBytes, usageTracker)
return toStore, failedEntriesWithError
}
-func (s *stream) reportMetrics(outOfOrderSamples, outOfOrderBytes, rateLimitedSamples, rateLimitedBytes int) {
+func (s *stream) reportMetrics(ctx context.Context, outOfOrderSamples, outOfOrderBytes, rateLimitedSamples, rateLimitedBytes int, usageTracker push.UsageTracker) {
if outOfOrderSamples > 0 {
name := validation.OutOfOrder
if s.unorderedWrites {
@@ -439,10 +443,16 @@ func (s *stream) reportMetrics(outOfOrderSamples, outOfOrderBytes, rateLimitedSa
}
validation.DiscardedSamples.WithLabelValues(name, s.tenant).Add(float64(outOfOrderSamples))
validation.DiscardedBytes.WithLabelValues(name, s.tenant).Add(float64(outOfOrderBytes))
+ if usageTracker != nil {
+ usageTracker.DiscardedBytesAdd(ctx, s.tenant, name, s.labels, float64(outOfOrderBytes))
+ }
}
if rateLimitedSamples > 0 {
validation.DiscardedSamples.WithLabelValues(validation.StreamRateLimit, s.tenant).Add(float64(rateLimitedSamples))
validation.DiscardedBytes.WithLabelValues(validation.StreamRateLimit, s.tenant).Add(float64(rateLimitedBytes))
+ if usageTracker != nil {
+ usageTracker.DiscardedBytesAdd(ctx, s.tenant, validation.StreamRateLimit, s.labels, float64(rateLimitedBytes))
+ }
}
}
diff --git a/pkg/ingester/stream_test.go b/pkg/ingester/stream_test.go
index af877bf88da9e..e4dd4a37ab355 100644
--- a/pkg/ingester/stream_test.go
+++ b/pkg/ingester/stream_test.go
@@ -73,7 +73,7 @@ func TestMaxReturnedStreamsErrors(t *testing.T) {
_, err := s.Push(context.Background(), []logproto.Entry{
{Timestamp: time.Unix(int64(numLogs), 0), Line: "log"},
- }, recordPool.GetRecord(), 0, true, false)
+ }, recordPool.GetRecord(), 0, true, false, nil)
require.NoError(t, err)
newLines := make([]logproto.Entry, numLogs)
@@ -94,7 +94,7 @@ func TestMaxReturnedStreamsErrors(t *testing.T) {
fmt.Fprintf(&expected, "user 'fake', total ignored: %d out of %d for stream: {foo=\"bar\"}", numLogs, numLogs)
expectErr := httpgrpc.Errorf(http.StatusBadRequest, expected.String())
- _, err = s.Push(context.Background(), newLines, recordPool.GetRecord(), 0, true, false)
+ _, err = s.Push(context.Background(), newLines, recordPool.GetRecord(), 0, true, false, nil)
require.Error(t, err)
require.Equal(t, expectErr.Error(), err.Error())
})
@@ -128,7 +128,7 @@ func TestPushDeduplication(t *testing.T) {
{Timestamp: time.Unix(1, 0), Line: "test"},
{Timestamp: time.Unix(1, 0), Line: "test"},
{Timestamp: time.Unix(1, 0), Line: "newer, better test"},
- }, recordPool.GetRecord(), 0, true, false)
+ }, recordPool.GetRecord(), 0, true, false, nil)
require.NoError(t, err)
require.Len(t, s.chunks, 1)
require.Equal(t, s.chunks[0].chunk.Size(), 2,
@@ -164,7 +164,7 @@ func TestPushRejectOldCounter(t *testing.T) {
{Timestamp: time.Unix(1, 0), Line: "test"},
{Timestamp: time.Unix(1, 0), Line: "test"},
{Timestamp: time.Unix(1, 0), Line: "newer, better test"},
- }, recordPool.GetRecord(), 0, true, false)
+ }, recordPool.GetRecord(), 0, true, false, nil)
require.NoError(t, err)
require.Len(t, s.chunks, 1)
require.Equal(t, s.chunks[0].chunk.Size(), 2,
@@ -173,13 +173,13 @@ func TestPushRejectOldCounter(t *testing.T) {
// fail to push with a counter <= the streams internal counter
_, err = s.Push(context.Background(), []logproto.Entry{
{Timestamp: time.Unix(1, 0), Line: "test"},
- }, recordPool.GetRecord(), 2, true, false)
+ }, recordPool.GetRecord(), 2, true, false, nil)
require.Equal(t, ErrEntriesExist, err)
// succeed with a greater counter
_, err = s.Push(context.Background(), []logproto.Entry{
{Timestamp: time.Unix(1, 0), Line: "test"},
- }, recordPool.GetRecord(), 3, true, false)
+ }, recordPool.GetRecord(), 3, true, false, nil)
require.Nil(t, err)
}
@@ -270,9 +270,12 @@ func TestEntryErrorCorrectlyReported(t *testing.T) {
{Line: "observability", Timestamp: time.Now().AddDate(-1 /* year */, 0 /* month */, 0 /* day */)},
{Line: "short", Timestamp: time.Now()},
}
- _, failed := s.validateEntries(entries, false, true)
+ tracker := &mockUsageTracker{}
+
+ _, failed := s.validateEntries(context.Background(), entries, false, true, tracker)
require.NotEmpty(t, failed)
require.False(t, hasRateLimitErr(failed))
+ require.Equal(t, 13.0, tracker.discardedBytes)
}
func TestUnorderedPush(t *testing.T) {
@@ -340,7 +343,7 @@ func TestUnorderedPush(t *testing.T) {
if x.cutBefore {
_ = s.cutChunk(context.Background())
}
- written, err := s.Push(context.Background(), x.entries, recordPool.GetRecord(), 0, true, false)
+ written, err := s.Push(context.Background(), x.entries, recordPool.GetRecord(), 0, true, false, nil)
if x.err {
require.NotNil(t, err)
} else {
@@ -407,9 +410,11 @@ func TestPushRateLimit(t *testing.T) {
{Timestamp: time.Unix(1, 0), Line: "aaaaaaaaab"},
}
// Counter should be 2 now since the first line will be deduped.
- _, err = s.Push(context.Background(), entries, recordPool.GetRecord(), 0, true, true)
+ tracker := &mockUsageTracker{}
+ _, err = s.Push(context.Background(), entries, recordPool.GetRecord(), 0, true, true, tracker)
require.Error(t, err)
require.Contains(t, err.Error(), (&validation.ErrStreamRateLimit{RateLimit: l.PerStreamRateLimit, Labels: s.labelsString, Bytes: flagext.ByteSize(len(entries[1].Line))}).Error())
+ require.Equal(t, 20.0, tracker.discardedBytes)
}
func TestPushRateLimitAllOrNothing(t *testing.T) {
@@ -446,10 +451,12 @@ func TestPushRateLimitAllOrNothing(t *testing.T) {
}
// Both entries have errors because rate limiting is done all at once
- _, err = s.Push(context.Background(), entries, recordPool.GetRecord(), 0, true, true)
+ tracker := &mockUsageTracker{}
+ _, err = s.Push(context.Background(), entries, recordPool.GetRecord(), 0, true, true, tracker)
require.Error(t, err)
require.Contains(t, err.Error(), (&validation.ErrStreamRateLimit{RateLimit: l.PerStreamRateLimit, Labels: s.labelsString, Bytes: flagext.ByteSize(len(entries[0].Line))}).Error())
require.Contains(t, err.Error(), (&validation.ErrStreamRateLimit{RateLimit: l.PerStreamRateLimit, Labels: s.labelsString, Bytes: flagext.ByteSize(len(entries[1].Line))}).Error())
+ require.Equal(t, 20.0, tracker.discardedBytes)
}
func TestReplayAppendIgnoresValidityWindow(t *testing.T) {
@@ -484,7 +491,7 @@ func TestReplayAppendIgnoresValidityWindow(t *testing.T) {
}
// Push a first entry (it doesn't matter if we look like we're replaying or not)
- _, err = s.Push(context.Background(), entries, nil, 1, true, false)
+ _, err = s.Push(context.Background(), entries, nil, 1, true, false, nil)
require.Nil(t, err)
// Create a sample outside the validity window
@@ -493,11 +500,11 @@ func TestReplayAppendIgnoresValidityWindow(t *testing.T) {
}
// Pretend it's not a replay, ensure we error
- _, err = s.Push(context.Background(), entries, recordPool.GetRecord(), 0, true, false)
+ _, err = s.Push(context.Background(), entries, recordPool.GetRecord(), 0, true, false, nil)
require.NotNil(t, err)
// Now pretend it's a replay. The same write should succeed.
- _, err = s.Push(context.Background(), entries, nil, 2, true, false)
+ _, err = s.Push(context.Background(), entries, nil, 2, true, false, nil)
require.Nil(t, err)
}
@@ -542,7 +549,7 @@ func Benchmark_PushStream(b *testing.B) {
for n := 0; n < b.N; n++ {
rec := recordPool.GetRecord()
- _, err := s.Push(ctx, e, rec, 0, true, false)
+ _, err := s.Push(ctx, e, rec, 0, true, false, nil)
require.NoError(b, err)
recordPool.PutRecord(rec)
}
diff --git a/pkg/ingester/tailer.go b/pkg/ingester/tailer.go
index 441c688612d9e..b39f42957360b 100644
--- a/pkg/ingester/tailer.go
+++ b/pkg/ingester/tailer.go
@@ -4,11 +4,11 @@ import (
"encoding/binary"
"hash/fnv"
"sync"
- "sync/atomic"
"time"
"github.com/go-kit/log/level"
"github.com/prometheus/prometheus/model/labels"
+ "go.uber.org/atomic"
"golang.org/x/net/context"
"github.com/grafana/loki/v3/pkg/logproto"
diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go
index 3561f89a23187..0280bd514d3c1 100644
--- a/pkg/loki/modules.go
+++ b/pkg/loki/modules.go
@@ -583,7 +583,7 @@ func (t *Loki) initIngester() (_ services.Service, err error) {
level.Warn(util_log.Logger).Log("msg", "The config setting shutdown marker path is not set. The /ingester/prepare_shutdown endpoint won't work")
}
- t.Ingester, err = ingester.New(t.Cfg.Ingester, t.Cfg.IngesterClient, t.Store, t.Overrides, t.tenantConfigs, prometheus.DefaultRegisterer, t.Cfg.Distributor.WriteFailuresLogging, t.Cfg.MetricsNamespace, logger)
+ t.Ingester, err = ingester.New(t.Cfg.Ingester, t.Cfg.IngesterClient, t.Store, t.Overrides, t.tenantConfigs, prometheus.DefaultRegisterer, t.Cfg.Distributor.WriteFailuresLogging, t.Cfg.MetricsNamespace, logger, t.UsageTracker)
if err != nil {
return
}
|
fix
|
Track bytes discarded by ingester. (#12981)
|
6f533ed4386ee2db61680a9021934bfe9a9ba749
|
2024-10-01 16:42:11
|
Periklis Tsirakidis
|
fix(operator): Use empty initialized pod status map when no pods (#14314)
| false
|
diff --git a/operator/internal/status/components.go b/operator/internal/status/components.go
index 2d3061ecfc500..3e59f8a198556 100644
--- a/operator/internal/status/components.go
+++ b/operator/internal/status/components.go
@@ -73,6 +73,15 @@ func appendPodStatus(ctx context.Context, k k8s.Client, component, stack, ns str
status := podStatus(&pod)
psm[status] = append(psm[status], pod.Name)
}
+
+ if len(psm) == 0 {
+ psm = lokiv1.PodStatusMap{
+ lokiv1.PodFailed: []string{},
+ lokiv1.PodPending: []string{},
+ lokiv1.PodRunning: []string{},
+ lokiv1.PodReady: []string{},
+ }
+ }
return psm, nil
}
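
With this change a component that has no pods reports a map carrying all four status keys with empty slices rather than an empty map, so status comparisons see a stable shape. A minimal sketch of a consumer relying on that, assuming the lokiv1 import path used elsewhere in the operator; the helper and variable names are hypothetical.

package status

import (
	"reflect"

	lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
)

// emptyPodStatusMap mirrors the shape appendPodStatus now returns when a
// component lists no pods.
var emptyPodStatusMap = lokiv1.PodStatusMap{
	lokiv1.PodFailed:  []string{},
	lokiv1.PodPending: []string{},
	lokiv1.PodRunning: []string{},
	lokiv1.PodReady:   []string{},
}

// isComponentAbsent is a hypothetical check: a component without pods now
// compares equal to the fully-keyed baseline instead of to a nil or empty map.
func isComponentAbsent(psm lokiv1.PodStatusMap) bool {
	return reflect.DeepEqual(psm, emptyPodStatusMap)
}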
diff --git a/operator/internal/status/components_test.go b/operator/internal/status/components_test.go
index 111074f880031..d43bb2a293150 100644
--- a/operator/internal/status/components_test.go
+++ b/operator/internal/status/components_test.go
@@ -65,6 +65,13 @@ func setupListClient(t *testing.T, stack *lokiv1.LokiStack, componentPods map[st
}
func TestGenerateComponentStatus(t *testing.T) {
+ empty := lokiv1.PodStatusMap{
+ lokiv1.PodFailed: []string{},
+ lokiv1.PodPending: []string{},
+ lokiv1.PodRunning: []string{},
+ lokiv1.PodReady: []string{},
+ }
+
tt := []struct {
desc string
componentPods map[string]*corev1.PodList
@@ -83,14 +90,14 @@ func TestGenerateComponentStatus(t *testing.T) {
manifests.LabelGatewayComponent: {},
},
wantComponentStatus: &lokiv1.LokiStackComponentStatus{
- Compactor: lokiv1.PodStatusMap{},
- Distributor: lokiv1.PodStatusMap{},
- IndexGateway: lokiv1.PodStatusMap{},
- Ingester: lokiv1.PodStatusMap{},
- Querier: lokiv1.PodStatusMap{},
- QueryFrontend: lokiv1.PodStatusMap{},
- Gateway: lokiv1.PodStatusMap{},
- Ruler: lokiv1.PodStatusMap{},
+ Compactor: empty,
+ Distributor: empty,
+ IndexGateway: empty,
+ Ingester: empty,
+ Querier: empty,
+ QueryFrontend: empty,
+ Gateway: empty,
+ Ruler: empty,
},
},
{
@@ -116,6 +123,29 @@ func TestGenerateComponentStatus(t *testing.T) {
Ruler: lokiv1.PodStatusMap{lokiv1.PodRunning: {"ruler-pod-0"}},
},
},
+ {
+ desc: "all pods without ruler",
+ componentPods: map[string]*corev1.PodList{
+ manifests.LabelCompactorComponent: createPodList(manifests.LabelCompactorComponent, false, corev1.PodRunning),
+ manifests.LabelDistributorComponent: createPodList(manifests.LabelDistributorComponent, false, corev1.PodRunning),
+ manifests.LabelIngesterComponent: createPodList(manifests.LabelIngesterComponent, false, corev1.PodRunning),
+ manifests.LabelQuerierComponent: createPodList(manifests.LabelQuerierComponent, false, corev1.PodRunning),
+ manifests.LabelQueryFrontendComponent: createPodList(manifests.LabelQueryFrontendComponent, false, corev1.PodRunning),
+ manifests.LabelIndexGatewayComponent: createPodList(manifests.LabelIndexGatewayComponent, false, corev1.PodRunning),
+ manifests.LabelRulerComponent: {},
+ manifests.LabelGatewayComponent: createPodList(manifests.LabelGatewayComponent, false, corev1.PodRunning),
+ },
+ wantComponentStatus: &lokiv1.LokiStackComponentStatus{
+ Compactor: lokiv1.PodStatusMap{lokiv1.PodRunning: {"compactor-pod-0"}},
+ Distributor: lokiv1.PodStatusMap{lokiv1.PodRunning: {"distributor-pod-0"}},
+ IndexGateway: lokiv1.PodStatusMap{lokiv1.PodRunning: {"index-gateway-pod-0"}},
+ Ingester: lokiv1.PodStatusMap{lokiv1.PodRunning: {"ingester-pod-0"}},
+ Querier: lokiv1.PodStatusMap{lokiv1.PodRunning: {"querier-pod-0"}},
+ QueryFrontend: lokiv1.PodStatusMap{lokiv1.PodRunning: {"query-frontend-pod-0"}},
+ Gateway: lokiv1.PodStatusMap{lokiv1.PodRunning: {"lokistack-gateway-pod-0"}},
+ Ruler: empty,
+ },
+ },
}
for _, tc := range tt {
|
fix
|
Use empty initialized pod status map when no pods (#14314)
|
eaf7f34f495195e5c8a938bd4d346c907db6b629
|
2022-09-07 13:16:22
|
Adam
|
bugfix: add cases.NoLower option (#7052)
| false
|
diff --git a/pkg/util/cfg/flag.go b/pkg/util/cfg/flag.go
index af023154463d3..c95798883692c 100644
--- a/pkg/util/cfg/flag.go
+++ b/pkg/util/cfg/flag.go
@@ -94,7 +94,7 @@ func categorizedUsage(fs *flag.FlagSet) func() {
if name == "" {
continue
}
- fmt.Fprintf(fs.Output(), " %s:\n", cases.Title(language.Und).String(name))
+ fmt.Fprintf(fs.Output(), " %s:\n", cases.Title(language.Und, cases.NoLower).String(name))
for _, u := range categories[name] {
fmt.Fprintln(fs.Output(), u)
}
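
The effect of the added option can be seen in isolation with the golang.org/x/text packages this file already uses; a small standalone sketch:

package main

import (
	"fmt"

	"golang.org/x/text/cases"
	"golang.org/x/text/language"
)

func main() {
	name := "testAPI"

	// Without NoLower, the Title caser lowercases everything after the first
	// rune, which mangles acronym-style flag categories.
	fmt.Println(cases.Title(language.Und).String(name)) // Testapi

	// With NoLower, only the first rune is upper-cased and the rest is kept,
	// which is what the categorized usage output expects.
	fmt.Println(cases.Title(language.Und, cases.NoLower).String(name)) // TestAPI
}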
diff --git a/pkg/util/cfg/flag_test.go b/pkg/util/cfg/flag_test.go
index dbce7eb4e63ad..2d4e244edb8fa 100644
--- a/pkg/util/cfg/flag_test.go
+++ b/pkg/util/cfg/flag_test.go
@@ -1,6 +1,7 @@
package cfg
import (
+ "bytes"
"flag"
"testing"
"time"
@@ -56,3 +57,22 @@ func TestFlags(t *testing.T) {
},
}, data)
}
+
+// TestCategorizedUsage checks that the name of every flag can be "Titled" correctly
+func TestCategorizedUsage(t *testing.T) {
+ output := &bytes.Buffer{}
+ fs := flag.NewFlagSet(t.Name(), flag.PanicOnError)
+ fs.SetOutput(output)
+ // "TestAPI" expected
+ fs.String("testAPI.one", "", "")
+ fs.String("testAPI.two", "", "")
+ // "TESTapi" expected
+ fs.String("tESTapi.one", "", "")
+ fs.String("tESTapi.two", "", "")
+ // "TestAPI" expected
+ fs.String("TestAPI.one", "", "")
+ fs.String("TestAPI.two", "", "")
+ categorizedUsage(fs)()
+ expected := "Usage of TestCategorizedUsage:\n\n TestAPI:\n -TestAPI.one string:\n \n -TestAPI.two string:\n \n\n TESTapi:\n -tESTapi.one string:\n \n -tESTapi.two string:\n \n\n TestAPI:\n -testAPI.one string:\n \n -testAPI.two string:\n \n\n"
+ assert.Equal(t, expected, output.String())
+}
|
bugfix
|
add cases.NoLower option (#7052)
|
f2bff77d89cdeb93285e4bd199d49048ff2b9e99
|
2024-12-11 10:45:01
|
Owen Diehl
|
feat(block-scheduler): job tracking & offset commits (#15338)
| false
|
diff --git a/pkg/blockbuilder/builder/builder.go b/pkg/blockbuilder/builder/builder.go
index d8784bf568d99..ad981e3183d0e 100644
--- a/pkg/blockbuilder/builder/builder.go
+++ b/pkg/blockbuilder/builder/builder.go
@@ -250,13 +250,13 @@ func (i *BlockBuilder) runOne(ctx context.Context, workerID string) (bool, error
logger := log.With(
i.logger,
"worker_id", workerID,
- "partition", job.Partition,
- "job_min_offset", job.Offsets.Min,
- "job_max_offset", job.Offsets.Max,
+ "partition", job.Partition(),
+ "job_min_offset", job.Offsets().Min,
+ "job_max_offset", job.Offsets().Max,
)
i.jobsMtx.Lock()
- i.inflightJobs[job.ID] = job
+ i.inflightJobs[job.ID()] = job
i.metrics.inflightJobs.Set(float64(len(i.inflightJobs)))
i.jobsMtx.Unlock()
@@ -284,7 +284,7 @@ func (i *BlockBuilder) runOne(ctx context.Context, workerID string) (bool, error
}
i.jobsMtx.Lock()
- delete(i.inflightJobs, job.ID)
+ delete(i.inflightJobs, job.ID())
i.metrics.inflightJobs.Set(float64(len(i.inflightJobs)))
i.jobsMtx.Unlock()
@@ -315,7 +315,7 @@ func (i *BlockBuilder) processJob(ctx context.Context, job *types.Job, logger lo
"load records",
1,
func(ctx context.Context) error {
- lastOffset, err = i.loadRecords(ctx, job.Partition, job.Offsets, inputCh)
+ lastOffset, err = i.loadRecords(ctx, job.Partition(), job.Offsets(), inputCh)
return err
},
func(ctx context.Context) error {
@@ -323,7 +323,7 @@ func (i *BlockBuilder) processJob(ctx context.Context, job *types.Job, logger lo
"msg", "finished loading records",
"ctx_error", ctx.Err(),
"last_offset", lastOffset,
- "total_records", lastOffset-job.Offsets.Min,
+ "total_records", lastOffset-job.Offsets().Min,
)
close(inputCh)
return nil
@@ -488,7 +488,7 @@ func (i *BlockBuilder) processJob(ctx context.Context, job *types.Job, logger lo
}
}
- if lastOffset <= job.Offsets.Min {
+ if lastOffset <= job.Offsets().Min {
return lastOffset, nil
}
diff --git a/pkg/blockbuilder/scheduler/prioritiy_queue_test.go b/pkg/blockbuilder/scheduler/prioritiy_queue_test.go
index b27d950aa04b0..6845e29dbbb3d 100644
--- a/pkg/blockbuilder/scheduler/prioritiy_queue_test.go
+++ b/pkg/blockbuilder/scheduler/prioritiy_queue_test.go
@@ -7,7 +7,7 @@ import (
)
func TestPriorityQueue(t *testing.T) {
- t.Run("operations", func(t *testing.T) {
+ t.Run("basic operations", func(t *testing.T) {
tests := []struct {
name string
input []int
@@ -33,16 +33,14 @@ func TestPriorityQueue(t *testing.T) {
input: []int{3, 1, 2},
wantPops: []int{1, 2, 3},
},
- {
- name: "duplicate elements",
- input: []int{2, 1, 2, 1},
- wantPops: []int{1, 1, 2, 2},
- },
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- pq := NewPriorityQueue[int](func(a, b int) bool { return a < b })
+ pq := NewPriorityQueue[int, int](
+ func(a, b int) bool { return a < b },
+ func(v int) int { return v },
+ )
require.Equal(t, 0, pq.Len())
// Push all elements
@@ -69,15 +67,73 @@ func TestPriorityQueue(t *testing.T) {
}
})
+ t.Run("key operations", func(t *testing.T) {
+ type Job struct {
+ ID string
+ Priority int
+ }
+
+ pq := NewPriorityQueue[string, Job](
+ func(a, b Job) bool { return a.Priority < b.Priority },
+ func(j Job) string { return j.ID },
+ )
+
+ // Test Push with duplicate key
+ job1 := Job{ID: "job1", Priority: 1}
+ job1Updated := Job{ID: "job1", Priority: 3}
+ job2 := Job{ID: "job2", Priority: 2}
+
+ pq.Push(job1)
+ require.Equal(t, 1, pq.Len())
+
+ // Push with same key should update
+ pq.Push(job1Updated)
+ require.Equal(t, 1, pq.Len())
+
+ // Verify updated priority
+ v, ok := pq.Lookup("job1")
+ require.True(t, ok)
+ require.Equal(t, job1Updated, v)
+
+ // Test Remove
+ pq.Push(job2)
+ v, ok = pq.Remove("job1")
+ require.True(t, ok)
+ require.Equal(t, job1Updated, v)
+ require.Equal(t, 1, pq.Len())
+
+ // Test UpdatePriority
+ newJob2 := Job{ID: "job2", Priority: 4}
+ ok = pq.UpdatePriority("job2", newJob2)
+ require.True(t, ok)
+
+ v, ok = pq.Lookup("job2")
+ require.True(t, ok)
+ require.Equal(t, newJob2, v)
+
+ // Test non-existent key operations
+ v, ok = pq.Lookup("nonexistent")
+ require.False(t, ok)
+ require.Zero(t, v)
+
+ v, ok = pq.Remove("nonexistent")
+ require.False(t, ok)
+ require.Zero(t, v)
+
+ ok = pq.UpdatePriority("nonexistent", Job{})
+ require.False(t, ok)
+ })
+
t.Run("custom type", func(t *testing.T) {
type Job struct {
ID string
Priority int
}
- pq := NewPriorityQueue[Job](func(a, b Job) bool {
- return a.Priority < b.Priority
- })
+ pq := NewPriorityQueue[string, Job](
+ func(a, b Job) bool { return a.Priority < b.Priority },
+ func(j Job) string { return j.ID },
+ )
jobs := []Job{
{ID: "high", Priority: 3},
@@ -102,25 +158,28 @@ func TestPriorityQueue(t *testing.T) {
})
t.Run("mixed operations", func(t *testing.T) {
- pq := NewPriorityQueue[int](func(a, b int) bool { return a < b })
+ pq := NewPriorityQueue[int, int](
+ func(a, b int) bool { return a < b },
+ func(v int) int { return v },
+ )
// Push some elements
pq.Push(3)
pq.Push(1)
- require.Equal(t, 2, pq.Len())
+ pq.Push(4)
- // Pop lowest
+ // Pop an element
v, ok := pq.Pop()
require.True(t, ok)
require.Equal(t, 1, v)
// Push more elements
pq.Push(2)
- pq.Push(4)
+ pq.Push(5)
- // Verify remaining elements come out in order
- want := []int{2, 3, 4}
- got := make([]int, 0, 3)
+ // Pop remaining elements and verify order
+ want := []int{2, 3, 4, 5}
+ got := make([]int, 0, len(want))
for range want {
v, ok := pq.Pop()
require.True(t, ok)
@@ -191,3 +250,54 @@ func TestCircularBuffer(t *testing.T) {
})
}
}
+
+func TestCircularBufferLookup(t *testing.T) {
+ t.Run("empty buffer", func(t *testing.T) {
+ cb := NewCircularBuffer[int](5)
+ _, ok := cb.Lookup(func(i int) bool { return i == 1 })
+ require.False(t, ok)
+ })
+
+ t.Run("single element", func(t *testing.T) {
+ cb := NewCircularBuffer[int](5)
+ cb.Push(1)
+ v, ok := cb.Lookup(func(i int) bool { return i == 1 })
+ require.True(t, ok)
+ require.Equal(t, 1, v)
+ })
+
+ t.Run("multiple elements", func(t *testing.T) {
+ cb := NewCircularBuffer[int](5)
+ for i := 1; i <= 3; i++ {
+ cb.Push(i)
+ }
+ v, ok := cb.Lookup(func(i int) bool { return i == 2 })
+ require.True(t, ok)
+ require.Equal(t, 2, v)
+ })
+
+ t.Run("wrapped buffer", func(t *testing.T) {
+ cb := NewCircularBuffer[int](3)
+ // Push 5 elements into a buffer of size 3, causing wrap-around
+ for i := 1; i <= 5; i++ {
+ cb.Push(i)
+ }
+ // Buffer should now contain [4,5,3] with head at index 2
+ v, ok := cb.Lookup(func(i int) bool { return i == 4 })
+ require.True(t, ok)
+ require.Equal(t, 4, v)
+
+ // Element that was evicted should not be found
+ _, ok = cb.Lookup(func(i int) bool { return i == 1 })
+ require.False(t, ok)
+ })
+
+ t.Run("no match", func(t *testing.T) {
+ cb := NewCircularBuffer[int](5)
+ for i := 1; i <= 3; i++ {
+ cb.Push(i)
+ }
+ _, ok := cb.Lookup(func(i int) bool { return i == 99 })
+ require.False(t, ok)
+ })
+}
diff --git a/pkg/blockbuilder/scheduler/priority_queue.go b/pkg/blockbuilder/scheduler/priority_queue.go
index 3b488716cabe8..86b2c795f2eb2 100644
--- a/pkg/blockbuilder/scheduler/priority_queue.go
+++ b/pkg/blockbuilder/scheduler/priority_queue.go
@@ -4,82 +4,142 @@ import (
"container/heap"
)
-// PriorityQueue is a generic priority queue.
-type PriorityQueue[T any] struct {
- h *priorityHeap[T]
+// PriorityQueue is a generic priority queue with constant time lookups.
+type PriorityQueue[K comparable, V any] struct {
+ h *priorityHeap[V]
+ m map[K]*item[V] // Map for constant time lookups
+ key func(V) K // Function to extract key from value
+}
+
+// item represents an item in the priority queue with its index
+type item[V any] struct {
+ value V
+ index int
}
// NewPriorityQueue creates a new priority queue.
-func NewPriorityQueue[T any](less func(T, T) bool) *PriorityQueue[T] {
- h := &priorityHeap[T]{
+func NewPriorityQueue[K comparable, V any](less func(V, V) bool, key func(V) K) *PriorityQueue[K, V] {
+ h := &priorityHeap[V]{
less: less,
- heap: make([]T, 0),
+ heap: make([]*item[V], 0),
}
heap.Init(h)
- return &PriorityQueue[T]{h: h}
+ return &PriorityQueue[K, V]{
+ h: h,
+ m: make(map[K]*item[V]),
+ key: key,
+ }
}
// Push adds an element to the queue.
-func (pq *PriorityQueue[T]) Push(v T) {
- heap.Push(pq.h, v)
+func (pq *PriorityQueue[K, V]) Push(v V) {
+ k := pq.key(v)
+ if existing, ok := pq.m[k]; ok {
+ // Update existing item's value and fix heap
+ existing.value = v
+ heap.Fix(pq.h, existing.index)
+ return
+ }
+
+ // Add new item
+ it := &item[V]{value: v}
+ pq.m[k] = it
+ heap.Push(pq.h, it)
}
// Pop removes and returns the element with the highest priority from the queue.
-func (pq *PriorityQueue[T]) Pop() (T, bool) {
+func (pq *PriorityQueue[K, V]) Pop() (V, bool) {
if pq.Len() == 0 {
- var zero T
+ var zero V
return zero, false
}
- return heap.Pop(pq.h).(T), true
+ it := heap.Pop(pq.h).(*item[V])
+ delete(pq.m, pq.key(it.value))
+ return it.value, true
+}
+
+// Lookup returns the item with the given key if it exists.
+func (pq *PriorityQueue[K, V]) Lookup(k K) (V, bool) {
+ if it, ok := pq.m[k]; ok {
+ return it.value, true
+ }
+ var zero V
+ return zero, false
+}
+
+// Remove removes and returns the item with the given key if it exists.
+func (pq *PriorityQueue[K, V]) Remove(k K) (V, bool) {
+ it, ok := pq.m[k]
+ if !ok {
+ var zero V
+ return zero, false
+ }
+ heap.Remove(pq.h, it.index)
+ delete(pq.m, k)
+ return it.value, true
+}
+
+// UpdatePriority updates the priority of an item and reorders the queue.
+func (pq *PriorityQueue[K, V]) UpdatePriority(k K, v V) bool {
+ if it, ok := pq.m[k]; ok {
+ it.value = v
+ heap.Fix(pq.h, it.index)
+ return true
+ }
+ return false
}
// Len returns the number of elements in the queue.
-func (pq *PriorityQueue[T]) Len() int {
+func (pq *PriorityQueue[K, V]) Len() int {
return pq.h.Len()
}
// priorityHeap is the internal heap implementation that satisfies heap.Interface.
-type priorityHeap[T any] struct {
- less func(T, T) bool
- heap []T
+type priorityHeap[V any] struct {
+ less func(V, V) bool
+ heap []*item[V]
}
-func (h *priorityHeap[T]) Len() int {
+func (h *priorityHeap[V]) Len() int {
return len(h.heap)
}
-func (h *priorityHeap[T]) Less(i, j int) bool {
- return h.less(h.heap[i], h.heap[j])
+func (h *priorityHeap[V]) Less(i, j int) bool {
+ return h.less(h.heap[i].value, h.heap[j].value)
}
-func (h *priorityHeap[T]) Swap(i, j int) {
+func (h *priorityHeap[V]) Swap(i, j int) {
h.heap[i], h.heap[j] = h.heap[j], h.heap[i]
+ h.heap[i].index = i
+ h.heap[j].index = j
}
-func (h *priorityHeap[T]) Push(x any) {
- h.heap = append(h.heap, x.(T))
+func (h *priorityHeap[V]) Push(x any) {
+ it := x.(*item[V])
+ it.index = len(h.heap)
+ h.heap = append(h.heap, it)
}
-func (h *priorityHeap[T]) Pop() any {
+func (h *priorityHeap[V]) Pop() any {
old := h.heap
n := len(old)
- x := old[n-1]
+ it := old[n-1]
h.heap = old[0 : n-1]
- return x
+ return it
}
// CircularBuffer is a generic circular buffer.
-type CircularBuffer[T any] struct {
- buffer []T
+type CircularBuffer[V any] struct {
+ buffer []V
size int
head int
tail int
}
// NewCircularBuffer creates a new circular buffer with the given capacity.
-func NewCircularBuffer[T any](capacity int) *CircularBuffer[T] {
- return &CircularBuffer[T]{
- buffer: make([]T, capacity),
+func NewCircularBuffer[V any](capacity int) *CircularBuffer[V] {
+ return &CircularBuffer[V]{
+ buffer: make([]V, capacity),
size: 0,
head: 0,
tail: 0,
@@ -87,8 +147,8 @@ func NewCircularBuffer[T any](capacity int) *CircularBuffer[T] {
}
// Push adds an element to the circular buffer and returns the evicted element if any
-func (b *CircularBuffer[T]) Push(v T) (T, bool) {
- var evicted T
+func (b *CircularBuffer[V]) Push(v V) (V, bool) {
+ var evicted V
hasEvicted := false
if b.size == len(b.buffer) {
@@ -107,9 +167,9 @@ func (b *CircularBuffer[T]) Push(v T) (T, bool) {
}
// Pop removes and returns the oldest element from the buffer
-func (b *CircularBuffer[T]) Pop() (T, bool) {
+func (b *CircularBuffer[V]) Pop() (V, bool) {
if b.size == 0 {
- var zero T
+ var zero V
return zero, false
}
@@ -121,6 +181,19 @@ func (b *CircularBuffer[T]) Pop() (T, bool) {
}
// Len returns the number of elements in the buffer
-func (b *CircularBuffer[T]) Len() int {
+func (b *CircularBuffer[V]) Len() int {
return b.size
}
+
+// returns the first element in the buffer that satisfies the given predicate
+func (b *CircularBuffer[V]) Lookup(f func(V) bool) (V, bool) {
+ for i := 0; i < b.size; i++ {
+ idx := (b.head + i) % len(b.buffer)
+ if f(b.buffer[idx]) {
+ return b.buffer[idx], true
+ }
+
+ }
+ var zero V
+ return zero, false
+}
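
The key-aware queue changes Push semantics: pushing a value whose key is already present updates the existing entry in place instead of adding a duplicate, and Lookup, Remove, and UpdatePriority locate entries by key instead of scanning. A minimal sketch, assuming it compiles inside the scheduler package; the task type and priorities are illustrative.

// task is a stand-in element type for the example.
type task struct {
	id       string
	priority int
}

func examplePriorityQueue() {
	pq := NewPriorityQueue[string, task](
		func(a, b task) bool { return a.priority < b.priority }, // min-heap by priority
		func(t task) string { return t.id },                     // key extractor for lookups
	)

	pq.Push(task{id: "a", priority: 5})
	pq.Push(task{id: "b", priority: 1})
	pq.Push(task{id: "a", priority: 0}) // same key: "a" is updated in place, Len stays 2

	if v, ok := pq.Lookup("a"); ok {
		_ = v // task{id: "a", priority: 0}
	}

	next, _ := pq.Pop() // "a", now the lowest priority
	_ = next
}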
diff --git a/pkg/blockbuilder/scheduler/queue.go b/pkg/blockbuilder/scheduler/queue.go
index dab46f164908d..1aeb15e8395e5 100644
--- a/pkg/blockbuilder/scheduler/queue.go
+++ b/pkg/blockbuilder/scheduler/queue.go
@@ -5,138 +5,233 @@ import (
"sync"
"time"
+ "github.com/go-kit/log"
+ "github.com/go-kit/log/level"
+
"github.com/grafana/loki/v3/pkg/blockbuilder/types"
)
const (
+ DefaultPriority = 0 // TODO(owen-d): better determine priority when unknown
defaultCompletedJobsCapacity = 100
)
-// JobWithPriority wraps a job with a priority value
-type JobWithPriority[T comparable] struct {
- Job *types.Job
- Priority T
+// JobWithMetadata wraps a job with additional metadata for tracking its lifecycle
+type JobWithMetadata struct {
+ *types.Job
+ Priority int
+ Status types.JobStatus
+ StartTime time.Time
+ UpdateTime time.Time
}
-// NewJobWithPriority creates a new JobWithPriority instance
-func NewJobWithPriority[T comparable](job *types.Job, priority T) *JobWithPriority[T] {
- return &JobWithPriority[T]{
- Job: job,
- Priority: priority,
+// NewJobWithMetadata creates a new JobWithMetadata instance
+func NewJobWithMetadata(job *types.Job, priority int) *JobWithMetadata {
+ return &JobWithMetadata{
+ Job: job,
+ Priority: priority,
+ Status: types.JobStatusPending,
+ UpdateTime: time.Now(),
}
}
-// inProgressJob contains a job and its start time
-type inProgressJob struct {
- job *types.Job
- startTime time.Time
-}
-
-// Duration returns how long the job has been running
-func (j *inProgressJob) Duration() time.Duration {
- return time.Since(j.startTime)
-}
-
// JobQueue manages the queue of pending jobs and tracks their state.
type JobQueue struct {
- pending *PriorityQueue[*JobWithPriority[int]] // Jobs waiting to be processed, ordered by priority
- inProgress map[string]*inProgressJob // Jobs currently being processed, key is job ID
- completed *CircularBuffer[*types.Job] // Last N completed jobs
- statusMap map[string]types.JobStatus // Maps job ID to its current status
+ logger log.Logger
+ pending *PriorityQueue[string, *JobWithMetadata] // Jobs waiting to be processed, ordered by priority
+ inProgress map[string]*JobWithMetadata // Jobs currently being processed
+ completed *CircularBuffer[*JobWithMetadata] // Last N completed jobs
+ statusMap map[string]types.JobStatus // Maps job ID to its current status
mu sync.RWMutex
}
-// NewJobQueue creates a new job queue instance
-func NewJobQueue() *JobQueue {
+func NewJobQueueWithLogger(logger log.Logger) *JobQueue {
return &JobQueue{
- pending: NewPriorityQueue[*JobWithPriority[int]](func(a, b *JobWithPriority[int]) bool {
- return a.Priority > b.Priority // Higher priority first
- }),
- inProgress: make(map[string]*inProgressJob),
- completed: NewCircularBuffer[*types.Job](defaultCompletedJobsCapacity),
+ logger: logger,
+ pending: NewPriorityQueue(
+ func(a, b *JobWithMetadata) bool {
+ return a.Priority > b.Priority // Higher priority first
+ },
+ func(j *JobWithMetadata) string { return j.ID() },
+ ),
+ inProgress: make(map[string]*JobWithMetadata),
+ completed: NewCircularBuffer[*JobWithMetadata](defaultCompletedJobsCapacity),
statusMap: make(map[string]types.JobStatus),
}
}
+// NewJobQueue creates a new job queue instance
+func NewJobQueue() *JobQueue {
+ return NewJobQueueWithLogger(log.NewNopLogger())
+}
+
+// Exists checks if a job exists in any state and returns its status
func (q *JobQueue) Exists(job *types.Job) (types.JobStatus, bool) {
q.mu.RLock()
defer q.mu.RUnlock()
- status, exists := q.statusMap[job.ID]
- return status, exists
+ x, ok := q.existsLockLess(job.ID())
+ if !ok {
+ return types.JobStatusUnknown, false
+ }
+ return x.Status, ok
}
-// Enqueue adds a new job to the pending queue with a priority
+func (q *JobQueue) existsLockLess(id string) (*JobWithMetadata, bool) {
+ status, ok := q.statusMap[id]
+ if !ok {
+ return nil, false
+ }
+
+ switch status {
+ case types.JobStatusPending:
+ return q.pending.Lookup(id)
+ case types.JobStatusInProgress:
+ res, ok := q.inProgress[id]
+ return res, ok
+ case types.JobStatusComplete:
+ return q.completed.Lookup(func(jwm *JobWithMetadata) bool {
+ return jwm.ID() == id
+ })
+ default:
+ return nil, false
+ }
+}
+
+// Enqueue adds a job to the pending queue with the given priority
func (q *JobQueue) Enqueue(job *types.Job, priority int) error {
q.mu.Lock()
defer q.mu.Unlock()
// Check if job already exists
- if status, exists := q.statusMap[job.ID]; exists {
- return fmt.Errorf("job %s already exists with status %v", job.ID, status)
+ if status, exists := q.statusMap[job.ID()]; exists {
+ return fmt.Errorf("job %s already exists with status %v", job.ID(), status)
}
- jobWithPriority := NewJobWithPriority(job, priority)
- q.pending.Push(jobWithPriority)
- q.statusMap[job.ID] = types.JobStatusPending
+ jobMeta := NewJobWithMetadata(job, priority)
+ q.pending.Push(jobMeta)
+ q.statusMap[job.ID()] = types.JobStatusPending
return nil
}
-// Dequeue gets the next available job and assigns it to a builder
-func (q *JobQueue) Dequeue(_ string) (*types.Job, bool, error) {
+// Dequeue removes and returns the highest priority job from the pending queue
+func (q *JobQueue) Dequeue() (*types.Job, bool) {
q.mu.Lock()
defer q.mu.Unlock()
- if q.pending.Len() == 0 {
- return nil, false, nil
- }
-
- jobWithPriority, ok := q.pending.Pop()
+ jobMeta, ok := q.pending.Pop()
if !ok {
- return nil, false, nil
+ return nil, false
}
- // Add to in-progress with current time
- q.inProgress[jobWithPriority.Job.ID] = &inProgressJob{
- job: jobWithPriority.Job,
- startTime: time.Now(),
+ // Update metadata for in-progress state
+ jobMeta.Status = types.JobStatusInProgress
+ jobMeta.StartTime = time.Now()
+ jobMeta.UpdateTime = jobMeta.StartTime
+
+ q.inProgress[jobMeta.ID()] = jobMeta
+ q.statusMap[jobMeta.ID()] = types.JobStatusInProgress
+
+ return jobMeta.Job, true
+}
+
+// GetInProgressJob retrieves a job that is currently being processed
+func (q *JobQueue) GetInProgressJob(id string) (*types.Job, time.Time, bool) {
+ q.mu.RLock()
+ defer q.mu.RUnlock()
+
+ if jobMeta, ok := q.inProgress[id]; ok {
+ return jobMeta.Job, jobMeta.StartTime, true
}
- q.statusMap[jobWithPriority.Job.ID] = types.JobStatusInProgress
+ return nil, time.Time{}, false
+}
- return jobWithPriority.Job, true, nil
+// RemoveInProgress removes a job from the in-progress map
+func (q *JobQueue) RemoveInProgress(id string) {
+ q.mu.Lock()
+ defer q.mu.Unlock()
+
+ delete(q.inProgress, id)
}
-// MarkComplete moves a job from in-progress to completed
-func (q *JobQueue) MarkComplete(jobID string) {
+// MarkComplete moves a job from in-progress to completed with the given status
+func (q *JobQueue) MarkComplete(id string, status types.JobStatus) {
q.mu.Lock()
defer q.mu.Unlock()
- // Find job in in-progress map
- inProgressJob, exists := q.inProgress[jobID]
- // if it doesn't exist, it could be previously removed (duplicate job execution)
- // or the scheduler may have restarted and not have the job state anymore.
- if exists {
- // Remove from in-progress
- delete(q.inProgress, jobID)
+ jobMeta, ok := q.existsLockLess(id)
+ if !ok {
+ level.Error(q.logger).Log("msg", "failed to mark job as complete", "job", id, "status", status)
+ return
}
- // Add to completed buffer and handle evicted job
- if evictedJob, hasEvicted := q.completed.Push(inProgressJob.job); hasEvicted {
- // Remove evicted job from status map
- delete(q.statusMap, evictedJob.ID)
+ switch jobMeta.Status {
+ case types.JobStatusInProgress:
+ // update & remove from in progress
+ delete(q.inProgress, id)
+ case types.JobStatusPending:
+ _, ok := q.pending.Remove(id)
+ if !ok {
+ level.Error(q.logger).Log("msg", "failed to remove job from pending queue", "job", id)
+ }
+ default:
+ level.Error(q.logger).Log("msg", "unknown job status, cannot mark as complete", "job", id, "status", status)
}
- q.statusMap[jobID] = types.JobStatusComplete
+
+ jobMeta.Status = status
+ jobMeta.UpdateTime = time.Now()
+
+ // add it to the completed buffer, removing any evicted job from the statusMap
+ removal, evicted := q.completed.Push(jobMeta)
+ if evicted {
+ delete(q.statusMap, removal.ID())
+ }
+ q.statusMap[id] = status
}
-// SyncJob registers a job as in-progress, used for restoring state after scheduler restarts
-func (q *JobQueue) SyncJob(jobID string, _ string, job *types.Job) {
+// SyncJob registers a job as in-progress or updates its UpdateTime if already in progress
+func (q *JobQueue) SyncJob(jobID string, job *types.Job) {
q.mu.Lock()
defer q.mu.Unlock()
- // Add directly to in-progress
- q.inProgress[jobID] = &inProgressJob{
- job: job,
- startTime: time.Now(),
+ // Helper function to create a new job
+ registerInProgress := func() {
+ // Job does not exist; add it as in-progress
+ now := time.Now()
+ jobMeta := NewJobWithMetadata(job, DefaultPriority)
+ jobMeta.StartTime = now
+ jobMeta.UpdateTime = now
+ jobMeta.Status = types.JobStatusInProgress
+ q.inProgress[jobID] = jobMeta
+ q.statusMap[jobID] = types.JobStatusInProgress
+ }
+
+ jobMeta, ok := q.existsLockLess(jobID)
+
+ if !ok {
+ registerInProgress()
+ return
}
+
+ switch jobMeta.Status {
+ case types.JobStatusPending:
+ // Job already pending, move to in-progress
+ _, ok := q.pending.Remove(jobID)
+ if !ok {
+ level.Error(q.logger).Log("msg", "failed to remove job from pending queue", "job", jobID)
+ }
+ jobMeta.Status = types.JobStatusInProgress
+ case types.JobStatusInProgress:
+ case types.JobStatusComplete, types.JobStatusFailed, types.JobStatusExpired:
+ // Job already completed, re-enqueue a new one
+ registerInProgress()
+ return
+ default:
+ registerInProgress()
+ return
+ }
+
+ jobMeta.UpdateTime = time.Now()
+ q.inProgress[jobID] = jobMeta
q.statusMap[jobID] = types.JobStatusInProgress
}
diff --git a/pkg/blockbuilder/scheduler/queue_test.go b/pkg/blockbuilder/scheduler/queue_test.go
new file mode 100644
index 0000000000000..dfbe07681c62a
--- /dev/null
+++ b/pkg/blockbuilder/scheduler/queue_test.go
@@ -0,0 +1,163 @@
+package scheduler
+
+import (
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/grafana/loki/v3/pkg/blockbuilder/types"
+)
+
+func TestJobQueue_SyncJob(t *testing.T) {
+ t.Run("non-existent to in-progress", func(t *testing.T) {
+ q := NewJobQueue()
+ job := types.NewJob(1, types.Offsets{Min: 100, Max: 200})
+ jobID := job.ID()
+
+ beforeSync := time.Now()
+ q.SyncJob(jobID, job)
+ afterSync := time.Now()
+
+ // Verify job is in in-progress map
+ jobMeta, ok := q.inProgress[jobID]
+ require.True(t, ok, "job should be in in-progress map")
+ require.Equal(t, types.JobStatusInProgress, jobMeta.Status)
+ require.True(t, jobMeta.StartTime.After(beforeSync) || jobMeta.StartTime.Equal(beforeSync))
+ require.True(t, jobMeta.StartTime.Before(afterSync) || jobMeta.StartTime.Equal(afterSync))
+ })
+
+ t.Run("pending to in-progress", func(t *testing.T) {
+ q := NewJobQueue()
+ job := types.NewJob(1, types.Offsets{Min: 100, Max: 200})
+
+ // Start with pending job
+ err := q.Enqueue(job, DefaultPriority)
+ require.NoError(t, err)
+
+ beforeSync := time.Now()
+ q.SyncJob(job.ID(), job)
+ afterSync := time.Now()
+
+ // Verify job moved from pending to in-progress
+ _, ok := q.pending.Lookup(job.ID())
+ require.False(t, ok, "job should not be in pending queue")
+
+ jobMeta, ok := q.inProgress[job.ID()]
+ require.True(t, ok, "job should be in in-progress map")
+ require.Equal(t, types.JobStatusInProgress, jobMeta.Status)
+ require.True(t, jobMeta.UpdateTime.After(beforeSync) || jobMeta.UpdateTime.Equal(beforeSync))
+ require.True(t, jobMeta.UpdateTime.Before(afterSync) || jobMeta.UpdateTime.Equal(afterSync))
+ })
+
+ t.Run("already in-progress", func(t *testing.T) {
+ q := NewJobQueue()
+ job := types.NewJob(1, types.Offsets{Min: 100, Max: 200})
+
+ // First sync to put in in-progress
+ q.SyncJob(job.ID(), job)
+ firstUpdate := q.inProgress[job.ID()].UpdateTime
+
+ time.Sleep(time.Millisecond) // Ensure time difference
+ beforeSecondSync := time.Now()
+ q.SyncJob(job.ID(), job)
+ afterSecondSync := time.Now()
+
+ jobMeta := q.inProgress[job.ID()]
+ require.True(t, jobMeta.UpdateTime.After(firstUpdate), "UpdateTime should be updated")
+ require.True(t, jobMeta.UpdateTime.After(beforeSecondSync) || jobMeta.UpdateTime.Equal(beforeSecondSync))
+ require.True(t, jobMeta.UpdateTime.Before(afterSecondSync) || jobMeta.UpdateTime.Equal(afterSecondSync))
+ })
+}
+
+func TestJobQueue_MarkComplete(t *testing.T) {
+ t.Run("in-progress to complete", func(t *testing.T) {
+ q := NewJobQueue()
+ job := types.NewJob(1, types.Offsets{Min: 100, Max: 200})
+
+ // Start with in-progress job
+ q.SyncJob(job.ID(), job)
+
+ beforeComplete := time.Now()
+ q.MarkComplete(job.ID(), types.JobStatusComplete)
+ afterComplete := time.Now()
+
+ // Verify job moved to completed buffer
+ var foundJob *JobWithMetadata
+ q.completed.Lookup(func(j *JobWithMetadata) bool {
+ if j.ID() == job.ID() {
+ foundJob = j
+ return true
+ }
+ return false
+ })
+ require.NotNil(t, foundJob, "job should be in completed buffer")
+ require.Equal(t, types.JobStatusComplete, foundJob.Status)
+ require.True(t, foundJob.UpdateTime.After(beforeComplete) || foundJob.UpdateTime.Equal(beforeComplete))
+ require.True(t, foundJob.UpdateTime.Before(afterComplete) || foundJob.UpdateTime.Equal(afterComplete))
+
+ // Verify removed from in-progress
+ _, ok := q.inProgress[job.ID()]
+ require.False(t, ok, "job should not be in in-progress map")
+ })
+
+ t.Run("pending to complete", func(t *testing.T) {
+ q := NewJobQueue()
+ job := types.NewJob(1, types.Offsets{Min: 100, Max: 200})
+
+ // Start with pending job
+ err := q.Enqueue(job, DefaultPriority)
+ require.NoError(t, err)
+
+ q.MarkComplete(job.ID(), types.JobStatusComplete)
+
+ // Verify job not in pending
+ _, ok := q.pending.Lookup(job.ID())
+ require.False(t, ok, "job should not be in pending queue")
+
+ // Verify job in completed buffer
+ var foundJob *JobWithMetadata
+ q.completed.Lookup(func(j *JobWithMetadata) bool {
+ if j.ID() == job.ID() {
+ foundJob = j
+ return true
+ }
+ return false
+ })
+ require.NotNil(t, foundJob, "job should be in completed buffer")
+ require.Equal(t, types.JobStatusComplete, foundJob.Status)
+ })
+
+ t.Run("non-existent job", func(t *testing.T) {
+ q := NewJobQueue()
+ logger := &testLogger{t: t}
+ q.logger = logger
+
+ q.MarkComplete("non-existent", types.JobStatusComplete)
+ // Should log error but not panic
+ })
+
+ t.Run("already completed job", func(t *testing.T) {
+ q := NewJobQueue()
+ logger := &testLogger{t: t}
+ q.logger = logger
+
+ job := types.NewJob(1, types.Offsets{Min: 100, Max: 200})
+ q.SyncJob(job.ID(), job)
+ q.MarkComplete(job.ID(), types.JobStatusComplete)
+
+ // Try to complete again
+ q.MarkComplete(job.ID(), types.JobStatusComplete)
+ // Should log error but not panic
+ })
+}
+
+// testLogger implements log.Logger for testing
+type testLogger struct {
+ t *testing.T
+}
+
+func (l *testLogger) Log(keyvals ...interface{}) error {
+ l.t.Log(keyvals...)
+ return nil
+}
diff --git a/pkg/blockbuilder/scheduler/scheduler.go b/pkg/blockbuilder/scheduler/scheduler.go
index 4eb1eaedde9ec..5e55e3123420d 100644
--- a/pkg/blockbuilder/scheduler/scheduler.go
+++ b/pkg/blockbuilder/scheduler/scheduler.go
@@ -144,14 +144,53 @@ func (s *BlockScheduler) runOnce(ctx context.Context) error {
for _, job := range jobs {
// TODO: end offset keeps moving each time we plan jobs, maybe we should not use it as part of the job ID
- if status, ok := s.queue.Exists(job.Job); ok {
- level.Debug(s.logger).Log("msg", "job already exists", "job", job, "status", status)
+
+ logger := log.With(
+ s.logger,
+ "job", job.Job.ID(),
+ "priority", job.Priority,
+ )
+
+ status, ok := s.queue.Exists(job.Job)
+
+ // scheduler is unaware of incoming job; enqueue
+ if !ok {
+ level.Debug(logger).Log(
+ "msg", "job does not exist, enqueueing",
+ )
+
+ // enqueue
+ if err := s.queue.Enqueue(job.Job, job.Priority); err != nil {
+ level.Error(logger).Log("msg", "failed to enqueue job", "err", err)
+ }
+
continue
}
- if err := s.queue.Enqueue(job.Job, job.Priority); err != nil {
- level.Error(s.logger).Log("msg", "failed to enqueue job", "job", job, "err", err)
+ // scheduler is aware of incoming job; handling depends on status
+ switch status {
+ case types.JobStatusPending:
+ level.Debug(s.logger).Log(
+ "msg", "job is pending, updating priority",
+ "old_priority", job.Priority,
+ )
+ s.queue.pending.UpdatePriority(job.Job.ID(), job)
+ case types.JobStatusInProgress:
+ level.Debug(s.logger).Log(
+ "msg", "job is in progress, ignoring",
+ )
+ case types.JobStatusComplete:
+ // shouldn't happen
+ level.Debug(s.logger).Log(
+ "msg", "job is complete, ignoring",
+ )
+ default:
+ level.Error(s.logger).Log(
+ "msg", "job has unknown status, ignoring",
+ "status", status,
+ )
}
+
}
return nil
@@ -165,22 +204,45 @@ func (s *BlockScheduler) publishLagMetrics(lag map[int32]kadm.GroupMemberLag) {
}
}
-func (s *BlockScheduler) HandleGetJob(ctx context.Context, builderID string) (*types.Job, bool, error) {
+func (s *BlockScheduler) HandleGetJob(ctx context.Context) (*types.Job, bool, error) {
select {
case <-ctx.Done():
return nil, false, ctx.Err()
default:
- return s.queue.Dequeue(builderID)
+ job, ok := s.queue.Dequeue()
+ return job, ok, nil
}
}
-func (s *BlockScheduler) HandleCompleteJob(_ context.Context, _ string, job *types.Job, _ bool) error {
- // TODO: handle commits
- s.queue.MarkComplete(job.ID)
+func (s *BlockScheduler) HandleCompleteJob(ctx context.Context, job *types.Job, success bool) (err error) {
+ logger := log.With(s.logger, "job", job.ID())
+
+ if success {
+ if err = s.offsetManager.Commit(
+ ctx,
+ job.Partition(),
+ job.Offsets().Max-1, // max is exclusive, so commit max-1
+ ); err == nil {
+ s.queue.MarkComplete(job.ID(), types.JobStatusComplete)
+ level.Info(logger).Log("msg", "job completed successfully")
+ return nil
+ }
+
+ level.Error(logger).Log("msg", "failed to commit offset", "err", err)
+ }
+
+ level.Error(logger).Log("msg", "job failed, re-enqueuing")
+ s.queue.MarkComplete(job.ID(), types.JobStatusFailed)
+ s.queue.pending.Push(
+ NewJobWithMetadata(
+ job,
+ DefaultPriority,
+ ),
+ )
return nil
}
-func (s *BlockScheduler) HandleSyncJob(_ context.Context, builderID string, job *types.Job) error {
- s.queue.SyncJob(job.ID, builderID, job)
+func (s *BlockScheduler) HandleSyncJob(_ context.Context, job *types.Job) error {
+ s.queue.SyncJob(job.ID(), job)
return nil
}
diff --git a/pkg/blockbuilder/scheduler/scheduler_test.go b/pkg/blockbuilder/scheduler/scheduler_test.go
index bc72d985f39b6..f13c6d49485c1 100644
--- a/pkg/blockbuilder/scheduler/scheduler_test.go
+++ b/pkg/blockbuilder/scheduler/scheduler_test.go
@@ -7,8 +7,10 @@ import (
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
+ "github.com/twmb/franz-go/pkg/kadm"
"github.com/grafana/loki/v3/pkg/blockbuilder/types"
+ "github.com/grafana/loki/v3/pkg/kafka/partition"
)
type testEnv struct {
@@ -18,9 +20,33 @@ type testEnv struct {
builder *Worker
}
+type mockOffsetManager struct {
+ topic string
+ consumerGroup string
+}
+
+func (m *mockOffsetManager) Topic() string { return m.topic }
+func (m *mockOffsetManager) ConsumerGroup() string { return m.consumerGroup }
+func (m *mockOffsetManager) GroupLag(_ context.Context, _ time.Duration) (map[int32]kadm.GroupMemberLag, error) {
+ return nil, nil
+}
+func (m *mockOffsetManager) FetchLastCommittedOffset(_ context.Context, _ int32) (int64, error) {
+ return 0, nil
+}
+func (m *mockOffsetManager) FetchPartitionOffset(_ context.Context, _ int32, _ partition.SpecialOffset) (int64, error) {
+ return 0, nil
+}
+func (m *mockOffsetManager) Commit(_ context.Context, _ int32, _ int64) error {
+ return nil
+}
+
func newTestEnv(builderID string) *testEnv {
queue := NewJobQueue()
- scheduler := NewScheduler(Config{}, queue, nil, log.NewNopLogger(), prometheus.NewRegistry())
+ mockOffsetMgr := &mockOffsetManager{
+ topic: "test-topic",
+ consumerGroup: "test-group",
+ }
+ scheduler := NewScheduler(Config{}, queue, mockOffsetMgr, log.NewNopLogger(), prometheus.NewRegistry())
transport := types.NewMemoryTransport(scheduler)
builder := NewWorker(builderID, transport)
@@ -51,8 +77,8 @@ func TestScheduleAndProcessJob(t *testing.T) {
if !ok {
t.Fatal("expected to receive job")
}
- if receivedJob.ID != job.ID {
- t.Errorf("got job ID %s, want %s", receivedJob.ID, job.ID)
+ if receivedJob.ID() != job.ID() {
+ t.Errorf("got job ID %s, want %s", receivedJob.ID(), job.ID())
}
// Builder completes job
@@ -124,7 +150,7 @@ func TestMultipleBuilders(t *testing.T) {
}
// Verify different jobs were assigned
- if receivedJob1.ID == receivedJob2.ID {
+ if receivedJob1.ID() == receivedJob2.ID() {
t.Error("builders received same job")
}
diff --git a/pkg/blockbuilder/scheduler/strategy.go b/pkg/blockbuilder/scheduler/strategy.go
index 75719140a4ea0..d4d14bff0e44a 100644
--- a/pkg/blockbuilder/scheduler/strategy.go
+++ b/pkg/blockbuilder/scheduler/strategy.go
@@ -18,7 +18,7 @@ type OffsetReader interface {
type Planner interface {
Name() string
- Plan(ctx context.Context) ([]*JobWithPriority[int], error)
+ Plan(ctx context.Context) ([]*JobWithMetadata, error)
}
const (
@@ -46,14 +46,14 @@ func (p *RecordCountPlanner) Name() string {
return RecordCountStrategy
}
-func (p *RecordCountPlanner) Plan(ctx context.Context) ([]*JobWithPriority[int], error) {
+func (p *RecordCountPlanner) Plan(ctx context.Context) ([]*JobWithMetadata, error) {
offsets, err := p.offsetReader.GroupLag(ctx)
if err != nil {
level.Error(p.logger).Log("msg", "failed to get group lag", "err", err)
return nil, err
}
- jobs := make([]*JobWithPriority[int], 0, len(offsets))
+ jobs := make([]*JobWithMetadata, 0, len(offsets))
for _, partitionOffset := range offsets {
// kadm.GroupMemberLag contains valid Commit.At even when consumer group never committed any offset.
// no additional validation is needed here
@@ -69,11 +69,12 @@ func (p *RecordCountPlanner) Plan(ctx context.Context) ([]*JobWithPriority[int],
for currentStart := startOffset; currentStart < endOffset; {
currentEnd := min(currentStart+p.targetRecordCount, endOffset)
- job := NewJobWithPriority(
+ job := NewJobWithMetadata(
types.NewJob(partitionOffset.Partition, types.Offsets{
Min: currentStart,
Max: currentEnd,
- }), int(endOffset-currentStart), // priority is remaining records to process
+ }),
+ int(endOffset-currentStart), // priority is remaining records to process
)
jobs = append(jobs, job)
@@ -83,8 +84,8 @@ func (p *RecordCountPlanner) Plan(ctx context.Context) ([]*JobWithPriority[int],
// Sort jobs by partition then priority
sort.Slice(jobs, func(i, j int) bool {
- if jobs[i].Job.Partition != jobs[j].Job.Partition {
- return jobs[i].Job.Partition < jobs[j].Job.Partition
+ if jobs[i].Job.Partition() != jobs[j].Job.Partition() {
+ return jobs[i].Job.Partition() < jobs[j].Job.Partition()
}
return jobs[i].Priority > jobs[j].Priority
})
diff --git a/pkg/blockbuilder/scheduler/strategy_test.go b/pkg/blockbuilder/scheduler/strategy_test.go
index 9c7b732fb4e08..6771aaf9868c5 100644
--- a/pkg/blockbuilder/scheduler/strategy_test.go
+++ b/pkg/blockbuilder/scheduler/strategy_test.go
@@ -19,11 +19,19 @@ func (m *mockOffsetReader) GroupLag(_ context.Context) (map[int32]kadm.GroupMemb
return m.groupLag, nil
}
+// compareJobs compares two JobWithMetadata instances ignoring UpdateTime
+func compareJobs(t *testing.T, expected, actual *JobWithMetadata) {
+ require.Equal(t, expected.Job, actual.Job)
+ require.Equal(t, expected.Priority, actual.Priority)
+ require.Equal(t, expected.Status, actual.Status)
+ require.Equal(t, expected.StartTime, actual.StartTime)
+}
+
func TestRecordCountPlanner_Plan(t *testing.T) {
for _, tc := range []struct {
name string
recordCount int64
- expectedJobs []*JobWithPriority[int]
+ expectedJobs []*JobWithMetadata
groupLag map[int32]kadm.GroupMemberLag
}{
{
@@ -40,8 +48,8 @@ func TestRecordCountPlanner_Plan(t *testing.T) {
Partition: 0,
},
},
- expectedJobs: []*JobWithPriority[int]{
- NewJobWithPriority(
+ expectedJobs: []*JobWithMetadata{
+ NewJobWithMetadata(
types.NewJob(0, types.Offsets{Min: 101, Max: 150}),
49, // 150-101
),
@@ -61,12 +69,12 @@ func TestRecordCountPlanner_Plan(t *testing.T) {
Partition: 0,
},
},
- expectedJobs: []*JobWithPriority[int]{
- NewJobWithPriority(
+ expectedJobs: []*JobWithMetadata{
+ NewJobWithMetadata(
types.NewJob(0, types.Offsets{Min: 101, Max: 151}),
99, // priority is total remaining: 200-101
),
- NewJobWithPriority(
+ NewJobWithMetadata(
types.NewJob(0, types.Offsets{Min: 151, Max: 200}),
49, // priority is total remaining: 200-151
),
@@ -95,19 +103,19 @@ func TestRecordCountPlanner_Plan(t *testing.T) {
Partition: 1,
},
},
- expectedJobs: []*JobWithPriority[int]{
- NewJobWithPriority(
+ expectedJobs: []*JobWithMetadata{
+ NewJobWithMetadata(
+ types.NewJob(0, types.Offsets{Min: 101, Max: 150}),
+ 49, // priority is total remaining: 150-101
+ ),
+ NewJobWithMetadata(
types.NewJob(1, types.Offsets{Min: 201, Max: 301}),
199, // priority is total remaining: 400-201
),
- NewJobWithPriority(
+ NewJobWithMetadata(
types.NewJob(1, types.Offsets{Min: 301, Max: 400}),
99, // priority is total remaining: 400-301
),
- NewJobWithPriority(
- types.NewJob(0, types.Offsets{Min: 101, Max: 150}),
- 49, // priority is total remaining: 150-101
- ),
},
},
{
@@ -145,7 +153,9 @@ func TestRecordCountPlanner_Plan(t *testing.T) {
require.NoError(t, err)
require.Equal(t, len(tc.expectedJobs), len(jobs))
- require.ElementsMatch(t, tc.expectedJobs, jobs)
+ for i := range tc.expectedJobs {
+ compareJobs(t, tc.expectedJobs[i], jobs[i])
+ }
})
}
}
diff --git a/pkg/blockbuilder/types/grpc_transport.go b/pkg/blockbuilder/types/grpc_transport.go
index 3b90ba9f20f06..4d52bdfc7745e 100644
--- a/pkg/blockbuilder/types/grpc_transport.go
+++ b/pkg/blockbuilder/types/grpc_transport.go
@@ -110,9 +110,9 @@ func protoToJob(p *proto.Job) *Job {
return nil
}
return &Job{
- ID: p.GetId(),
- Partition: p.GetPartition(),
- Offsets: Offsets{
+ id: p.GetId(),
+ partition: p.GetPartition(),
+ offsets: Offsets{
Min: p.GetOffsets().GetMin(),
Max: p.GetOffsets().GetMax(),
},
@@ -125,11 +125,11 @@ func jobToProto(j *Job) *proto.Job {
return nil
}
return &proto.Job{
- Id: j.ID,
- Partition: j.Partition,
+ Id: j.ID(),
+ Partition: j.Partition(),
Offsets: &proto.Offsets{
- Min: j.Offsets.Min,
- Max: j.Offsets.Max,
+ Min: j.offsets.Min,
+ Max: j.offsets.Max,
},
}
}
diff --git a/pkg/blockbuilder/types/interfaces.go b/pkg/blockbuilder/types/interfaces.go
index 2144e83878cb5..5ed51b39caf2f 100644
--- a/pkg/blockbuilder/types/interfaces.go
+++ b/pkg/blockbuilder/types/interfaces.go
@@ -15,11 +15,11 @@ type BuilderTransport interface {
// SchedulerHandler defines the business logic for handling builder requests
type SchedulerHandler interface {
// HandleGetJob processes a request for a new job
- HandleGetJob(ctx context.Context, builderID string) (*Job, bool, error)
+ HandleGetJob(ctx context.Context) (*Job, bool, error)
// HandleCompleteJob processes a job completion notification
- HandleCompleteJob(ctx context.Context, builderID string, job *Job, success bool) error
+ HandleCompleteJob(ctx context.Context, job *Job, success bool) error
// HandleSyncJob processes a job sync request
- HandleSyncJob(ctx context.Context, builderID string, job *Job) error
+ HandleSyncJob(ctx context.Context, job *Job) error
}
// Request/Response message types
diff --git a/pkg/blockbuilder/types/job.go b/pkg/blockbuilder/types/job.go
index 9cf94daebd484..ca23aa003b96e 100644
--- a/pkg/blockbuilder/types/job.go
+++ b/pkg/blockbuilder/types/job.go
@@ -4,21 +4,53 @@ import "fmt"
// Job represents a block building task.
type Job struct {
- ID string
+ id string
// Partition and offset information
- Partition int32
- Offsets Offsets
+ partition int32
+ offsets Offsets
+}
+
+func (j *Job) ID() string {
+ return j.id
+}
+
+func (j *Job) Partition() int32 {
+ return j.partition
+}
+
+func (j *Job) Offsets() Offsets {
+ return j.offsets
}
// JobStatus represents the current state of a job
type JobStatus int
const (
- JobStatusPending JobStatus = iota
+ JobStatusUnknown JobStatus = iota // zero value, largely unused
+ JobStatusPending
JobStatusInProgress
JobStatusComplete
+ JobStatusFailed // Job failed and may be retried
+ JobStatusExpired // Job failed too many times or is too old
)
+func (s JobStatus) String() string {
+ switch s {
+ case JobStatusPending:
+ return "pending"
+ case JobStatusInProgress:
+ return "in_progress"
+ case JobStatusComplete:
+ return "complete"
+ case JobStatusFailed:
+ return "failed"
+ case JobStatusExpired:
+ return "expired"
+ default:
+ return "unknown"
+ }
+}
+
// Offsets represents the range of offsets to process
type Offsets struct {
Min int64
@@ -28,9 +60,9 @@ type Offsets struct {
// NewJob creates a new job with the given partition and offsets
func NewJob(partition int32, offsets Offsets) *Job {
return &Job{
- ID: GenerateJobID(partition, offsets),
- Partition: partition,
- Offsets: offsets,
+ id: GenerateJobID(partition, offsets),
+ partition: partition,
+ offsets: offsets,
}
}
diff --git a/pkg/blockbuilder/types/scheduler_server.go b/pkg/blockbuilder/types/scheduler_server.go
index c2756903859f2..a5deaa276d622 100644
--- a/pkg/blockbuilder/types/scheduler_server.go
+++ b/pkg/blockbuilder/types/scheduler_server.go
@@ -20,8 +20,8 @@ func NewSchedulerServer(handler SchedulerHandler) proto.SchedulerServiceServer {
}
// GetJob implements proto.SchedulerServiceServer
-func (s *schedulerServer) GetJob(ctx context.Context, req *proto.GetJobRequest) (*proto.GetJobResponse, error) {
- job, ok, err := s.handler.HandleGetJob(ctx, req.BuilderId)
+func (s *schedulerServer) GetJob(ctx context.Context, _ *proto.GetJobRequest) (*proto.GetJobResponse, error) {
+ job, ok, err := s.handler.HandleGetJob(ctx)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
@@ -39,7 +39,7 @@ func (s *schedulerServer) GetJob(ctx context.Context, req *proto.GetJobRequest)
// CompleteJob implements proto.SchedulerServiceServer
func (s *schedulerServer) CompleteJob(ctx context.Context, req *proto.CompleteJobRequest) (*proto.CompleteJobResponse, error) {
- if err := s.handler.HandleCompleteJob(ctx, req.BuilderId, protoToJob(req.Job), req.Success); err != nil {
+ if err := s.handler.HandleCompleteJob(ctx, protoToJob(req.Job), req.Success); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
return &proto.CompleteJobResponse{}, nil
@@ -47,7 +47,7 @@ func (s *schedulerServer) CompleteJob(ctx context.Context, req *proto.CompleteJo
// SyncJob implements proto.SchedulerServiceServer
func (s *schedulerServer) SyncJob(ctx context.Context, req *proto.SyncJobRequest) (*proto.SyncJobResponse, error) {
- if err := s.handler.HandleSyncJob(ctx, req.BuilderId, protoToJob(req.Job)); err != nil {
+ if err := s.handler.HandleSyncJob(ctx, protoToJob(req.Job)); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
return &proto.SyncJobResponse{}, nil
diff --git a/pkg/blockbuilder/types/transport.go b/pkg/blockbuilder/types/transport.go
index 6f7dc41e394fe..ac8917b854c36 100644
--- a/pkg/blockbuilder/types/transport.go
+++ b/pkg/blockbuilder/types/transport.go
@@ -36,8 +36,8 @@ func NewMemoryTransport(scheduler SchedulerHandler) *MemoryTransport {
}
}
-func (t *MemoryTransport) SendGetJobRequest(ctx context.Context, req *GetJobRequest) (*GetJobResponse, error) {
- job, ok, err := t.scheduler.HandleGetJob(ctx, req.BuilderID)
+func (t *MemoryTransport) SendGetJobRequest(ctx context.Context, _ *GetJobRequest) (*GetJobResponse, error) {
+ job, ok, err := t.scheduler.HandleGetJob(ctx)
if err != nil {
return nil, err
}
@@ -48,9 +48,9 @@ func (t *MemoryTransport) SendGetJobRequest(ctx context.Context, req *GetJobRequ
}
func (t *MemoryTransport) SendCompleteJob(ctx context.Context, req *CompleteJobRequest) error {
- return t.scheduler.HandleCompleteJob(ctx, req.BuilderID, req.Job, req.Success)
+ return t.scheduler.HandleCompleteJob(ctx, req.Job, req.Success)
}
func (t *MemoryTransport) SendSyncJob(ctx context.Context, req *SyncJobRequest) error {
- return t.scheduler.HandleSyncJob(ctx, req.BuilderID, req.Job)
+ return t.scheduler.HandleSyncJob(ctx, req.Job)
}
diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go
index 395a891e62103..e30136c2b8637 100644
--- a/pkg/loki/modules.go
+++ b/pkg/loki/modules.go
@@ -1866,7 +1866,7 @@ func (t *Loki) initBlockScheduler() (services.Service, error) {
s := blockscheduler.NewScheduler(
t.Cfg.BlockScheduler,
- blockscheduler.NewJobQueue(),
+ blockscheduler.NewJobQueueWithLogger(logger),
offsetManager,
logger,
prometheus.DefaultRegisterer,
|
feat
|
job tracking & offset commits (#15338)
|
c790ad8f08a9870fe5e0e8942cf1b8ca2778cb1b
|
2025-03-13 01:16:55
|
Alexandre Chouinard
|
docs(LIDs): Add Loki mixin configuration improvements draft (#16692)
| false
|
diff --git a/docs/sources/community/lids/__DRAFT__loki-mixin-configuration-improvements.md b/docs/sources/community/lids/__DRAFT__loki-mixin-configuration-improvements.md
new file mode 100644
index 0000000000000..3ae167df904c5
--- /dev/null
+++ b/docs/sources/community/lids/__DRAFT__loki-mixin-configuration-improvements.md
@@ -0,0 +1,94 @@
+---
+title: "0005: Loki mixin configuration improvements"
+description: "Improve Loki mixin configurations"
+draft: false
+---
+
+# 0005: Loki mixin configuration improvements
+
+**Author:** Alexandre Chouinard ([email protected])
+
+**Date:** 03/2025
+
+**Sponsor(s):** N/A
+
+**Type:** Feature
+
+**Status:** Draft
+
+**Related issues/PRs:**
+- https://github.com/grafana/loki/issues/13631
+- https://github.com/grafana/loki/issues/15881
+- https://github.com/grafana/loki/issues/11820
+- https://github.com/grafana/loki/issues/11806
+- https://github.com/grafana/loki/issues/7730
+- and more ...
+
+**Thread from [mailing list](https://groups.google.com/forum/#!forum/lokiproject):** N/A
+
+---
+
+## Background
+
+There is no easy way to set up dashboards and alerts for Loki on a pre-existing Prometheus stack that does not use the Prometheus Operator with a specific configuration.
+
+The metrics selectors are hardcoded, making the dashboard unusable without manual modifications in many cases.
+It is assumed that `job`, `cluster`, `namespace`, `container` and/or a combination of other labels are present on metrics and have very specific values.
+
+## Problem Statement
+
+This renders the dashboards and alerts unusable for setups that do not conform to the current assumptions about which label(s) should be present in the metrics.
+
+A good example of that would be the "job" label used everywhere:
+[`job=~\"$namespace/bloom-planner\"`](https://github.com/grafana/loki/blob/475d25f459575312adb25ff90abf8f10d521ad4b/production/loki-mixin/dashboards/dashboard-bloom-build.json#L267C101-L267C134)
+
+Usually the job label refers to the name of the scrape job used to collect the targets, as per [Prometheus documentation](https://prometheus.io/docs/concepts/jobs_instances/), and
+in k8s, if you are not using `prometheus-operator` with `ServiceMonitor`, it's pretty common to have something like this as a scraping config:
+```
+ - job_name: "kubernetes-pods" # Can actually be anything you want.
+ kubernetes_sd_configs:
+ - role: pod
+ relabel_configs:
+ # Cluster label is "required" by kubernetes-mixin dashboards
+ - target_label: cluster
+ replacement: '${cluster_label}'
+ ...
+```
+which would scrape all pods and yield something like:
+```
+up{job="kubernetes-pods", ...}
+```
+Right off the bat, that makes the dashboards unusable because it's incompatible with what is **hardcoded** in the dashboards and alerts.
+
+## Goals
+
+Ideally, selectors should default to the values required internally by Grafana but remain configurable so users can tailor them to their setup.
+
+A good example of this is how [kubernetes-monitoring/kubernetes-mixin](https://github.com/kubernetes-monitoring/kubernetes-mixin) did it:
+https://github.com/kubernetes-monitoring/kubernetes-mixin/blob/1fa3b6731c93eac6d5b8c3c3b087afab2baabb42/config.libsonnet#L20-L33
+Every possible selector is configurable, which allows various setups to work properly.
+
+The structure is already there to support this. It just has not been leveraged properly.
+
+## Non-Goals (optional)
+
+It would be desirable to create automated checks verifying that all metrics used in dashboards and alerts use the proper selector(s) from the configuration.
+There are many issues in the repository about new dashboards or dashboard updates not using the proper labels on metrics.
+
+## Proposals
+
+### Proposal 0: Do nothing
+
+This forces the community to either manually edit the dashboards/alerts or conform to a specific metric collection approach for Loki.
+
+### Proposal 1: Allow metrics label selectors to be configurable
+
+This will require a good amount of refactoring.
+
+It allows easier adoption of the "official" dashboards and alerts by the community.
+
+Define once, reuse everywhere. (Currently, updating requires extensive search and replace.)
+
+## Other Notes
+
+If this proposal is accepted, I am willing to do the necessary work to move it forward.
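
As a rough illustration of Proposal 1, a `config.libsonnet`-style sketch in the spirit of the kubernetes-mixin approach could look like the following; the field names (`per_cluster_label`, `namespace_selector`, `job_selector`) are hypothetical placeholders, not the loki-mixin's current API:

```jsonnet
// Minimal sketch: every selector the dashboards/alerts need lives in _config,
// so users override it once instead of editing every panel and rule.
{
  _config+:: {
    // Label used to distinguish clusters; overridable for non-standard setups.
    per_cluster_label: 'cluster',

    // Base selector applied to every query.
    namespace_selector: 'namespace=~"$namespace"',

    // Selector for a single Loki component; defaults to the current
    // job=~"$namespace/<component>" convention, but a user could swap it for
    // e.g. container="<component>" or pod=~"<component>.*".
    job_selector(component):: 'job=~"$namespace/%s"' % component,
  },

  // Dashboards and alerts then build their matchers from the config:
  bloomPlannerSelector:: '%s, %s' % [
    $._config.namespace_selector,
    $._config.job_selector('bloom-planner'),
  ],
}
```

With something like this in place, a user whose metrics carry `job="kubernetes-pods"` would only override `job_selector` in `_config`, rather than hand-editing every hardcoded `job=~"$namespace/..."` matcher in the generated dashboards and alerts.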
|
docs
|
Add Loki mixin configuration improvements draft (#16692)
|
908692794da082a96a40c03c94d2aadeac6971b3
|
2022-03-24 16:05:56
|
Robert Jacob
|
operator: Update operator-sdk to 1.18.1 (#5704)
| false
|
diff --git a/operator/.bingo/Variables.mk b/operator/.bingo/Variables.mk
index 0f7d0c9592ec2..4c2d19975f26e 100644
--- a/operator/.bingo/Variables.mk
+++ b/operator/.bingo/Variables.mk
@@ -23,11 +23,11 @@ $(BINGO): $(BINGO_DIR)/bingo.mod
@echo "(re)installing $(GOBIN)/bingo-v0.5.2"
@cd $(BINGO_DIR) && $(GO) build -mod=mod -modfile=bingo.mod -o=$(GOBIN)/bingo-v0.5.2 "github.com/bwplotka/bingo"
-CONTROLLER_GEN := $(GOBIN)/controller-gen-v0.5.0
+CONTROLLER_GEN := $(GOBIN)/controller-gen-v0.8.0
$(CONTROLLER_GEN): $(BINGO_DIR)/controller-gen.mod
@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
- @echo "(re)installing $(GOBIN)/controller-gen-v0.5.0"
- @cd $(BINGO_DIR) && $(GO) build -mod=mod -modfile=controller-gen.mod -o=$(GOBIN)/controller-gen-v0.5.0 "sigs.k8s.io/controller-tools/cmd/controller-gen"
+ @echo "(re)installing $(GOBIN)/controller-gen-v0.8.0"
+ @cd $(BINGO_DIR) && $(GO) build -mod=mod -modfile=controller-gen.mod -o=$(GOBIN)/controller-gen-v0.8.0 "sigs.k8s.io/controller-tools/cmd/controller-gen"
GOFUMPT := $(GOBIN)/gofumpt-v0.1.1
$(GOFUMPT): $(BINGO_DIR)/gofumpt.mod
@@ -47,11 +47,11 @@ $(KUSTOMIZE): $(BINGO_DIR)/kustomize.mod
@echo "(re)installing $(GOBIN)/kustomize-v3.8.7"
@cd $(BINGO_DIR) && $(GO) build -mod=mod -modfile=kustomize.mod -o=$(GOBIN)/kustomize-v3.8.7 "sigs.k8s.io/kustomize/kustomize/v3"
-OPERATOR_SDK := $(GOBIN)/operator-sdk-v1.11.0
+OPERATOR_SDK := $(GOBIN)/operator-sdk-v1.18.1
$(OPERATOR_SDK): $(BINGO_DIR)/operator-sdk.mod
@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
- @echo "(re)installing $(GOBIN)/operator-sdk-v1.11.0"
- @cd $(BINGO_DIR) && $(GO) build -mod=mod -modfile=operator-sdk.mod -o=$(GOBIN)/operator-sdk-v1.11.0 "github.com/operator-framework/operator-sdk/cmd/operator-sdk"
+ @echo "(re)installing $(GOBIN)/operator-sdk-v1.18.1"
+ @cd $(BINGO_DIR) && $(GO) build -mod=mod -modfile=operator-sdk.mod -o=$(GOBIN)/operator-sdk-v1.18.1 "github.com/operator-framework/operator-sdk/cmd/operator-sdk"
PROMTOOL := $(GOBIN)/promtool-v1.8.2-0.20220211202545-56e14463bccf
$(PROMTOOL): $(BINGO_DIR)/promtool.mod
diff --git a/operator/.bingo/controller-gen.mod b/operator/.bingo/controller-gen.mod
index c62983ce8b68e..e4f2c319b68e5 100644
--- a/operator/.bingo/controller-gen.mod
+++ b/operator/.bingo/controller-gen.mod
@@ -2,4 +2,4 @@ module _ // Auto generated by https://github.com/bwplotka/bingo. DO NOT EDIT
go 1.17
-require sigs.k8s.io/controller-tools v0.5.0 // cmd/controller-gen
+require sigs.k8s.io/controller-tools v0.8.0 // cmd/controller-gen
diff --git a/operator/.bingo/controller-gen.sum b/operator/.bingo/controller-gen.sum
index 922956daa6c92..f8403b47f6f59 100644
--- a/operator/.bingo/controller-gen.sum
+++ b/operator/.bingo/controller-gen.sum
@@ -88,6 +88,7 @@ github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
+github.com/fatih/color v1.12.0 h1:mRhaKNwANqRgUBGKmnI5ZxEk7QXmjQeCcuYFMX2bfcc=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
@@ -103,6 +104,7 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
+github.com/go-logr/logr v1.2.0 h1:QK40JKJyMdUDz+h+xvCsru/bJhvG0UxvePV0ufL/AcE=
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
@@ -113,10 +115,12 @@ github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gobuffalo/flect v0.2.2 h1:PAVD7sp0KOdfswjAw9BpLCU9hXo7wFSzgpQ+zNeks/A=
github.com/gobuffalo/flect v0.2.2/go.mod h1:vmkQwuZYhN5Pc4ljYQZzP+1sq+NEkK+lh20jmEmX3jc=
+github.com/gobuffalo/flect v0.2.3 h1:f/ZukRnSNA/DUpSNDadko7Qc0PhGvsew35p/2tu+CRY=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -149,6 +153,7 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -203,6 +208,7 @@ github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCV
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
@@ -251,6 +257,7 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
@@ -314,6 +321,7 @@ github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkU
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4=
github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI=
+github.com/spf13/cobra v1.2.1 h1:+KmjbUw1hriSNMF55oPrkZcb27aECyrj8V2ytv7kWDw=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
@@ -386,6 +394,7 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -415,6 +424,7 @@ golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20210825183410-e898025ed96a h1:bRuuGXV8wwSdGTB+CtJf+FjgO1APK1CoO39T4BN/XBw=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -466,6 +476,7 @@ golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201112073958-5cba982894dd h1:5CtCZbICpIOFdgO940moixOPjc0178IU44m4EjOO5IY=
golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e h1:XMgFehsDnnLGtjvjOfqWSUzt0alpTR1RSEuznObga2c=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -473,6 +484,7 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -517,6 +529,7 @@ golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjs
golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200616133436-c1934b75d054 h1:HHeAlu5H9b71C+Fx0K+1dGgVFN1DM1/wz4aoGOA5qS8=
golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff h1:VX/uD7MK0AHXGiScH3fsieUQUcpmRERPDYtqZdJnA+Q=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -596,9 +609,11 @@ gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
@@ -609,10 +624,13 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
k8s.io/api v0.20.2 h1:y/HR22XDZY3pniu9hIFDLpUCPq2w5eQ6aV/VFQ7uJMw=
k8s.io/api v0.20.2/go.mod h1:d7n6Ehyzx+S+cE3VhTGfVNNqtGc/oL9DCdYYahlurV8=
+k8s.io/api v0.23.0 h1:WrL1gb73VSC8obi8cuYETJGXEoFNEh3LU0Pt+Sokgro=
k8s.io/apiextensions-apiserver v0.20.2 h1:rfrMWQ87lhd8EzQWRnbQ4gXrniL/yTRBgYH1x1+BLlo=
k8s.io/apiextensions-apiserver v0.20.2/go.mod h1:F6TXp389Xntt+LUq3vw6HFOLttPa0V8821ogLGwb6Zs=
+k8s.io/apiextensions-apiserver v0.23.0 h1:uii8BYmHYiT2ZTAJxmvc3X8UhNYMxl2A0z0Xq3Pm+WY=
k8s.io/apimachinery v0.20.2 h1:hFx6Sbt1oG0n6DZ+g4bFt5f6BoMkOjKWsQFu077M3Vg=
k8s.io/apimachinery v0.20.2/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
+k8s.io/apimachinery v0.23.0 h1:mIfWRMjBuMdolAWJ3Fd+aPTMv3X9z+waiARMpvvb0HQ=
k8s.io/apiserver v0.20.2/go.mod h1:2nKd93WyMhZx4Hp3RfgH2K5PhwyTrprrkWYnI7id7jA=
k8s.io/client-go v0.20.2/go.mod h1:kH5brqWqp7HDxUFKoEgiI4v8G1xzbe9giaCenUWJzgE=
k8s.io/code-generator v0.20.2/go.mod h1:UsqdF+VX4PU2g46NC2JRs4gc+IfrctnwHb76RNbWHJg=
@@ -623,17 +641,24 @@ k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.4.0 h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ=
k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
+k8s.io/klog/v2 v2.30.0 h1:bUO6drIvCIsvZ/XFgfxoGFQU/a4Qkh0iAlvUR7vlHJw=
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
k8s.io/utils v0.0.0-20201110183641-67b214c5f920 h1:CbnUZsM497iRC5QMVkHwyl8s2tB3g7yaSHkYPkpgelw=
k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b h1:wxEMGetGMur3J1xuGLQY7GEQYg9bZxKn3tKo5k/eYcs=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
sigs.k8s.io/controller-tools v0.5.0 h1:3u2RCwOlp0cjCALAigpOcbAf50pE+kHSdueUosrC/AE=
sigs.k8s.io/controller-tools v0.5.0/go.mod h1:JTsstrMpxs+9BUj6eGuAaEb6SDSPTeVtUyp0jmnAM/I=
+sigs.k8s.io/controller-tools v0.8.0 h1:uUkfTGEwrguqYYfcI2RRGUnC8mYdCFDqfwPKUcNJh1o=
+sigs.k8s.io/controller-tools v0.8.0/go.mod h1:qE2DXhVOiEq5ijmINcFbqi9GZrrUjzB1TuJU0xa6eoY=
+sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 h1:fD1pz4yfdADVNfFmcP2aBEtudwUQ1AlLnRBALr33v3s=
sigs.k8s.io/structured-merge-diff/v4 v4.0.2 h1:YHQV7Dajm86OuqnIR6zAelnDWBRjo+YhYV9PmGrh1s8=
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
+sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
+sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
diff --git a/operator/.bingo/operator-sdk.mod b/operator/.bingo/operator-sdk.mod
index 972f6a0086924..513ac5c640a93 100644
--- a/operator/.bingo/operator-sdk.mod
+++ b/operator/.bingo/operator-sdk.mod
@@ -10,4 +10,4 @@ replace github.com/mattn/go-sqlite3 => github.com/mattn/go-sqlite3 v1.10.0
replace golang.org/x/text => golang.org/x/text v0.3.3
-require github.com/operator-framework/operator-sdk v1.11.0 // cmd/operator-sdk
+require github.com/operator-framework/operator-sdk v1.18.1 // cmd/operator-sdk
diff --git a/operator/.bingo/operator-sdk.sum b/operator/.bingo/operator-sdk.sum
index 550c2b112d81b..d81eea09820b8 100644
--- a/operator/.bingo/operator-sdk.sum
+++ b/operator/.bingo/operator-sdk.sum
@@ -13,31 +13,53 @@ cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
cloud.google.com/go v0.54.0 h1:3ithwDMr7/3vpAMXiH+ZQnYbuIsh+OPhUPMFC9enmn0=
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
+cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
+cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
+cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
+cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
+cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
+cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
+cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
+cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
+cloud.google.com/go v0.81.0 h1:at8Tk2zUz63cLPR0JPWm5vp77pEZmzxEQBEfRKn1VV8=
+cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
+cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
+cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
+cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
+cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
+cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
+cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
+github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
+github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
github.com/Azure/go-autorest/autorest v0.11.12 h1:gI8ytXbxMfI+IVbI9mP2JGCTXIuhHLgRlvQ9X4PsnHE=
github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw=
+github.com/Azure/go-autorest/autorest v0.11.18 h1:90Y4srNYrwOtAgVo3ndrQkTYn6kf1Eg/AjTFJ8Is2aM=
+github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
github.com/Azure/go-autorest/autorest/adal v0.9.5 h1:Y3bBUV4rTuxenJJs41HU3qmqsb+auo+a3Lz+PlJPpL0=
github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
+github.com/Azure/go-autorest/autorest/adal v0.9.13 h1:Mp5hbtOePIzM8pJVRa3YLrWWmZtoxRXqUEzCfJt3+/Q=
+github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
@@ -48,6 +70,8 @@ github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
github.com/Azure/go-autorest/logger v0.2.0 h1:e4RVHVZKC5p6UANLJHkM4OfR1UKZPj8Wt8Pcx+3oqrE=
github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
+github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg=
+github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
@@ -103,6 +127,9 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5
github.com/alessio/shellescape v1.2.2/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
+github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
+github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e h1:GCzyKMDDjSGnlpl3clrdAK7I1AaVoaiKDOYkUzChZzg=
+github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY=
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
@@ -120,6 +147,8 @@ github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZo
github.com/aws/aws-sdk-go v1.17.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
+github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
+github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
@@ -129,6 +158,7 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB
github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k=
github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
+github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
@@ -145,10 +175,13 @@ github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/certifi/gocertifi v0.0.0-20180118203423-deb3ae2ef261/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4=
+github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
+github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 h1:7aWHqerlJ41y6FOsEUvknqgXnGmJyJSbjhAWq5pO4F8=
github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
@@ -162,9 +195,15 @@ github.com/cloudflare/cfssl v1.5.0/go.mod h1:sPPkBS5L8l8sRc/IOO1jG51Xb34u+TYhL6P
github.com/cloudflare/go-metrics v0.0.0-20151117154305-6a9aea36fb41/go.mod h1:eaZPlJWD+G9wseg1BuRXlHnjntPMrywMsyxf+LTOdP4=
github.com/cloudflare/redoctober v0.0.0-20171127175943-746a508df14c/go.mod h1:6Se34jNoqrd8bTxrmJB2Bg2aoZ2CdSXonils9NsiNgo=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
+github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
+github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
+github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM=
@@ -191,6 +230,7 @@ github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3Ee
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
+github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
@@ -270,6 +310,10 @@ github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4s
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ=
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
@@ -277,6 +321,7 @@ github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.11.0+incompatible h1:glyUF9yIYtMHzn8xaKw5rMhdWcwsYV8dZHIq5567/xs=
github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84=
github.com/evanphx/json-patch/v5 v5.1.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM=
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4=
@@ -287,19 +332,25 @@ github.com/fatih/color v1.12.0 h1:mRhaKNwANqRgUBGKmnI5ZxEk7QXmjQeCcuYFMX2bfcc=
github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
github.com/fatih/structtag v1.1.0 h1:6j4mUV/ES2duvnAzKMFkN6/A5mCaNYPD3xfbAkLLOF8=
github.com/fatih/structtag v1.1.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94=
+github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ=
+github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
+github.com/form3tech-oss/jwt-go v3.2.3+incompatible h1:7ZaBxOI7TMoYBfyA3cQHErNNyAWIKUMIwqxEtgHOs5c=
+github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI=
github.com/fsouza/fake-gcs-server v1.7.0/go.mod h1:5XIRs4YvwNbNoz+1JF8j6KLAyDh7RHGAyAK3EP2EsNk=
github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0=
github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
github.com/garyburd/redigo v1.6.0/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
github.com/getsentry/raven-go v0.0.0-20180121060056-563b81fc02b7/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
+github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
@@ -330,6 +381,7 @@ github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTg
github.com/go-logr/logr v0.3.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc=
github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
+github.com/go-logr/logr v1.2.0 h1:QK40JKJyMdUDz+h+xvCsru/bJhvG0UxvePV0ufL/AcE=
github.com/go-logr/zapr v0.2.0/go.mod h1:qhKdvif7YF5GI9NWEpyxTSSBdGmzkNguibrdCNVPunU=
github.com/go-logr/zapr v0.4.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk=
github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI=
@@ -346,12 +398,16 @@ github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwds
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
+github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o=
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
+github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM=
+github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
@@ -378,6 +434,8 @@ github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/
github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng=
+github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4=
github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA=
github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4=
@@ -396,6 +454,8 @@ github.com/gobuffalo/envy v1.7.1/go.mod h1:FurDp9+EDPE4aIUS3ZLyD+7/9fpx7YRt/ukY6
github.com/gobuffalo/flect v0.2.0/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80=
github.com/gobuffalo/flect v0.2.2 h1:PAVD7sp0KOdfswjAw9BpLCU9hXo7wFSzgpQ+zNeks/A=
github.com/gobuffalo/flect v0.2.2/go.mod h1:vmkQwuZYhN5Pc4ljYQZzP+1sq+NEkK+lh20jmEmX3jc=
+github.com/gobuffalo/flect v0.2.3 h1:f/ZukRnSNA/DUpSNDadko7Qc0PhGvsew35p/2tu+CRY=
+github.com/gobuffalo/flect v0.2.3/go.mod h1:vmkQwuZYhN5Pc4ljYQZzP+1sq+NEkK+lh20jmEmX3jc=
github.com/gobuffalo/here v0.6.0/go.mod h1:wAG085dHOYqUpf+Ap+WOdrPTp5IYcDAs/x7PLa8Y5fM=
github.com/gobuffalo/logger v1.0.1/go.mod h1:2zbswyIUa45I+c+FLXuWl9zSWEiVuthsk8ze5s8JvPs=
github.com/gobuffalo/packd v0.3.0/go.mod h1:zC7QkmNkYVGKPw4tHpBQ+ml7W/3tIebgeo1b36chA3Q=
@@ -406,6 +466,7 @@ github.com/goccy/go-yaml v1.8.1/go.mod h1:wS4gNoLalDSJxo/SpngzPQ2BN4uuZVLCmbM4S3
github.com/gocql/gocql v0.0.0-20190301043612-f6df8288f9b4/go.mod h1:4Fw1eo5iaEhDUs8XyuhSVCVy52Jq3L+/3GJgYkwc+/0=
github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godror/godror v0.13.3/go.mod h1:2ouUT4kdhUBk7TAkHWD4SN0CdI0pgEQbo8FVHhbSKWg=
github.com/gofrs/flock v0.8.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
@@ -419,23 +480,30 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69
github.com/golang-migrate/migrate/v4 v4.6.2/go.mod h1:JYi6reN3+Z734VZ0akNuyOJNcrg45ZL7LDBMW3WGJL0=
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
+github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
@@ -445,6 +513,7 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
@@ -456,14 +525,22 @@ github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
+github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
+github.com/google/cel-go v0.9.0 h1:u1hg7lcZ/XWw2d3aV1jFS30ijQQ6q0/h1C2ZBeBD1gY=
+github.com/google/cel-go v0.9.0/go.mod h1:U7ayypeSkw23szu4GaQTPJGx66c20mx8JklMSxrmI1w=
+github.com/google/cel-spec v0.6.0/go.mod h1:Nwjgxy5CbjlPrtCWjeDjUyKMl8w41YBYGjsyDdqk0xA=
github.com/google/certificate-transparency-go v1.0.21 h1:Yf1aXowfZ2nuboBsg7iYGLmwsOARdV86pfH3g95wXmE=
github.com/google/certificate-transparency-go v1.0.21/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
@@ -474,11 +551,19 @@ github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
@@ -504,6 +589,7 @@ github.com/gorilla/mux v1.7.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2z
github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw=
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
@@ -513,9 +599,12 @@ github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWet
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/grpc-ecosystem/grpc-health-probe v0.3.2/go.mod h1:izVOQ4RWbjUR6lm4nn+VLJyQ+FyaiGmprEYgI04Gs7U=
github.com/h2non/filetype v1.1.1 h1:xvOwnXKAckvtLWsN398qS9QhlxlnVXBjXBydK2/UFB4=
github.com/h2non/filetype v1.1.1/go.mod h1:319b3zT68BvV+WRj7cwy856M2ehB3HqNOt6sy1HndBY=
@@ -556,7 +645,9 @@ github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334 h1:VHgatEHNcBFEB7inlalqfNqw65aNkM1lGX2yt3NmbS8=
github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE=
+github.com/iancoleman/strcase v0.1.2 h1:gnomlvw9tnV3ITTAxzKSgTF+8kFWcU/f+TgttpXGz1U=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
@@ -582,6 +673,9 @@ github.com/joelanford/ignore v0.0.0-20210607151042-0d25dc18b62d/go.mod h1:7HQupe
github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc=
github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
+github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
+github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
@@ -589,6 +683,7 @@ github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/u
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
@@ -639,6 +734,8 @@ github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0Q
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls=
+github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
@@ -646,6 +743,8 @@ github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.0 h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM=
github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
+github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA=
+github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/markbates/inflect v1.0.4 h1:5fh1gzTFhfae06u3hzHYO9xe3l3v3nW5Pwt3naLTP5g=
github.com/markbates/inflect v1.0.4/go.mod h1:1fR9+pO2KHEO9ZRtto13gDwwZaAKstQzferVeWqbgNs=
github.com/markbates/pkger v0.17.1/go.mod h1:0JoVlrol20BSywW79rN3kdFFsE5xYM+rSCQDXbLhiuI=
@@ -690,6 +789,8 @@ github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0Qu
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag=
+github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/mitchellh/reflectwalk v1.0.1 h1:FVzMWA5RllMAKIdUSC8mdWo3XtwoecrH79BY70sEEpE=
@@ -699,12 +800,15 @@ github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0Gq
github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 h1:rzf0wL0CHVc8CEsgyygG0Mn9CNCCPZqOPaz8RiiHYQk=
github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc=
+github.com/moby/term v0.0.0-20210610120745-9d4ed1856297 h1:yH0SvLzcbZxcJXho2yh7CqdENGMQe73Cw3woZBpPli0=
+github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0=
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4=
github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
@@ -743,6 +847,7 @@ github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
@@ -756,6 +861,10 @@ github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1y
github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.13.0 h1:7lLHu94wT9Ij0o6EWWclhu0aOh32VxhkwEJvzuWPeak=
github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY=
+github.com/onsi/gomega v1.14.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0=
+github.com/onsi/gomega v1.15.0 h1:WjP/FQ/sk43MRmnEcT+MlDw2TFvkrXlprrPST/IudjU=
+github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0=
+github.com/onsi/gomega v1.17.0 h1:9Luw4uT5HTjHTN8+aNcSThgH1vdXnmdJ8xIfZ4wyTRE=
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
@@ -783,13 +892,42 @@ github.com/operator-framework/api v0.7.1/go.mod h1:L7IvLd/ckxJEJg/t4oTTlnHKAJIP/
github.com/operator-framework/api v0.10.0/go.mod h1:tV0BUNvly7szq28ZPBXhjp1Sqg5yHCOeX19ui9K4vjI=
github.com/operator-framework/api v0.10.5 h1:/WvLKOPo8zZMyEmuW0kLC0PJBt4Xal8HZkFioKIxqTA=
github.com/operator-framework/api v0.10.5/go.mod h1:tV0BUNvly7szq28ZPBXhjp1Sqg5yHCOeX19ui9K4vjI=
+github.com/operator-framework/api v0.10.6 h1:Vi2l5xbdDFLa9ktpOPpfsepmT+mtHD9ztI8PDWMZ1Co=
+github.com/operator-framework/api v0.10.6/go.mod h1:PtQSNSuVrhSC6YE6JJJZv3nnZJc32osKX8FmFUZK05U=
+github.com/operator-framework/api v0.10.7 h1:GlZJ6m+0WSVdSsSjTbhKKAvHXamWJXhwXHUhVwL8LBE=
+github.com/operator-framework/api v0.10.7/go.mod h1:PtQSNSuVrhSC6YE6JJJZv3nnZJc32osKX8FmFUZK05U=
+github.com/operator-framework/api v0.11.1-0.20220110184307-ff6b5ebe3c25 h1:MYC0rvZ5jrzS+2LdPpBhtz8sznyVL5jG7NTyIlSPy8s=
+github.com/operator-framework/api v0.11.1-0.20220110184307-ff6b5ebe3c25/go.mod h1:FTiYGm11fZQ3cSX+EQHc/UWoGZAwkGfyeHU+wMJ8jmA=
+github.com/operator-framework/api v0.11.2-0.20220118170607-6b187a1d0908 h1:pD3v8KvLkf3H99fs6iMONVgoUzK8IoVJXcGTiuZGq2E=
+github.com/operator-framework/api v0.14.0 h1:5nk8fQL8l+dDxi11hZi0T7nqhhoIQLn+qL2DhMEGnoE=
+github.com/operator-framework/helm-operator-plugins v0.0.9 h1:G5aBY5sPrNXcRiKLpAaBMOYm7q0+qCmk9XWOAL/ZJuc=
github.com/operator-framework/java-operator-plugins v0.0.0-20210708174638-463fb91f3d5e h1:LMsT59IJqaLn7kD6DnZFy0IouRufXyJHTT+mXQrl9Ps=
github.com/operator-framework/java-operator-plugins v0.0.0-20210708174638-463fb91f3d5e/go.mod h1:sGKGELFkUeRqElcyvyPC89bC76YnCL7MPMa13P0AQcw=
+github.com/operator-framework/java-operator-plugins v0.1.0 h1:khkYsrkEG4m+wT+oPjZYmWXo8jd0QQ8E4agSrqrhPhU=
+github.com/operator-framework/java-operator-plugins v0.1.0/go.mod h1:sGKGELFkUeRqElcyvyPC89bC76YnCL7MPMa13P0AQcw=
+github.com/operator-framework/java-operator-plugins v0.2.0 h1:nIc3/pmH9j9lA6IzcnBBOl1D1V7XFculETUJrucOcrk=
+github.com/operator-framework/java-operator-plugins v0.3.0 h1:K+gdg1cLugxP3KbGNc1SttKIY69z7ywBFaXW0vVIG9o=
github.com/operator-framework/operator-lib v0.5.0/go.mod h1:33Skl0vjauYx3nAS+cSFbHNkX8do7weQ6s5siIV/w1E=
+github.com/operator-framework/operator-lib v0.6.0/go.mod h1:2Z32GTTJUz2/f+OKcoJXsVnAyRwcXx7mGmQsdhIAIIE=
github.com/operator-framework/operator-registry v1.17.4 h1:bYoWevurGEUshSMu8QNcImhLuPZJ/a4MbsUuvBjFEzA=
github.com/operator-framework/operator-registry v1.17.4/go.mod h1:k0rWVT23QoN1prs9tX8PHjRVXz6FMZfUJ5EIZSrqh9E=
+github.com/operator-framework/operator-registry v1.19.5 h1:2LtfN4hrOn+z4MwQsFtJVkyQocQPV+rDrNoawwnBhPI=
github.com/operator-framework/operator-sdk v1.11.0 h1:CpNNSLbrSIR3j1O0YH9CmayxD6fIkuW6KB+m6jWsph8=
github.com/operator-framework/operator-sdk v1.11.0/go.mod h1:fkJPPNnepIgLn9FJDZhXKYrx+LsU8/4TWLT4RF0Whr0=
+github.com/operator-framework/operator-sdk v1.13.0 h1:XOW3KMLf8DyXisC/gmltRaq/rBJy45QsBLINOiFDVD0=
+github.com/operator-framework/operator-sdk v1.13.0/go.mod h1:VqjJ/VkDOG8lc2YpcXFZN9xQWCjUuyipBOvfpnmevQQ=
+github.com/operator-framework/operator-sdk v1.14.0 h1:3QUzOSugoChe2wwdCj9C8IfvYWp2UasYdMlxflaKjiY=
+github.com/operator-framework/operator-sdk v1.14.0/go.mod h1:OgtK4cle30kiHnVD+NL6QlVeZbVIhIXWVzNTmSeUTLY=
+github.com/operator-framework/operator-sdk v1.15.0 h1:TC+Yk6ZKHaOzAvucdPo5CFLuHFOCWTFjzDpI3z1ey4U=
+github.com/operator-framework/operator-sdk v1.15.0/go.mod h1:OgtK4cle30kiHnVD+NL6QlVeZbVIhIXWVzNTmSeUTLY=
+github.com/operator-framework/operator-sdk v1.16.0 h1:qQUAgnLoAAJpJ1XUEYWgCad6nwH+TS9glIsLBni0q4s=
+github.com/operator-framework/operator-sdk v1.16.0/go.mod h1:UfLyo5JsJH3dQryKcwY5jYybM/mNiVRznoNB1/2fpTE=
+github.com/operator-framework/operator-sdk v1.17.0 h1:BY2pcNSFzYJnMOoskuww42z0LiJdWk8oYhaLEc8kvUY=
+github.com/operator-framework/operator-sdk v1.17.0/go.mod h1:TFZHfJvzPJ3fZsHH72otNlIugInY0hTrBHpWyxgAJ/w=
+github.com/operator-framework/operator-sdk v1.18.0 h1:v8EOswbzaGAcheudG87imFPQys/Pjh1dgqY5T77WMLY=
+github.com/operator-framework/operator-sdk v1.18.0/go.mod h1:9KYwoQDtx52btnE1v3qrI4I3MW6t8zQESouGHPbEusA=
+github.com/operator-framework/operator-sdk v1.18.1 h1:pRKZnL8P1uMVGdyGWEKC3mu0mrQv1fl2pd8V/5fuJrY=
+github.com/operator-framework/operator-sdk v1.18.1/go.mod h1:ceHQ0njU5Z47jQtTv8cO+dGXt5wOmL88hRKYWsHjiwo=
github.com/otiai10/copy v1.2.0 h1:HvG945u96iNadPoG2/Ja2+AUJeW5YuFQMixq9yirC+k=
github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw=
github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE=
@@ -803,6 +941,8 @@ github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtb
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.8.1 h1:1Nf83orprkJyknT6h7zbuEGUEjcyVlCxSUGTENmNCRM=
github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
+github.com/pelletier/go-toml v1.9.3 h1:zeC5b1GviRUyKYd6OJPvBU/mcVDVoL1OhT17FCt5dSQ=
+github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
@@ -848,6 +988,7 @@ github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt2
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
+github.com/prometheus/common v0.28.0 h1:vGVfV9KrDTvWt5boZO0I19g2E3CsWfpPPKZM9dt3mEw=
github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
@@ -863,6 +1004,7 @@ github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.3.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
@@ -897,6 +1039,7 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
@@ -913,8 +1056,12 @@ github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHN
github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI=
github.com/spf13/cobra v1.1.3 h1:xghbfqPkxzxP3C/f3n5DdpAbdKLj4ZE4BWQI362l53M=
github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
+github.com/spf13/cobra v1.2.1 h1:+KmjbUw1hriSNMF55oPrkZcb27aECyrj8V2ytv7kWDw=
+github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
+github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
+github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
@@ -925,7 +1072,10 @@ github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DM
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/spf13/viper v1.7.0 h1:xVKxvI7ouOI5I+U9s2eeiUfMaWBVoXA3AWskkrqK0VM=
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
+github.com/spf13/viper v1.8.1 h1:Kq1fyeebqsBfbjZj4EL7gj2IO0mMaiyjYUWcUsl2O44=
+github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI=
+github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU=
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
@@ -948,6 +1098,7 @@ github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1C
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
@@ -975,7 +1126,9 @@ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q
github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca h1:1CFlNzQhALwjS9mBAUkycX616GzgsuYUOCHA5+HSlXI=
github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
@@ -997,8 +1150,17 @@ go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0=
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
+go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
+go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
+go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
+go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
+go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
+go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0=
+go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE=
+go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc=
+go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4=
go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
go.mongodb.org/mongo-driver v1.1.0/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
@@ -1009,6 +1171,31 @@ go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
+go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
+go.opentelemetry.io/contrib v0.20.0 h1:ubFQUn0VCZ0gPwIoJfBJVpeBlyRMxu8Mm/huKWYd9p0=
+go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0 h1:Q3C9yzW6I9jqEc8sawxzxZmY48fs9u220KXq6d5s3XU=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4=
+go.opentelemetry.io/otel v0.20.0 h1:eaP0Fqu7SXHwvjiqDq83zImeehOHX8doTvU9AwXON8g=
+go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo=
+go.opentelemetry.io/otel/exporters/otlp v0.20.0 h1:PTNgq9MRmQqqJY0REVbZFvwkYOA85vbdQU/nVfxDyqg=
+go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM=
+go.opentelemetry.io/otel/metric v0.20.0 h1:4kzhXFP+btKm4jwxpjIqjs41A7MakRFUS86bqLHTIw8=
+go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU=
+go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw=
+go.opentelemetry.io/otel/sdk v0.20.0 h1:JsxtGXd06J8jrnya7fdI/U/MR6yXA5DtbZy+qoHQlr8=
+go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc=
+go.opentelemetry.io/otel/sdk/export/metric v0.20.0 h1:c5VRjxCXdQlx1HjzwGdQHzZaVI82b5EbBgOu2ljD92g=
+go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE=
+go.opentelemetry.io/otel/sdk/metric v0.20.0 h1:7ao1wpzHRVKf0OQ7GIxiQJA6X7DLX9o14gmVon7mMK8=
+go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE=
+go.opentelemetry.io/otel/trace v0.20.0 h1:1DL6EXUdcg95gukhuRRvLDO/4X5THh/5dIV52lqtnbw=
+go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw=
+go.opentelemetry.io/proto/otlp v0.7.0 h1:rwOQPCuKAKmwGKq2aVNnYIibI6wnV7EvzgfTCzcdGg8=
+go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 h1:+FNtrFTmVw0YZGpBGX56XDee331t6JAXeK2bcyhLOOc=
go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
@@ -1027,6 +1214,7 @@ go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
+go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@@ -1056,6 +1244,7 @@ golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWP
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2 h1:It14KIkyBFYkHkwZ7k45minvA9aorojkyjGk9KJ5B/w=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 h1:HWj/xjIHfjYU5nVXpTM0s39J9CbLn7Cc5a7IC5rwsMQ=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -1079,6 +1268,7 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
@@ -1089,8 +1279,11 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.5.1 h1:OJxoQ/rynoF0dcCdI7cLPktw/hR2cueqYfjm43oqK38=
golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1129,18 +1322,33 @@ golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 h1:DzZ89McO9/gWPsQXS/FVKAlG02ZjaQ6AlZRBimEYOd0=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
+golang.org/x/net v0.0.0-20210520170846-37e1c6afe023 h1:ADo5wSpq2gqaCGQWzk7S5vd//0iyyLeAratkEoG5dLE=
+golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210825183410-e898025ed96a h1:bRuuGXV8wwSdGTB+CtJf+FjgO1APK1CoO39T4BN/XBw=
+golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211209124913-491a49abca63 h1:iocB37TsdFuN6IBRZ+ry36wrkoV51/tl5vOWqkcPGvY=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -1149,12 +1357,23 @@ golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4Iltr
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602 h1:0Ja1LBD+yisY6RWM/BH7TJVXWsSjs2VwBSmvSX4HdBc=
+golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f h1:Qmd2pbz05z7z6lm0DrgQVVPuBm92jqujBKMHMOlOQEw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
@@ -1216,32 +1435,53 @@ golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200928205150-006507a75852/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40 h1:JWgyZ1qgdTaF3N3oxC+MdTV7qvEEgHo3otj+HB5CM7Q=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210817190340-bfb29a6856f2 h1:c8PlLMqBbOHoqtjteWm5/kbe6rNY2pbRfbIMVnepueo=
+golang.org/x/sys v0.0.0-20210817190340-bfb29a6856f2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e h1:XMgFehsDnnLGtjvjOfqWSUzt0alpTR1RSEuznObga2c=
+golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8 h1:M69LAlWZCshgp0QSzyDcSsSIejIEeuaCVpmwcKwyLMk=
+golang.org/x/sys v0.0.0-20220224120231-95c6836cb0e7 h1:BXxu8t6QN0G1uff4bzZzSkpsax8+ALqTGUtz08QrV00=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b h1:9zKuko04nR4gjZ4+DNjHqRlAJqbJETHwiNKDqTfOjfE=
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -1252,6 +1492,8 @@ golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxb
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6 h1:Vv0JUPWTyeqUq42B2WJ1FeIDjjvGKoA2Ss+Ts0lAVbs=
golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs=
+golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -1302,17 +1544,36 @@ golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapK
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
+golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200616195046-dc31b401abb5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
+golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.3 h1:L69ShwSZEyCsLKoAxDKeMvLDZkumEe8gXUZAjab0tX8=
golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA=
+golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff h1:VX/uD7MK0AHXGiScH3fsieUQUcpmRERPDYtqZdJnA+Q=
+golang.org/x/tools v0.1.9 h1:j9KsMiaP1c3B0OTQGth0/k+miLGTgLsAFUCrF2vLcF8=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1334,7 +1595,19 @@ google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsb
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
+google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
+google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
+google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
+google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
+google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
+google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
+google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -1366,11 +1639,38 @@ google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvx
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
+google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201102152239-715cce707fb0/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a h1:pOwg4OoaRYScjmR4LlLgdtnyoHYTSAVhhqe5uPdpII8=
google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
+google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c h1:wtujag7C+4D6KMoulW9YauvK2lgdvCMS260jsqqBXr0=
+google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2 h1:NHN4wOCScVzKhPenJ2dt+BTs3X/XkBVI/Rh4iDt55T8=
+google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
@@ -1386,8 +1686,24 @@ google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQ
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
+google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.30.0 h1:M5a8xTlYTxwMn5ZFkwhRabsygDY5G8TYLyQDBxJNAxE=
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
+google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
+google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
+google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0=
+google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.40.0 h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q=
+google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.41.0 h1:f+PlOh7QV4iIJkPrx5NQ7qaNGFQ3OTse67yaDHfju4E=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v0.0.0-20200709232328-d8193ee9cc3e/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
@@ -1402,6 +1718,8 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
+google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -1423,6 +1741,8 @@ gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno=
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU=
+gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473/go.mod h1:N1eN2tsCx0Ydtgjl4cqmbRCsY4/+z4cYDeqwZTk6zog=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
@@ -1435,6 +1755,7 @@ gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRN
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@@ -1459,6 +1780,7 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
k8s.io/api v0.18.2/go.mod h1:SJCWI7OLzhZSvbY7U8zwNl9UA4o1fizoug34OV/2r78=
k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8=
@@ -1466,6 +1788,12 @@ k8s.io/api v0.21.0/go.mod h1:+YbrhBBGgsxbF6o6Kj4KJPJnBmAKuXDeS3E18bgHNVU=
k8s.io/api v0.21.1/go.mod h1:FstGROTmsSHBarKc8bylzXih8BLNYTiS3TZcsoEDg2s=
k8s.io/api v0.21.2 h1:vz7DqmRsXTCSa6pNxXwQ1IYeAZgdIsua+DZU+o+SX3Y=
k8s.io/api v0.21.2/go.mod h1:Lv6UGJZ1rlMI1qusN8ruAp9PUBFyBwpEHAdG24vIsiU=
+k8s.io/api v0.21.3/go.mod h1:hUgeYHUbBp23Ue4qdX9tR8/ANi/g3ehylAqDn9NWVOg=
+k8s.io/api v0.22.1 h1:ISu3tD/jRhYfSW8jI/Q1e+lRxkR7w9UwQEZ7FgslrwY=
+k8s.io/api v0.22.1/go.mod h1:bh13rkTp3F1XEaLGykbyRD2QaTTzPm0e/BMd8ptFONY=
+k8s.io/api v0.22.2 h1:M8ZzAD0V6725Fjg53fKeTJxGsJvRbk4TEm/fexHMtfw=
+k8s.io/api v0.22.2/go.mod h1:y3ydYpLJAaDI+BbSe2xmGcqxiWHmWjkEeIbiwHvnPR8=
+k8s.io/api v0.23.1 h1:ncu/qfBfUoClqwkTGbeRqqOqBCRoUAflMuOaOD7J0c8=
k8s.io/apiextensions-apiserver v0.18.2/go.mod h1:q3faSnRGmYimiocj6cHQ1I3WpLqmDgJFlKL37fC4ZvY=
k8s.io/apiextensions-apiserver v0.20.1/go.mod h1:ntnrZV+6a3dB504qwC5PN/Yg9PBiDNt1EVqbW2kORVk=
k8s.io/apiextensions-apiserver v0.20.6/go.mod h1:qO8YMqeMmZH+lV21LUNzV41vfpoE9QVAJRA+MNqj0mo=
@@ -1473,6 +1801,12 @@ k8s.io/apiextensions-apiserver v0.21.0/go.mod h1:gsQGNtGkc/YoDG9loKI0V+oLZM4ljRP
k8s.io/apiextensions-apiserver v0.21.1/go.mod h1:KESQFCGjqVcVsZ9g0xX5bacMjyX5emuWcS2arzdEouA=
k8s.io/apiextensions-apiserver v0.21.2 h1:+exKMRep4pDrphEafRvpEi79wTnCFMqKf8LBtlA3yrE=
k8s.io/apiextensions-apiserver v0.21.2/go.mod h1:+Axoz5/l3AYpGLlhJDfcVQzCerVYq3K3CvDMvw6X1RA=
+k8s.io/apiextensions-apiserver v0.21.3/go.mod h1:kl6dap3Gd45+21Jnh6utCx8Z2xxLm8LGDkprcd+KbsE=
+k8s.io/apiextensions-apiserver v0.22.1 h1:YSJYzlFNFSfUle+yeEXX0lSQyLEoxoPJySRupepb0gE=
+k8s.io/apiextensions-apiserver v0.22.1/go.mod h1:HeGmorjtRmRLE+Q8dJu6AYRoZccvCMsghwS8XTUYb2c=
+k8s.io/apiextensions-apiserver v0.22.2 h1:zK7qI8Ery7j2CaN23UCFaC1hj7dMiI87n01+nKuewd4=
+k8s.io/apiextensions-apiserver v0.22.2/go.mod h1:2E0Ve/isxNl7tWLSUDgi6+cmwHi5fQRdwGVCxbC+KFA=
+k8s.io/apiextensions-apiserver v0.23.1 h1:xxE0q1vLOVZiWORu1KwNRQFsGWtImueOrqSl13sS5EU=
k8s.io/apimachinery v0.18.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA=
k8s.io/apimachinery v0.19.2/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA=
k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
@@ -1481,6 +1815,12 @@ k8s.io/apimachinery v0.21.0/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswP
k8s.io/apimachinery v0.21.1/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY=
k8s.io/apimachinery v0.21.2 h1:vezUc/BHqWlQDnZ+XkrpXSmnANSLbpnlpwo0Lhk0gpc=
k8s.io/apimachinery v0.21.2/go.mod h1:CdTY8fU/BlvAbJ2z/8kBwimGki5Zp8/fbVuLY8gJumM=
+k8s.io/apimachinery v0.21.3/go.mod h1:H/IM+5vH9kZRNJ4l3x/fXP/5bOPJaVP/guptnZPeCFI=
+k8s.io/apimachinery v0.22.1 h1:DTARnyzmdHMz7bFWFDDm22AM4pLWTQECMpRTFu2d2OM=
+k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0=
+k8s.io/apimachinery v0.22.2 h1:ejz6y/zNma8clPVfNDLnPbleBo6MpoFy/HBiBqCouVk=
+k8s.io/apimachinery v0.22.2/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0=
+k8s.io/apimachinery v0.23.1 h1:sfBjlDFwj2onG0Ijx5C+SrAoeUscPrmghm7wHP+uXlo=
k8s.io/apiserver v0.18.2/go.mod h1:Xbh066NqrZO8cbsoenCwyDJ1OSi8Ag8I2lezeHxzwzw=
k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q=
@@ -1488,9 +1828,16 @@ k8s.io/apiserver v0.21.0/go.mod h1:w2YSn4/WIwYuxG5zJmcqtRdtqgW/J2JRgFAqps3bBpg=
k8s.io/apiserver v0.21.1/go.mod h1:nLLYZvMWn35glJ4/FZRhzLG/3MPxAaZTgV4FJZdr+tY=
k8s.io/apiserver v0.21.2 h1:vfGLD8biFXHzbcIEXyW3652lDwkV8tZEFJAaS2iuJlw=
k8s.io/apiserver v0.21.2/go.mod h1:lN4yBoGyiNT7SC1dmNk0ue6a5Wi6O3SWOIw91TsucQw=
+k8s.io/apiserver v0.21.3/go.mod h1:eDPWlZG6/cCCMj/JBcEpDoK+I+6i3r9GsChYBHSbAzU=
+k8s.io/apiserver v0.22.1 h1:Ul9Iv8OMB2s45h2tl5XWPpAZo1VPIJ/6N+MESeed7L8=
+k8s.io/apiserver v0.22.1/go.mod h1:2mcM6dzSt+XndzVQJX21Gx0/Klo7Aen7i0Ai6tIa400=
+k8s.io/apiserver v0.22.2 h1:TdIfZJc6YNhu2WxeAOWq1TvukHF0Sfx0+ln4XK9qnL4=
+k8s.io/apiserver v0.22.2/go.mod h1:vrpMmbyjWrgdyOvZTSpsusQq5iigKNWv9o9KlDAbBHI=
+k8s.io/apiserver v0.23.1 h1:vWGf8LcV9Pk/z5rdLmCiBDqE21ccbe930dzrtVMhw9g=
k8s.io/cli-runtime v0.20.6/go.mod h1:JVERW478qcxWrUjJuWQSqyJeiz9QC4T6jmBznHFBC8w=
k8s.io/cli-runtime v0.21.0 h1:/V2Kkxtf6x5NI2z+Sd/mIrq4FQyQ8jzZAUD6N5RnN7Y=
k8s.io/cli-runtime v0.21.0/go.mod h1:XoaHP93mGPF37MkLbjGVYqg3S1MnsFdKtiA/RZzzxOo=
+k8s.io/cli-runtime v0.23.1 h1:vHUZrq1Oejs0WaJnxs09mLHKScvIIl2hMSthhS8o8Yo=
k8s.io/client-go v0.18.2/go.mod h1:Xcm5wVGXX9HAA2JJ2sSBUn3tCJ+4SVlCbl2MNNv+CIU=
k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0=
@@ -1498,12 +1845,21 @@ k8s.io/client-go v0.21.0/go.mod h1:nNBytTF9qPFDEhoqgEPaarobC8QPae13bElIVHzIglA=
k8s.io/client-go v0.21.1/go.mod h1:/kEw4RgW+3xnBGzvp9IWxKSNA+lXn3A7AuH3gdOAzLs=
k8s.io/client-go v0.21.2 h1:Q1j4L/iMN4pTw6Y4DWppBoUxgKO8LbffEMVEV00MUp0=
k8s.io/client-go v0.21.2/go.mod h1:HdJ9iknWpbl3vMGtib6T2PyI/VYxiZfq936WNVHBRrA=
+k8s.io/client-go v0.21.3/go.mod h1:+VPhCgTsaFmGILxR/7E1N0S+ryO010QBeNCv5JwRGYU=
+k8s.io/client-go v0.22.1 h1:jW0ZSHi8wW260FvcXHkIa0NLxFBQszTlhiAVsU5mopw=
+k8s.io/client-go v0.22.1/go.mod h1:BquC5A4UOo4qVDUtoc04/+Nxp1MeHcVc1HJm1KmG8kk=
+k8s.io/client-go v0.22.2 h1:DaSQgs02aCC1QcwUdkKZWOeaVsQjYvWv8ZazcZ6JcHc=
+k8s.io/client-go v0.22.2/go.mod h1:sAlhrkVDf50ZHx6z4K0S40wISNTarf1r800F+RlCF6U=
+k8s.io/client-go v0.23.1 h1:Ma4Fhf/p07Nmj9yAB1H7UwbFHEBrSPg8lviR24U2GiQ=
k8s.io/code-generator v0.18.2/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc=
k8s.io/code-generator v0.20.1/go.mod h1:UsqdF+VX4PU2g46NC2JRs4gc+IfrctnwHb76RNbWHJg=
k8s.io/code-generator v0.20.6/go.mod h1:i6FmG+QxaLxvJsezvZp0q/gAEzzOz3U53KFibghWToU=
k8s.io/code-generator v0.21.0/go.mod h1:hUlps5+9QaTrKx+jiM4rmq7YmH8wPOIko64uZCHDh6Q=
k8s.io/code-generator v0.21.1/go.mod h1:hUlps5+9QaTrKx+jiM4rmq7YmH8wPOIko64uZCHDh6Q=
k8s.io/code-generator v0.21.2/go.mod h1:8mXJDCB7HcRo1xiEQstcguZkbxZaqeUOrO9SsicWs3U=
+k8s.io/code-generator v0.21.3/go.mod h1:K3y0Bv9Cz2cOW2vXUrNZlFbflhuPvuadW6JdnN6gGKo=
+k8s.io/code-generator v0.22.1/go.mod h1:eV77Y09IopzeXOJzndrDyCI88UBok2h6WxAlBwpxa+o=
+k8s.io/code-generator v0.22.2/go.mod h1:eV77Y09IopzeXOJzndrDyCI88UBok2h6WxAlBwpxa+o=
k8s.io/component-base v0.18.2/go.mod h1:kqLlMuhJNHQ9lz8Z7V5bxUUtjFZnrypArGl58gmDfUM=
k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk=
k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM=
@@ -1511,6 +1867,12 @@ k8s.io/component-base v0.21.0/go.mod h1:qvtjz6X0USWXbgmbfXR+Agik4RZ3jv2Bgr5QnZzd
k8s.io/component-base v0.21.1/go.mod h1:NgzFZ2qu4m1juby4TnrmpR8adRk6ka62YdH5DkIIyKA=
k8s.io/component-base v0.21.2 h1:EsnmFFoJ86cEywC0DoIkAUiEV6fjgauNugiw1lmIjs4=
k8s.io/component-base v0.21.2/go.mod h1:9lvmIThzdlrJj5Hp8Z/TOgIkdfsNARQ1pT+3PByuiuc=
+k8s.io/component-base v0.21.3/go.mod h1:kkuhtfEHeZM6LkX0saqSK8PbdO7A0HigUngmhhrwfGQ=
+k8s.io/component-base v0.22.1 h1:SFqIXsEN3v3Kkr1bS6rstrs1wd45StJqbtgbQ4nRQdo=
+k8s.io/component-base v0.22.1/go.mod h1:0D+Bl8rrnsPN9v0dyYvkqFfBeAd4u7n77ze+p8CMiPo=
+k8s.io/component-base v0.22.2 h1:vNIvE0AIrLhjX8drH0BgCNJcR4QZxMXcJzBsDplDx9M=
+k8s.io/component-base v0.22.2/go.mod h1:5Br2QhI9OTe79p+TzPe9JKNQYvEKbq9rTJDWllunGug=
+k8s.io/component-base v0.23.1 h1:j/BqdZUWeWKCy2v/jcgnOJAzpRYWSbGcjGVYICko8Uc=
k8s.io/component-helpers v0.20.6/go.mod h1:d4rFhZS/wxrZCxRiJJiWf1mVGVeMB5/ey3Yv8/rOp78=
k8s.io/component-helpers v0.21.0/go.mod h1:tezqefP7lxfvJyR+0a+6QtVrkZ/wIkyMLK4WcQ3Cj8U=
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
@@ -1527,21 +1889,32 @@ k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.8.0 h1:Q3gmuM9hKEjefWFFYF0Mat+YyFJvsUyYuwyNNJ5C9Ts=
k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
+k8s.io/klog/v2 v2.9.0 h1:D7HV+n1V57XeZ0m6tdRkfknthUaM06VFbWldOFh8kzM=
+k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
+k8s.io/klog/v2 v2.30.0 h1:bUO6drIvCIsvZ/XFgfxoGFQU/a4Qkh0iAlvUR7vlHJw=
k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o=
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE=
k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e h1:KLHHjkdQFomZy8+06csTWZ0m1343QqxZhR2LJ1OxCYM=
k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
+k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 h1:E3J9oCLlaobFUqsjG9DfKbP2BmgwBL2p7pn0A3dG9W4=
k8s.io/kubectl v0.20.6/go.mod h1:yTCGVrlkBuQhFbKA1R65+lQ9hH7XeyOqUd0FUPFicPg=
k8s.io/kubectl v0.21.0 h1:WZXlnG/yjcE4LWO2g6ULjFxtzK6H1TKzsfaBFuVIhNg=
k8s.io/kubectl v0.21.0/go.mod h1:EU37NukZRXn1TpAkMUoy8Z/B2u6wjHDS4aInsDzVvks=
+k8s.io/kubectl v0.23.1 h1:gmscOiV4Y4XIRIn14gQBBADoyyVrDZPbxRCTDga4RSA=
k8s.io/metrics v0.20.6/go.mod h1:d+OAIaXutom9kGWcBit/M8OkDpIzBKTsm47+KcUt7VI=
k8s.io/metrics v0.21.0/go.mod h1:L3Ji9EGPP1YBbfm9sPfEXSpnj8i24bfQbAFAsW0NueQ=
k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20210527160623-6fdb442a123b h1:MSqsVQ3pZvPGTqCjptfimO2WjG7A9un2zcpiHkA6M/s=
k8s.io/utils v0.0.0-20210527160623-6fdb442a123b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+k8s.io/utils v0.0.0-20210802155522-efc7438f0176 h1:Mx0aa+SUAcNRQbs5jUzV8lkDlGFU8laZsY9jrcVX5SY=
+k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a h1:8dYfu/Fc9Gz2rNJKB9IQRGgQOh2clmRzNIPPY1xLY5g=
+k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b h1:wxEMGetGMur3J1xuGLQY7GEQYg9bZxKn3tKo5k/eYcs=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
@@ -1550,26 +1923,45 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyz
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.19 h1:0jaDAAxtqIrrqas4vtTqxct4xS5kHfRNycTRLTyJmVM=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.19/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22 h1:fmRfl9WJ4ApJn7LxNuED4m0t18qivVQOxP6aAYG9J6c=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.25 h1:DEQ12ZRxJjsglk5JIi5bLgpKaHihGervKmg5uryaEHw=
sigs.k8s.io/controller-runtime v0.8.0/go.mod h1:v9Lbj5oX443uR7GXYY46E0EE2o7k2YxQ58GxVNeXSW4=
sigs.k8s.io/controller-runtime v0.9.0/go.mod h1:TgkfvrhhEw3PlI0BRL/5xM+89y3/yc0ZDfdbTl84si8=
sigs.k8s.io/controller-runtime v0.9.2 h1:MnCAsopQno6+hI9SgJHKddzXpmv2wtouZz6931Eax+Q=
sigs.k8s.io/controller-runtime v0.9.2/go.mod h1:TxzMCHyEUpaeuOiZx/bIdc2T81vfs/aKdvJt9wuu0zk=
+sigs.k8s.io/controller-runtime v0.10.0 h1:HgyZmMpjUOrtkaFtCnfxsR1bGRuFoAczSNbn2MoKj5U=
+sigs.k8s.io/controller-runtime v0.10.0/go.mod h1:GCdh6kqV6IY4LK0JLwX0Zm6g233RtVGdb/f0+KSfprg=
+sigs.k8s.io/controller-runtime v0.11.0 h1:DqO+c8mywcZLFJWILq4iktoECTyn30Bkj0CwgqMpZWQ=
sigs.k8s.io/controller-tools v0.4.1/go.mod h1:G9rHdZMVlBDocIxGkK3jHLWqcTMNvveypYJwrvYKjWU=
sigs.k8s.io/controller-tools v0.6.0 h1:o2Fm1K7CmIp8OVaBtXsWB/ssBAzyoKZPPAGR3VuxaKs=
sigs.k8s.io/controller-tools v0.6.0/go.mod h1:baRMVPrctU77F+rfAuH2uPqW93k6yQnZA2dhUOr7ihc=
+sigs.k8s.io/controller-tools v0.6.2 h1:+Y8L0UsAugDipGRw8lrkPoAi6XqlQVZuf1DQHME3PgU=
+sigs.k8s.io/controller-tools v0.6.2/go.mod h1:oaeGpjXn6+ZSEIQkUe/+3I40PNiDYp9aeawbt3xTgJ8=
+sigs.k8s.io/controller-tools v0.7.0 h1:iZIz1vEcavyEfxjcTLs1WH/MPf4vhPCtTKhoHqV8/G0=
+sigs.k8s.io/controller-tools v0.7.0/go.mod h1:bpBAo0VcSDDLuWt47evLhMLPxRPxMDInTEH/YbdeMK0=
+sigs.k8s.io/controller-tools v0.8.0 h1:uUkfTGEwrguqYYfcI2RRGUnC8mYdCFDqfwPKUcNJh1o=
+sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 h1:fD1pz4yfdADVNfFmcP2aBEtudwUQ1AlLnRBALr33v3s=
sigs.k8s.io/kind v0.10.0/go.mod h1:fb32zUw7ewC47bPwLnwhf47wd/vADtv3c38KP7sjIlo=
sigs.k8s.io/kubebuilder/v3 v3.0.0-alpha.0.0.20210702145813-742983631190/go.mod h1:pUhjQx9f/+cn1OtSa5zMohY1lgk9s/9Mbcvwj82lrNk=
sigs.k8s.io/kubebuilder/v3 v3.0.0-alpha.0.0.20210803185103-51e4a9aa5055 h1:3f5m0xp0DEsg+Sdrt1Rld8TCEFOH5GKsoYrEpdOzA+s=
sigs.k8s.io/kubebuilder/v3 v3.0.0-alpha.0.0.20210803185103-51e4a9aa5055/go.mod h1:pUhjQx9f/+cn1OtSa5zMohY1lgk9s/9Mbcvwj82lrNk=
+sigs.k8s.io/kubebuilder/v3 v3.0.0-alpha.0.0.20211001202619-87eb9d55ecdc h1:7HqFWk9fT5OfkZvY2CVHVrkRZAgS7n9rqsISG64/958=
+sigs.k8s.io/kubebuilder/v3 v3.0.0-alpha.0.0.20211001202619-87eb9d55ecdc/go.mod h1:I7A2uB9NFAENStOGkI96dvx9D0uPH2uAiwjSQcyFSkU=
+sigs.k8s.io/kubebuilder/v3 v3.2.0 h1:ttmajmBGZnqzV7RZd3zGGfj6dqJwGIzIdKsUX3cvYU8=
+sigs.k8s.io/kubebuilder/v3 v3.2.0/go.mod h1:I7A2uB9NFAENStOGkI96dvx9D0uPH2uAiwjSQcyFSkU=
+sigs.k8s.io/kubebuilder/v3 v3.3.0 h1:rl1d7qHajPDS83bM9IhR85jtEBTRZzQziWwAGYTsadE=
sigs.k8s.io/kustomize v2.0.3+incompatible h1:JUufWFNlI44MdtnjUqVnvh29rR37PQFzPbLXqhyOyX0=
sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU=
sigs.k8s.io/kustomize/api v0.8.5 h1:bfCXGXDAbFbb/Jv5AhMj2BB8a5VAJuuQ5/KU69WtDjQ=
sigs.k8s.io/kustomize/api v0.8.5/go.mod h1:M377apnKT5ZHJS++6H4rQoCHmWtt6qTpp3mbe7p6OLY=
+sigs.k8s.io/kustomize/api v0.10.1 h1:KgU7hfYoscuqag84kxtzKdEC3mKMb99DPI3a0eaV1d0=
sigs.k8s.io/kustomize/cmd/config v0.9.7/go.mod h1:MvXCpHs77cfyxRmCNUQjIqCmZyYsbn5PyQpWiq44nW0=
sigs.k8s.io/kustomize/kustomize/v4 v4.0.5/go.mod h1:C7rYla7sI8EnxHE/xEhRBSHMNfcL91fx0uKmUlUhrBk=
sigs.k8s.io/kustomize/kyaml v0.10.15/go.mod h1:mlQFagmkm1P+W4lZJbJ/yaxMd8PqMRSC4cPcfUVt5Hg=
sigs.k8s.io/kustomize/kyaml v0.10.21 h1:KdoEgz3HzmcaLUTFqs6aaqFpsaA9MVRIwOZbi8vMaD0=
sigs.k8s.io/kustomize/kyaml v0.10.21/go.mod h1:TYWhGwW9vjoRh3rWqBwB/ZOXyEGRVWe7Ggc3+KZIO+c=
+sigs.k8s.io/kustomize/kyaml v0.13.0 h1:9c+ETyNfSrVhxvphs+K2dzT3dh5oVPPEqPOE/cUpScY=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
@@ -1577,7 +1969,11 @@ sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK
sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/structured-merge-diff/v4 v4.1.0 h1:C4r9BgJ98vrKnnVCjwCSXcWjWe0NKcUQkmzDXZXGwH8=
sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
+sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno=
+sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
+sigs.k8s.io/structured-merge-diff/v4 v4.2.0 h1:kDvPBbnPk+qYmkHmSo8vKGp438IASWofnbbUKDE/bv0=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
+sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU=
diff --git a/operator/.bingo/variables.env b/operator/.bingo/variables.env
index 149e92ef086bc..f54fb60dcb7f1 100644
--- a/operator/.bingo/variables.env
+++ b/operator/.bingo/variables.env
@@ -10,7 +10,7 @@ fi
BINGO="${GOBIN}/bingo-v0.5.2"
-CONTROLLER_GEN="${GOBIN}/controller-gen-v0.5.0"
+CONTROLLER_GEN="${GOBIN}/controller-gen-v0.8.0"
GOFUMPT="${GOBIN}/gofumpt-v0.1.1"
@@ -18,7 +18,7 @@ GOLANGCI_LINT="${GOBIN}/golangci-lint-v1.38.0"
KUSTOMIZE="${GOBIN}/kustomize-v3.8.7"
-OPERATOR_SDK="${GOBIN}/operator-sdk-v1.11.0"
+OPERATOR_SDK="${GOBIN}/operator-sdk-v1.18.1"
PROMTOOL="${GOBIN}/promtool-v1.8.2-0.20220211202545-56e14463bccf"
diff --git a/operator/CHANGELOG.md b/operator/CHANGELOG.md
index 24ebaf4720c7a..0fd911525f622 100644
--- a/operator/CHANGELOG.md
+++ b/operator/CHANGELOG.md
@@ -1,5 +1,6 @@
## Main
+- [5704](https://github.com/grafana/loki/pull/5704) **xperimental**: Update operator-sdk to 1.18.1
- [5693](https://github.com/grafana/loki/pull/5693) **periklis**: Replace frontend_worker parallelism with match_max_concurrent
- [5699](https://github.com/grafana/loki/pull/5699) **Red-GV**: Configure boltdb_shipper and schema to use Azure, GCS, and Swift storage
- [5701](https://github.com/grafana/loki/pull/5701) **sasagarw**: Make ReplicationFactor optional in LokiStack API
diff --git a/operator/Makefile b/operator/Makefile
index 2c81dba7709d2..1eb02b06d5f9f 100644
--- a/operator/Makefile
+++ b/operator/Makefile
@@ -52,6 +52,17 @@ REGISTRY_ORG ?= openshift-logging
# You can use it as an arg. (E.g make bundle-build BUNDLE_IMG=<some-registry>/<project-name-bundle>:<tag>)
BUNDLE_IMG ?= quay.io/$(REGISTRY_ORG)/loki-operator-bundle:$(VERSION)
+# BUNDLE_GEN_FLAGS are the flags passed to the operator-sdk generate bundle command
+BUNDLE_GEN_FLAGS ?= -q --overwrite --version $(subst v,,$(VERSION)) $(BUNDLE_METADATA_OPTS)
+
+# USE_IMAGE_DIGESTS defines if images are resolved via tags or digests
+# You can enable this value if you would like to use SHA Based Digests
+# To enable set flag to true
+USE_IMAGE_DIGESTS ?= false
+ifeq ($(USE_IMAGE_DIGESTS), true)
+ BUNDLE_GEN_FLAGS += --use-image-digests
+endif
+
CALCULATOR_IMG ?= quay.io/$(REGISTRY_ORG)/storage-size-calculator:latest
GO_FILES := $(shell find . -type f -name '*.go')
@@ -59,9 +70,6 @@ GO_FILES := $(shell find . -type f -name '*.go')
# Image URL to use all building/pushing image targets
IMG ?= quay.io/$(REGISTRY_ORG)/loki-operator:$(VERSION)
-# Produce CRDs that work back to Kubernetes 1.11 (no version conversion)
-CRD_OPTIONS ?= "crd:trivialVersions=true,preserveUnknownFields=false"
-
# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
ifeq (,$(shell go env GOBIN))
GOBIN=$(shell go env GOPATH)/bin
@@ -69,6 +77,7 @@ else
GOBIN=$(shell go env GOBIN)
endif
+.PHONY: all
all: generate lint manager bin/loki-broker
OCI_RUNTIME ?= $(shell which podman || which docker)
@@ -86,6 +95,7 @@ OCI_RUNTIME ?= $(shell which podman || which docker)
# More info on the awk command:
# http://linuxcommand.org/lc3_adv_awk.php
+.PHONY: help
help: ## Display this help.
@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-24s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
@@ -97,25 +107,32 @@ deps: go.mod go.sum
go mod download
go mod verify
+.PHONY: cli
cli: deps bin/loki-broker ## Build loki-broker CLI binary
bin/loki-broker: $(GO_FILES) | generate
go build -o $@ ./cmd/loki-broker/
+.PHONY: manager
manager: deps generate ## Build manager binary
go build -o bin/manager main.go
+.PHONY: size-calculator
size-calculator: deps generate ## Build size-calculator binary
go build -o bin/size-calculator main.go
+.PHONY: go-generate
go-generate: ## Run go generate
go generate ./...
+.PHONY: generate
generate: $(CONTROLLER_GEN) ## Generate controller and crd code
$(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..."
+.PHONY: manifests
manifests: $(CONTROLLER_GEN) ## Generate manifests e.g. CRD, RBAC etc.
- $(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases
+ $(CONTROLLER_GEN) rbac:roleName=manager-role crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases
+.PHONY: test
test: deps generate go-generate lint lint-prometheus manifests ## Run tests
test: $(GO_FILES)
go test ./... -coverprofile cover.out
@@ -124,9 +141,11 @@ test: $(GO_FILES)
test-unit-prometheus: $(PROMTOOL) ## Run prometheus unit tests
@$(PROMTOOL) test rules ./internal/manifests/internal/alerts/testdata/test.yaml
+.PHONY: scorecard
scorecard: generate go-generate bundle ## Run scorecard test
$(OPERATOR_SDK) scorecard bundle
+.PHONY: lint
lint: $(GOLANGCI_LINT) | generate ## Run golangci-lint on source code.
$(GOLANGCI_LINT) run ./...
@@ -134,12 +153,15 @@ lint: $(GOLANGCI_LINT) | generate ## Run golangci-lint on source code.
lint-prometheus: $(PROMTOOL) ## Run promtool check against recording rules and alerts.
@$(PROMTOOL) check rules ./internal/manifests/internal/alerts/prometheus-alerts.yaml
+.PHONY: fmt
fmt: $(GOFUMPT) ## Run gofumpt on source code.
find . -type f -name '*.go' -not -path '**/fake_*.go' -exec $(GOFUMPT) -s -w {} \;
+.PHONY: oci-build
oci-build: ## Build the image
$(OCI_RUNTIME) build -t ${IMG} .
+.PHONY: oci-push
oci-push: ## Push the image
$(OCI_RUNTIME) push ${IMG}
@@ -147,7 +169,7 @@ oci-push: ## Push the image
bundle: manifests $(KUSTOMIZE) $(OPERATOR_SDK)
$(OPERATOR_SDK) generate kustomize manifests -q
cd config/manager && $(KUSTOMIZE) edit set image controller=$(IMG)
- $(KUSTOMIZE) build config/manifests | $(OPERATOR_SDK) generate bundle -q --overwrite --version $(subst v,,$(VERSION)) $(BUNDLE_METADATA_OPTS)
+ $(KUSTOMIZE) build config/manifests | $(OPERATOR_SDK) generate bundle $(BUNDLE_GEN_FLAGS)
$(OPERATOR_SDK) bundle validate ./bundle
.PHONY: bundle-build
@@ -156,21 +178,30 @@ bundle-build: ## Build the bundle image.
##@ Deployment
+ifndef ignore-not-found
+ ignore-not-found = false
+endif
+
+.PHONY: run
run: generate manifests ## Run against the configured Kubernetes cluster in ~/.kube/config
go run ./main.go
+.PHONY: install
install: manifests $(KUSTOMIZE) ## Install CRDs into a cluster
$(KUSTOMIZE) build config/crd | kubectl apply -f -
-uninstall: manifests $(KUSTOMIZE) ## Uninstall CRDs from a cluster
- $(KUSTOMIZE) build config/crd | kubectl delete -f -
+.PHONY: uninstall
+uninstall: manifests $(KUSTOMIZE) ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
+ $(KUSTOMIZE) build config/crd | kubectl delete --ignore-not-found=$(ignore-not-found) -f -
+.PHONY: deploy
deploy: manifests $(KUSTOMIZE) ## Deploy controller in the configured Kubernetes cluster in ~/.kube/config
cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
$(KUSTOMIZE) build config/overlays/development | kubectl apply -f -
-undeploy: ## Undeploy controller from the configured Kubernetes cluster in ~/.kube/config
- $(KUSTOMIZE) build config/overlays/development | kubectl delete -f -
+.PHONY: undeploy
+undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
+ $(KUSTOMIZE) build config/overlays/development | kubectl delete --ignore-not-found=$(ignore-not-found) -f -
# Build and push the bundle image to a container registry.
.PHONY: olm-deploy-bundle
@@ -210,8 +241,10 @@ endif
undeploy-size-calculator: ## Undeploy storage size calculator
$(KUSTOMIZE) build config/overlays/openshift/size-calculator | kubectl delete -f -
+.PHONY: oci-build-calculator
oci-build-calculator: ## Build the calculator image
$(OCI_RUNTIME) build -f calculator.Dockerfile -t $(CALCULATOR_IMG) .
+.PHONY: oci-push-calculator
oci-push-calculator: ## Push the calculator image
$(OCI_RUNTIME) push $(CALCULATOR_IMG)
diff --git a/operator/bundle/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/manifests/loki-operator.clusterserviceversion.yaml
index 75461dec629c1..6a3bd5b84d6a6 100644
--- a/operator/bundle/manifests/loki-operator.clusterserviceversion.yaml
+++ b/operator/bundle/manifests/loki-operator.clusterserviceversion.yaml
@@ -595,6 +595,8 @@ spec:
strategy: {}
template:
metadata:
+ annotations:
+ kubectl.kubernetes.io/default-container: manager
labels:
app.kubernetes.io/instance: loki-operator-v0.0.1
app.kubernetes.io/managed-by: operator-lifecycle-manager
@@ -647,7 +649,7 @@ spec:
- --logtostderr=true
- --tls-cert-file=/var/run/secrets/serving-cert/tls.crt
- --tls-private-key-file=/var/run/secrets/serving-cert/tls.key
- - --v=2
+ - --v=0
image: quay.io/openshift/origin-kube-rbac-proxy:latest
name: kube-rbac-proxy
ports:
@@ -713,4 +715,11 @@ spec:
minKubeVersion: 1.21.1
provider:
name: Grafana.com
+ relatedImages:
+ - image: quay.io/observatorium/opa-openshift:latest
+ name: opa
+ - image: quay.io/openshift-logging/loki:v2.4.2
+ name: loki
+ - image: quay.io/observatorium/api:latest
+ name: gateway
version: 0.0.1
diff --git a/operator/bundle/manifests/loki.grafana.com_lokistacks.yaml b/operator/bundle/manifests/loki.grafana.com_lokistacks.yaml
index 291b116135767..44f87761cdb76 100644
--- a/operator/bundle/manifests/loki.grafana.com_lokistacks.yaml
+++ b/operator/bundle/manifests/loki.grafana.com_lokistacks.yaml
@@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.5.0
+ controller-gen.kubebuilder.io/version: v0.8.0
creationTimestamp: null
labels:
app.kubernetes.io/instance: loki-operator-v0.0.1
@@ -881,13 +881,12 @@ spec:
description: "Condition contains details for one aspect of the current
state of this API Resource. --- This struct is intended for direct
use as an array at the field path .status.conditions. For example,
- type FooStatus struct{ // Represents the observations of a
- foo's current state. // Known .status.conditions.type are:
- \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type
- \ // +patchStrategy=merge // +listType=map // +listMapKey=type
- \ Conditions []metav1.Condition `json:\"conditions,omitempty\"
- patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`
- \n // other fields }"
+ type FooStatus struct{ // Represents the observations of a foo's
+ current state. // Known .status.conditions.type are: \"Available\",
+ \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge
+ // +listType=map // +listMapKey=type Conditions []metav1.Condition
+ `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\"
+ protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }"
properties:
lastTransitionTime:
description: lastTransitionTime is the last time the condition
diff --git a/operator/cmd/loki-broker/main.go b/operator/cmd/loki-broker/main.go
index 5f89210077b7e..cc7ed61df87ca 100644
--- a/operator/cmd/loki-broker/main.go
+++ b/operator/cmd/loki-broker/main.go
@@ -9,6 +9,7 @@ import (
"strings"
"github.com/ViaQ/logerr/log"
+ "github.com/go-logr/logr"
"github.com/grafana/loki/operator/api/v1beta1"
"github.com/grafana/loki/operator/internal/manifests"
"github.com/grafana/loki/operator/internal/manifests/storage"
@@ -54,7 +55,7 @@ func (c *config) registerFlags(f *flag.FlagSet) {
f.StringVar(&c.writeToDir, "output.write-dir", "", "write each file to the specified directory.")
}
-func (c *config) validateFlags() {
+func (c *config) validateFlags(log logr.Logger) {
if cfg.crFilepath == "" {
log.Info("-custom.resource.path flag is required")
os.Exit(1)
@@ -94,28 +95,29 @@ func (c *config) validateFlags() {
var cfg *config
func init() {
- log.Init("loki-broker")
cfg = &config{}
}
func main() {
+ logger := log.NewLogger("loki-broker")
+
f := flag.NewFlagSet("", flag.ExitOnError)
cfg.registerFlags(f)
if err := f.Parse(os.Args[1:]); err != nil {
- log.Error(err, "failed to parse flags")
+ logger.Error(err, "failed to parse flags")
}
- cfg.validateFlags()
+ cfg.validateFlags(logger)
b, err := ioutil.ReadFile(cfg.crFilepath)
if err != nil {
- log.Info("failed to read custom resource file", "path", cfg.crFilepath)
+ logger.Info("failed to read custom resource file", "path", cfg.crFilepath)
os.Exit(1)
}
ls := &v1beta1.LokiStack{}
if err = yaml.Unmarshal(b, ls); err != nil {
- log.Error(err, "failed to unmarshal LokiStack CR", "path", cfg.crFilepath)
+ logger.Error(err, "failed to unmarshal LokiStack CR", "path", cfg.crFilepath)
os.Exit(1)
}
@@ -130,20 +132,20 @@ func main() {
}
if optErr := manifests.ApplyDefaultSettings(&opts); optErr != nil {
- log.Error(optErr, "failed to conform options to build settings")
+ logger.Error(optErr, "failed to conform options to build settings")
os.Exit(1)
}
objects, err := manifests.BuildAll(opts)
if err != nil {
- log.Error(err, "failed to build manifests")
+ logger.Error(err, "failed to build manifests")
os.Exit(1)
}
for _, o := range objects {
b, err := yaml.Marshal(o)
if err != nil {
- log.Error(err, "failed to marshal manifest", "name", o.GetName(), "kind", o.GetObjectKind())
+ logger.Error(err, "failed to marshal manifest", "name", o.GetName(), "kind", o.GetObjectKind())
continue
}
@@ -151,7 +153,7 @@ func main() {
basename := fmt.Sprintf("%s-%s.yaml", o.GetObjectKind().GroupVersionKind().Kind, o.GetName())
fname := strings.ToLower(path.Join(cfg.writeToDir, basename))
if err := ioutil.WriteFile(fname, b, 0o644); err != nil {
- log.Error(err, "failed to write file to directory", "path", fname)
+ logger.Error(err, "failed to write file to directory", "path", fname)
os.Exit(1)
}
} else {
diff --git a/operator/cmd/size-calculator/main.go b/operator/cmd/size-calculator/main.go
index b92c718dddc17..09878d05d884e 100755
--- a/operator/cmd/size-calculator/main.go
+++ b/operator/cmd/size-calculator/main.go
@@ -29,33 +29,31 @@ const (
sizeOneXMedium string = "1x.medium"
)
-func init() {
- log.Init("size-calculator")
-}
+var logger = log.NewLogger("size-calculator")
func main() {
- log.Info("starting storage size calculator...")
+ logger.Info("starting storage size calculator...")
for {
duration, parseErr := model.ParseDuration(defaultDuration)
if parseErr != nil {
- log.Error(parseErr, "failed to parse duration")
+ logger.Error(parseErr, "failed to parse duration")
os.Exit(1)
}
logsCollected, err := sizes.PredictFor(duration)
if err != nil {
- log.Error(err, "Failed to collect metrics data")
+ logger.Error(err, "Failed to collect metrics data")
os.Exit(1)
}
logsCollectedInGB := int(math.Ceil(logsCollected / math.Pow(1024, 3)))
- log.Info(fmt.Sprintf("Amount of logs expected in 24 hours is %f Bytes or %dGB", logsCollected, logsCollectedInGB))
+ logger.Info(fmt.Sprintf("Amount of logs expected in 24 hours is %f Bytes or %dGB", logsCollected, logsCollectedInGB))
if logsCollectedInGB <= range1xSmall {
- log.Info(fmt.Sprintf("Recommended t-shirt size for %dGB is %s", logsCollectedInGB, sizeOneXSmall))
+ logger.Info(fmt.Sprintf("Recommended t-shirt size for %dGB is %s", logsCollectedInGB, sizeOneXSmall))
} else {
- log.Info(fmt.Sprintf("Recommended t-shirt size for %dGB is %s", logsCollectedInGB, sizeOneXMedium))
+ logger.Info(fmt.Sprintf("Recommended t-shirt size for %dGB is %s", logsCollectedInGB, sizeOneXMedium))
}
time.Sleep(1 * time.Minute)
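Both command-line tools touched above (loki-broker and size-calculator) move from the package-level log.Init setup to an explicitly constructed logr.Logger that is passed to the code that needs it. As a rough illustration only, not part of the patch, here is a minimal sketch of that injection pattern using the same ViaQ/logerr and go-logr calls that appear in the hunks above; the component name "example" and the doWork helper are hypothetical.

package main

import (
	"os"

	"github.com/ViaQ/logerr/log"
	"github.com/go-logr/logr"
)

// doWork receives the logger as an explicit dependency instead of relying on
// a package-level init, mirroring the refactoring shown above. The function
// itself is a hypothetical placeholder.
func doWork(logger logr.Logger) error {
	logger.Info("starting work")
	return nil
}

func main() {
	// NewLogger replaces the former log.Init(...) call; "example" is an
	// illustrative component name, not one used by the operator.
	logger := log.NewLogger("example")
	if err := doWork(logger); err != nil {
		logger.Error(err, "failed to do work")
		os.Exit(1)
	}
}
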
diff --git a/operator/config/crd/bases/loki.grafana.com_lokistacks.yaml b/operator/config/crd/bases/loki.grafana.com_lokistacks.yaml
index c8bce14cc304c..6e43ce06d871c 100644
--- a/operator/config/crd/bases/loki.grafana.com_lokistacks.yaml
+++ b/operator/config/crd/bases/loki.grafana.com_lokistacks.yaml
@@ -1,10 +1,9 @@
-
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.5.0
+ controller-gen.kubebuilder.io/version: v0.8.0
creationTimestamp: null
name: lokistacks.loki.grafana.com
spec:
@@ -24,10 +23,14 @@ spec:
description: LokiStack is the Schema for the lokistacks API
properties:
apiVersion:
- description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
- description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
@@ -35,109 +38,143 @@ spec:
description: LokiStackSpec defines the desired state of LokiStack
properties:
limits:
- description: Limits defines the limits to be applied to log stream processing.
+ description: Limits defines the limits to be applied to log stream
+ processing.
properties:
global:
- description: Global defines the limits applied globally across the cluster.
+ description: Global defines the limits applied globally across
+ the cluster.
properties:
ingestion:
- description: IngestionLimits defines the limits applied on ingested log streams.
+ description: IngestionLimits defines the limits applied on
+ ingested log streams.
properties:
ingestionBurstSize:
- description: IngestionBurstSize defines the local rate-limited sample size per distributor replica. It should be set to the set at least to the maximum logs size expected in a single push request.
+ description: IngestionBurstSize defines the local rate-limited
+ sample size per distributor replica. It should be set
+ to the set at least to the maximum logs size expected
+ in a single push request.
format: int32
type: integer
ingestionRate:
- description: IngestionRate defines the sample size per second. Units MB.
+ description: IngestionRate defines the sample size per
+ second. Units MB.
format: int32
type: integer
maxGlobalStreamsPerTenant:
- description: MaxGlobalStreamsPerTenant defines the maximum number of active streams per tenant, across the cluster.
+ description: MaxGlobalStreamsPerTenant defines the maximum
+ number of active streams per tenant, across the cluster.
format: int32
type: integer
maxLabelNameLength:
- description: MaxLabelNameLength defines the maximum number of characters allowed for label keys in log streams.
+ description: MaxLabelNameLength defines the maximum number
+ of characters allowed for label keys in log streams.
format: int32
type: integer
maxLabelNamesPerSeries:
- description: MaxLabelNamesPerSeries defines the maximum number of label names per series in each log stream.
+ description: MaxLabelNamesPerSeries defines the maximum
+ number of label names per series in each log stream.
format: int32
type: integer
maxLabelValueLength:
- description: MaxLabelValueLength defines the maximum number of characters allowed for label values in log streams.
+ description: MaxLabelValueLength defines the maximum number
+ of characters allowed for label values in log streams.
format: int32
type: integer
maxLineSize:
- description: MaxLineSize defines the maximum line size on ingestion path. Units in Bytes.
+ description: MaxLineSize defines the maximum line size
+ on ingestion path. Units in Bytes.
format: int32
type: integer
type: object
queries:
- description: QueryLimits defines the limit applied on querying log streams.
+ description: QueryLimits defines the limit applied on querying
+ log streams.
properties:
maxChunksPerQuery:
- description: MaxChunksPerQuery defines the maximum number of chunks that can be fetched by a single query.
+ description: MaxChunksPerQuery defines the maximum number
+ of chunks that can be fetched by a single query.
format: int32
type: integer
maxEntriesLimitPerQuery:
- description: MaxEntriesLimitsPerQuery defines the maximum number of log entries that will be returned for a query.
+ description: MaxEntriesLimitsPerQuery defines the maximum
+ number of log entries that will be returned for a query.
format: int32
type: integer
maxQuerySeries:
- description: MaxQuerySeries defines the the maximum of unique series that is returned by a metric query.
+ description: MaxQuerySeries defines the the maximum of
+ unique series that is returned by a metric query.
format: int32
type: integer
type: object
type: object
tenants:
additionalProperties:
- description: LimitsTemplateSpec defines the limits applied at ingestion or query path.
+ description: LimitsTemplateSpec defines the limits applied
+ at ingestion or query path.
properties:
ingestion:
- description: IngestionLimits defines the limits applied on ingested log streams.
+ description: IngestionLimits defines the limits applied
+ on ingested log streams.
properties:
ingestionBurstSize:
- description: IngestionBurstSize defines the local rate-limited sample size per distributor replica. It should be set to the set at least to the maximum logs size expected in a single push request.
+ description: IngestionBurstSize defines the local rate-limited
+ sample size per distributor replica. It should be
+ set to the set at least to the maximum logs size expected
+ in a single push request.
format: int32
type: integer
ingestionRate:
- description: IngestionRate defines the sample size per second. Units MB.
+ description: IngestionRate defines the sample size per
+ second. Units MB.
format: int32
type: integer
maxGlobalStreamsPerTenant:
- description: MaxGlobalStreamsPerTenant defines the maximum number of active streams per tenant, across the cluster.
+ description: MaxGlobalStreamsPerTenant defines the maximum
+ number of active streams per tenant, across the cluster.
format: int32
type: integer
maxLabelNameLength:
- description: MaxLabelNameLength defines the maximum number of characters allowed for label keys in log streams.
+ description: MaxLabelNameLength defines the maximum
+ number of characters allowed for label keys in log
+ streams.
format: int32
type: integer
maxLabelNamesPerSeries:
- description: MaxLabelNamesPerSeries defines the maximum number of label names per series in each log stream.
+ description: MaxLabelNamesPerSeries defines the maximum
+ number of label names per series in each log stream.
format: int32
type: integer
maxLabelValueLength:
- description: MaxLabelValueLength defines the maximum number of characters allowed for label values in log streams.
+ description: MaxLabelValueLength defines the maximum
+ number of characters allowed for label values in log
+ streams.
format: int32
type: integer
maxLineSize:
- description: MaxLineSize defines the maximum line size on ingestion path. Units in Bytes.
+ description: MaxLineSize defines the maximum line size
+ on ingestion path. Units in Bytes.
format: int32
type: integer
type: object
queries:
- description: QueryLimits defines the limit applied on querying log streams.
+ description: QueryLimits defines the limit applied on querying
+ log streams.
properties:
maxChunksPerQuery:
- description: MaxChunksPerQuery defines the maximum number of chunks that can be fetched by a single query.
+ description: MaxChunksPerQuery defines the maximum number
+ of chunks that can be fetched by a single query.
format: int32
type: integer
maxEntriesLimitPerQuery:
- description: MaxEntriesLimitsPerQuery defines the maximum number of log entries that will be returned for a query.
+ description: MaxEntriesLimitsPerQuery defines the maximum
+ number of log entries that will be returned for a
+ query.
format: int32
type: integer
maxQuerySeries:
- description: MaxQuerySeries defines the the maximum of unique series that is returned by a metric query.
+ description: MaxQuerySeries defines the the maximum
+ of unique series that is returned by a metric query.
format: int32
type: integer
type: object
@@ -147,7 +184,8 @@ spec:
type: object
managementState:
default: Managed
- description: ManagementState defines if the CR should be managed by the operator or not. Default is managed.
+ description: ManagementState defines if the CR should be managed by
+ the operator or not. Default is managed.
enum:
- Managed
- Unmanaged
@@ -158,20 +196,24 @@ spec:
minimum: 1
type: integer
size:
- description: Size defines one of the support Loki deployment scale out sizes.
+ description: Size defines one of the support Loki deployment scale
+ out sizes.
enum:
- 1x.extra-small
- 1x.small
- 1x.medium
type: string
storage:
- description: Storage defines the spec for the object storage endpoint to store logs.
+ description: Storage defines the spec for the object storage endpoint
+ to store logs.
properties:
secret:
- description: Secret for object storage authentication. Name of a secret in the same namespace as the cluster logging operator.
+ description: Secret for object storage authentication. Name of
+ a secret in the same namespace as the cluster logging operator.
properties:
name:
- description: Name of a secret in the namespace configured for object storage secrets.
+ description: Name of a secret in the namespace configured
+ for object storage secrets.
type: string
type:
description: Type of object storage that should be used
@@ -189,10 +231,12 @@ spec:
- secret
type: object
storageClassName:
- description: Storage class name defines the storage class for ingester/querier PVCs.
+ description: Storage class name defines the storage class for ingester/querier
+ PVCs.
type: string
template:
- description: Template defines the resource/limits/tolerations/nodeselectors per component
+ description: Template defines the resource/limits/tolerations/nodeselectors
+ per component
properties:
compactor:
description: Compactor defines the compaction component spec.
@@ -200,32 +244,54 @@ spec:
nodeSelector:
additionalProperties:
type: string
- description: NodeSelector defines the labels required by a node to schedule the component onto it.
+ description: NodeSelector defines the labels required by a
+ node to schedule the component onto it.
type: object
replicas:
- description: Replicas defines the number of replica pods of the component.
+ description: Replicas defines the number of replica pods of
+ the component.
format: int32
type: integer
tolerations:
- description: Tolerations defines the tolerations required by a node to schedule the component onto it.
+ description: Tolerations defines the tolerations required
+ by a node to schedule the component onto it.
items:
- description: The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.
+ description: The pod this Toleration is attached to tolerates
+ any taint that matches the triple <key,value,effect> using
+ the matching operator <operator>.
properties:
effect:
- description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ description: Effect indicates the taint effect to match.
+ Empty means match all taint effects. When specified,
+ allowed values are NoSchedule, PreferNoSchedule and
+ NoExecute.
type: string
key:
- description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ description: Key is the taint key that the toleration
+ applies to. Empty means match all taint keys. If the
+ key is empty, operator must be Exists; this combination
+ means to match all values and all keys.
type: string
operator:
- description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
+ description: Operator represents a key's relationship
+ to the value. Valid operators are Exists and Equal.
+ Defaults to Equal. Exists is equivalent to wildcard
+ for value, so that a pod can tolerate all taints of
+ a particular category.
type: string
tolerationSeconds:
- description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
+ description: TolerationSeconds represents the period
+ of time the toleration (which must be of effect NoExecute,
+ otherwise this field is ignored) tolerates the taint.
+ By default, it is not set, which means tolerate the
+ taint forever (do not evict). Zero and negative values
+ will be treated as 0 (evict immediately) by the system.
format: int64
type: integer
value:
- description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
+ description: Value is the taint value the toleration
+ matches to. If the operator is Exists, the value should
+ be empty, otherwise just a regular string.
type: string
type: object
type: array
@@ -236,32 +302,54 @@ spec:
nodeSelector:
additionalProperties:
type: string
- description: NodeSelector defines the labels required by a node to schedule the component onto it.
+ description: NodeSelector defines the labels required by a
+ node to schedule the component onto it.
type: object
replicas:
- description: Replicas defines the number of replica pods of the component.
+ description: Replicas defines the number of replica pods of
+ the component.
format: int32
type: integer
tolerations:
- description: Tolerations defines the tolerations required by a node to schedule the component onto it.
+ description: Tolerations defines the tolerations required
+ by a node to schedule the component onto it.
items:
- description: The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.
+ description: The pod this Toleration is attached to tolerates
+ any taint that matches the triple <key,value,effect> using
+ the matching operator <operator>.
properties:
effect:
- description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ description: Effect indicates the taint effect to match.
+ Empty means match all taint effects. When specified,
+ allowed values are NoSchedule, PreferNoSchedule and
+ NoExecute.
type: string
key:
- description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ description: Key is the taint key that the toleration
+ applies to. Empty means match all taint keys. If the
+ key is empty, operator must be Exists; this combination
+ means to match all values and all keys.
type: string
operator:
- description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
+ description: Operator represents a key's relationship
+ to the value. Valid operators are Exists and Equal.
+ Defaults to Equal. Exists is equivalent to wildcard
+ for value, so that a pod can tolerate all taints of
+ a particular category.
type: string
tolerationSeconds:
- description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
+ description: TolerationSeconds represents the period
+ of time the toleration (which must be of effect NoExecute,
+ otherwise this field is ignored) tolerates the taint.
+ By default, it is not set, which means tolerate the
+ taint forever (do not evict). Zero and negative values
+ will be treated as 0 (evict immediately) by the system.
format: int64
type: integer
value:
- description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
+ description: Value is the taint value the toleration
+ matches to. If the operator is Exists, the value should
+ be empty, otherwise just a regular string.
type: string
type: object
type: array
@@ -272,68 +360,113 @@ spec:
nodeSelector:
additionalProperties:
type: string
- description: NodeSelector defines the labels required by a node to schedule the component onto it.
+ description: NodeSelector defines the labels required by a
+ node to schedule the component onto it.
type: object
replicas:
- description: Replicas defines the number of replica pods of the component.
+ description: Replicas defines the number of replica pods of
+ the component.
format: int32
type: integer
tolerations:
- description: Tolerations defines the tolerations required by a node to schedule the component onto it.
+ description: Tolerations defines the tolerations required
+ by a node to schedule the component onto it.
items:
- description: The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.
+ description: The pod this Toleration is attached to tolerates
+ any taint that matches the triple <key,value,effect> using
+ the matching operator <operator>.
properties:
effect:
- description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ description: Effect indicates the taint effect to match.
+ Empty means match all taint effects. When specified,
+ allowed values are NoSchedule, PreferNoSchedule and
+ NoExecute.
type: string
key:
- description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ description: Key is the taint key that the toleration
+ applies to. Empty means match all taint keys. If the
+ key is empty, operator must be Exists; this combination
+ means to match all values and all keys.
type: string
operator:
- description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
+ description: Operator represents a key's relationship
+ to the value. Valid operators are Exists and Equal.
+ Defaults to Equal. Exists is equivalent to wildcard
+ for value, so that a pod can tolerate all taints of
+ a particular category.
type: string
tolerationSeconds:
- description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
+ description: TolerationSeconds represents the period
+ of time the toleration (which must be of effect NoExecute,
+ otherwise this field is ignored) tolerates the taint.
+ By default, it is not set, which means tolerate the
+ taint forever (do not evict). Zero and negative values
+ will be treated as 0 (evict immediately) by the system.
format: int64
type: integer
value:
- description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
+ description: Value is the taint value the toleration
+ matches to. If the operator is Exists, the value should
+ be empty, otherwise just a regular string.
type: string
type: object
type: array
type: object
indexGateway:
- description: IndexGateway defines the index gateway component spec.
+ description: IndexGateway defines the index gateway component
+ spec.
properties:
nodeSelector:
additionalProperties:
type: string
- description: NodeSelector defines the labels required by a node to schedule the component onto it.
+ description: NodeSelector defines the labels required by a
+ node to schedule the component onto it.
type: object
replicas:
- description: Replicas defines the number of replica pods of the component.
+ description: Replicas defines the number of replica pods of
+ the component.
format: int32
type: integer
tolerations:
- description: Tolerations defines the tolerations required by a node to schedule the component onto it.
+ description: Tolerations defines the tolerations required
+ by a node to schedule the component onto it.
items:
- description: The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.
+ description: The pod this Toleration is attached to tolerates
+ any taint that matches the triple <key,value,effect> using
+ the matching operator <operator>.
properties:
effect:
- description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ description: Effect indicates the taint effect to match.
+ Empty means match all taint effects. When specified,
+ allowed values are NoSchedule, PreferNoSchedule and
+ NoExecute.
type: string
key:
- description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ description: Key is the taint key that the toleration
+ applies to. Empty means match all taint keys. If the
+ key is empty, operator must be Exists; this combination
+ means to match all values and all keys.
type: string
operator:
- description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
+ description: Operator represents a key's relationship
+ to the value. Valid operators are Exists and Equal.
+ Defaults to Equal. Exists is equivalent to wildcard
+ for value, so that a pod can tolerate all taints of
+ a particular category.
type: string
tolerationSeconds:
- description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
+ description: TolerationSeconds represents the period
+ of time the toleration (which must be of effect NoExecute,
+ otherwise this field is ignored) tolerates the taint.
+ By default, it is not set, which means tolerate the
+ taint forever (do not evict). Zero and negative values
+ will be treated as 0 (evict immediately) by the system.
format: int64
type: integer
value:
- description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
+ description: Value is the taint value the toleration
+ matches to. If the operator is Exists, the value should
+ be empty, otherwise just a regular string.
type: string
type: object
type: array
@@ -344,32 +477,54 @@ spec:
nodeSelector:
additionalProperties:
type: string
- description: NodeSelector defines the labels required by a node to schedule the component onto it.
+ description: NodeSelector defines the labels required by a
+ node to schedule the component onto it.
type: object
replicas:
- description: Replicas defines the number of replica pods of the component.
+ description: Replicas defines the number of replica pods of
+ the component.
format: int32
type: integer
tolerations:
- description: Tolerations defines the tolerations required by a node to schedule the component onto it.
+ description: Tolerations defines the tolerations required
+ by a node to schedule the component onto it.
items:
- description: The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.
+ description: The pod this Toleration is attached to tolerates
+ any taint that matches the triple <key,value,effect> using
+ the matching operator <operator>.
properties:
effect:
- description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ description: Effect indicates the taint effect to match.
+ Empty means match all taint effects. When specified,
+ allowed values are NoSchedule, PreferNoSchedule and
+ NoExecute.
type: string
key:
- description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ description: Key is the taint key that the toleration
+ applies to. Empty means match all taint keys. If the
+ key is empty, operator must be Exists; this combination
+ means to match all values and all keys.
type: string
operator:
- description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
+ description: Operator represents a key's relationship
+ to the value. Valid operators are Exists and Equal.
+ Defaults to Equal. Exists is equivalent to wildcard
+ for value, so that a pod can tolerate all taints of
+ a particular category.
type: string
tolerationSeconds:
- description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
+ description: TolerationSeconds represents the period
+ of time the toleration (which must be of effect NoExecute,
+ otherwise this field is ignored) tolerates the taint.
+ By default, it is not set, which means tolerate the
+ taint forever (do not evict). Zero and negative values
+ will be treated as 0 (evict immediately) by the system.
format: int64
type: integer
value:
- description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
+ description: Value is the taint value the toleration
+ matches to. If the operator is Exists, the value should
+ be empty, otherwise just a regular string.
type: string
type: object
type: array
@@ -380,83 +535,132 @@ spec:
nodeSelector:
additionalProperties:
type: string
- description: NodeSelector defines the labels required by a node to schedule the component onto it.
+ description: NodeSelector defines the labels required by a
+ node to schedule the component onto it.
type: object
replicas:
- description: Replicas defines the number of replica pods of the component.
+ description: Replicas defines the number of replica pods of
+ the component.
format: int32
type: integer
tolerations:
- description: Tolerations defines the tolerations required by a node to schedule the component onto it.
+ description: Tolerations defines the tolerations required
+ by a node to schedule the component onto it.
items:
- description: The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.
+ description: The pod this Toleration is attached to tolerates
+ any taint that matches the triple <key,value,effect> using
+ the matching operator <operator>.
properties:
effect:
- description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ description: Effect indicates the taint effect to match.
+ Empty means match all taint effects. When specified,
+ allowed values are NoSchedule, PreferNoSchedule and
+ NoExecute.
type: string
key:
- description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ description: Key is the taint key that the toleration
+ applies to. Empty means match all taint keys. If the
+ key is empty, operator must be Exists; this combination
+ means to match all values and all keys.
type: string
operator:
- description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
+ description: Operator represents a key's relationship
+ to the value. Valid operators are Exists and Equal.
+ Defaults to Equal. Exists is equivalent to wildcard
+ for value, so that a pod can tolerate all taints of
+ a particular category.
type: string
tolerationSeconds:
- description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
+ description: TolerationSeconds represents the period
+ of time the toleration (which must be of effect NoExecute,
+ otherwise this field is ignored) tolerates the taint.
+ By default, it is not set, which means tolerate the
+ taint forever (do not evict). Zero and negative values
+ will be treated as 0 (evict immediately) by the system.
format: int64
type: integer
value:
- description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
+ description: Value is the taint value the toleration
+ matches to. If the operator is Exists, the value should
+ be empty, otherwise just a regular string.
type: string
type: object
type: array
type: object
queryFrontend:
- description: QueryFrontend defines the query frontend component spec.
+ description: QueryFrontend defines the query frontend component
+ spec.
properties:
nodeSelector:
additionalProperties:
type: string
- description: NodeSelector defines the labels required by a node to schedule the component onto it.
+ description: NodeSelector defines the labels required by a
+ node to schedule the component onto it.
type: object
replicas:
- description: Replicas defines the number of replica pods of the component.
+ description: Replicas defines the number of replica pods of
+ the component.
format: int32
type: integer
tolerations:
- description: Tolerations defines the tolerations required by a node to schedule the component onto it.
+ description: Tolerations defines the tolerations required
+ by a node to schedule the component onto it.
items:
- description: The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.
+ description: The pod this Toleration is attached to tolerates
+ any taint that matches the triple <key,value,effect> using
+ the matching operator <operator>.
properties:
effect:
- description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ description: Effect indicates the taint effect to match.
+ Empty means match all taint effects. When specified,
+ allowed values are NoSchedule, PreferNoSchedule and
+ NoExecute.
type: string
key:
- description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ description: Key is the taint key that the toleration
+ applies to. Empty means match all taint keys. If the
+ key is empty, operator must be Exists; this combination
+ means to match all values and all keys.
type: string
operator:
- description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
+ description: Operator represents a key's relationship
+ to the value. Valid operators are Exists and Equal.
+ Defaults to Equal. Exists is equivalent to wildcard
+ for value, so that a pod can tolerate all taints of
+ a particular category.
type: string
tolerationSeconds:
- description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
+ description: TolerationSeconds represents the period
+ of time the toleration (which must be of effect NoExecute,
+ otherwise this field is ignored) tolerates the taint.
+ By default, it is not set, which means tolerate the
+ taint forever (do not evict). Zero and negative values
+ will be treated as 0 (evict immediately) by the system.
format: int64
type: integer
value:
- description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
+ description: Value is the taint value the toleration
+ matches to. If the operator is Exists, the value should
+ be empty, otherwise just a regular string.
type: string
type: object
type: array
type: object
type: object
tenants:
- description: Tenants defines the per-tenant authentication and authorization spec for the lokistack-gateway component.
+ description: Tenants defines the per-tenant authentication and authorization
+ spec for the lokistack-gateway component.
properties:
authentication:
- description: Authentication defines the lokistack-gateway component authentication configuration spec per tenant.
+ description: Authentication defines the lokistack-gateway component
+ authentication configuration spec per tenant.
items:
- description: AuthenticationSpec defines the oidc configuration per tenant for lokiStack Gateway component.
+ description: AuthenticationSpec defines the oidc configuration
+ per tenant for lokiStack Gateway component.
properties:
oidc:
- description: OIDC defines the spec for the OIDC tenant's authentication.
+ description: OIDC defines the spec for the OIDC tenant's
+ authentication.
properties:
groupClaim:
type: string
@@ -467,10 +671,12 @@ spec:
description: RedirectURL defines the URL for redirect.
type: string
secret:
- description: Secret defines the spec for the clientID, clientSecret and issuerCAPath for tenant's authentication.
+ description: Secret defines the spec for the clientID,
+ clientSecret and issuerCAPath for tenant's authentication.
properties:
name:
- description: Name of a secret in the namespace configured for tenant secrets.
+ description: Name of a secret in the namespace configured
+ for tenant secrets.
type: string
required:
- name
@@ -497,21 +703,26 @@ spec:
type: object
type: array
authorization:
- description: Authorization defines the lokistack-gateway component authorization configuration spec per tenant.
+ description: Authorization defines the lokistack-gateway component
+ authorization configuration spec per tenant.
properties:
opa:
- description: OPA defines the spec for the third-party endpoint for tenant's authorization.
+ description: OPA defines the spec for the third-party endpoint
+ for tenant's authorization.
properties:
url:
- description: URL defines the third-party endpoint for authorization.
+ description: URL defines the third-party endpoint for
+ authorization.
type: string
required:
- url
type: object
roleBindings:
- description: RoleBindings defines configuration to bind a set of roles to a set of subjects.
+ description: RoleBindings defines configuration to bind a
+ set of roles to a set of subjects.
items:
- description: RoleBindingsSpec binds a set of roles to a set of subjects.
+ description: RoleBindingsSpec binds a set of roles to a
+ set of subjects.
properties:
name:
type: string
@@ -521,10 +732,12 @@ spec:
type: array
subjects:
items:
- description: Subject represents a subject that has been bound to a role.
+ description: Subject represents a subject that has
+ been bound to a role.
properties:
kind:
- description: SubjectKind is a kind of LokiStack Gateway RBAC subject.
+ description: SubjectKind is a kind of LokiStack
+ Gateway RBAC subject.
enum:
- user
- group
@@ -543,15 +756,18 @@ spec:
type: object
type: array
roles:
- description: Roles defines a set of permissions to interact with a tenant.
+ description: Roles defines a set of permissions to interact
+ with a tenant.
items:
- description: RoleSpec describes a set of permissions to interact with a tenant.
+ description: RoleSpec describes a set of permissions to
+ interact with a tenant.
properties:
name:
type: string
permissions:
items:
- description: PermissionType is a LokiStack Gateway RBAC permission.
+ description: PermissionType is a LokiStack Gateway
+ RBAC permission.
enum:
- read
- write
@@ -575,7 +791,8 @@ spec:
type: object
mode:
default: openshift-logging
- description: Mode defines the mode in which lokistack-gateway component will be configured.
+ description: Mode defines the mode in which lokistack-gateway
+ component will be configured.
enum:
- static
- dynamic
@@ -593,78 +810,107 @@ spec:
description: LokiStackStatus defines the observed state of LokiStack
properties:
components:
- description: Components provides summary of all Loki pod status grouped per component.
+ description: Components provides summary of all Loki pod status grouped
+ per component.
properties:
compactor:
additionalProperties:
items:
type: string
type: array
- description: Compactor is a map to the pod status of the compactor pod.
+ description: Compactor is a map to the pod status of the compactor
+ pod.
type: object
distributor:
additionalProperties:
items:
type: string
type: array
- description: Distributor is a map to the per pod status of the distributor deployment
+ description: Distributor is a map to the per pod status of the
+ distributor deployment
type: object
gateway:
additionalProperties:
items:
type: string
type: array
- description: Gateway is a map to the per pod status of the lokistack gateway deployment.
+ description: Gateway is a map to the per pod status of the lokistack
+ gateway deployment.
type: object
indexGateway:
additionalProperties:
items:
type: string
type: array
- description: IndexGateway is a map to the per pod status of the index gateway statefulset
+ description: IndexGateway is a map to the per pod status of the
+ index gateway statefulset
type: object
ingester:
additionalProperties:
items:
type: string
type: array
- description: Ingester is a map to the per pod status of the ingester statefulset
+ description: Ingester is a map to the per pod status of the ingester
+ statefulset
type: object
querier:
additionalProperties:
items:
type: string
type: array
- description: Querier is a map to the per pod status of the querier deployment
+ description: Querier is a map to the per pod status of the querier
+ deployment
type: object
queryFrontend:
additionalProperties:
items:
type: string
type: array
- description: QueryFrontend is a map to the per pod status of the query frontend deployment
+ description: QueryFrontend is a map to the per pod status of the
+ query frontend deployment
type: object
type: object
conditions:
description: Conditions of the Loki deployment health.
items:
- description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }"
+ description: "Condition contains details for one aspect of the current
+ state of this API Resource. --- This struct is intended for direct
+ use as an array at the field path .status.conditions. For example,
+ type FooStatus struct{ // Represents the observations of a foo's
+ current state. // Known .status.conditions.type are: \"Available\",
+ \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge
+ // +listType=map // +listMapKey=type Conditions []metav1.Condition
+ `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\"
+ protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }"
properties:
lastTransitionTime:
- description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+ description: lastTransitionTime is the last time the condition
+ transitioned from one status to another. This should be when
+ the underlying condition changed. If that is not known, then
+ using the time when the API field changed is acceptable.
format: date-time
type: string
message:
- description: message is a human readable message indicating details about the transition. This may be an empty string.
+ description: message is a human readable message indicating
+ details about the transition. This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
- description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance.
+ description: observedGeneration represents the .metadata.generation
+ that the condition was set based upon. For instance, if .metadata.generation
+ is currently 12, but the .status.conditions[x].observedGeneration
+ is 9, the condition is out of date with respect to the current
+ state of the instance.
format: int64
minimum: 0
type: integer
reason:
- description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty.
+ description: reason contains a programmatic identifier indicating
+ the reason for the condition's last transition. Producers
+ of specific condition types may define expected values and
+ meanings for this field, and whether the values are considered
+ a guaranteed API. The value should be a CamelCase string.
+ This field may not be empty.
maxLength: 1024
minLength: 1
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
@@ -677,7 +923,11 @@ spec:
- Unknown
type: string
type:
- description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
+ description: type of condition in CamelCase or in foo.example.com/CamelCase.
+ --- Many .condition.type values are consistent across resources
+ like Available, but because arbitrary conditions can be useful
+ (see .node.status.conditions), the ability to deconflict is
+ important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
diff --git a/operator/config/manager/manager.yaml b/operator/config/manager/manager.yaml
index c749d9433fe5e..1ec47d9d71da2 100644
--- a/operator/config/manager/manager.yaml
+++ b/operator/config/manager/manager.yaml
@@ -2,6 +2,8 @@ apiVersion: apps/v1
kind: Deployment
metadata:
name: controller-manager
+ labels:
+ control-plane: controller-manager
spec:
selector:
matchLabels:
@@ -9,6 +11,8 @@ spec:
replicas: 1
template:
metadata:
+ annotations:
+ kubectl.kubernetes.io/default-container: manager
labels:
name: loki-operator-controller-manager
spec:
diff --git a/operator/config/overlays/openshift/manager_auth_proxy_patch.yaml b/operator/config/overlays/openshift/manager_auth_proxy_patch.yaml
index 750338a75344e..6d1e5c29c78f3 100644
--- a/operator/config/overlays/openshift/manager_auth_proxy_patch.yaml
+++ b/operator/config/overlays/openshift/manager_auth_proxy_patch.yaml
@@ -14,7 +14,7 @@ spec:
- "--logtostderr=true"
- "--tls-cert-file=/var/run/secrets/serving-cert/tls.crt"
- "--tls-private-key-file=/var/run/secrets/serving-cert/tls.key"
- - "--v=2"
+ - "--v=0"
ports:
- containerPort: 8443
name: https
diff --git a/operator/config/overlays/production/manager_auth_proxy_patch.yaml b/operator/config/overlays/production/manager_auth_proxy_patch.yaml
index 64d222654d57c..ce4d09b9f2a06 100644
--- a/operator/config/overlays/production/manager_auth_proxy_patch.yaml
+++ b/operator/config/overlays/production/manager_auth_proxy_patch.yaml
@@ -16,7 +16,7 @@ spec:
- "--logtostderr=true"
- "--tls-cert-file=/var/run/secrets/serving-cert/tls.crt"
- "--tls-private-key-file=/var/run/secrets/serving-cert/tls.key"
- - "--v=2"
+ - "--v=0"
ports:
- containerPort: 8443
name: https
diff --git a/operator/config/rbac/role.yaml b/operator/config/rbac/role.yaml
index 390f1ac27cb17..d9d10d73f1801 100644
--- a/operator/config/rbac/role.yaml
+++ b/operator/config/rbac/role.yaml
@@ -1,4 +1,3 @@
-
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
diff --git a/operator/controllers/internal/management/state/state.go b/operator/controllers/internal/management/state/state.go
index a8b6d1905efaf..b3ff994ea9de2 100644
--- a/operator/controllers/internal/management/state/state.go
+++ b/operator/controllers/internal/management/state/state.go
@@ -3,18 +3,17 @@ package state
import (
"context"
- "github.com/ViaQ/logerr/kverrors"
- "github.com/ViaQ/logerr/log"
lokiv1beta1 "github.com/grafana/loki/operator/api/v1beta1"
"github.com/grafana/loki/operator/internal/external/k8s"
+ "github.com/ViaQ/logerr/kverrors"
+ "github.com/go-logr/logr"
apierrors "k8s.io/apimachinery/pkg/api/errors"
-
ctrl "sigs.k8s.io/controller-runtime"
)
// IsManaged checks if the custom resource is configured with ManagementState Managed.
-func IsManaged(ctx context.Context, req ctrl.Request, k k8s.Client) (bool, error) {
+func IsManaged(ctx context.Context, log logr.Logger, req ctrl.Request, k k8s.Client) (bool, error) {
ll := log.WithValues("lokistack", req.NamespacedName)
var stack lokiv1beta1.LokiStack
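With this change `IsManaged` receives its logger from the caller instead of relying on a package-level `log`. The sketch below only illustrates that signature style under the go-logr v1.x API used above; the body is a stand-in, not the operator's actual management-state check:

```go
package state

import (
	"context"

	"github.com/go-logr/logr"
	ctrl "sigs.k8s.io/controller-runtime"
)

// isManagedSketch mirrors the new IsManaged shape: the logr.Logger travels as
// an explicit argument and is scoped to the request before use.
func isManagedSketch(ctx context.Context, log logr.Logger, req ctrl.Request) (bool, error) {
	ll := log.WithValues("lokistack", req.NamespacedName)
	ll.Info("checking management state")
	// The real implementation fetches the LokiStack here and inspects
	// spec.managementState; this sketch simply reports "managed".
	return true, nil
}
```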
diff --git a/operator/controllers/internal/management/state/state_test.go b/operator/controllers/internal/management/state/state_test.go
index 4de0b42a520c0..b5c7edba2e5c7 100644
--- a/operator/controllers/internal/management/state/state_test.go
+++ b/operator/controllers/internal/management/state/state_test.go
@@ -4,21 +4,23 @@ import (
"context"
"testing"
- "github.com/ViaQ/logerr/kverrors"
lokiv1beta1 "github.com/grafana/loki/operator/api/v1beta1"
"github.com/grafana/loki/operator/controllers/internal/management/state"
"github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
- "github.com/stretchr/testify/require"
+ "github.com/ViaQ/logerr/kverrors"
+ "github.com/ViaQ/logerr/log"
+ "github.com/stretchr/testify/require"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
-
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
)
+var logger = log.DefaultLogger()
+
func TestIsManaged(t *testing.T) {
type test struct {
name string
@@ -74,7 +76,7 @@ func TestIsManaged(t *testing.T) {
k.SetClientObject(object, &tst.stack)
return nil
}
- ok, err := state.IsManaged(context.TODO(), r, k)
+ ok, err := state.IsManaged(context.TODO(), logger, r, k)
require.NoError(t, err)
require.Equal(t, ok, tst.wantOk)
})
@@ -110,7 +112,7 @@ func TestIsManaged_WhenError_ReturnNotManagedWithError(t *testing.T) {
for _, tst := range table {
t.Run(tst.name, func(t *testing.T) {
k.GetReturns(tst.apierror)
- ok, err := state.IsManaged(context.TODO(), r, k)
+ ok, err := state.IsManaged(context.TODO(), logger, r, k)
require.Equal(t, tst.wantErr, err)
require.False(t, ok)
})
diff --git a/operator/controllers/lokistack_controller.go b/operator/controllers/lokistack_controller.go
index 7028b395f655c..2ce1191acc7fd 100644
--- a/operator/controllers/lokistack_controller.go
+++ b/operator/controllers/lokistack_controller.go
@@ -92,7 +92,7 @@ type LokiStackReconciler struct {
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/[email protected]/pkg/reconcile
func (r *LokiStackReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
- ok, err := state.IsManaged(ctx, req, r.Client)
+ ok, err := state.IsManaged(ctx, r.Log, req, r.Client)
if err != nil {
return ctrl.Result{
Requeue: true,
@@ -105,7 +105,7 @@ func (r *LokiStackReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
return ctrl.Result{}, nil
}
- err = handlers.CreateOrUpdateLokiStack(ctx, req, r.Client, r.Scheme, r.Flags)
+ err = handlers.CreateOrUpdateLokiStack(ctx, r.Log, req, r.Client, r.Scheme, r.Flags)
if err != nil {
return ctrl.Result{
Requeue: true,
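On the caller side, the reconciler now hands its own `r.Log` to `state.IsManaged` and `handlers.CreateOrUpdateLokiStack`. A hedged sketch of that calling convention; the struct and helper below are illustrative stand-ins, not the operator's real reconciler or handlers:

```go
package controllers

import (
	"context"

	"github.com/go-logr/logr"
	ctrl "sigs.k8s.io/controller-runtime"
)

// reconcilerSketch stands in for LokiStackReconciler: it owns a logger and
// threads it through every helper call instead of using a global logger.
type reconcilerSketch struct {
	Log logr.Logger
}

func (r *reconcilerSketch) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	if err := reconcileSketch(ctx, r.Log, req); err != nil {
		r.Log.Error(err, "reconciliation failed", "lokistack", req.NamespacedName)
		return ctrl.Result{Requeue: true}, err
	}
	return ctrl.Result{}, nil
}

// reconcileSketch models a helper like CreateOrUpdateLokiStack that accepts
// the logger explicitly rather than resolving one itself.
func reconcileSketch(ctx context.Context, log logr.Logger, req ctrl.Request) error {
	log.Info("reconciling", "lokistack", req.NamespacedName)
	return nil
}
```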
diff --git a/operator/controllers/lokistack_controller_test.go b/operator/controllers/lokistack_controller_test.go
index 187d6d7e5d5fa..801c5d8137ca4 100644
--- a/operator/controllers/lokistack_controller_test.go
+++ b/operator/controllers/lokistack_controller_test.go
@@ -6,13 +6,13 @@ import (
"os"
"testing"
- "github.com/ViaQ/logerr/log"
lokiv1beta1 "github.com/grafana/loki/operator/api/v1beta1"
"github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
"github.com/grafana/loki/operator/internal/manifests"
+
+ "github.com/ViaQ/logerr/log"
routev1 "github.com/openshift/api/route/v1"
"github.com/stretchr/testify/require"
-
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
@@ -24,21 +24,22 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
)
-var scheme = runtime.NewScheme()
+var (
+ logger = log.NewLogger("testing")
+
+ scheme = runtime.NewScheme()
+)
func TestMain(m *testing.M) {
testing.Init()
flag.Parse()
+ sink := log.MustGetSink(logger)
if testing.Verbose() {
// set to the highest for verbose testing
- log.SetLogLevel(5)
+ sink.SetVerbosity(5)
} else {
- if err := log.SetOutput(ioutil.Discard); err != nil {
- // This would only happen if the default logger was changed which it hasn't so
- // we can assume that a panic is necessary and the developer is to blame.
- panic(err)
- }
+ sink.SetOutput(ioutil.Discard)
}
// Register the clientgo and CRD schemes
@@ -46,7 +47,6 @@ func TestMain(m *testing.M) {
utilruntime.Must(routev1.AddToScheme(scheme))
utilruntime.Must(lokiv1beta1.AddToScheme(scheme))
- log.Init("testing")
os.Exit(m.Run())
}
diff --git a/operator/go.mod b/operator/go.mod
index ed492937ec9c2..39f585e07afb0 100644
--- a/operator/go.mod
+++ b/operator/go.mod
@@ -3,26 +3,26 @@ module github.com/grafana/loki/operator
go 1.17
require (
- github.com/ViaQ/logerr v1.0.10
- github.com/go-logr/logr v0.4.0
+ github.com/ViaQ/logerr v1.1.0
+ github.com/go-logr/logr v1.2.3
github.com/google/uuid v1.1.2
github.com/imdario/mergo v0.3.12
github.com/maxbrunsfeld/counterfeiter/v6 v6.3.0
- github.com/openshift/api v0.0.0-20210901140736-d8ed1449662d // release-4.9
+ github.com/openshift/api v0.0.0-20220124143425-d74727069f6f // release-4.10
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.48.0
github.com/prometheus/client_golang v1.11.0
github.com/prometheus/common v0.32.0
- github.com/stretchr/testify v1.7.0
- k8s.io/api v0.22.1
- k8s.io/apimachinery v0.22.1
- k8s.io/client-go v0.22.1
- k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9
- sigs.k8s.io/controller-runtime v0.9.2
- sigs.k8s.io/yaml v1.2.0
+ github.com/stretchr/testify v1.7.1
+ k8s.io/api v0.23.5
+ k8s.io/apimachinery v0.23.5
+ k8s.io/client-go v0.23.5
+ k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9
+ sigs.k8s.io/controller-runtime v0.11.0
+ sigs.k8s.io/yaml v1.3.0
)
require (
- cloud.google.com/go v0.65.0 // indirect
+ cloud.google.com/go v0.81.0 // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest v0.11.18 // indirect
github.com/Azure/go-autorest/autorest/adal v0.9.13 // indirect
@@ -32,45 +32,46 @@ require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.1.1 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
- github.com/evanphx/json-patch v4.11.0+incompatible // indirect
+ github.com/evanphx/json-patch v4.12.0+incompatible // indirect
github.com/form3tech-oss/jwt-go v3.2.3+incompatible // indirect
- github.com/fsnotify/fsnotify v1.4.9 // indirect
+ github.com/fsnotify/fsnotify v1.5.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.2 // indirect
- github.com/google/go-cmp v0.5.5 // indirect
- github.com/google/gofuzz v1.1.0 // indirect
+ github.com/google/go-cmp v0.5.7 // indirect
+ github.com/google/gofuzz v1.2.0 // indirect
github.com/googleapis/gnostic v0.5.5 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
- github.com/json-iterator/go v1.1.11 // indirect
+ github.com/json-iterator/go v1.1.12 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
- github.com/modern-go/reflect2 v1.0.1 // indirect
+ github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/procfs v0.6.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
- golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83 // indirect
+ golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 // indirect
golang.org/x/mod v0.4.2 // indirect
- golang.org/x/net v0.0.0-20210525063256-abc453219eb5 // indirect
- golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c // indirect
- golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 // indirect
- golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d // indirect
- golang.org/x/text v0.3.6 // indirect
+ golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect
+ golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect
+ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e // indirect
+ golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
+ golang.org/x/text v0.3.7 // indirect
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
- golang.org/x/tools v0.1.2 // indirect
+ golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
- google.golang.org/protobuf v1.26.0 // indirect
+ google.golang.org/protobuf v1.27.1 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
- k8s.io/apiextensions-apiserver v0.21.2 // indirect
- k8s.io/component-base v0.21.2 // indirect
- k8s.io/klog/v2 v2.9.0 // indirect
- k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e // indirect
- sigs.k8s.io/structured-merge-diff/v4 v4.1.2 // indirect
+ k8s.io/apiextensions-apiserver v0.23.0 // indirect
+ k8s.io/component-base v0.23.0 // indirect
+ k8s.io/klog/v2 v2.60.1 // indirect
+ k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect
+ sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect
+ sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
)
diff --git a/operator/go.sum b/operator/go.sum
index c65312c76b51c..7dc47647a2361 100644
--- a/operator/go.sum
+++ b/operator/go.sum
@@ -12,8 +12,13 @@ cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bP
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
-cloud.google.com/go v0.65.0 h1:Dg9iHVQfrhq82rUNu9ZxUDrJLaxFUe/HlCVaLyRruq8=
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
+cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
+cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
+cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
+cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
+cloud.google.com/go v0.81.0 h1:at8Tk2zUz63cLPR0JPWm5vp77pEZmzxEQBEfRKn1VV8=
+cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
@@ -34,14 +39,14 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
+github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
+github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
-github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw=
github.com/Azure/go-autorest/autorest v0.11.18 h1:90Y4srNYrwOtAgVo3ndrQkTYn6kf1Eg/AjTFJ8Is2aM=
github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
-github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
github.com/Azure/go-autorest/autorest/adal v0.9.13 h1:Mp5hbtOePIzM8pJVRa3YLrWWmZtoxRXqUEzCfJt3+/Q=
github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
@@ -52,7 +57,6 @@ github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxB
github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk=
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
-github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg=
github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
@@ -68,8 +72,8 @@ github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbt
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
-github.com/ViaQ/logerr v1.0.10 h1:ZSWC+n9cOCIrwXUYk9mSU96OhmdcGp5qDCNfPTBElVU=
-github.com/ViaQ/logerr v1.0.10/go.mod h1:KZ3ne81U/sJhHt3AjE5AvhoQDY0Rh1O+u4rEHKjG/No=
+github.com/ViaQ/logerr v1.1.0 h1:Jm+WBMbKUcwiDV/aJOIB5Rv5mg0UnyULRs1Jbz5Qq8U=
+github.com/ViaQ/logerr v1.1.0/go.mod h1:D0eovRXC5iBP/jW8Nb3mF25JVxg16eAEmwHBsrGCXlI=
github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
@@ -77,21 +81,28 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
+github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
+github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
+github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
+github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
+github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
+github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
@@ -101,7 +112,13 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
+github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
+github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
+github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
@@ -111,6 +128,7 @@ github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3Ee
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
@@ -142,19 +160,26 @@ github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ=
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/evanphx/json-patch v4.11.0+incompatible h1:glyUF9yIYtMHzn8xaKw5rMhdWcwsYV8dZHIq5567/xs=
-github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84=
+github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/form3tech-oss/jwt-go v3.2.3+incompatible h1:7ZaBxOI7TMoYBfyA3cQHErNNyAWIKUMIwqxEtgHOs5c=
github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI=
+github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
+github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg=
+github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
@@ -170,11 +195,11 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
-github.com/go-logr/logr v0.2.1/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
-github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc=
-github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
-github.com/go-logr/zapr v0.4.0 h1:uc1uML3hRYL9/ZZPdgHS/n8Nzo+eaYL/Efxkkamf7OM=
-github.com/go-logr/zapr v0.4.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk=
+github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
+github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/zapr v1.2.0 h1:n4JnPI1T3Qq1SFEi/F8rwLrZERp2bso19PJZDB9dayk=
+github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro=
github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI=
github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
@@ -208,7 +233,6 @@ github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsd
github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY=
github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
-github.com/go-openapi/spec v0.19.5/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk=
github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY=
@@ -224,12 +248,14 @@ github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2K
github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -244,6 +270,7 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
+github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -260,11 +287,14 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
+github.com/google/cel-go v0.9.0/go.mod h1:U7ayypeSkw23szu4GaQTPJGx66c20mx8JklMSxrmI1w=
+github.com/google/cel-spec v0.6.0/go.mod h1:Nwjgxy5CbjlPrtCWjeDjUyKMl8w41YBYGjsyDdqk0xA=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -272,14 +302,19 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o=
+github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
+github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/pprof v0.0.0-20181127221834-b4f47329b966/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
@@ -288,6 +323,10 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -297,21 +336,23 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
-github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw=
github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -327,7 +368,6 @@ github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
@@ -335,12 +375,14 @@ github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2p
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
@@ -348,8 +390,9 @@ github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCV
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
@@ -361,6 +404,7 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
@@ -371,6 +415,7 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
@@ -396,14 +441,16 @@ github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS4
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
-github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc=
+github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
@@ -422,27 +469,30 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
-github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E=
-github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
+github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
+github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
-github.com/onsi/gomega v1.13.0 h1:7lLHu94wT9Ij0o6EWWclhu0aOh32VxhkwEJvzuWPeak=
-github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY=
-github.com/openshift/api v0.0.0-20210901140736-d8ed1449662d h1:2QcWZUp0R+ewJrK2Iuj8WaZikl/KccB2+/LOhB7RhEk=
-github.com/openshift/api v0.0.0-20210901140736-d8ed1449662d/go.mod h1:RsQCVJu4qhUawxxDP7pGlwU3IA4F01wYm3qKEu29Su8=
-github.com/openshift/build-machinery-go v0.0.0-20210712174854-1bb7fd1518d3/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE=
+github.com/onsi/gomega v1.17.0 h1:9Luw4uT5HTjHTN8+aNcSThgH1vdXnmdJ8xIfZ4wyTRE=
+github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
+github.com/openshift/api v0.0.0-20220124143425-d74727069f6f h1:iOTv1WudhVm2UsoST+L+ZrA5A9w57h9vmQsdlBuqG6g=
+github.com/openshift/api v0.0.0-20220124143425-d74727069f6f/go.mod h1:F/eU6jgr6Q2VhMu1mSpMmygxAELd7+BUxs3NHZ25jV4=
+github.com/openshift/build-machinery-go v0.0.0-20211213093930-7e33a7eb4ce3/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE=
+github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
+github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
@@ -465,17 +515,18 @@ github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
+github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.32.0 h1:HRmM4uANZDAjdvbsdfOoqI5UDbjz0faKeMs/cGPKKI0=
github.com/prometheus/common v0.32.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
@@ -489,17 +540,23 @@ github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
+github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
-github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI=
+github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
+github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
+github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
@@ -507,6 +564,7 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
+github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -516,12 +574,14 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw=
@@ -532,11 +592,18 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
+go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
-go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
+go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
+go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
+go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
+go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0=
+go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE=
+go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc=
+go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4=
go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
@@ -545,18 +612,36 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
+go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
+go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4=
+go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo=
+go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM=
+go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU=
+go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw=
+go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc=
+go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE=
+go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE=
+go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw=
+go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
+go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
+go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
+go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-go.uber.org/zap v1.17.0 h1:MTjgFu6ZLKvY6Pvaqk97GlxNBuMpV4Hy/3P6tRGlI2U=
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
+go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
+go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI=
+go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI=
golang.org/x/arch v0.0.0-20180920145803-b19384d3c130/go.mod h1:cYlCBUl1MsqxdiKgmc4uh7TxZfWSFLOGSRR090WDxt8=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@@ -568,12 +653,13 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83 h1:/ZScEX8SfEmUGRHs0gxpqteO5nfNW6axyZbBdw9A12g=
-golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
+golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 h1:HWj/xjIHfjYU5nVXpTM0s39J9CbLn7Cc5a7IC5rwsMQ=
+golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -595,8 +681,9 @@ golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHl
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
@@ -605,7 +692,8 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -650,19 +738,36 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201026091529-146b70c837a4/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
-golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20210525063256-abc453219eb5 h1:wjuX4b5yYQnEQHzd+CBcrcC6OVR2J1CN6mUy0oSxIPo=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc=
+golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c h1:pkQiBZBvdos9qq4wBAHqlzuZHEXo07pqV06ef90u1WI=
+golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f h1:Qmd2pbz05z7z6lm0DrgQVVPuBm92jqujBKMHMOlOQEw=
+golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -723,22 +828,36 @@ golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 h1:RqytpXGR1iVNX7psjB3ff8y7sNFinVFvkx1c8SjBkio=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE=
-golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -746,14 +865,15 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
+golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs=
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -810,12 +930,19 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
-golang.org/x/tools v0.1.2 h1:kRBLX7v7Af8W7Gdbbc908OJcdgtK8bOz9Uaj8/F1ACA=
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff h1:VX/uD7MK0AHXGiScH3fsieUQUcpmRERPDYtqZdJnA+Q=
+golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -839,6 +966,12 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
+google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
+google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
+google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
+google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
+google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
+google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -868,16 +1001,30 @@ google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfG
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201102152239-715cce707fb0/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
+google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -891,6 +1038,16 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
+google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
+google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
+google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -902,8 +1059,9 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
+google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -917,6 +1075,7 @@ gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMy
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
@@ -926,6 +1085,7 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@@ -947,63 +1107,66 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
k8s.io/api v0.18.3/go.mod h1:UOaMwERbqJMfeeeHc8XJKawj4P9TgDRnViIqqBeH2QA=
-k8s.io/api v0.21.2/go.mod h1:Lv6UGJZ1rlMI1qusN8ruAp9PUBFyBwpEHAdG24vIsiU=
-k8s.io/api v0.22.1 h1:ISu3tD/jRhYfSW8jI/Q1e+lRxkR7w9UwQEZ7FgslrwY=
-k8s.io/api v0.22.1/go.mod h1:bh13rkTp3F1XEaLGykbyRD2QaTTzPm0e/BMd8ptFONY=
+k8s.io/api v0.23.0/go.mod h1:8wmDdLBHBNxtOIytwLstXt5E9PddnZb0GaMcqsvDBpg=
+k8s.io/api v0.23.5 h1:zno3LUiMubxD/V1Zw3ijyKO3wxrhbUF1Ck+VjBvfaoA=
+k8s.io/api v0.23.5/go.mod h1:Na4XuKng8PXJ2JsploYYrivXrINeTaycCGcYgF91Xm8=
k8s.io/apiextensions-apiserver v0.18.3/go.mod h1:TMsNGs7DYpMXd+8MOCX8KzPOCx8fnZMoIGB24m03+JE=
-k8s.io/apiextensions-apiserver v0.21.2 h1:+exKMRep4pDrphEafRvpEi79wTnCFMqKf8LBtlA3yrE=
-k8s.io/apiextensions-apiserver v0.21.2/go.mod h1:+Axoz5/l3AYpGLlhJDfcVQzCerVYq3K3CvDMvw6X1RA=
+k8s.io/apiextensions-apiserver v0.23.0 h1:uii8BYmHYiT2ZTAJxmvc3X8UhNYMxl2A0z0Xq3Pm+WY=
+k8s.io/apiextensions-apiserver v0.23.0/go.mod h1:xIFAEEDlAZgpVBl/1VSjGDmLoXAWRG40+GsWhKhAxY4=
k8s.io/apimachinery v0.18.3/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko=
-k8s.io/apimachinery v0.21.2/go.mod h1:CdTY8fU/BlvAbJ2z/8kBwimGki5Zp8/fbVuLY8gJumM=
-k8s.io/apimachinery v0.22.1 h1:DTARnyzmdHMz7bFWFDDm22AM4pLWTQECMpRTFu2d2OM=
-k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0=
+k8s.io/apimachinery v0.23.0/go.mod h1:fFCTTBKvKcwTPFzjlcxp91uPFZr+JA0FubU4fLzzFYc=
+k8s.io/apimachinery v0.23.5 h1:Va7dwhp8wgkUPWsEXk6XglXWU4IKYLKNlv8VkX7SDM0=
+k8s.io/apimachinery v0.23.5/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM=
k8s.io/apiserver v0.18.3/go.mod h1:tHQRmthRPLUtwqsOnJJMoI8SW3lnoReZeE861lH8vUw=
-k8s.io/apiserver v0.21.2/go.mod h1:lN4yBoGyiNT7SC1dmNk0ue6a5Wi6O3SWOIw91TsucQw=
+k8s.io/apiserver v0.23.0/go.mod h1:Cec35u/9zAepDPPFyT+UMrgqOCjgJ5qtfVJDxjZYmt4=
k8s.io/client-go v0.18.3/go.mod h1:4a/dpQEvzAhT1BbuWW09qvIaGw6Gbu1gZYiQZIi1DMw=
-k8s.io/client-go v0.21.2/go.mod h1:HdJ9iknWpbl3vMGtib6T2PyI/VYxiZfq936WNVHBRrA=
-k8s.io/client-go v0.22.1 h1:jW0ZSHi8wW260FvcXHkIa0NLxFBQszTlhiAVsU5mopw=
-k8s.io/client-go v0.22.1/go.mod h1:BquC5A4UOo4qVDUtoc04/+Nxp1MeHcVc1HJm1KmG8kk=
+k8s.io/client-go v0.23.0/go.mod h1:hrDnpnK1mSr65lHHcUuIZIXDgEbzc7/683c6hyG4jTA=
+k8s.io/client-go v0.23.5 h1:zUXHmEuqx0RY4+CsnkOn5l0GU+skkRXKGJrhmE2SLd8=
+k8s.io/client-go v0.23.5/go.mod h1:flkeinTO1CirYgzMPRWxUCnV0G4Fbu2vLhYCObnt/r4=
k8s.io/code-generator v0.18.3/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c=
-k8s.io/code-generator v0.21.2/go.mod h1:8mXJDCB7HcRo1xiEQstcguZkbxZaqeUOrO9SsicWs3U=
-k8s.io/code-generator v0.22.1/go.mod h1:eV77Y09IopzeXOJzndrDyCI88UBok2h6WxAlBwpxa+o=
+k8s.io/code-generator v0.23.0/go.mod h1:vQvOhDXhuzqiVfM/YHp+dmg10WDZCchJVObc9MvowsE=
k8s.io/component-base v0.18.3/go.mod h1:bp5GzGR0aGkYEfTj+eTY0AN/vXTgkJdQXjNTTVUaa3k=
-k8s.io/component-base v0.21.2 h1:EsnmFFoJ86cEywC0DoIkAUiEV6fjgauNugiw1lmIjs4=
-k8s.io/component-base v0.21.2/go.mod h1:9lvmIThzdlrJj5Hp8Z/TOgIkdfsNARQ1pT+3PByuiuc=
+k8s.io/component-base v0.23.0 h1:UAnyzjvVZ2ZR1lF35YwtNY6VMN94WtOnArcXBu34es8=
+k8s.io/component-base v0.23.0/go.mod h1:DHH5uiFvLC1edCpvcTDV++NKULdYYU6pR9Tt3HIKMKI=
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
-k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
-k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
+k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
-k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
-k8s.io/klog/v2 v2.9.0 h1:D7HV+n1V57XeZ0m6tdRkfknthUaM06VFbWldOFh8kzM=
-k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
+k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
+k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc=
+k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
-k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE=
-k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e h1:KLHHjkdQFomZy8+06csTWZ0m1343QqxZhR2LJ1OxCYM=
-k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
+k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 h1:E3J9oCLlaobFUqsjG9DfKbP2BmgwBL2p7pn0A3dG9W4=
+k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk=
k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
-k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-k8s.io/utils v0.0.0-20210527160623-6fdb442a123b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9 h1:imL9YgXQ9p7xmPzHFm/vVd/cF78jad+n4wK1ABwYtMM=
-k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc=
+k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.19/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
-sigs.k8s.io/controller-runtime v0.9.2 h1:MnCAsopQno6+hI9SgJHKddzXpmv2wtouZz6931Eax+Q=
-sigs.k8s.io/controller-runtime v0.9.2/go.mod h1:TxzMCHyEUpaeuOiZx/bIdc2T81vfs/aKdvJt9wuu0zk=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.25/go.mod h1:Mlj9PNLmG9bZ6BHFwFKDo5afkpWyUISkb9Me0GnK66I=
+sigs.k8s.io/controller-runtime v0.11.0 h1:DqO+c8mywcZLFJWILq4iktoECTyn30Bkj0CwgqMpZWQ=
+sigs.k8s.io/controller-runtime v0.11.0/go.mod h1:KKwLiTooNGu+JmLZGn9Sl3Gjmfj66eMbCQznLP5zcqA=
+sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs=
+sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 h1:kDi4JBNAsJWfz1aEXhO8Jg87JJaPNLh5tIzYHgStQ9Y=
+sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
-sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
-sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno=
sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
+sigs.k8s.io/structured-merge-diff/v4 v4.2.0/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
+sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y=
+sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
-sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
+sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
+sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
diff --git a/operator/internal/handlers/internal/gateway/tenant_configmap.go b/operator/internal/handlers/internal/gateway/tenant_configmap.go
index f29219fdc57b6..76088f5ea91be 100644
--- a/operator/internal/handlers/internal/gateway/tenant_configmap.go
+++ b/operator/internal/handlers/internal/gateway/tenant_configmap.go
@@ -3,19 +3,17 @@ package gateway
import (
"context"
+ "github.com/grafana/loki/operator/internal/external/k8s"
+ "github.com/grafana/loki/operator/internal/manifests"
"github.com/grafana/loki/operator/internal/manifests/openshift"
- "github.com/ViaQ/logerr/log"
-
"github.com/ViaQ/logerr/kverrors"
- "github.com/grafana/loki/operator/internal/manifests"
+ "github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/json"
- "sigs.k8s.io/yaml"
-
- "github.com/grafana/loki/operator/internal/external/k8s"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/yaml"
)
const (
@@ -41,7 +39,7 @@ type openShiftSpec struct {
// GetTenantConfigMapData returns the tenantName, tenantId, cookieSecret
// clusters to auto-create redirect URLs for OpenShift Auth or an error.
-func GetTenantConfigMapData(ctx context.Context, k k8s.Client, req ctrl.Request) map[string]openshift.TenantData {
+func GetTenantConfigMapData(ctx context.Context, log logr.Logger, k k8s.Client, req ctrl.Request) map[string]openshift.TenantData {
var tenantConfigMap corev1.ConfigMap
key := client.ObjectKey{Name: manifests.GatewayName(req.Name), Namespace: req.Namespace}
if err := k.Get(ctx, key, &tenantConfigMap); err != nil {
diff --git a/operator/internal/handlers/internal/gateway/tenant_configmap_test.go b/operator/internal/handlers/internal/gateway/tenant_configmap_test.go
index c0e76d589c8dc..de50259829de9 100644
--- a/operator/internal/handlers/internal/gateway/tenant_configmap_test.go
+++ b/operator/internal/handlers/internal/gateway/tenant_configmap_test.go
@@ -4,9 +4,10 @@ import (
"context"
"testing"
+ "github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
"github.com/grafana/loki/operator/internal/manifests/openshift"
- "github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
+ "github.com/ViaQ/logerr/log"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -34,6 +35,8 @@ tenants:
cookieSecret: test789
`)
+var logger = log.DefaultLogger()
+
func TestGetTenantConfigMapData_ConfigMapExist(t *testing.T) {
k := &k8sfakes.FakeClient{}
r := ctrl.Request{
@@ -58,7 +61,7 @@ func TestGetTenantConfigMapData_ConfigMapExist(t *testing.T) {
return nil
}
- ts := GetTenantConfigMapData(context.TODO(), k, r)
+ ts := GetTenantConfigMapData(context.TODO(), logger, k, r)
require.NotNil(t, ts)
expected := map[string]openshift.TenantData{
@@ -88,6 +91,6 @@ func TestGetTenantConfigMapData_ConfigMapNotExist(t *testing.T) {
return nil
}
- ts := GetTenantConfigMapData(context.TODO(), k, r)
+ ts := GetTenantConfigMapData(context.TODO(), logger, k, r)
require.Nil(t, ts)
}
diff --git a/operator/internal/handlers/lokistack_create_or_update.go b/operator/internal/handlers/lokistack_create_or_update.go
index cfe6230b7cf3f..09f1ba30c562a 100644
--- a/operator/internal/handlers/lokistack_create_or_update.go
+++ b/operator/internal/handlers/lokistack_create_or_update.go
@@ -5,18 +5,17 @@ import (
"fmt"
"os"
- "github.com/grafana/loki/operator/internal/manifests/openshift"
-
- "github.com/ViaQ/logerr/kverrors"
- "github.com/ViaQ/logerr/log"
lokiv1beta1 "github.com/grafana/loki/operator/api/v1beta1"
"github.com/grafana/loki/operator/internal/external/k8s"
"github.com/grafana/loki/operator/internal/handlers/internal/gateway"
"github.com/grafana/loki/operator/internal/handlers/internal/secrets"
"github.com/grafana/loki/operator/internal/manifests"
+ "github.com/grafana/loki/operator/internal/manifests/openshift"
"github.com/grafana/loki/operator/internal/metrics"
"github.com/grafana/loki/operator/internal/status"
+ "github.com/ViaQ/logerr/kverrors"
+ "github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -26,7 +25,7 @@ import (
)
// CreateOrUpdateLokiStack handles LokiStack create and update events.
-func CreateOrUpdateLokiStack(ctx context.Context, req ctrl.Request, k k8s.Client, s *runtime.Scheme, flags manifests.FeatureFlags) error {
+func CreateOrUpdateLokiStack(ctx context.Context, log logr.Logger, req ctrl.Request, k k8s.Client, s *runtime.Scheme, flags manifests.FeatureFlags) error {
ll := log.WithValues("lokistack", req.NamespacedName, "event", "createOrUpdate")
var stack lokiv1beta1.LokiStack
@@ -101,7 +100,7 @@ func CreateOrUpdateLokiStack(ctx context.Context, req ctrl.Request, k k8s.Client
}
// extract the existing tenant's id, cookieSecret if exists, otherwise create new.
- tenantConfigMap = gateway.GetTenantConfigMapData(ctx, k, req)
+ tenantConfigMap = gateway.GetTenantConfigMapData(ctx, log, k, req)
}
}
@@ -159,7 +158,7 @@ func CreateOrUpdateLokiStack(ctx context.Context, req ctrl.Request, k k8s.Client
}
desired := obj.DeepCopyObject().(client.Object)
- mutateFn := manifests.MutateFuncFor(obj, desired)
+ mutateFn := manifests.MutateFuncFor(log, obj, desired)
op, err := ctrl.CreateOrUpdate(ctx, k, obj, mutateFn)
if err != nil {
diff --git a/operator/internal/handlers/lokistack_create_or_update_test.go b/operator/internal/handlers/lokistack_create_or_update_test.go
index 77cca3a08125a..37173824b81af 100644
--- a/operator/internal/handlers/lokistack_create_or_update_test.go
+++ b/operator/internal/handlers/lokistack_create_or_update_test.go
@@ -8,15 +8,15 @@ import (
"os"
"testing"
- "github.com/ViaQ/logerr/log"
lokiv1beta1 "github.com/grafana/loki/operator/api/v1beta1"
"github.com/grafana/loki/operator/internal/external/k8s/k8sfakes"
"github.com/grafana/loki/operator/internal/handlers"
"github.com/grafana/loki/operator/internal/manifests"
+
+ "github.com/ViaQ/logerr/log"
routev1 "github.com/openshift/api/route/v1"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
-
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -26,12 +26,13 @@ import (
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/utils/pointer"
-
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
)
var (
+ logger = log.NewLogger("testing")
+
scheme = runtime.NewScheme()
flags = manifests.FeatureFlags{
EnableCertificateSigningService: false,
@@ -78,15 +79,12 @@ func TestMain(m *testing.M) {
testing.Init()
flag.Parse()
+ sink := log.MustGetSink(logger)
if testing.Verbose() {
// set to the highest for verbose testing
- log.SetLogLevel(5)
+ sink.SetVerbosity(5)
} else {
- if err := log.SetOutput(ioutil.Discard); err != nil {
- // This would only happen if the default logger was changed which it hasn't so
- // we can assume that a panic is necessary and the developer is to blame.
- panic(err)
- }
+ sink.SetOutput(ioutil.Discard)
}
// Register the clientgo and CRD schemes
@@ -94,7 +92,6 @@ func TestMain(m *testing.M) {
utilruntime.Must(routev1.AddToScheme(scheme))
utilruntime.Must(lokiv1beta1.AddToScheme(scheme))
- log.Init("testing")
os.Exit(m.Run())
}
@@ -111,7 +108,7 @@ func TestCreateOrUpdateLokiStack_WhenGetReturnsNotFound_DoesNotError(t *testing.
return apierrors.NewNotFound(schema.GroupResource{}, "something wasn't found")
}
- err := handlers.CreateOrUpdateLokiStack(context.TODO(), r, k, scheme, flags)
+ err := handlers.CreateOrUpdateLokiStack(context.TODO(), logger, r, k, scheme, flags)
require.NoError(t, err)
// make sure create was NOT called because the Get failed
@@ -132,7 +129,7 @@ func TestCreateOrUpdateLokiStack_WhenGetReturnsAnErrorOtherThanNotFound_ReturnsT
return badRequestErr
}
- err := handlers.CreateOrUpdateLokiStack(context.TODO(), r, k, scheme, flags)
+ err := handlers.CreateOrUpdateLokiStack(context.TODO(), logger, r, k, scheme, flags)
require.Equal(t, badRequestErr, errors.Unwrap(err))
@@ -209,7 +206,7 @@ func TestCreateOrUpdateLokiStack_SetsNamespaceOnAllObjects(t *testing.T) {
return nil
}
- err := handlers.CreateOrUpdateLokiStack(context.TODO(), r, k, scheme, flags)
+ err := handlers.CreateOrUpdateLokiStack(context.TODO(), logger, r, k, scheme, flags)
require.NoError(t, err)
// make sure create was called
@@ -307,7 +304,7 @@ func TestCreateOrUpdateLokiStack_SetsOwnerRefOnAllObjects(t *testing.T) {
return nil
}
- err := handlers.CreateOrUpdateLokiStack(context.TODO(), r, k, scheme, flags)
+ err := handlers.CreateOrUpdateLokiStack(context.TODO(), logger, r, k, scheme, flags)
require.NoError(t, err)
// make sure create was called
@@ -357,7 +354,7 @@ func TestCreateOrUpdateLokiStack_WhenSetControllerRefInvalid_ContinueWithOtherOb
return nil
}
- err := handlers.CreateOrUpdateLokiStack(context.TODO(), r, k, scheme, flags)
+ err := handlers.CreateOrUpdateLokiStack(context.TODO(), logger, r, k, scheme, flags)
// make sure error is returned to re-trigger reconciliation
require.Error(t, err)
@@ -450,7 +447,7 @@ func TestCreateOrUpdateLokiStack_WhenGetReturnsNoError_UpdateObjects(t *testing.
return nil
}
- err := handlers.CreateOrUpdateLokiStack(context.TODO(), r, k, scheme, flags)
+ err := handlers.CreateOrUpdateLokiStack(context.TODO(), logger, r, k, scheme, flags)
require.NoError(t, err)
// make sure create not called
@@ -508,7 +505,7 @@ func TestCreateOrUpdateLokiStack_WhenCreateReturnsError_ContinueWithOtherObjects
return apierrors.NewTooManyRequestsError("too many create requests")
}
- err := handlers.CreateOrUpdateLokiStack(context.TODO(), r, k, scheme, flags)
+ err := handlers.CreateOrUpdateLokiStack(context.TODO(), logger, r, k, scheme, flags)
// make sure error is returned to re-trigger reconciliation
require.Error(t, err)
@@ -607,7 +604,7 @@ func TestCreateOrUpdateLokiStack_WhenUpdateReturnsError_ContinueWithOtherObjects
return apierrors.NewTooManyRequestsError("too many create requests")
}
- err := handlers.CreateOrUpdateLokiStack(context.TODO(), r, k, scheme, flags)
+ err := handlers.CreateOrUpdateLokiStack(context.TODO(), logger, r, k, scheme, flags)
// make sure error is returned to re-trigger reconciliation
require.Error(t, err)
@@ -655,7 +652,7 @@ func TestCreateOrUpdateLokiStack_WhenMissingSecret_SetDegraded(t *testing.T) {
k.StatusStub = func() client.StatusWriter { return sw }
- err := handlers.CreateOrUpdateLokiStack(context.TODO(), r, k, scheme, flags)
+ err := handlers.CreateOrUpdateLokiStack(context.TODO(), logger, r, k, scheme, flags)
// make sure error is returned to re-trigger reconciliation
require.NoError(t, err)
@@ -711,7 +708,7 @@ func TestCreateOrUpdateLokiStack_WhenInvalidSecret_SetDegraded(t *testing.T) {
k.StatusStub = func() client.StatusWriter { return sw }
- err := handlers.CreateOrUpdateLokiStack(context.TODO(), r, k, scheme, flags)
+ err := handlers.CreateOrUpdateLokiStack(context.TODO(), logger, r, k, scheme, flags)
// make sure error is returned to re-trigger reconciliation
require.NoError(t, err)
@@ -786,7 +783,7 @@ func TestCreateOrUpdateLokiStack_WhenInvalidTenantsConfiguration_SetDegraded(t *
k.StatusStub = func() client.StatusWriter { return sw }
- err := handlers.CreateOrUpdateLokiStack(context.TODO(), r, k, scheme, ff)
+ err := handlers.CreateOrUpdateLokiStack(context.TODO(), logger, r, k, scheme, ff)
// make sure error is returned to re-trigger reconciliation
require.NoError(t, err)
@@ -866,7 +863,7 @@ func TestCreateOrUpdateLokiStack_WhenMissingGatewaySecret_SetDegraded(t *testing
k.StatusStub = func() client.StatusWriter { return sw }
- err := handlers.CreateOrUpdateLokiStack(context.TODO(), r, k, scheme, ff)
+ err := handlers.CreateOrUpdateLokiStack(context.TODO(), logger, r, k, scheme, ff)
// make sure error is returned to re-trigger reconciliation
require.Error(t, err)
@@ -950,7 +947,7 @@ func TestCreateOrUpdateLokiStack_WhenInvalidGatewaySecret_SetDegraded(t *testing
k.StatusStub = func() client.StatusWriter { return sw }
- err := handlers.CreateOrUpdateLokiStack(context.TODO(), r, k, scheme, ff)
+ err := handlers.CreateOrUpdateLokiStack(context.TODO(), logger, r, k, scheme, ff)
// make sure error is returned to re-trigger reconciliation
require.Error(t, err)
@@ -1012,7 +1009,7 @@ func TestCreateOrUpdateLokiStack_MissingTenantsSpec_SetDegraded(t *testing.T) {
k.StatusStub = func() client.StatusWriter { return sw }
- err := handlers.CreateOrUpdateLokiStack(context.TODO(), r, k, scheme, ff)
+ err := handlers.CreateOrUpdateLokiStack(context.TODO(), logger, r, k, scheme, ff)
// make sure no error is returned
require.NoError(t, err)
diff --git a/operator/internal/manifests/gateway.go b/operator/internal/manifests/gateway.go
index 0e7c2e00b6ad9..d176c576a911b 100644
--- a/operator/internal/manifests/gateway.go
+++ b/operator/internal/manifests/gateway.go
@@ -149,7 +149,7 @@ func NewGatewayDeployment(opts Options, sha1C string) *appsv1.Deployment {
},
},
LivenessProbe: &corev1.Probe{
- Handler: corev1.Handler{
+ ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/live",
Port: intstr.FromInt(gatewayInternalPort),
@@ -161,7 +161,7 @@ func NewGatewayDeployment(opts Options, sha1C string) *appsv1.Deployment {
FailureThreshold: 10,
},
ReadinessProbe: &corev1.Probe{
- Handler: corev1.Handler{
+ ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/ready",
Port: intstr.FromInt(gatewayInternalPort),
@@ -388,14 +388,14 @@ func configureGatewayMetricsPKI(podSpec *corev1.PodSpec, serviceName string) err
}
uriSchemeContainerSpec := corev1.Container{
ReadinessProbe: &corev1.Probe{
- Handler: corev1.Handler{
+ ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Scheme: corev1.URISchemeHTTPS,
},
},
},
LivenessProbe: &corev1.Probe{
- Handler: corev1.Handler{
+ ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Scheme: corev1.URISchemeHTTPS,
},
diff --git a/operator/internal/manifests/gateway_tenants_test.go b/operator/internal/manifests/gateway_tenants_test.go
index db2811b20257e..5606c3f5ccf54 100644
--- a/operator/internal/manifests/gateway_tenants_test.go
+++ b/operator/internal/manifests/gateway_tenants_test.go
@@ -220,7 +220,7 @@ func TestConfigureDeploymentForMode(t *testing.T) {
},
},
LivenessProbe: &corev1.Probe{
- Handler: corev1.Handler{
+ ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/live",
Port: intstr.FromInt(int(openshift.GatewayOPAInternalPort)),
@@ -232,7 +232,7 @@ func TestConfigureDeploymentForMode(t *testing.T) {
FailureThreshold: 10,
},
ReadinessProbe: &corev1.Probe{
- Handler: corev1.Handler{
+ ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/ready",
Port: intstr.FromInt(int(openshift.GatewayOPAInternalPort)),
@@ -315,7 +315,7 @@ func TestConfigureDeploymentForMode(t *testing.T) {
},
},
LivenessProbe: &corev1.Probe{
- Handler: corev1.Handler{
+ ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/live",
Port: intstr.FromInt(int(openshift.GatewayOPAInternalPort)),
@@ -327,7 +327,7 @@ func TestConfigureDeploymentForMode(t *testing.T) {
FailureThreshold: 10,
},
ReadinessProbe: &corev1.Probe{
- Handler: corev1.Handler{
+ ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/ready",
Port: intstr.FromInt(int(openshift.GatewayOPAInternalPort)),
@@ -452,7 +452,7 @@ func TestConfigureDeploymentForMode(t *testing.T) {
},
},
LivenessProbe: &corev1.Probe{
- Handler: corev1.Handler{
+ ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/live",
Port: intstr.FromInt(int(openshift.GatewayOPAInternalPort)),
@@ -464,7 +464,7 @@ func TestConfigureDeploymentForMode(t *testing.T) {
FailureThreshold: 10,
},
ReadinessProbe: &corev1.Probe{
- Handler: corev1.Handler{
+ ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/ready",
Port: intstr.FromInt(int(openshift.GatewayOPAInternalPort)),
diff --git a/operator/internal/manifests/mutate.go b/operator/internal/manifests/mutate.go
index 58bc64dac5195..6b887528a4120 100644
--- a/operator/internal/manifests/mutate.go
+++ b/operator/internal/manifests/mutate.go
@@ -4,11 +4,10 @@ import (
"reflect"
"github.com/ViaQ/logerr/kverrors"
- "github.com/ViaQ/logerr/log"
+ "github.com/go-logr/logr"
"github.com/imdario/mergo"
routev1 "github.com/openshift/api/route/v1"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
-
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
@@ -25,14 +24,14 @@ import (
// - Deployment
// - StatefulSet
// - ServiceMonitor
-func MutateFuncFor(existing, desired client.Object) controllerutil.MutateFn {
+func MutateFuncFor(log logr.Logger, existing, desired client.Object) controllerutil.MutateFn {
return func() error {
existingAnnotations := existing.GetAnnotations()
- mergeWithOverride(&existingAnnotations, desired.GetAnnotations())
+ mergeWithOverride(log, &existingAnnotations, desired.GetAnnotations())
existing.SetAnnotations(existingAnnotations)
existingLabels := existing.GetLabels()
- mergeWithOverride(&existingLabels, desired.GetLabels())
+ mergeWithOverride(log, &existingLabels, desired.GetLabels())
existing.SetLabels(existingLabels)
switch existing.(type) {
@@ -44,7 +43,7 @@ func MutateFuncFor(existing, desired client.Object) controllerutil.MutateFn {
case *corev1.Service:
svc := existing.(*corev1.Service)
wantSvc := desired.(*corev1.Service)
- mutateService(svc, wantSvc)
+ mutateService(log, svc, wantSvc)
case *corev1.ServiceAccount:
sa := existing.(*corev1.ServiceAccount)
@@ -74,12 +73,12 @@ func MutateFuncFor(existing, desired client.Object) controllerutil.MutateFn {
case *appsv1.Deployment:
dpl := existing.(*appsv1.Deployment)
wantDpl := desired.(*appsv1.Deployment)
- mutateDeployment(dpl, wantDpl)
+ mutateDeployment(log, dpl, wantDpl)
case *appsv1.StatefulSet:
sts := existing.(*appsv1.StatefulSet)
wantSts := desired.(*appsv1.StatefulSet)
- mutateStatefulSet(sts, wantSts)
+ mutateStatefulSet(log, sts, wantSts)
case *monitoringv1.ServiceMonitor:
svcMonitor := existing.(*monitoringv1.ServiceMonitor)
@@ -109,7 +108,7 @@ func MutateFuncFor(existing, desired client.Object) controllerutil.MutateFn {
}
}
-func mergeWithOverride(dst, src interface{}) {
+func mergeWithOverride(log logr.Logger, dst, src interface{}) {
err := mergo.Merge(dst, src, mergo.WithOverride)
if err != nil {
log.Error(err, "unable to mergeWithOverride", "dst", dst, "src", src)
@@ -120,9 +119,9 @@ func mutateConfigMap(existing, desired *corev1.ConfigMap) {
existing.BinaryData = desired.BinaryData
}
-func mutateService(existing, desired *corev1.Service) {
+func mutateService(log logr.Logger, existing, desired *corev1.Service) {
existing.Spec.Ports = desired.Spec.Ports
- mergeWithOverride(&existing.Spec.Selector, desired.Spec.Selector)
+ mergeWithOverride(log, &existing.Spec.Selector, desired.Spec.Selector)
}
func mutateServiceAccount(existing, desired *corev1.ServiceAccount) {
@@ -154,18 +153,18 @@ func mutateRoleBinding(existing, desired *rbacv1.RoleBinding) {
existing.Subjects = desired.Subjects
}
-func mutateDeployment(existing, desired *appsv1.Deployment) {
+func mutateDeployment(log logr.Logger, existing, desired *appsv1.Deployment) {
// Deployment selector is immutable so we set this value only if
// a new object is going to be created
if existing.CreationTimestamp.IsZero() {
- mergeWithOverride(existing.Spec.Selector, desired.Spec.Selector)
+ mergeWithOverride(log, existing.Spec.Selector, desired.Spec.Selector)
}
existing.Spec.Replicas = desired.Spec.Replicas
- mergeWithOverride(&existing.Spec.Template, desired.Spec.Template)
- mergeWithOverride(&existing.Spec.Strategy, desired.Spec.Strategy)
+ mergeWithOverride(log, &existing.Spec.Template, desired.Spec.Template)
+ mergeWithOverride(log, &existing.Spec.Strategy, desired.Spec.Strategy)
}
-func mutateStatefulSet(existing, desired *appsv1.StatefulSet) {
+func mutateStatefulSet(log logr.Logger, existing, desired *appsv1.StatefulSet) {
// StatefulSet selector is immutable so we set this value only if
// a new object is going to be created
if existing.CreationTimestamp.IsZero() {
@@ -173,7 +172,7 @@ func mutateStatefulSet(existing, desired *appsv1.StatefulSet) {
}
existing.Spec.PodManagementPolicy = desired.Spec.PodManagementPolicy
existing.Spec.Replicas = desired.Spec.Replicas
- mergeWithOverride(&existing.Spec.Template, desired.Spec.Template)
+ mergeWithOverride(log, &existing.Spec.Template, desired.Spec.Template)
for i := range existing.Spec.VolumeClaimTemplates {
existing.Spec.VolumeClaimTemplates[i].TypeMeta = desired.Spec.VolumeClaimTemplates[i].TypeMeta
existing.Spec.VolumeClaimTemplates[i].ObjectMeta = desired.Spec.VolumeClaimTemplates[i].ObjectMeta
diff --git a/operator/internal/manifests/mutate_test.go b/operator/internal/manifests/mutate_test.go
index 87325792ce5c0..b0e1511c822f3 100644
--- a/operator/internal/manifests/mutate_test.go
+++ b/operator/internal/manifests/mutate_test.go
@@ -3,12 +3,12 @@ package manifests_test
import (
"testing"
+ "github.com/grafana/loki/operator/internal/manifests"
+
+ "github.com/ViaQ/logerr/log"
routev1 "github.com/openshift/api/route/v1"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
-
- "github.com/grafana/loki/operator/internal/manifests"
"github.com/stretchr/testify/require"
-
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
@@ -18,6 +18,8 @@ import (
"k8s.io/utils/pointer"
)
+var logger = log.DefaultLogger()
+
func TestGetMutateFunc_MutateObjectMeta(t *testing.T) {
got := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
@@ -41,7 +43,7 @@ func TestGetMutateFunc_MutateObjectMeta(t *testing.T) {
},
}
- f := manifests.MutateFuncFor(got, want)
+ f := manifests.MutateFuncFor(logger, got, want)
err := f()
require.NoError(t, err)
@@ -53,7 +55,7 @@ func TestGetMutateFunc_MutateObjectMeta(t *testing.T) {
func TestGetMutateFunc_ReturnErrOnNotSupportedType(t *testing.T) {
got := &corev1.Endpoints{}
want := &corev1.Endpoints{}
- f := manifests.MutateFuncFor(got, want)
+ f := manifests.MutateFuncFor(logger, got, want)
require.Error(t, f())
}
@@ -69,7 +71,7 @@ func TestGetMutateFunc_MutateConfigMap(t *testing.T) {
BinaryData: map[string][]byte{"btest": []byte("btestss")},
}
- f := manifests.MutateFuncFor(got, want)
+ f := manifests.MutateFuncFor(logger, got, want)
err := f()
require.NoError(t, err)
@@ -118,7 +120,7 @@ func TestGetMutateFunc_MutateServiceSpec(t *testing.T) {
},
}
- f := manifests.MutateFuncFor(got, want)
+ f := manifests.MutateFuncFor(logger, got, want)
err := f()
require.NoError(t, err)
@@ -233,7 +235,7 @@ func TestGetMutateFunc_MutateServiceAccountObjectMeta(t *testing.T) {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
- f := manifests.MutateFuncFor(tt.got, tt.want)
+ f := manifests.MutateFuncFor(logger, tt.got, tt.want)
err := f()
require.NoError(t, err)
@@ -295,7 +297,7 @@ func TestGetMutateFunc_MutateClusterRole(t *testing.T) {
},
}
- f := manifests.MutateFuncFor(got, want)
+ f := manifests.MutateFuncFor(logger, got, want)
err := f()
require.NoError(t, err)
@@ -360,7 +362,7 @@ func TestGetMutateFunc_MutateClusterRoleBinding(t *testing.T) {
},
}
- f := manifests.MutateFuncFor(got, want)
+ f := manifests.MutateFuncFor(logger, got, want)
err := f()
require.NoError(t, err)
@@ -415,7 +417,7 @@ func TestGetMutateFunc_MutateRole(t *testing.T) {
},
}
- f := manifests.MutateFuncFor(got, want)
+ f := manifests.MutateFuncFor(logger, got, want)
err := f()
require.NoError(t, err)
@@ -480,7 +482,7 @@ func TestGetMutateFunc_MutateRoleBinding(t *testing.T) {
},
}
- f := manifests.MutateFuncFor(got, want)
+ f := manifests.MutateFuncFor(logger, got, want)
err := f()
require.NoError(t, err)
@@ -599,7 +601,7 @@ func TestGeMutateFunc_MutateDeploymentSpec(t *testing.T) {
tst := tst
t.Run(tst.name, func(t *testing.T) {
t.Parallel()
- f := manifests.MutateFuncFor(tst.got, tst.want)
+ f := manifests.MutateFuncFor(logger, tst.got, tst.want)
err := f()
require.NoError(t, err)
@@ -756,7 +758,7 @@ func TestGeMutateFunc_MutateStatefulSetSpec(t *testing.T) {
tst := tst
t.Run(tst.name, func(t *testing.T) {
t.Parallel()
- f := manifests.MutateFuncFor(tst.got, tst.want)
+ f := manifests.MutateFuncFor(logger, tst.got, tst.want)
err := f()
require.NoError(t, err)
@@ -929,7 +931,7 @@ func TestGetMutateFunc_MutateServiceMonitorSpec(t *testing.T) {
tst := tst
t.Run(tst.name, func(t *testing.T) {
t.Parallel()
- f := manifests.MutateFuncFor(tst.got, tst.want)
+ f := manifests.MutateFuncFor(logger, tst.got, tst.want)
err := f()
require.NoError(t, err)
@@ -997,7 +999,7 @@ func TestGetMutateFunc_MutateIngress(t *testing.T) {
},
}
- f := manifests.MutateFuncFor(got, want)
+ f := manifests.MutateFuncFor(logger, got, want)
err := f()
require.NoError(t, err)
@@ -1050,7 +1052,7 @@ func TestGetMutateFunc_MutateRoute(t *testing.T) {
},
}
- f := manifests.MutateFuncFor(got, want)
+ f := manifests.MutateFuncFor(logger, got, want)
err := f()
require.NoError(t, err)
diff --git a/operator/internal/manifests/openshift/opa_openshift.go b/operator/internal/manifests/openshift/opa_openshift.go
index 23fcf9899d9ec..bf2d377b470c3 100644
--- a/operator/internal/manifests/openshift/opa_openshift.go
+++ b/operator/internal/manifests/openshift/opa_openshift.go
@@ -81,7 +81,7 @@ func newOPAOpenShiftContainer(sercretVolumeName, tlsDir, certFile, keyFile strin
},
},
LivenessProbe: &corev1.Probe{
- Handler: corev1.Handler{
+ ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/live",
Port: intstr.FromInt(int(GatewayOPAInternalPort)),
@@ -93,7 +93,7 @@ func newOPAOpenShiftContainer(sercretVolumeName, tlsDir, certFile, keyFile strin
FailureThreshold: 10,
},
ReadinessProbe: &corev1.Probe{
- Handler: corev1.Handler{
+ ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/ready",
Port: intstr.FromInt(int(GatewayOPAInternalPort)),
diff --git a/operator/internal/manifests/query-frontend.go b/operator/internal/manifests/query-frontend.go
index 95cc7911fae96..8099bcaf008a9 100644
--- a/operator/internal/manifests/query-frontend.go
+++ b/operator/internal/manifests/query-frontend.go
@@ -66,7 +66,7 @@ func NewQueryFrontendDeployment(opts Options) *appsv1.Deployment {
fmt.Sprintf("-runtime-config.file=%s", path.Join(config.LokiConfigMountDir, config.LokiRuntimeConfigFileName)),
},
ReadinessProbe: &corev1.Probe{
- Handler: corev1.Handler{
+ ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
// The frontend will only return ready once a querier has connected to it.
// Because the service used for connecting the querier to the frontend only lists ready
diff --git a/operator/internal/manifests/service_monitor.go b/operator/internal/manifests/service_monitor.go
index aeecc780b3383..80a4de61c9b82 100644
--- a/operator/internal/manifests/service_monitor.go
+++ b/operator/internal/manifests/service_monitor.go
@@ -162,14 +162,14 @@ func configureServiceMonitorPKI(podSpec *corev1.PodSpec, serviceName string) err
}
uriSchemeContainerSpec := corev1.Container{
ReadinessProbe: &corev1.Probe{
- Handler: corev1.Handler{
+ ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Scheme: corev1.URISchemeHTTPS,
},
},
},
LivenessProbe: &corev1.Probe{
- Handler: corev1.Handler{
+ ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Scheme: corev1.URISchemeHTTPS,
},
diff --git a/operator/internal/manifests/var.go b/operator/internal/manifests/var.go
index 0fa8cb4ab41a6..ee49fc12dd03d 100644
--- a/operator/internal/manifests/var.go
+++ b/operator/internal/manifests/var.go
@@ -243,7 +243,7 @@ func serviceMonitorEndpoint(portName, serviceName, namespace string, enableTLS b
func lokiLivenessProbe() *corev1.Probe {
return &corev1.Probe{
- Handler: corev1.Handler{
+ ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Path: lokiLivenessPath,
Port: intstr.FromInt(httpPort),
@@ -259,7 +259,7 @@ func lokiLivenessProbe() *corev1.Probe {
func lokiReadinessProbe() *corev1.Probe {
return &corev1.Probe{
- Handler: corev1.Handler{
+ ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Path: lokiReadinessPath,
Port: intstr.FromInt(httpPort),
diff --git a/operator/main.go b/operator/main.go
index ffa16c3f2f421..f2ecbc6411496 100644
--- a/operator/main.go
+++ b/operator/main.go
@@ -68,11 +68,11 @@ func main() {
flag.BoolVar(&enablePrometheusAlerts, "with-prometheus-alerts", false, "Enables prometheus alerts")
flag.Parse()
- log.Init("loki-operator")
- ctrl.SetLogger(log.GetLogger())
+ logger := log.NewLogger("loki-operator")
+ ctrl.SetLogger(logger)
if enablePrometheusAlerts && !enableServiceMonitors {
- log.Error(kverrors.New("-with-prometheus-alerts flag requires -with-service-monitors"), "")
+ logger.Error(kverrors.New("-with-prometheus-alerts flag requires -with-service-monitors"), "")
os.Exit(1)
}
@@ -97,7 +97,7 @@ func main() {
LeaderElectionID: "e3716011.grafana.com",
})
if err != nil {
- log.Error(err, "unable to start manager")
+ logger.Error(err, "unable to start manager")
os.Exit(1)
}
@@ -112,37 +112,37 @@ func main() {
if err = (&controllers.LokiStackReconciler{
Client: mgr.GetClient(),
- Log: log.WithName("controllers").WithName("LokiStack"),
+ Log: logger.WithName("controllers").WithName("LokiStack"),
Scheme: mgr.GetScheme(),
Flags: featureFlags,
}).SetupWithManager(mgr); err != nil {
- log.Error(err, "unable to create controller", "controller", "LokiStack")
+ logger.Error(err, "unable to create controller", "controller", "LokiStack")
os.Exit(1)
}
// +kubebuilder:scaffold:builder
if err = mgr.AddHealthzCheck("health", healthz.Ping); err != nil {
- log.Error(err, "unable to set up health check")
+ logger.Error(err, "unable to set up health check")
os.Exit(1)
}
if err = mgr.AddReadyzCheck("check", healthz.Ping); err != nil {
- log.Error(err, "unable to set up ready check")
+ logger.Error(err, "unable to set up ready check")
os.Exit(1)
}
- log.Info("registering metrics")
+ logger.Info("registering metrics")
metrics.RegisterMetricCollectors()
- log.Info("Registering profiling endpoints.")
+ logger.Info("Registering profiling endpoints.")
err = registerProfiler(mgr)
if err != nil {
- log.Error(err, "failed to register extra pprof handler")
+ logger.Error(err, "failed to register extra pprof handler")
os.Exit(1)
}
- log.Info("starting manager")
+ logger.Info("starting manager")
if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
- log.Error(err, "problem running manager")
+ logger.Error(err, "problem running manager")
os.Exit(1)
}
}
|
operator
|
Update operator-sdk to 1.18.1 (#5704)
|
3e451c7ab22e03a6ac35260696b16c0560020a31
|
2024-10-15 18:36:30
|
Jay Clifford
|
docs: Updated Fluent Bit docs to use official plugin + Sandbox (#14004)
| false
|
diff --git a/docs/sources/send-data/fluentbit/_index.md b/docs/sources/send-data/fluentbit/_index.md
index ea2af6a4ac4b7..5a6884efd5dbc 100644
--- a/docs/sources/send-data/fluentbit/_index.md
+++ b/docs/sources/send-data/fluentbit/_index.md
@@ -1,282 +1,24 @@
---
-title: Fluent Bit client
+title: Fluent Bit
menuTitle: Fluent Bit
description: Provides instructions for how to install, configure, and use the Fluent Bit client to send logs to Loki.
aliases:
- ../clients/fluentbit/
weight: 500
---
-# Fluent Bit client
+# Fluent Bit
-[Fluent Bit](https://fluentbit.io/) is a fast and lightweight logs and metrics processor and forwarder that can be configured with the Grafana Fluent Bit Plugin described here or with the [Fluent-bit Loki output plugin](https://docs.fluentbit.io/manual/pipeline/outputs/loki) to ship logs to Loki.
-This plugin has more configuration options compared to the built-in Fluent Bit Loki plugin.
-You can define which log files you want to collect using the [`Tail`](https://docs.fluentbit.io/manual/pipeline/inputs/tail) or [`Stdin`](https://docs.fluentbit.io/manual/pipeline/inputs/standard-input) data pipeline input. Additionally, Fluent Bit supports multiple `Filter` and `Parser` plugins (`Kubernetes`, `JSON`, etc.) to structure and alter log lines.
+[Fluent Bit](https://fluentbit.io/) is a fast, lightweight logs and metrics agent. It is a CNCF graduated sub-project under the umbrella of Fluentd. Fluent Bit is licensed under the terms of the Apache License v2.0.
-{{< youtube id="s43IBSVyTpQ" >}}
+When using Fluent Bit to ship logs to Loki, you can define which log files you want to collect using the [`Tail`](https://docs.fluentbit.io/manual/pipeline/inputs/tail) or [`Stdin`](https://docs.fluentbit.io/manual/pipeline/inputs/standard-input) data pipeline input. Additionally, Fluent Bit supports multiple `Filter` and `Parser` plugins (`Kubernetes`, `JSON`, etc.) to structure and alter log lines.
-## Usage
+There are two Fluent Bit plugins for Loki:
-### Docker
+1. The integrated `loki` [plugin](https://grafana.com/docs/loki/<LOKI_VERSION>/send-data/fluentbit/fluent-bit-plugin/), which is officially maintained by the Fluent Bit project.
+2. The `grafana-loki` [plugin](https://grafana.com/docs/loki/<LOKI_VERSION>/send-data/fluentbit/community-plugin/), an alternative community plugin by Grafana Labs.
-You can run a Fluent Bit container with Loki output plugin pre-installed using our [Docker Hub](https://hub.docker.com/r/grafana/fluent-bit-plugin-loki) image:
+We recommend using the `loki` plugin as this provides the most complete feature set and is actively maintained by the Fluent Bit project.
-```bash
-docker run -v /var/log:/var/log \
- -e LOG_PATH="/var/log/*.log" -e LOKI_URL="http://localhost:3100/loki/api/v1/push" \
- grafana/fluent-bit-plugin-loki:latest
-```
+## Tutorial
-Or, an alternative is to run the fluent-bit container using [Docker Hub](https://hub.docker.com/r/fluent/fluent-bit) image:
-
-### Docker Container Logs
-
-To ship logs from Docker containers to Grafana Cloud using Fluent Bit, you can use the Fluent Bit Docker image and configure it to forward logs directly to Grafana Cloud's Loki. Below is a step-by-step guide on setting up Fluent Bit for this purpose.
-
-#### Prerequisites
-
-- Docker is installed on your machine.
-- You have a Grafana Cloud account with access to Loki.
-
-#### Configuration
-
-1. Create a Fluent Bit configuration file named `fluent-bit.conf` with the following content, which defines the input from Docker container logs and sets up the output to send logs to your Grafana Cloud Loki instance:
-
- ```ini
- [SERVICE]
- Flush 1
- Log_Level info
-
- [INPUT]
- Name tail
- Path /var/lib/docker/containers/*/*.log
- Parser docker
- Tag docker.*
-
- [OUTPUT]
- Name loki
- Match *
- Host logs-prod-006.grafana.net
- Port 443
- TLS On
- TLS.Verify On
- HTTP_User 478625
- HTTP_Passwd YOUR_GRAFANA_CLOUD_API_KEY
- Labels job=fluentbit
-
-### Kubernetes
-
-You can run Fluent Bit as a [Daemonset](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) to collect all your Kubernetes workload logs.
-
-To do so you can use the [Fluent Bit helm chart](https://github.com/fluent/helm-charts) with the following `values.yaml` changing the value of `FLUENT_LOKI_URL`:
-
-```yaml
-image:
- # Here we use the Docker image which has the plugin installed
- repository: grafana/fluent-bit-plugin-loki
- tag: main-e2ed1c0
-
-args:
- - "-e"
- - "/fluent-bit/bin/out_grafana_loki.so"
- - --workdir=/fluent-bit/etc
- - --config=/fluent-bit/etc/conf/fluent-bit.conf
-
-env:
- # Note that for security reasons you should fetch the credentials through a Kubernetes Secret https://kubernetes.io/docs/concepts/configuration/secret/ . You may use the envFrom for this.
- - name: FLUENT_LOKI_URL
- value: https://user:[email protected]/loki/api/v1/push
-
-config:
- inputs: |
- [INPUT]
- Name tail
- Tag kube.*
- Path /var/log/containers/*.log
- # Be aware that local clusters like docker-desktop or kind use the docker log format and not the cri (https://docs.fluentbit.io/manual/installation/kubernetes#container-runtime-interface-cri-parser)
- multiline.parser docker, cri
- Mem_Buf_Limit 5MB
- Skip_Long_Lines On
-
- outputs: |
- [Output]
- Name grafana-loki
- Match kube.*
- Url ${FLUENT_LOKI_URL}
- Labels {job="fluent-bit"}
- LabelKeys level,app # this sets the values for actual Loki streams and the other labels are converted to structured_metadata https://grafana.com/docs/loki/<LOKI_VERSION>/get-started/labels/structured-metadata/
- BatchWait 1
- BatchSize 1001024
- LineFormat json
- LogLevel info
- AutoKubernetesLabels true
-```
-
-```bash
-helm repo add fluent https://fluent.github.io/helm-charts
-helm repo update
-helm install fluent-bit fluent/fluent-bit -f values.yaml
-```
-
-By default it will collect all containers logs and extract labels from Kubernetes API (`container_name`, `namespace`, etc..).
-
-If you also want to host your Loki instance inside the cluster install the [official Loki helm chart](https://grafana.com/docs/loki/<LOKI_VERSION>/setup/install/helm/).
-
-### AWS Elastic Container Service (ECS)
-
-You can use fluent-bit Loki Docker image as a Firelens log router in AWS ECS.
-For more information about this see our [AWS documentation]({{< relref "../promtail/cloud/ecs" >}})
-
-### Local
-
-First, you need to follow the [instructions](https://github.com/grafana/loki/blob/main/clients/cmd/fluent-bit/README.md) in order to build the plugin dynamic library.
-
-Assuming you have Fluent Bit installed in your `$PATH`, you can run the plugin using:
-
-```bash
-fluent-bit -e /path/to/built/out_grafana_loki.so -c fluent-bit.conf
-```
-
-You can also adapt your plugins.conf, removing the need to change the command line options:
-
-```conf
-[PLUGINS]
- Path /path/to/built/out_grafana_loki.so
-```
-
-## Configuration Options
-
-| Key | Description | Default |
-|----------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------|
-| Url | Url of Loki server API endpoint. | http://localhost:3100/loki/api/v1/push |
-| TenantID | The tenant ID used by default to push logs to Loki. If omitted or empty it assumes Loki is running in single-tenant mode and no `X-Scope-OrgID` header is sent. | "" |
-| BatchWait | Time to wait before send a log batch to Loki, full or not. | 1s |
-| BatchSize | Log batch size to send a log batch to Loki (unit: Bytes). | 10 KiB (10 * 1024 Bytes) |
-| Timeout | Maximum time to wait for Loki server to respond to a request. | 10s |
-| MinBackoff | Initial backoff time between retries. | 500ms |
-| MaxBackoff | Maximum backoff time between retries. | 5m |
-| MaxRetries | Maximum number of retries when sending batches. Setting it to `0` will retry indefinitely. | 10 |
-| Labels | labels for API requests. | {job="fluent-bit"} |
-| LogLevel | LogLevel for plugin logger. | "info" |
-| RemoveKeys | Specify removing keys. | none |
-| AutoKubernetesLabels | If set to true, it will add all Kubernetes labels to Loki labels | false |
-| LabelKeys | Comma separated list of keys to use as stream labels. All other keys will be placed into the log line. LabelKeys is deactivated when using `LabelMapPath` label mapping configuration. | none |
-| LineFormat | Format to use when flattening the record to a log line. Valid values are "json" or "key_value". If set to "json" the log line sent to Loki will be the fluentd record (excluding any keys extracted out as labels) dumped as json. If set to "key_value", the log line will be each item in the record concatenated together (separated by a single space) in the format <key>=<value>. | json |
-| DropSingleKey | If set to true and after extracting label_keys a record only has a single key remaining, the log line sent to Loki will just be the value of the record key. | true |
-| LabelMapPath | Path to a json file defining how to transform nested records. | none |
-| Buffer | Enable buffering mechanism | false |
-| BufferType | Specify the buffering mechanism to use (currently only dque is implemented). | dque |
-| DqueDir | Path to the directory for queued logs | /tmp/flb-storage/loki |
-| DqueSegmentSize | Segment size in terms of number of records per segment | 500 |
-| DqueSync | Whether to fsync each queue change. Specify no fsync with "normal", and fsync with "full". | "normal" |
-| DqueName | Queue name, must be uniq per output | dque |
-
-### Labels
-
-Labels are used to [query logs]({{< relref "../../query" >}}) `{container_name="nginx", cluster="us-west1"}`, they are usually metadata about the workload producing the log stream (`instance`, `container_name`, `region`, `cluster`, `level`). In Loki labels are indexed consequently you should be cautious when choosing them (high cardinality label values can have performance drastic impact).
-
-You can use `Labels`, `RemoveKeys` , `LabelKeys` and `LabelMapPath` to how the output plugin will perform labels extraction.
-
-### AutoKubernetesLabels
-
-If set to true, it will add all Kubernetes labels to Loki labels automatically and ignore parameters `LabelKeys`, LabelMapPath.
-
-### LabelMapPath
-
-When using the `Parser` and `Filter` plugins Fluent Bit can extract and add data to the current record/log data. While Loki labels are key value pair, record data can be nested structures.
-You can pass a JSON file that defines how to extract labels from each record. Each json key from the file will be matched with the log record to find label values. Values from the configuration are used as label names.
-
-Considering the record below :
-
-```json
-{
- "kubernetes": {
- "container_name": "promtail",
- "pod_name": "promtail-xxx",
- "namespace_name": "prod",
- "labels" : {
- "team": "x-men"
- }
- },
- "HOSTNAME": "docker-desktop",
- "log" : "a log line",
- "time": "20190926T152206Z"
-}
-```
-
-and a LabelMap file as follow :
-
-```json
-{
- "kubernetes": {
- "container_name": "container",
- "pod_name": "pod",
- "namespace_name": "namespace",
- "labels" : {
- "team": "team"
- }
- }
-}
-```
-
-The labels extracted will be `{team="x-men", container="promtail", pod="promtail-xxx", namespace="prod"}`.
-
-If you don't want the `kubernetes` and `HOSTNAME` fields to appear in the log line you can use the `RemoveKeys` configuration field. (e.g. `RemoveKeys kubernetes,HOSTNAME`).
-
-### Buffering
-
-Buffering refers to the ability to store the records somewhere, and while they are processed and delivered, still be able to store more. The Loki output plugin can be blocked by the Loki client because of its design:
-
-- If the BatchSize is over the limit, the output plugin pauses receiving new records until the pending batch is successfully sent to the server
-- If the Loki server is unreachable (retry 429s, 500s and connection-level errors), the output plugin blocks new records until the Loki server is available again, and the pending batch is successfully sent to the server or as long as the maximum number of attempts has been reached within configured back-off mechanism
-
-The blocking state with some of the input plugins is not acceptable, because it can have an undesirable side effect on the part that generates the logs. Fluent Bit implements a buffering mechanism that is based on parallel processing. Therefore, it cannot send logs in order. There are two ways of handling the out-of-order logs:
-
-- Configure Loki to [accept out-of-order writes](https://grafana.com/docs/loki/<LOKI_VERSION>/configure/#accept-out-of-order-writes).
-
-- Configure the Loki output plugin to use the buffering mechanism based on [`dque`](https://github.com/joncrlsn/dque), which is compatible with the Loki server strict time ordering:
-
- ```properties
- [Output]
- Name grafana-loki
- Match *
- Url http://localhost:3100/loki/api/v1/push
- Buffer true
- DqueSegmentSize 8096
- DqueDir /tmp/flb-storage/buffer
- DqueName loki.0
- ```
-
-### Configuration examples
-
-To configure the Loki output plugin add this section to fluent-bit.conf
-
-```properties
-[Output]
- Name grafana-loki
- Match *
- Url http://localhost:3100/loki/api/v1/push
- BatchWait 1s
- BatchSize 30720
- # (30KiB)
- Labels {test="fluent-bit-go", lang="Golang"}
- RemoveKeys key1,key2
- LabelKeys key3,key4
- LineFormat key_value
-```
-
-```properties
-[Output]
- Name grafana-loki
- Match *
- Url http://localhost:3100/loki/api/v1/push
- BatchWait 1s
- BatchSize 30720 # (30KiB)
- AutoKubernetesLabels true
- RemoveKeys key1,key2
-```
-
-A full [example configuration file](https://github.com/grafana/loki/blob/main/clients/cmd/fluent-bit/fluent-bit.conf) is also available in the Loki repository.
-
-### Running multiple plugin instances
-
-You can run multiple plugin instances in the same fluent-bit process, for example if you want to push to different Loki servers or route logs into different Loki tenant IDs. To do so, add additional `[Output]` sections.
+To get started with the `loki` plugin, follow the [Sending logs to Loki using Fluent Bit tutorial](https://grafana.com/docs/loki/<LOKI_VERSION>/send-data/fluentbit/fluent-bit-loki-tutorial/).
diff --git a/docs/sources/send-data/fluentbit/community-plugin.md b/docs/sources/send-data/fluentbit/community-plugin.md
new file mode 100644
index 0000000000000..60dd5fef74a3f
--- /dev/null
+++ b/docs/sources/send-data/fluentbit/community-plugin.md
@@ -0,0 +1,281 @@
+---
+title: Fluent Bit community plugin
+menuTitle: Fluent Bit Community Plugin
+description: Provides instructions for how to install, configure, and use the Fluent Bit Community plugin to send logs to Loki.
+aliases:
+- ../clients/fluentbit/
+weight: 500
+---
+# Fluent Bit community plugin
+
+{{< admonition type="warning" >}}
+
+We recommend using the official [Fluent Bit Loki plugin](https://grafana.com/docs/loki/<LOKI_VERSION>/send-data/fluentbit/fluent-bit-plugin/). The official plugin is more feature-rich and has better support for features such as structured metadata. The community plugin is still available for use, but it's no longer actively maintained.
+
+{{< /admonition >}}
+
+The Fluent Bit community plugin by Grafana Labs (`grafana-loki`) provides an alternative way to send logs to Loki. Although it is very similar to the [official plugin](https://grafana.com/docs/loki/<LOKI_VERSION>/send-data/fluentbit/fluent-bit-plugin/), there are some differences in the configuration options. This page provides instructions for how to install, configure, and use the Fluent Bit community plugin to send logs to Loki. Although the plugin is no longer actively maintained, this documentation is still available for reference.
+
+{{< youtube id="s43IBSVyTpQ" >}}
+
+## Usage
+
+### Docker
+
+You can run a Fluent Bit container with Loki output plugin pre-installed using our [Docker Hub](https://hub.docker.com/r/grafana/fluent-bit-plugin-loki) image:
+
+```bash
+docker run -v /var/log:/var/log \
+ -e LOG_PATH="/var/log/*.log" -e LOKI_URL="http://localhost:3100/loki/api/v1/push" \
+ grafana/fluent-bit-plugin-loki:latest
+```
+
+Alternatively, you can run the fluent-bit container using the [Docker Hub](https://hub.docker.com/r/fluent/fluent-bit) image.
+
+### Docker container logs
+
+To ship logs from Docker containers to Loki using Fluent Bit, you can use the Fluent Bit Docker image and configure it to forward logs directly to your Loki instance. Below is a step-by-step guide on setting up Fluent Bit for this purpose.
+
+#### Prerequisites
+
+- Docker is installed on your machine.
+- Running instance of Loki OSS.
+
+#### Configuration
+
+1. Create a Fluent Bit configuration file named `fluent-bit.conf` with the following content, which defines the input from Docker container logs and sets up the output to send logs to your Loki instance:
+
+ ```ini
+ [SERVICE]
+ Flush 1
+ Log_Level info
+
+ [INPUT]
+ Name tail
+ Path /var/lib/docker/containers/*/*.log
+ Parser docker
+ Tag docker.*
+
+ [OUTPUT]
+ Name grafana-loki
+ Match *
+ Url http://localhost:3100/loki/api/v1/push
+ Labels {job="fluentbit"}
+    ```
+
+### Kubernetes
+
+You can run Fluent Bit as a [daemonset](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) to collect all your Kubernetes workload logs.
+
+To do so you can use the [Fluent Bit Helm chart](https://github.com/fluent/helm-charts) with the following `values.yaml` changing the value of `FLUENT_LOKI_URL`:
+
+```yaml
+image:
+ # Here we use the Docker image which has the plugin installed
+ repository: grafana/fluent-bit-plugin-loki
+ tag: main-e2ed1c0
+
+args:
+ - "-e"
+ - "/fluent-bit/bin/out_grafana_loki.so"
+ - --workdir=/fluent-bit/etc
+ - --config=/fluent-bit/etc/conf/fluent-bit.conf
+
+env:
+ # Note that for security reasons you should fetch the credentials through a Kubernetes Secret https://kubernetes.io/docs/concepts/configuration/secret/ . You may use the envFrom for this.
+ - name: FLUENT_LOKI_URL
+ value: https://user:[email protected]/loki/api/v1/push
+
+config:
+ inputs: |
+ [INPUT]
+ Name tail
+ Tag kube.*
+ Path /var/log/containers/*.log
+ # Be aware that local clusters like docker-desktop or kind use the docker log format and not the cri (https://docs.fluentbit.io/manual/installation/kubernetes#container-runtime-interface-cri-parser)
+ multiline.parser docker, cri
+ Mem_Buf_Limit 5MB
+ Skip_Long_Lines On
+
+ outputs: |
+ [Output]
+ Name grafana-loki
+ Match kube.*
+ Url ${FLUENT_LOKI_URL}
+ Labels {job="fluent-bit"}
+ LabelKeys level,app # this sets the values for actual Loki streams and the other labels are converted to structured_metadata https://grafana.com/docs/loki/<LOKI_VERSION>/get-started/labels/structured-metadata/
+ BatchWait 1
+ BatchSize 1001024
+ LineFormat json
+ LogLevel info
+ AutoKubernetesLabels true
+```
+
+```bash
+helm repo add fluent https://fluent.github.io/helm-charts
+helm repo update
+helm install fluent-bit fluent/fluent-bit -f values.yaml
+```
+
+By default it will collect all container logs and extract labels from the Kubernetes API (`container_name`, `namespace`, etc.).
+
+If you also want to host your Loki instance inside the cluster install the [official Loki Helm chart](https://grafana.com/docs/loki/<LOKI_VERSION>/setup/install/helm/).
+
+### AWS Elastic Container Service (ECS)
+
+You can use the fluent-bit Loki Docker image as a Firelens log router in AWS ECS.
+For more information about this see our [AWS documentation](https://grafana.com/docs/loki/<LOKI_VERSION>/send-data/promtail/cloud/ecs/).
+
+### Local
+
+First, you need to follow the [instructions](https://github.com/grafana/loki/blob/main/clients/cmd/fluent-bit/README.md) in order to build the plugin dynamic library.
+
+Assuming you have Fluent Bit installed in your `$PATH`, you can run the plugin using:
+
+```bash
+fluent-bit -e /path/to/built/out_grafana_loki.so -c fluent-bit.conf
+```
+
+You can also adapt your plugins.conf, removing the need to change the command line options:
+
+```conf
+[PLUGINS]
+ Path /path/to/built/out_grafana_loki.so
+```
+
+## Configuration options
+
+| Key | Description | Default |
+|----------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------|
+| Url | Url of Loki server API endpoint. | http://localhost:3100/loki/api/v1/push |
+| TenantID | The tenant ID used by default to push logs to Loki. If omitted or empty it assumes Loki is running in single-tenant mode and no `X-Scope-OrgID` header is sent. | "" |
+| BatchWait            | Time to wait before sending a log batch to Loki, full or not.                                                                                                                                                                                                                                                                                            | 1s                                     |
+| BatchSize | Log batch size to send a log batch to Loki (unit: Bytes). | 10 KiB (10 * 1024 Bytes) |
+| Timeout | Maximum time to wait for Loki server to respond to a request. | 10s |
+| MinBackoff | Initial backoff time between retries. | 500ms |
+| MaxBackoff | Maximum backoff time between retries. | 5m |
+| MaxRetries | Maximum number of retries when sending batches. Setting it to `0` will retry indefinitely. | 10 |
+| Labels | Labels for API requests. | {job="fluent-bit"} |
+| LogLevel | LogLevel for plugin logger. | `info` |
+| RemoveKeys | Specify removing keys. | none |
+| AutoKubernetesLabels | If set to `true`, it will add all Kubernetes labels to Loki labels. | false |
+| LabelKeys | Comma separated list of keys to use as stream labels. All other keys will be placed into the log line. LabelKeys is deactivated when using `LabelMapPath` label mapping configuration. | none |
+| LineFormat | Format to use when flattening the record to a log line. Valid values are `json` or `key_value`. If set to `json` the log line sent to Loki will be the fluentd record (excluding any keys extracted out as labels) dumped as json. If set to `key_value`, the log line will be each item in the record concatenated together (separated by a single space) in the format <key>=<value>. | json |
+| DropSingleKey | If set to true and after extracting label_keys a record only has a single key remaining, the log line sent to Loki will just be the value of the record key. | true |
+| LabelMapPath | Path to a json file defining how to transform nested records. | none |
+| Buffer | Enable buffering mechanism. | false |
+| BufferType | Specify the buffering mechanism to use (currently only `dque` is implemented). | dque |
+| DqueDir | Path to the directory for queued logs. | /tmp/flb-storage/loki |
+| DqueSegmentSize | Segment size in terms of number of records per segment. | 500 |
+| DqueSync | Whether to fsync each queue change. Specify no fsync with `normal`, and fsync with `full`. | `normal` |
+| DqueName | Queue name, must be unique per output. | dque |
+
+### Labels
+
+Labels, for example `{container_name="nginx", cluster="us-west1"}`, are used to [query logs](https://grafana.com/docs/loki/<LOKI_VERSION>/query/). Labels are usually metadata about the workload producing the log stream (`instance`, `container_name`, `region`, `cluster`, `level`). In Loki labels are indexed, so you should be cautious when choosing them. High cardinality label values can have a drastic impact on query performance.
+
+You can use the config parameters `Labels`, `RemoveKeys`, `LabelKeys` and `LabelMapPath` to instruct the output plugin how to perform label extraction from your log entries or to add static labels to all log entries, as shown in the sketch below.
+
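+A minimal sketch of an `[Output]` section that sets static labels and promotes two record keys to stream labels (the label names and URL are illustrative, not required values):
+
+```properties
+[Output]
+    Name       grafana-loki
+    Match      *
+    Url        http://localhost:3100/loki/api/v1/push
+    # Static labels attached to every log entry
+    Labels     {job="fluent-bit", cluster="us-west1"}
+    # Promote these record keys to Loki stream labels; remaining keys stay in the log line
+    LabelKeys  level,app
+```
+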
+### AutoKubernetesLabels
+
+If set to `true`, `AutoKubernetesLabels` will add all Kubernetes labels to Loki labels automatically and ignore parameters `LabelKeys`, `LabelMapPath`.
+
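+A minimal sketch, assuming a Kubernetes input tagged `kube.*` (the tag and URL are illustrative):
+
+```properties
+[Output]
+    Name                 grafana-loki
+    Match                kube.*
+    Url                  http://localhost:3100/loki/api/v1/push
+    # Use every Kubernetes label as a Loki label; LabelKeys and LabelMapPath are ignored
+    AutoKubernetesLabels true
+```
+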
+### LabelMapPath
+
+When using the `Parser` and `Filter` plugins Fluent Bit can extract and add data to the current record/log data. While Loki labels are key value pairs, record data can be nested structures.
+You can pass a JSON file that defines how to extract labels from each record. Each JSON key from the file will be matched with the log record to find label values. Values from the configuration are used as label names.
+
+Consider the record below:
+
+```json
+{
+ "kubernetes": {
+ "container_name": "promtail",
+ "pod_name": "promtail-xxx",
+ "namespace_name": "prod",
+ "labels" : {
+ "team": "x-men"
+ }
+ },
+ "HOSTNAME": "docker-desktop",
+ "log" : "a log line",
+ "time": "20190926T152206Z"
+}
+```
+
+and a LabelMap file as follows:
+
+```json
+{
+ "kubernetes": {
+ "container_name": "container",
+ "pod_name": "pod",
+ "namespace_name": "namespace",
+ "labels" : {
+ "team": "team"
+ }
+ }
+}
+```
+
+The labels extracted will be `{team="x-men", container="promtail", pod="promtail-xxx", namespace="prod"}`.
+
+If you don't want the `kubernetes` and `HOSTNAME` fields to appear in the log line, you can use the `RemoveKeys` configuration field. For example, `RemoveKeys kubernetes,HOSTNAME`.
+
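+Putting it together, a sketch of an `[Output]` section that uses the label map above (the file path is an assumption for illustration):
+
+```properties
+[Output]
+    Name         grafana-loki
+    Match        *
+    Url          http://localhost:3100/loki/api/v1/push
+    # JSON file describing how to map nested record fields to label names
+    LabelMapPath /fluent-bit/etc/labelmap.json
+    # Drop the mapped structures from the log line itself
+    RemoveKeys   kubernetes,HOSTNAME
+```
+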
+### Buffering
+
+Buffering refers to the ability to store the records somewhere, and while they are processed and delivered, still be able to continue storing more records. The Loki output plugin can be blocked by the Loki client because of its design:
+
+- If the BatchSize is over the limit, the output plugin pauses receiving new records until the pending batch is successfully sent to the server
+- If the Loki server is unreachable (retry 429s, 500s and connection-level errors), the output plugin blocks new records until the Loki server is available again and the pending batch is successfully sent to the server, or until the maximum number of retries configured in the back-off mechanism has been reached
+
+The blocking state with some of the input plugins is not acceptable, because it can have an undesirable side effect on the part that generates the logs. Fluent Bit implements a buffering mechanism that is based on parallel processing. Therefore, it cannot send logs in order. There are two ways of handling the out-of-order logs:
+
+- Configure Loki to [accept out-of-order writes](https://grafana.com/docs/loki/<LOKI_VERSION>/configure/#accept-out-of-order-writes).
+
+- Configure the Loki output plugin to use the buffering mechanism based on [`dque`](https://github.com/joncrlsn/dque), which is compatible with the Loki server strict time ordering:
+
+ ```properties
+ [Output]
+ Name grafana-loki
+ Match *
+ Url http://localhost:3100/loki/api/v1/push
+ Buffer true
+ DqueSegmentSize 8096
+ DqueDir /tmp/flb-storage/buffer
+ DqueName loki.0
+ ```
+
+### Configuration examples
+
+To configure the Loki output plugin, add this section to your fluent-bit.conf file.
+
+```properties
+[Output]
+ Name grafana-loki
+ Match *
+ Url http://localhost:3100/loki/api/v1/push
+ BatchWait 1s
+ BatchSize 30720
+ # (30KiB)
+ Labels {test="fluent-bit-go", lang="Golang"}
+ RemoveKeys key1,key2
+ LabelKeys key3,key4
+ LineFormat key_value
+```
+
+```properties
+[Output]
+ Name grafana-loki
+ Match *
+ Url http://localhost:3100/loki/api/v1/push
+ BatchWait 1s
+ BatchSize 30720 # (30KiB)
+ AutoKubernetesLabels true
+ RemoveKeys key1,key2
+```
+
+A full [example configuration file](https://github.com/grafana/loki/blob/main/clients/cmd/fluent-bit/fluent-bit.conf) is also available in the Loki repository.
+
+### Running multiple plugin instances
+
+You can run multiple plugin instances in the same fluent-bit process, for example if you want to push to different Loki servers or route logs into different Loki tenant IDs. To do so, add additional `[Output]` sections.
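+
+For example, a sketch with two outputs routing different tags to different tenants (the tags, URLs and tenant IDs are illustrative):
+
+```properties
+[Output]
+    Name     grafana-loki
+    Match    kube.*
+    Url      http://loki-a.example.com:3100/loki/api/v1/push
+    TenantID team-a
+
+[Output]
+    Name     grafana-loki
+    Match    host.*
+    Url      http://loki-b.example.com:3100/loki/api/v1/push
+    TenantID team-b
+```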
diff --git a/docs/sources/send-data/fluentbit/fluent-bit-loki-tutorial.md b/docs/sources/send-data/fluentbit/fluent-bit-loki-tutorial.md
new file mode 100644
index 0000000000000..67e2a583e0445
--- /dev/null
+++ b/docs/sources/send-data/fluentbit/fluent-bit-loki-tutorial.md
@@ -0,0 +1,268 @@
+---
+title: Sending logs to Loki using Fluent Bit tutorial
+menuTitle: Fluent Bit tutorial
+description: Sending logs to Loki with Fluent Bit, using the official Fluent Bit Loki output plugin.
+weight: 250
+killercoda:
+ title: Sending logs to Loki using Fluent Bit tutorial
+  description: Sending logs to Loki with Fluent Bit, using the official Fluent Bit Loki output plugin.
+ preprocessing:
+ substitutions:
+ - regexp: loki-fundamentals-fluent-bit-1
+ replacement: loki-fundamentals_fluent-bit_1
+ - regexp: docker compose
+ replacement: docker-compose
+ backend:
+ imageid: ubuntu
+---
+
+<!-- INTERACTIVE page intro.md START -->
+
+# Sending logs to Loki using Fluent Bit tutorial
+
+In this tutorial, you will learn how to send logs to Loki using Fluent Bit. Fluent Bit is a lightweight and fast log processor and forwarder that can collect, process, and deliver logs to various destinations. We will use the official Fluent Bit Loki output plugin to send logs to Loki.
+
+
+<!-- INTERACTIVE ignore START -->
+
+## Dependencies
+
+Before you begin, ensure you have the following to run the demo:
+
+- Docker
+- Docker Compose
+
+{{< admonition type="tip" >}}
+Alternatively, you can try out this example in our interactive learning environment: [Sending logs to Loki using Fluent Bit tutorial](https://killercoda.com/grafana-labs/course/loki/fluentbit-loki-tutorial).
+
+It's a fully configured environment with all the dependencies already installed.
+
+
+
+Provide feedback, report bugs, and raise issues in the [Grafana Killercoda repository](https://github.com/grafana/killercoda).
+{{< /admonition >}}
+
+<!-- INTERACTIVE ignore END -->
+
+## Scenario
+
+In this scenario, we have a microservices application called the Carnivorous Greenhouse. This application consists of the following services:
+
+- **User Service:** Manages user data and authentication for the application, such as creating users and logging in.
+- **Plant Service:** Manages the creation of new plants and updates other services when a new plant is created.
+- **Simulation Service:** Generates sensor data for each plant.
+- **Websocket Service:** Manages the websocket connections for the application.
+- **Bug Service:** A service that when enabled, randomly causes services to fail and generate additional logs.
+- **Main App:** The main application that ties all the services together.
+- **Database:** A database that stores user and plant data.
+
+Each service has been instrumented with the Fluent Bit logging framework to generate logs. If you would like to learn more about how the Carnivorous Greenhouse application was instrumented with Fluent Bit, refer to the [Carnivorous Greenhouse repository](https://github.com/grafana/loki-fundamentals/blob/fluentbit-official/greenhouse/loggingfw.py).
+
+<!-- INTERACTIVE page intro.md END -->
+
+<!-- INTERACTIVE page step1.md START -->
+
+## Step 1: Environment setup
+
+In this step, we will set up our environment by cloning the repository that contains our demo application and spinning up our observability stack using Docker Compose.
+
+1. To get started, clone the repository that contains our demo application:
+
+ ```bash
+ git clone -b fluentbit-official https://github.com/grafana/loki-fundamentals.git
+ ```
+
+1. Next we will spin up our observability stack using Docker Compose:
+
+ ```bash
+ docker compose -f loki-fundamentals/docker-compose.yml up -d
+ ```
+
+ This will spin up the following services:
+ ```console
+ ✔ Container loki-fundamentals-grafana-1 Started
+ ✔ Container loki-fundamentals-loki-1 Started
+ ✔ Container loki-fundamentals-fluent-bit-1 Started
+ ```
+Once we have finished configuring the Fluent Bit agent and sending logs to Loki, we will be able to view the logs in Grafana. To check if Grafana is up and running, navigate to the following URL: [http://localhost:3000](http://localhost:3000)
+<!-- INTERACTIVE page step1.md END -->
+
+<!-- INTERACTIVE page step2.md START -->
+
+## Step 2: Configure Fluent Bit to send logs to Loki
+
+To configure Fluent Bit to receive logs from our application, we need to provide a configuration file that defines the entire observability pipeline.
+
+### Open your code editor and locate the `fluent-bit.conf` file
+
+Fluent Bit requires a configuration file to define the components and their relationships. The configuration file is written using Fluent Bit configuration syntax. We will build the entire observability pipeline within this configuration file. To start, we will open the `fluent-bit.conf` file in the code editor:
+
+{{< docs/ignore >}}
+> Note: Killercoda has an inbuilt Code editor which can be accessed via the `Editor` tab.
+1. Expand the `loki-fundamentals` directory in the file explorer of the `Editor` tab.
+1. Locate the `fluent-bit.conf` file in the top level directory, `loki-fundamentals`.
+1. Click on the `fluent-bit.conf` file to open it in the code editor.
+{{< /docs/ignore >}}
+
+<!-- INTERACTIVE ignore START -->
+1. Open the `loki-fundamentals` directory in a code editor of your choice.
+1. Locate the `fluent-bit.conf` file in the `loki-fundamentals` directory (Top level directory).
+1. Click on the `fluent-bit.conf` file to open it in the code editor.
+<!-- INTERACTIVE ignore END -->
+
+You will copy all of the configuration snippets into the `fluent-bit.conf` file.
+
+### Receiving Fluent Bit protocol logs
+
+The first step is to configure Fluent Bit to receive logs from the Carnivorous Greenhouse application. Since the application is instrumented with the Fluent Bit logging framework, it will send logs using the forward protocol (unique to Fluent Bit). We will use the `forward` input plugin to receive logs from the application.
+
+Now add the following configuration to the `fluent-bit.conf` file:
+```conf
+[INPUT]
+ Name forward
+ Listen 0.0.0.0
+ Port 24224
+```
+
+In this configuration:
+- `Name`: The name of the input plugin. In this case, we are using the `forward` input plugin.
+- `Listen`: The IP address to listen on. In this case, we are listening on all IP addresses.
+- `Port`: The port to listen on. In this case, we are listening on port `24224`.
+
+For more information on the `forward` input plugin, see the [Fluent Bit Forward documentation](https://docs.fluentbit.io/manual/pipeline/inputs/forward).
+
+
+
+### Export logs to Loki using the official Loki output plugin
+
+Lastly, we will configure Fluent Bit to export logs to Loki using the official Loki output plugin. The Loki output plugin allows you to send logs or events to a Loki service. It supports data enrichment with Kubernetes labels, custom label keys, and structured metadata.
+
+Add the following configuration to the `fluent-bit.conf` file:
+```conf
+[OUTPUT]
+ name loki
+ match service.**
+ host loki
+ port 3100
+ labels agent=fluent-bit
+ label_map_path /fluent-bit/etc/conf/logmap.json
+```
+
+In this configuration:
+- `name`: The name of the output plugin. In this case, we are using the `loki` output plugin.
+- `match`: The tag to match. In this case, we are matching all logs with the tag `service.**`.
+- `host`: The hostname of the Loki service. In this case, we are using the hostname `loki`.
+- `port`: The port of the Loki service. In this case, we are using port `3100`.
+- `labels`: Additional labels to add to the logs. In this case, we are adding the label `agent=fluent-bit`.
+- `label_map_path`: The path to the label map file. In this case, we are using the file `logmap.json`.
+
+For more information on the `loki` output plugin, see the [Fluent Bit Loki documentation](https://docs.fluentbit.io/manual/pipeline/outputs/loki).
+
+#### `logmap.json` file
+
+The `logmap.json` file is used to map the log fields to the Loki labels. In this tutorial we have pre-filled the `logmap.json` file with the following configuration:
+```json
+{
+  "service": "service_name",
+  "instance_id": "instance_id"
+}
+```
+This configuration maps the `service` field to the Loki label `service_name` and the `instance_id` field to the Loki label `instance_id`.
+
+
+### Reload the Fluent Bit configuration
+
+After adding the configuration to the `fluent-bit.conf` file, you will need to reload the Fluent Bit configuration. To reload the configuration, run the following command:
+
+```bash
+docker restart loki-fundamentals-fluent-bit-1
+```
+To verify that the configuration has been loaded successfully, you can check the Fluent Bit logs by running the following command:
+
+```bash
+docker logs loki-fundamentals-fluent-bit-1
+```
+
+## Stuck? Need help?
+
+If you get stuck or need help creating the configuration, you can copy and replace the entire `fluent-bit.conf` using the completed configuration file:
+
+```bash
+cp loki-fundamentals/completed/fluent-bit.conf loki-fundamentals/fluent-bit.conf
+docker restart loki-fundamentals-fluent-bit-1
+```
+
+<!-- INTERACTIVE page step2.md END -->
+
+<!-- INTERACTIVE page step3.md START -->
+
+## Step 3: Start the Carnivorous Greenhouse
+
+In this step, we will start the Carnivorous Greenhouse application. To start the application, run the following command:
+<!-- INTERACTIVE ignore START -->
+{{< admonition type="note" >}}
+This docker-compose file relies on the `loki-fundamentals_loki` Docker network. If you have not started the observability stack, you will need to start it first.
+{{< /admonition >}}
+<!-- INTERACTIVE ignore END -->
+
+{{< docs/ignore >}}
+
+> Note: This docker-compose file relies on the `loki-fundamentals_loki` docker network. If you have not started the observability stack, you will need to start it first.
+
+{{< /docs/ignore >}}
+
+```bash
+docker compose -f loki-fundamentals/greenhouse/docker-compose-micro.yml up -d --build
+```
+
+This will start the following services:
+```bash
+ ✔ Container greenhouse-db-1 Started
+ ✔ Container greenhouse-websocket_service-1 Started
+ ✔ Container greenhouse-bug_service-1 Started
+ ✔ Container greenhouse-user_service-1 Started
+ ✔ Container greenhouse-plant_service-1 Started
+ ✔ Container greenhouse-simulation_service-1 Started
+ ✔ Container greenhouse-main_app-1 Started
+```
+
+Once started, you can access the Carnivorous Greenhouse application at [http://localhost:5005](http://localhost:5005). Generate some logs by interacting with the application in the following ways:
+
+1. Create a user.
+1. Log in.
+1. Create a few plants to monitor.
+1. Enable bug mode to activate the bug service. This will cause services to fail and generate additional logs.
+
+Finally, to view the logs in Loki, navigate to the Loki Logs Explore view in Grafana at [http://localhost:3000/a/grafana-lokiexplore-app/explore](http://localhost:3000/a/grafana-lokiexplore-app/explore).
+
+
+<!-- INTERACTIVE page step3.md END -->
+
+<!-- INTERACTIVE page finish.md START -->
+
+## Summary
+
+In this tutorial, you learned how to send logs to Loki using Fluent Bit. You configured Fluent Bit to receive logs from the Carnivorous Greenhouse application and export logs to Loki using the official Loki output plugin. Where to next?
+
+{{< docs/ignore >}}
+
+### Back to Docs
+Head back to where you started from to continue with the [Loki documentation](https://grafana.com/docs/loki/latest/send-data/fluentbit/).
+
+{{< /docs/ignore >}}
+
+
+## Further reading
+
+For more information on Fluent Bit, refer to the following resources:
+- [Fluent Bit documentation](https://docs.fluentbit.io/manual/)
+- [Other examples of Fluent Bit configurations](https://grafana.com/docs/loki/latest/send-data/fluentbit/)
+
+## Complete metrics, logs, traces, and profiling example
+
+If you would like to use a demo that includes Mimir, Loki, Tempo, and Grafana, you can use [Introduction to Metrics, Logs, Traces, and Profiling in Grafana](https://github.com/grafana/intro-to-mltp). `Intro-to-mltp` provides a self-contained environment for learning about Mimir, Loki, Tempo, and Grafana.
+
+The project includes detailed explanations of each component and annotated configurations for a single-instance deployment. Data from `intro-to-mltp` can also be pushed to Grafana Cloud.
+
+
+<!-- INTERACTIVE page finish.md END -->
diff --git a/docs/sources/send-data/fluentbit/fluent-bit-plugin.md b/docs/sources/send-data/fluentbit/fluent-bit-plugin.md
new file mode 100644
index 0000000000000..7d0cca7393704
--- /dev/null
+++ b/docs/sources/send-data/fluentbit/fluent-bit-plugin.md
@@ -0,0 +1,148 @@
+---
+title: Fluent Bit Loki output plugin
+menuTitle: Fluent Bit
+description: Provides instructions for how to install, configure, and use the Fluent Bit client to send logs to Loki.
+aliases:
+- ../clients/fluentbit/
+weight: 500
+---
+# Fluent Bit Loki output plugin
+
+[Fluent Bit](https://fluentbit.io/) is a fast and lightweight logs and metrics processor and forwarder that can be configured with the [Fluent-bit Loki output plugin](https://docs.fluentbit.io/manual/pipeline/outputs/loki) to ship logs to Loki.
+
+You can define which log files you want to collect using the [`Tail`](https://docs.fluentbit.io/manual/pipeline/inputs/tail) or [`Stdin`](https://docs.fluentbit.io/manual/pipeline/inputs/standard-input) data pipeline input. Additionally, Fluent Bit supports multiple `Filter` and `Parser` plugins (`Kubernetes`, `JSON`, etc.) to structure and alter log lines.
+
+{{< admonition type="note" >}}
+There are two Fluent Bit plugins for Loki: the officially maintained plugin `loki` and the `grafana-loki` plugin. We recommend using the `loki` plugin described within this page as it's officially maintained by the Fluent Bit project.
+
+For more information, see the [Fluent Bit Loki output plugin documentation](https://docs.fluentbit.io/manual/pipeline/outputs/loki). Note that the `grafana-loki` plugin is no longer actively maintained.
+{{< /admonition >}}
+
+## Configuration
+
+All configuration options for the Fluent Bit Loki output plugin are documented in the [Fluent Bit Loki output plugin documentation](https://docs.fluentbit.io/manual/pipeline/outputs/loki#configuration-parameters).
+
+Here is a generic example for connecting Fluent Bit to Loki hosted on Grafana Cloud:
+
+```conf
+ [OUTPUT]
+ Name loki
+ Match *
+ Host YourHostname.company.com
+ port 443
+ tls on
+ tls.verify on
+ http_user XXX
+ http_passwd XXX
+```
+
+Replace `Host`, `http_user`, and `http_passwd` with your Grafana Cloud Loki endpoint and credentials.
+
+
+## Usage examples
+
+Here are some examples of how to use Fluent Bit to send logs to Loki.
+
+### Tail Docker logs
+
+Here is an example of running Fluent Bit in a Docker container to collect Docker logs and send them to a local Loki instance.
+
+```bash
+docker run -v /var/lib/docker/containers:/var/lib/docker/containers fluent/fluent-bit:latest /fluent-bit/bin/fluent-bit -i tail -p Path="/var/lib/docker/containers/*/*.log" -p Parser=docker -p Tag="docker.*" -o loki -p host=loki -p port=3100 -p labels="agent=fluent-bit,env=docker"
+```
+
+In this example, we are using the `tail` input plugin to collect Docker logs and the `loki` output plugin to send logs to Loki. Note it is recommended to use a configuration file to define the input and output plugins. The `-p` flag is used to pass configuration parameters to the plugins.
+
+#### Configuration file (Alternative to command line arguments)
+
+Create a configuration file `fluent-bit.conf` with the following content:
+
+```conf
+[INPUT]
+ Name tail
+ Path /var/lib/docker/containers/*/*.log
+ Parser docker
+ Tag docker.*
+
+[OUTPUT]
+ Name loki
+ Match *
+ Host loki
+ Port 3100
+    Labels        agent=fluent-bit,env=docker
+```
+
+Run Fluent Bit with the configuration file:
+
+```bash
+docker run -v /var/lib/docker/containers:/var/lib/docker/containers -v $(pwd)/fluent-bit.conf:/fluent-bit/etc/fluent-bit.conf fluent/fluent-bit:latest /fluent-bit/bin/fluent-bit -c /fluent-bit/etc/fluent-bit.conf
+```
+
+### Collect Docker events
+
+Here is an example of running Fluent Bit in a Docker container to collect Docker events and send them to a local Loki instance.
+
+```bash
+docker run -v /var/run/docker.sock:/var/run/docker.sock fluent/fluent-bit:latest /fluent-bit/bin/fluent-bit -i docker_events -o loki -p host=loki -p port=3100 -p labels="agent=fluent-bit,env=docker"
+```
+
+In this example, we are using the `docker_events` input plugin to collect Docker events and the `loki` output plugin to send logs to Loki. Note it is recommended to use a configuration file to define the input and output plugins. The `-p` flag is used to pass configuration parameters to the plugins.
+
+#### Configuration file (Alternative to command line arguments)
+
+Create a configuration file `fluent-bit.conf` with the following content:
+
+```conf
+[INPUT]
+ Name docker_events
+
+[OUTPUT]
+ Name loki
+ Match *
+ Host loki
+ Port 3100
+ Labels agent=fluent-bit,env=docker
+```
+
+Run Fluent Bit with the configuration file:
+
+```bash
+docker run -v /var/run/docker.sock:/var/run/docker.sock -v $(pwd)/fluent-bit.conf:/fluent-bit/etc/fluent-bit.conf fluent/fluent-bit:latest /fluent-bit/bin/fluent-bit -c /fluent-bit/etc/fluent-bit.conf
+```
+
+### Collect Kubernetes logs
+
+The recommended way to collect logs from Kubernetes with Fluent Bit is to use the Helm chart provided by the Fluent Bit project. The Helm chart is available at [https://github.com/fluent/helm-charts](https://github.com/fluent/helm-charts).
+
+Here is an example of how to deploy the Fluent Bit Helm chart to collect logs from Kubernetes and send them to Loki:
+
+1. Add the Fluent Bit Helm repository:
+
+ ```bash
+    helm repo add fluent https://fluent.github.io/helm-charts
+    ```
+
+1. Create a `values.yaml` file with the following content:
+
+ ```yaml
+ config:
+ outputs: |
+ [OUTPUT]
+ Name loki
+ Match *
+ Host YourHost.Company.net
+ port 443
+ tls on
+ tls.verify on
+ http_user XXX
+ http_passwd XXX
+            Labels agent=fluent-bit
+    ```
+
+ Note we are only updating the `outputs` section of the Fluent Bit configuration. This is to replace the default output plugin with the Loki output plugin. If you need to update other parts of the Fluent Bit configuration refer to the [Fluent Bit values file reference](https://github.com/fluent/helm-charts/blob/main/charts/fluent-bit/values.yaml).
+
+1. Deploy the Fluent Bit Helm chart:
+
+ ```bash
+ helm install fluent-bit fluent/fluent-bit -f values.yaml
+    ```
+
+## Next steps
+
+- [Sending logs to Loki using Fluent Bit tutorial](https://grafana.com/docs/loki/<LOKI_VERSION>/send-data/fluentbit/fluent-bit-loki-tutorial/)
\ No newline at end of file
|
docs
|
Updated Fluent Bit docs to use official plugin + Sandbox (#14004)
|
eab103fddf1a90b61c013a908e005b534ea4d8d8
|
2021-06-22 18:42:56
|
sanadhis
|
docs: http_path_prefix as correct item of server_config (#3868)
| false
|
diff --git a/docs/sources/configuration/_index.md b/docs/sources/configuration/_index.md
index fbe0f0a411ed6..243a3a1303049 100644
--- a/docs/sources/configuration/_index.md
+++ b/docs/sources/configuration/_index.md
@@ -203,7 +203,7 @@ The `server_config` block configures the HTTP and gRPC server of the launched se
# Base path to serve all API routes from (e.g., /v1/).
# CLI flag: -server.path-prefix
-[http_prefix: <string> | default = "/api/prom"]
+[http_path_prefix: <string> | default = ""]
```
## distributor_config
|
docs
|
http_path_prefix as correct item of server_config (#3868)
|
e3b35df5bc764dbffd7bc45a00f8988abd6692eb
|
2021-09-09 19:56:30
|
Egor
|
docs: fix link to Promtail documentation (#4293)
| false
|
diff --git a/production/README.md b/production/README.md
index e8d79b312367f..f84dc0fbab9a9 100644
--- a/production/README.md
+++ b/production/README.md
@@ -8,7 +8,7 @@ Currently there are five ways to try out Loki, in order from easier to hardest:
- [Build Loki from source](#build-and-run-from-source)
- [Get inspired by our production setup](#get-inspired-by-our-production-setup)
-For the various ways to run `promtail`, the tailing agent, see our [Promtail documentation](../docs/clients/promtail/README.md).
+For the various ways to run `promtail`, the tailing agent, see our [Promtail documentation](../docs/sources/clients/promtail/installation.md).
## Grafana Cloud: Hosted Logs
|
docs
|
fix link to Promtail documentation (#4293)
|
e5ed1578c8e6275038d102a7e046d0145bfd652a
|
2022-04-13 12:45:18
|
Mathias Chapelain
|
loki: Fix common config net interface name overwritten by ring common config (#5888)
| false
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2ce8b1dd0c296..81b6524ce5d05 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,4 +1,5 @@
## Main
+* [5888](https://github.com/grafana/loki/pull/5888) **Papawy** Fix common config net interface name overwritten by ring common config
* [5799](https://github.com/grafana/loki/pull/5799) **cyriltovena** Fix deduping issues when multiple entries with the same timestamp exist.
* [5799](https://github.com/grafana/loki/pull/5799) **cyriltovena** Fixes deduping issues when multiple entries exists with the same timestamp.
* [5780](https://github.com/grafana/loki/pull/5780) **simonswine**: Update alpine image to 3.15.4.
diff --git a/pkg/loki/config_wrapper.go b/pkg/loki/config_wrapper.go
index d713ce608aee1..63fe392bb536c 100644
--- a/pkg/loki/config_wrapper.go
+++ b/pkg/loki/config_wrapper.go
@@ -121,20 +121,16 @@ func (c *ConfigWrapper) ApplyDynamicConfig() cfg.Source {
// - "instance-interface-names", a list of net interfaces used when looking for addresses.
func applyInstanceConfigs(r, defaults *ConfigWrapper) {
if !reflect.DeepEqual(r.Common.InstanceAddr, defaults.Common.InstanceAddr) {
- r.Ingester.LifecyclerConfig.Addr = r.Common.InstanceAddr
- r.CompactorConfig.CompactorRing.InstanceAddr = r.Common.InstanceAddr
- r.Distributor.DistributorRing.InstanceAddr = r.Common.InstanceAddr
- r.Ruler.Ring.InstanceAddr = r.Common.InstanceAddr
- r.QueryScheduler.SchedulerRing.InstanceAddr = r.Common.InstanceAddr
+ if reflect.DeepEqual(r.Common.Ring.InstanceAddr, defaults.Common.Ring.InstanceAddr) {
+ r.Common.Ring.InstanceAddr = r.Common.InstanceAddr
+ }
r.Frontend.FrontendV2.Addr = r.Common.InstanceAddr
}
if !reflect.DeepEqual(r.Common.InstanceInterfaceNames, defaults.Common.InstanceInterfaceNames) {
- r.Ingester.LifecyclerConfig.InfNames = r.Common.InstanceInterfaceNames
- r.CompactorConfig.CompactorRing.InstanceInterfaceNames = r.Common.InstanceInterfaceNames
- r.Distributor.DistributorRing.InstanceInterfaceNames = r.Common.InstanceInterfaceNames
- r.Ruler.Ring.InstanceInterfaceNames = r.Common.InstanceInterfaceNames
- r.QueryScheduler.SchedulerRing.InstanceInterfaceNames = r.Common.InstanceInterfaceNames
+ if reflect.DeepEqual(r.Common.Ring.InstanceInterfaceNames, defaults.Common.Ring.InstanceInterfaceNames) {
+ r.Common.Ring.InstanceInterfaceNames = r.Common.InstanceInterfaceNames
+ }
r.Frontend.FrontendV2.InfNames = r.Common.InstanceInterfaceNames
}
}
diff --git a/pkg/loki/config_wrapper_test.go b/pkg/loki/config_wrapper_test.go
index ad6cd02a8fbf5..89c575ff00c83 100644
--- a/pkg/loki/config_wrapper_test.go
+++ b/pkg/loki/config_wrapper_test.go
@@ -1484,4 +1484,42 @@ common:
assert.Equal(t, []string{"ringsshouldntusethis"}, config.Frontend.FrontendV2.InfNames) // not a ring.
assert.Equal(t, []string{"ringsshouldusethis"}, config.CompactorConfig.CompactorRing.InstanceInterfaceNames)
})
+
+ t.Run("common instance net interface doesn't get overwritten by common ring config", func(t *testing.T) {
+ yamlContent := `common:
+ instance_interface_names:
+ - interface
+ ring:
+ kvstore:
+ store: inmemory`
+
+ config, _, err := configWrapperFromYAML(t, yamlContent, nil)
+ assert.NoError(t, err)
+ assert.Equal(t, []string{"interface"}, config.Distributor.DistributorRing.InstanceInterfaceNames)
+ assert.Equal(t, []string{"interface"}, config.Ingester.LifecyclerConfig.InfNames)
+ assert.Equal(t, []string{"interface"}, config.Ruler.Ring.InstanceInterfaceNames)
+ assert.Equal(t, []string{"interface"}, config.QueryScheduler.SchedulerRing.InstanceInterfaceNames)
+ assert.Equal(t, []string{"interface"}, config.Frontend.FrontendV2.InfNames)
+ assert.Equal(t, []string{"interface"}, config.CompactorConfig.CompactorRing.InstanceInterfaceNames)
+ })
+
+ t.Run("common instance net interface doesn't supersede net interface from common ring with additional config", func(t *testing.T) {
+ yamlContent := `common:
+ instance_interface_names:
+ - ringsshouldntusethis
+ ring:
+ instance_interface_names:
+ - ringsshouldusethis
+ kvstore:
+ store: inmemory`
+
+ config, _, err := configWrapperFromYAML(t, yamlContent, nil)
+ assert.NoError(t, err)
+ assert.Equal(t, []string{"ringsshouldusethis"}, config.Distributor.DistributorRing.InstanceInterfaceNames)
+ assert.Equal(t, []string{"ringsshouldusethis"}, config.Ingester.LifecyclerConfig.InfNames)
+ assert.Equal(t, []string{"ringsshouldusethis"}, config.Ruler.Ring.InstanceInterfaceNames)
+ assert.Equal(t, []string{"ringsshouldusethis"}, config.QueryScheduler.SchedulerRing.InstanceInterfaceNames)
+ assert.Equal(t, []string{"ringsshouldntusethis"}, config.Frontend.FrontendV2.InfNames) // not a ring.
+ assert.Equal(t, []string{"ringsshouldusethis"}, config.CompactorConfig.CompactorRing.InstanceInterfaceNames)
+ })
}
|
loki
|
Fix common config net interface name overwritten by ring common config (#5888)
|
f1ff3d252b586a039c603acfc9b28cbc2d3d2375
|
2024-11-07 20:55:02
|
George Robinson
|
chore(blooms): fix lint (#14823)
| false
|
diff --git a/pkg/bloombuild/common/tsdb.go b/pkg/bloombuild/common/tsdb.go
index 2a640de8f83ea..868828e72a337 100644
--- a/pkg/bloombuild/common/tsdb.go
+++ b/pkg/bloombuild/common/tsdb.go
@@ -135,7 +135,7 @@ func NewTSDBSeriesIter(ctx context.Context, user string, f sharding.ForSeries, b
user,
bounds,
0, math.MaxInt64,
- func(_ labels.Labels, fp model.Fingerprint, chks []index.ChunkMeta) (stop bool) {
+ func(_ labels.Labels, fp model.Fingerprint, _ []index.ChunkMeta) (stop bool) {
select {
case <-ctx.Done():
return true
|
chore
|
fix lint (#14823)
|
b6991f29d232267c1fa0ed8dff55da72240c23f6
|
2024-08-16 18:28:52
|
renovate[bot]
|
fix(deps): update module github.com/ibm/go-sdk-core/v5 to v5.17.4 (#13892)
| false
|
diff --git a/go.mod b/go.mod
index 80d68cbe26414..e352f76f58b40 100644
--- a/go.mod
+++ b/go.mod
@@ -116,7 +116,7 @@ require (
github.com/Azure/go-autorest/autorest v0.11.29
github.com/DataDog/sketches-go v1.4.6
github.com/DmitriyVTitov/size v1.5.0
- github.com/IBM/go-sdk-core/v5 v5.17.3
+ github.com/IBM/go-sdk-core/v5 v5.17.4
github.com/IBM/ibm-cos-sdk-go v1.11.0
github.com/axiomhq/hyperloglog v0.0.0-20240507144631-af9851f82b27
github.com/buger/jsonparser v1.1.1
diff --git a/go.sum b/go.sum
index c6b939a942b06..eb32fd206bf54 100644
--- a/go.sum
+++ b/go.sum
@@ -251,8 +251,8 @@ github.com/DmitriyVTitov/size v1.5.0 h1:/PzqxYrOyOUX1BXj6J9OuVRVGe+66VL4D9FlUaW5
github.com/DmitriyVTitov/size v1.5.0/go.mod h1:le6rNI4CoLQV1b9gzp1+3d7hMAD/uu2QcJ+aYbNgiU0=
github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM=
github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo=
-github.com/IBM/go-sdk-core/v5 v5.17.3 h1:CZSVCKzhQc/hRQZOtuEmi9dlNtWMnxJvOsPtQKP7cZ4=
-github.com/IBM/go-sdk-core/v5 v5.17.3/go.mod h1:GatGZpxlo1KaxiRN6E10/rNgWtUtx1hN/GoHSCaSPKA=
+github.com/IBM/go-sdk-core/v5 v5.17.4 h1:VGb9+mRrnS2HpHZFM5hy4J6ppIWnwNrw0G+tLSgcJLc=
+github.com/IBM/go-sdk-core/v5 v5.17.4/go.mod h1:KsAAI7eStAWwQa4F96MLy+whYSh39JzNjklZRbN/8ns=
github.com/IBM/ibm-cos-sdk-go v1.11.0 h1:Jp55NLN3OvBwucMGpP5wNybyjncsmTZ9+GPHai/1cE8=
github.com/IBM/ibm-cos-sdk-go v1.11.0/go.mod h1:FnWOym0CvrPM0nHoXvceClOEvGVXecPpmVIO5RFjlFk=
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
diff --git a/vendor/github.com/IBM/go-sdk-core/v5/core/version.go b/vendor/github.com/IBM/go-sdk-core/v5/core/version.go
index 48ade6942e6d4..b7869180380f2 100644
--- a/vendor/github.com/IBM/go-sdk-core/v5/core/version.go
+++ b/vendor/github.com/IBM/go-sdk-core/v5/core/version.go
@@ -15,4 +15,4 @@ package core
// limitations under the License.
// Version of the SDK
-const __VERSION__ = "5.17.3"
+const __VERSION__ = "5.17.4"
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 1c961c082295d..91d2fdf4ad96f 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -199,7 +199,7 @@ github.com/DataDog/sketches-go/ddsketch/store
# github.com/DmitriyVTitov/size v1.5.0
## explicit; go 1.14
github.com/DmitriyVTitov/size
-# github.com/IBM/go-sdk-core/v5 v5.17.3
+# github.com/IBM/go-sdk-core/v5 v5.17.4
## explicit; go 1.20
github.com/IBM/go-sdk-core/v5/core
# github.com/IBM/ibm-cos-sdk-go v1.11.0
|
fix
|
update module github.com/ibm/go-sdk-core/v5 to v5.17.4 (#13892)
|
8f6d31c34e64309d95828e4af0d2b753bef87aa7
|
2023-04-15 00:49:29
|
Alfredo
|
docs: update template function (#9037)
| false
|
diff --git a/docs/sources/logql/template_functions.md b/docs/sources/logql/template_functions.md
index 18abf34587bbc..2006cbb74a487 100644
--- a/docs/sources/logql/template_functions.md
+++ b/docs/sources/logql/template_functions.md
@@ -25,6 +25,16 @@ Example:
{{ .path | replace " " "_" | trunc 5 | upper }}
```
+For functions that return a `bool`, such as `contains`, `eq`, `hasPrefix` and `hasSuffix`, you can apply `and` / `or` and nested `if` logic.
+
+Example:
+
+```template
+{{ if and (contains "he" "hello") (contains "llo" "hello") }} yes {{end}}
+{{ if or (contains "he" "hello") (contains "llo" "hello") }} yes {{end}}
+{{ if contains .err "ErrTimeout" }} timeout {{else if contains "he" "hello"}} yes {{else}} no {{end}}
+```
+
## __line__
This function returns the current log line.
@@ -273,6 +283,19 @@ Examples:
{{ if contains "he" "hello" }} yes {{end}}
```
+## eq
+
+Use this function to test whether one string exactly matches another.
+
+Signature: `eq(s string, src string) bool`
+
+Examples:
+
+```template
+{{ if eq .err "ErrTimeout" }} timeout {{end}}
+{{ if eq "he" "hello" }} yes {{end}}
+```
+
## hasPrefix and hasSuffix
The `hasPrefix` and `hasSuffix` functions test whether a string has a given prefix or suffix.
|
docs
|
update template function (#9037)
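The boolean composition documented in the diff above can be tried outside of Loki with plain `text/template`. The sketch below is illustrative only: it registers its own `contains` helper (backed by `strings.Contains`, using the needle-first argument order shown in the docs), so the helper registration, the `.line` field and the sample data are assumptions rather than Loki's actual template environment.

```go
package main

import (
	"os"
	"strings"
	"text/template"
)

func main() {
	// A stand-in "contains" helper with the needle-first argument order from
	// the docs example; strings.Contains expects (haystack, needle), so the
	// arguments are swapped inside the closure.
	funcs := template.FuncMap{
		"contains": func(substr, s string) bool { return strings.Contains(s, substr) },
	}

	const src = `{{ if and (contains "he" .line) (contains "llo" .line) }}both{{ end }}
{{ if or (contains "xyz" .line) (contains "llo" .line) }}at least one{{ end }}
{{ if contains "ErrTimeout" .line }}timeout{{ else if contains "he" .line }}greeting{{ else }}no match{{ end }}`

	tmpl := template.Must(template.New("demo").Funcs(funcs).Parse(src))

	// Prints "both", "at least one" and "greeting" on three lines.
	_ = tmpl.Execute(os.Stdout, map[string]string{"line": "hello"})
}
```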
|
1f1dd81555847134a5e923a29a44032eb1f296e0
|
2022-11-10 16:16:21
|
MarNicGit
|
promtail: Exclude event message (#7462)
| false
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 01c5e75866943..c47a36d3acf07 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -100,6 +100,7 @@ Check the history of the branch FIXME.
* [6656](https://github.com/grafana/loki/pull/6656) **carlospeon**: Allow promtail to add matches to the journal reader
* [7401](https://github.com/grafana/loki/pull/7401) **thepalbi**: Add timeout to GCP Logs push target
* [7414](https://github.com/grafana/loki/pull/7414) **thepalbi**: Add basic tracing support
+* [7462](https://github.com/grafana/loki/pull/7462) **MarNicGit**: Allow excluding event message from Windows Event Log entries.
##### Fixes
* [7394](https://github.com/grafana/loki/pull/7394) **liguozhong**: Fix issue with the Cloudflare target that caused it to stop working after it received an error in the logpull request as explained in issue https://github.com/grafana/loki/issues/6150
diff --git a/clients/pkg/promtail/scrapeconfig/scrapeconfig.go b/clients/pkg/promtail/scrapeconfig/scrapeconfig.go
index fa99768db0f38..ec8b0daa6c4a3 100644
--- a/clients/pkg/promtail/scrapeconfig/scrapeconfig.go
+++ b/clients/pkg/promtail/scrapeconfig/scrapeconfig.go
@@ -236,6 +236,9 @@ type WindowsEventsTargetConfig struct {
// ExcludeEventData allows to exclude the xml event data.
ExcludeEventData bool `yaml:"exclude_event_data"`
+ // ExcludeEventMessage allows to exclude the human-friendly message contained in each windows event.
+ ExcludeEventMessage bool `yaml:"exclude_event_message"`
+
// ExcludeUserData allows to exclude the user data of each windows event.
ExcludeUserData bool `yaml:"exclude_user_data"`
diff --git a/clients/pkg/promtail/targets/windows/format.go b/clients/pkg/promtail/targets/windows/format.go
index 03c83ef54ce16..9fc44cc62a8ba 100644
--- a/clients/pkg/promtail/targets/windows/format.go
+++ b/clients/pkg/promtail/targets/windows/format.go
@@ -73,7 +73,6 @@ func formatLine(cfg *scrapeconfig.WindowsEventsTargetConfig, event win_eventlog.
Keywords: event.Keywords,
TimeCreated: event.TimeCreated.SystemTime,
EventRecordID: event.EventRecordID,
- Message: event.Message,
}
if !cfg.ExcludeEventData {
@@ -82,6 +81,9 @@ func formatLine(cfg *scrapeconfig.WindowsEventsTargetConfig, event win_eventlog.
if !cfg.ExcludeUserData {
structuredEvent.UserData = string(event.UserData.InnerXML)
}
+ if !cfg.ExcludeEventMessage {
+ structuredEvent.Message = event.Message
+ }
if event.Correlation.ActivityID != "" || event.Correlation.RelatedActivityID != "" {
structuredEvent.Correlation = &Correlation{
ActivityID: event.Correlation.ActivityID,
diff --git a/clients/pkg/promtail/targets/windows/target_test.go b/clients/pkg/promtail/targets/windows/target_test.go
index fffc123af5507..932630b93a0b6 100644
--- a/clients/pkg/promtail/targets/windows/target_test.go
+++ b/clients/pkg/promtail/targets/windows/target_test.go
@@ -134,10 +134,10 @@ func Test_renderEntries(t *testing.T) {
{
Source: win_eventlog.Provider{Name: "Application"},
EventID: 10,
- Version: 10,
- Level: 10,
- Task: 10,
- Opcode: 10,
+ Version: 20,
+ Level: 30,
+ Task: 40,
+ Opcode: 50,
Keywords: "keywords",
TimeCreated: win_eventlog.TimeCreated{SystemTime: time.Unix(0, 1).UTC().Format(time.RFC3339Nano)},
EventRecordID: 11,
@@ -156,7 +156,51 @@ func Test_renderEntries(t *testing.T) {
Labels: model.LabelSet{"channel": "channel", "computer": "local", "job": "windows-events"},
Entry: logproto.Entry{
Timestamp: time.Unix(0, 1).UTC(),
- Line: `{"source":"Application","channel":"channel","computer":"local","event_id":10,"version":10,"level":10,"task":10,"opCode":10,"keywords":"keywords","timeCreated":"1970-01-01T00:00:00.000000001Z","eventRecordID":11,"correlation":{"activityID":"some activity","relatedActivityID":"some related activity"},"execution":{"processId":1,"threadId":5},"security":{"userId":"1"},"user_data":"userdata","event_data":"eventdata","message":"message"}`,
+ Line: `{"source":"Application","channel":"channel","computer":"local","event_id":10,"version":20,"level":30,"task":40,"opCode":50,"keywords":"keywords","timeCreated":"1970-01-01T00:00:00.000000001Z","eventRecordID":11,"correlation":{"activityID":"some activity","relatedActivityID":"some related activity"},"execution":{"processId":1,"threadId":5},"security":{"userId":"1"},"user_data":"userdata","event_data":"eventdata","message":"message"}`,
+ },
+ },
+ }, entries)
+}
+
+func Test_renderEntries_ExcludeEventMessage(t *testing.T) {
+ client := fake.New(func() {})
+ defer client.Stop()
+ ta, err := New(util_log.Logger, client, nil, &scrapeconfig.WindowsEventsTargetConfig{
+ Labels: model.LabelSet{"job": "windows-events"},
+ EventlogName: "Application",
+ Query: "*",
+ UseIncomingTimestamp: true,
+ ExcludeEventMessage: true,
+ })
+ require.NoError(t, err)
+ defer ta.Stop()
+ entries := ta.renderEntries([]win_eventlog.Event{
+ {
+ Source: win_eventlog.Provider{Name: "Application"},
+ EventID: 10,
+ Version: 20,
+ Level: 30,
+ Task: 40,
+ Opcode: 50,
+ Keywords: "keywords",
+ TimeCreated: win_eventlog.TimeCreated{SystemTime: time.Unix(0, 1).UTC().Format(time.RFC3339Nano)},
+ EventRecordID: 11,
+ Correlation: win_eventlog.Correlation{ActivityID: "some activity", RelatedActivityID: "some related activity"},
+ Execution: win_eventlog.Execution{ThreadID: 5, ProcessID: 1},
+ Channel: "channel",
+ Computer: "local",
+ Security: win_eventlog.Security{UserID: "1"},
+ UserData: win_eventlog.UserData{InnerXML: []byte(`userdata`)},
+ EventData: win_eventlog.EventData{InnerXML: []byte(`eventdata`)},
+ Message: "message",
+ },
+ })
+ require.Equal(t, []api.Entry{
+ {
+ Labels: model.LabelSet{"channel": "channel", "computer": "local", "job": "windows-events"},
+ Entry: logproto.Entry{
+ Timestamp: time.Unix(0, 1).UTC(),
+ Line: `{"source":"Application","channel":"channel","computer":"local","event_id":10,"version":20,"level":30,"task":40,"opCode":50,"keywords":"keywords","timeCreated":"1970-01-01T00:00:00.000000001Z","eventRecordID":11,"correlation":{"activityID":"some activity","relatedActivityID":"some related activity"},"execution":{"processId":1,"threadId":5},"security":{"userId":"1"},"user_data":"userdata","event_data":"eventdata"}`,
},
},
}, entries)
diff --git a/docs/sources/clients/promtail/configuration.md b/docs/sources/clients/promtail/configuration.md
index a4aab1b3a9e60..a72c56bbdbad7 100644
--- a/docs/sources/clients/promtail/configuration.md
+++ b/docs/sources/clients/promtail/configuration.md
@@ -931,6 +931,9 @@ You can add additional labels with the `labels` property.
# Allows to exclude the xml event data.
[exclude_event_data: <bool> | default = false]
+# Allows to exclude the human-friendly event message.
+[exclude_event_message: <bool> | default = false]
+
# Allows to exclude the user data of each windows event.
[exclude_user_data: <bool> | default = false]
|
promtail
|
Exclude event message (#7462)
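The updated test above expects the rendered JSON line to simply lose its `"message"` key when `exclude_event_message` is set. A minimal sketch of that conditional-field pattern follows; the `event` type, the `render` helper and the use of `omitempty` are assumptions for illustration, not promtail's actual types.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// event is a cut-down stand-in for a structured Windows event; `omitempty` is
// what makes the "message" key disappear from the rendered line when the
// field is left unset.
type event struct {
	EventID int    `json:"event_id"`
	Message string `json:"message,omitempty"`
}

func render(excludeMessage bool) string {
	e := event{EventID: 10}
	if !excludeMessage {
		e.Message = "message"
	}
	b, _ := json.Marshal(e)
	return string(b)
}

func main() {
	fmt.Println(render(false)) // {"event_id":10,"message":"message"}
	fmt.Println(render(true))  // {"event_id":10}
}
```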
|
8a3ae223ba160584d61bd5cb39b546a3c28f46b5
|
2024-07-26 14:37:36
|
George Robinson
|
fix: fix a bug where AppendRequest with no entries triggers flush (#13672)
| false
|
diff --git a/pkg/storage/wal/manager.go b/pkg/storage/wal/manager.go
index fc23cb21e742f..1a7b73047bbdb 100644
--- a/pkg/storage/wal/manager.go
+++ b/pkg/storage/wal/manager.go
@@ -156,7 +156,7 @@ func (m *Manager) Append(r AppendRequest) (*AppendResult, error) {
s.w.Append(r.TenantID, r.LabelsStr, r.Labels, r.Entries, m.clock.Now())
// If the segment exceeded the maximum age or the maximum size, move s to
// the closed list to be flushed.
- if m.clock.Since(s.w.firstAppend) >= m.cfg.MaxAge || s.w.InputSize() >= m.cfg.MaxSegmentSize {
+ if s.w.Age(m.clock.Now()) >= m.cfg.MaxAge || s.w.InputSize() >= m.cfg.MaxSegmentSize {
m.move(el, s)
}
return s.r, nil
@@ -224,7 +224,7 @@ func (m *Manager) move(el *list.Element, s *segment) {
func (m *Manager) moveFrontIfExpired() bool {
if el := m.available.Front(); el != nil {
s := el.Value.(*segment)
- if !s.w.firstAppend.IsZero() && m.clock.Since(s.w.firstAppend) >= m.cfg.MaxAge {
+ if s.w.Age(m.clock.Now()) >= m.cfg.MaxAge {
m.move(el, s)
return true
}
diff --git a/pkg/storage/wal/manager_test.go b/pkg/storage/wal/manager_test.go
index 93e10fbaa06a9..1a14d999f5ecd 100644
--- a/pkg/storage/wal/manager_test.go
+++ b/pkg/storage/wal/manager_test.go
@@ -54,6 +54,38 @@ func TestManager_Append(t *testing.T) {
require.NoError(t, res.Err())
}
+func TestManager_AppendNoEntries(t *testing.T) {
+ m, err := NewManager(Config{
+ MaxAge: 30 * time.Second,
+ MaxSegments: 1,
+ MaxSegmentSize: 1024, // 1KB
+ }, NewManagerMetrics(nil))
+ require.NoError(t, err)
+
+ // Append no entries.
+ lbs := labels.Labels{{Name: "a", Value: "b"}}
+ res, err := m.Append(AppendRequest{
+ TenantID: "1",
+ Labels: lbs,
+ LabelsStr: lbs.String(),
+ Entries: []*logproto.Entry{},
+ })
+ require.NoError(t, err)
+ require.NotNil(t, res)
+
+ // The data hasn't been flushed, so reading from Done() should block.
+ select {
+ case <-res.Done():
+ t.Fatal("unexpected closed Done()")
+ default:
+ }
+
+ // The segment that was just appended to has neither reached the maximum
+ // age nor maximum size to be flushed.
+ require.Equal(t, 1, m.available.Len())
+ require.Equal(t, 0, m.pending.Len())
+}
+
func TestManager_AppendFailed(t *testing.T) {
m, err := NewManager(Config{
MaxAge: 30 * time.Second,
diff --git a/pkg/storage/wal/segment.go b/pkg/storage/wal/segment.go
index 5922d38fe7395..65ee7c093d2ff 100644
--- a/pkg/storage/wal/segment.go
+++ b/pkg/storage/wal/segment.go
@@ -140,6 +140,9 @@ func NewWalSegmentWriter() (*SegmentWriter, error) {
// Age returns the age of the segment.
func (b *SegmentWriter) Age(now time.Time) time.Duration {
+ if b.firstAppend.IsZero() {
+ return 0
+ }
return now.Sub(b.firstAppend)
}
|
fix
|
fix a bug where AppendRequest with no entries triggers flush (#13672)
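The root cause behind this fix: a segment that has never received entries still carries the zero `time.Time` as `firstAppend`, so `clock.Since(firstAppend)` is effectively unbounded and always exceeds `MaxAge`, which is why an `AppendRequest` with no entries could push the segment onto the flush queue. The guard added to `SegmentWriter.Age` returns 0 in that case. The standalone sketch below illustrates the difference; the `age` helper and the `maxAge` value are assumptions for illustration, not Loki code.

```go
package main

import (
	"fmt"
	"time"
)

// age mirrors the guard added to SegmentWriter.Age: a segment that was never
// written to (zero firstAppend) reports an age of 0 instead of the huge
// duration produced by subtracting the zero time.
func age(firstAppend, now time.Time) time.Duration {
	if firstAppend.IsZero() {
		return 0
	}
	return now.Sub(firstAppend)
}

func main() {
	const maxAge = 30 * time.Second
	now := time.Now()

	var firstAppend time.Time // zero value: no entries appended yet

	// Without the guard the empty segment always looks older than maxAge and
	// would incorrectly be moved to the flush queue.
	fmt.Println(now.Sub(firstAppend) >= maxAge)  // true
	fmt.Println(age(firstAppend, now) >= maxAge) // false
}
```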
|
11b92eeb95612a2bb002ea22f048c55ae20557a2
|
2024-10-01 04:48:16
|
Hans Kristian Flaatten
|
feat(helm): update chart with loki version 3.2.0 (#14281)
| false
|
diff --git a/production/helm/loki/CHANGELOG.md b/production/helm/loki/CHANGELOG.md
index 2f1ee3541c716..18d0aca93a3d6 100644
--- a/production/helm/loki/CHANGELOG.md
+++ b/production/helm/loki/CHANGELOG.md
@@ -13,6 +13,10 @@ Entries should include a reference to the pull request that introduced the chang
[//]: # (<AUTOMATED_UPDATES_LOCATOR> : do not remove this line. This locator is used by the CI pipeline to automatically create a changelog entry for each new Loki release. Add other chart versions and respective changelog entries bellow this line.)
+## 6.17.0
+
+- [CHANGE] Changed version of Grafana Loki to 3.2.0
+
## 6.16.0
- [ENHANCEMENT] Allow setting nodeSelector, tolerations and affinity to enterprise components (tokengen and provisioner).
diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml
index 69baa3b24ea06..07941278dfa0a 100644
--- a/production/helm/loki/Chart.yaml
+++ b/production/helm/loki/Chart.yaml
@@ -2,8 +2,8 @@ apiVersion: v2
name: loki
description: Helm chart for Grafana Loki and Grafana Enterprise Logs supporting both simple, scalable and distributed modes.
type: application
-appVersion: 3.1.1
-version: 6.16.0
+appVersion: 3.2.0
+version: 6.17.0
home: https://grafana.github.io/helm-charts
sources:
- https://github.com/grafana/loki
diff --git a/production/helm/loki/README.md b/production/helm/loki/README.md
index 235c31643d103..a3a9c0bfd3b80 100644
--- a/production/helm/loki/README.md
+++ b/production/helm/loki/README.md
@@ -1,6 +1,6 @@
# loki
-  
+  
Helm chart for Grafana Loki and Grafana Enterprise Logs supporting both simple, scalable and distributed modes.
|
feat
|
update chart with loki version 3.2.0 (#14281)
|
96346be8fb2ae6c0e9576252b4df5df2cc7c1533
|
2023-08-15 17:22:20
|
dependabot[bot]
|
build(deps): bump github.com/pierrec/lz4/v4 from 4.1.17 to 4.1.18 (#10138)
| false
|
diff --git a/go.mod b/go.mod
index 5bde4a0af99dd..98b0c605f6900 100644
--- a/go.mod
+++ b/go.mod
@@ -80,7 +80,7 @@ require (
github.com/opentracing/opentracing-go v1.2.0
github.com/oschwald/geoip2-golang v1.9.0
// github.com/pierrec/lz4 v2.0.5+incompatible
- github.com/pierrec/lz4/v4 v4.1.17
+ github.com/pierrec/lz4/v4 v4.1.18
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.16.0
github.com/prometheus/client_model v0.4.0
diff --git a/go.sum b/go.sum
index 6f7c84d6437dd..142648a5bb970 100644
--- a/go.sum
+++ b/go.sum
@@ -1484,8 +1484,8 @@ github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG
github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
-github.com/pierrec/lz4/v4 v4.1.17 h1:kV4Ip+/hUBC+8T6+2EgburRtkE9ef4nbY3f4dFhGjMc=
-github.com/pierrec/lz4/v4 v4.1.17/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
+github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ=
+github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU=
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s
index c43e8a8d28e44..d2fe11b8ea12b 100644
--- a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s
@@ -185,7 +185,7 @@ copyMatchTry8:
// A 16-at-a-time loop doesn't provide a further speedup.
CMP $8, len
CCMP HS, offset, $8, $0
- BLO copyMatchLoop1
+ BLO copyMatchTry4
AND $7, len, lenRem
SUB $8, len
@@ -201,8 +201,19 @@ copyMatchLoop8:
MOVD tmp2, -8(dst)
B copyMatchDone
+copyMatchTry4:
+ // Copy words if both len and offset are at least four.
+ CMP $4, len
+ CCMP HS, offset, $4, $0
+ BLO copyMatchLoop1
+
+ MOVWU.P 4(match), tmp2
+ MOVWU.P tmp2, 4(dst)
+ SUBS $4, len
+ BEQ copyMatchDone
+
copyMatchLoop1:
- // Byte-at-a-time copy for small offsets.
+ // Byte-at-a-time copy for small offsets <= 3.
MOVBU.P 1(match), tmp2
MOVB.P tmp2, 1(dst)
SUBS $1, len
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 20d9bcf74276a..86965fb3bc9e4 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1162,7 +1162,7 @@ github.com/oschwald/geoip2-golang
# github.com/oschwald/maxminddb-golang v1.11.0
## explicit; go 1.19
github.com/oschwald/maxminddb-golang
-# github.com/pierrec/lz4/v4 v4.1.17
+# github.com/pierrec/lz4/v4 v4.1.18
## explicit; go 1.14
github.com/pierrec/lz4/v4
github.com/pierrec/lz4/v4/internal/lz4block
|
build
|
bump github.com/pierrec/lz4/v4 from 4.1.17 to 4.1.18 (#10138)
|
aa3716e84c29a9bcbbfc5baaec8d85669927f6a2
|
2022-09-03 01:37:37
|
Karsten Jeschkies
|
helm: Publish Helm charts to grafana/helm-charts. (#6955)
| false
|
diff --git a/.github/workflows/helm-release.yaml b/.github/workflows/helm-release.yaml
new file mode 100644
index 0000000000000..0dd3f1992dd96
--- /dev/null
+++ b/.github/workflows/helm-release.yaml
@@ -0,0 +1,20 @@
+name: helm-release
+
+on:
+ push:
+ branches:
+ - main
+ tags:
+ - "v[0-9]+.[0-9]+.[0-9]+"
+
+jobs:
+ call-update-helm-repo:
+ uses: grafana/helm-charts/.github/workflows/update-helm-repo.yaml@main
+ with:
+ charts_dir: production/helm
+ cr_configfile: production/helm/cr.yaml
+ ct_configfile: production/helm/ct.yaml
+ secrets:
+ helm_repo_token: ${{ secrets.GH_BOT_ACCESS_TOKEN }}
+ # See https://github.com/grafana/helm-charts/blob/main/INTERNAL.md about this key
+ gpg_key_base64: ${{ secrets.HELM_SIGN_KEY_BASE64 }}
diff --git a/production/helm/cr.yaml b/production/helm/cr.yaml
new file mode 100644
index 0000000000000..45d502964bd97
--- /dev/null
+++ b/production/helm/cr.yaml
@@ -0,0 +1,5 @@
+git-repo: helm-charts
+key: Grafana Loki
+owner: grafana
+sign: true
+skip-existing: true
|
helm
|
Publish Helm charts to grafana/helm-charts. (#6955)
|
5ee0d093897a1c0b33edb4f7e3a3d7bd492d49d9
|
2022-09-27 19:42:13
|
Bryan Boreham
|
chunks: update klauspost/compress package to v1.15.11 (#7263)
| false
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 96f697c043ff0..bcf6b104505a7 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -19,6 +19,7 @@
* [6179](https://github.com/grafana/loki/pull/6179) **chaudum**: Add new HTTP endpoint to delete ingester ring token file and shutdown process gracefully
* [5997](https://github.com/grafana/loki/pull/5997) **simonswine**: Querier: parallize label queries to both stores.
* [5406](https://github.com/grafana/loki/pull/5406) **ctovena**: Revise the configuration parameters that configure the usage report to grafana.com.
+* [7263](https://github.com/grafana/loki/pull/7263) **bboreham**: Dependencies: update klauspost/compress package to v1.15.11; improves performance.
##### Fixes
* [7040](https://github.com/grafana/loki/pull/7040) **bakunowski**: Remove duplicated `loki_boltdb_shipper` prefix from `tables_upload_operation_total` metric.
diff --git a/go.mod b/go.mod
index f0c5c81411cbe..16df8aef85bdb 100644
--- a/go.mod
+++ b/go.mod
@@ -63,7 +63,7 @@ require (
github.com/jmespath/go-jmespath v0.4.0
github.com/joncrlsn/dque v2.2.1-0.20200515025108-956d14155fa2+incompatible
github.com/json-iterator/go v1.1.12
- github.com/klauspost/compress v1.14.1
+ github.com/klauspost/compress v1.15.11
github.com/klauspost/pgzip v1.2.5
github.com/mattn/go-ieproxy v0.0.1
github.com/minio/minio-go/v7 v7.0.24
diff --git a/go.sum b/go.sum
index ca3ee26c97656..f8d6eac787928 100644
--- a/go.sum
+++ b/go.sum
@@ -1082,8 +1082,8 @@ github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0
github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.14.1 h1:hLQYb23E8/fO+1u53d02A97a8UnsddcvYzq4ERRU4ds=
-github.com/klauspost/compress v1.14.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.15.11 h1:Lcadnb3RKGin4FYM/orgq0qde+nc15E5Cbqg4B9Sx9c=
+github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid v1.3.1 h1:5JNjFYYQrZeKRJ0734q51WCEEn2huer72Dc7K+R/b6s=
github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4=
diff --git a/vendor/github.com/klauspost/compress/.gitignore b/vendor/github.com/klauspost/compress/.gitignore
index b35f8449bf280..d31b378152793 100644
--- a/vendor/github.com/klauspost/compress/.gitignore
+++ b/vendor/github.com/klauspost/compress/.gitignore
@@ -23,3 +23,10 @@ _testmain.go
*.test
*.prof
/s2/cmd/_s2sx/sfx-exe
+
+# Linux perf files
+perf.data
+perf.data.old
+
+# gdb history
+.gdb_history
diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md
index e8ff994f8bcb3..3c00c1af9688e 100644
--- a/vendor/github.com/klauspost/compress/README.md
+++ b/vendor/github.com/klauspost/compress/README.md
@@ -17,13 +17,136 @@ This package provides various compression algorithms.
# changelog
+* Sept 16, 2022 (v1.15.10)
+
+ * zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/[email protected]/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649
+ * Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651
+ * flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656
+ * zstd: Improve "better" compresssion https://github.com/klauspost/compress/pull/657
+ * s2: Improve "best" compression https://github.com/klauspost/compress/pull/658
+ * s2: Improve "better" compression. https://github.com/klauspost/compress/pull/635
+ * s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646
+ * Use arrays for constant size copies https://github.com/klauspost/compress/pull/659
+
+* July 21, 2022 (v1.15.9)
+
+ * zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645
+ * zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644
+ * zstd: Allow single segments up to "max decoded size" by @klauspost in https://github.com/klauspost/compress/pull/643
+
+* July 13, 2022 (v1.15.8)
+
+ * gzip: fix stack exhaustion bug in Reader.Read https://github.com/klauspost/compress/pull/641
+ * s2: Add Index header trim/restore https://github.com/klauspost/compress/pull/638
+ * zstd: Optimize seqdeq amd64 asm by @greatroar in https://github.com/klauspost/compress/pull/636
+ * zstd: Improve decoder memcopy https://github.com/klauspost/compress/pull/637
+ * huff0: Pass a single bitReader pointer to asm by @greatroar in https://github.com/klauspost/compress/pull/634
+ * zstd: Branchless getBits for amd64 w/o BMI2 by @greatroar in https://github.com/klauspost/compress/pull/640
+ * gzhttp: Remove header before writing https://github.com/klauspost/compress/pull/639
+
+* June 29, 2022 (v1.15.7)
+
+ * s2: Fix absolute forward seeks https://github.com/klauspost/compress/pull/633
+ * zip: Merge upstream https://github.com/klauspost/compress/pull/631
+ * zip: Re-add zip64 fix https://github.com/klauspost/compress/pull/624
+ * zstd: translate fseDecoder.buildDtable into asm by @WojciechMula in https://github.com/klauspost/compress/pull/598
+ * flate: Faster histograms https://github.com/klauspost/compress/pull/620
+ * deflate: Use compound hcode https://github.com/klauspost/compress/pull/622
+
+* June 3, 2022 (v1.15.6)
+ * s2: Improve coding for long, close matches https://github.com/klauspost/compress/pull/613
+ * s2c: Add Snappy/S2 stream recompression https://github.com/klauspost/compress/pull/611
+ * zstd: Always use configured block size https://github.com/klauspost/compress/pull/605
+ * zstd: Fix incorrect hash table placement for dict encoding in default https://github.com/klauspost/compress/pull/606
+ * zstd: Apply default config to ZipDecompressor without options https://github.com/klauspost/compress/pull/608
+ * gzhttp: Exclude more common archive formats https://github.com/klauspost/compress/pull/612
+ * s2: Add ReaderIgnoreCRC https://github.com/klauspost/compress/pull/609
+ * s2: Remove sanity load on index creation https://github.com/klauspost/compress/pull/607
+ * snappy: Use dedicated function for scoring https://github.com/klauspost/compress/pull/614
+ * s2c+s2d: Use official snappy framed extension https://github.com/klauspost/compress/pull/610
+
+* May 25, 2022 (v1.15.5)
+ * s2: Add concurrent stream decompression https://github.com/klauspost/compress/pull/602
+ * s2: Fix final emit oob read crash on amd64 https://github.com/klauspost/compress/pull/601
+ * huff0: asm implementation of Decompress1X by @WojciechMula https://github.com/klauspost/compress/pull/596
+ * zstd: Use 1 less goroutine for stream decoding https://github.com/klauspost/compress/pull/588
+ * zstd: Copy literal in 16 byte blocks when possible https://github.com/klauspost/compress/pull/592
+ * zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599
+ * zstd: faster next state update in BMI2 version of decode by @WojciechMula in https://github.com/klauspost/compress/pull/593
+ * huff0: Do not check max size when reading table. https://github.com/klauspost/compress/pull/586
+ * flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590
+
+
+* May 11, 2022 (v1.15.4)
+ * huff0: decompress directly into output by @WojciechMula in [#577](https://github.com/klauspost/compress/pull/577)
+ * inflate: Keep dict on stack [#581](https://github.com/klauspost/compress/pull/581)
+ * zstd: Faster decoding memcopy in asm [#583](https://github.com/klauspost/compress/pull/583)
+ * zstd: Fix ignored crc [#580](https://github.com/klauspost/compress/pull/580)
+
+* May 5, 2022 (v1.15.3)
+ * zstd: Allow to ignore checksum checking by @WojciechMula [#572](https://github.com/klauspost/compress/pull/572)
+ * s2: Fix incorrect seek for io.SeekEnd in [#575](https://github.com/klauspost/compress/pull/575)
+
+* Apr 26, 2022 (v1.15.2)
+ * zstd: Add x86-64 assembly for decompression on streams and blocks. Contributed by [@WojciechMula](https://github.com/WojciechMula). Typically 2x faster. [#528](https://github.com/klauspost/compress/pull/528) [#531](https://github.com/klauspost/compress/pull/531) [#545](https://github.com/klauspost/compress/pull/545) [#537](https://github.com/klauspost/compress/pull/537)
+ * zstd: Add options to ZipDecompressor and fixes [#539](https://github.com/klauspost/compress/pull/539)
+ * s2: Use sorted search for index [#555](https://github.com/klauspost/compress/pull/555)
+ * Minimum version is Go 1.16, added CI test on 1.18.
+
+* Mar 11, 2022 (v1.15.1)
+ * huff0: Add x86 assembly of Decode4X by @WojciechMula in [#512](https://github.com/klauspost/compress/pull/512)
+ * zstd: Reuse zip decoders in [#514](https://github.com/klauspost/compress/pull/514)
+ * zstd: Detect extra block data and report as corrupted in [#520](https://github.com/klauspost/compress/pull/520)
+ * zstd: Handle zero sized frame content size stricter in [#521](https://github.com/klauspost/compress/pull/521)
+ * zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523)
+
+* Mar 3, 2022 (v1.15.0)
+ * zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498)
+ * zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505)
+ * huff0: Prevent single blocks exceeding 16 bits by @klauspost in[#507](https://github.com/klauspost/compress/pull/507)
+ * flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509)
+ * gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400)
+ * gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510)
+
+Both compression and decompression now supports "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines.
+
+Stream decompression is now faster on asynchronous, since the goroutine allocation much more effectively splits the workload. On typical streams this will typically use 2 cores fully for decompression. When a stream has finished decoding no goroutines will be left over, so decoders can now safely be pooled and still be garbage collected.
+
+While the release has been extensively tested, it is recommended to testing when upgrading.
+
+<details>
+ <summary>See changes to v1.14.x</summary>
+
+* Feb 22, 2022 (v1.14.4)
+ * flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503)
+ * zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502)
+ * zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501) #501
+ * huff0: Use static decompression buffer up to 30% faster by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500)
+
+* Feb 17, 2022 (v1.14.3)
+ * flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478)
+ * flate: Faster decompression speed, ~5-10%. [#483](https://github.com/klauspost/compress/pull/483)
+ * s2: Faster compression with Go v1.18 and amd64 microarch level 3+. [#484](https://github.com/klauspost/compress/pull/484) [#486](https://github.com/klauspost/compress/pull/486)
+
+* Jan 25, 2022 (v1.14.2)
+ * zstd: improve header decoder by @dsnet [#476](https://github.com/klauspost/compress/pull/476)
+ * zstd: Add bigger default blocks [#469](https://github.com/klauspost/compress/pull/469)
+ * zstd: Remove unused decompression buffer [#470](https://github.com/klauspost/compress/pull/470)
+ * zstd: Fix logically dead code by @ningmingxiao [#472](https://github.com/klauspost/compress/pull/472)
+ * flate: Improve level 7-9 [#471](https://github.com/klauspost/compress/pull/471) [#473](https://github.com/klauspost/compress/pull/473)
+ * zstd: Add noasm tag for xxhash [#475](https://github.com/klauspost/compress/pull/475)
+
* Jan 11, 2022 (v1.14.1)
* s2: Add stream index in [#462](https://github.com/klauspost/compress/pull/462)
* flate: Speed and efficiency improvements in [#439](https://github.com/klauspost/compress/pull/439) [#461](https://github.com/klauspost/compress/pull/461) [#455](https://github.com/klauspost/compress/pull/455) [#452](https://github.com/klauspost/compress/pull/452) [#458](https://github.com/klauspost/compress/pull/458)
* zstd: Performance improvement in [#420]( https://github.com/klauspost/compress/pull/420) [#456](https://github.com/klauspost/compress/pull/456) [#437](https://github.com/klauspost/compress/pull/437) [#467](https://github.com/klauspost/compress/pull/467) [#468](https://github.com/klauspost/compress/pull/468)
* zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464)
* Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445)
+</details>
+<details>
+ <summary>See changes to v1.13.x</summary>
+
* Aug 30, 2021 (v1.13.5)
* gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425)
* s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413)
@@ -52,7 +175,12 @@ This package provides various compression algorithms.
* Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors.
* zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382)
* zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380)
+</details>
+
+<details>
+ <summary>See changes to v1.12.x</summary>
+
* May 25, 2021 (v1.12.3)
* deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374)
* deflate: Allocate less for history. [#375](https://github.com/klauspost/compress/pull/375)
@@ -74,9 +202,10 @@ This package provides various compression algorithms.
* s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352)
* zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346)
* s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349)
+</details>
<details>
- <summary>See changes prior to v1.12.1</summary>
+ <summary>See changes to v1.11.x</summary>
* Mar 26, 2021 (v1.11.13)
* zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345)
@@ -135,7 +264,7 @@ This package provides various compression algorithms.
</details>
<details>
- <summary>See changes prior to v1.11.0</summary>
+ <summary>See changes to v1.10.x</summary>
* July 8, 2020 (v1.10.11)
* zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278)
@@ -297,11 +426,6 @@ This package provides various compression algorithms.
# deflate usage
-* [High Throughput Benchmark](http://blog.klauspost.com/go-gzipdeflate-benchmarks/).
-* [Small Payload/Webserver Benchmarks](http://blog.klauspost.com/gzip-performance-for-go-webservers/).
-* [Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).
-* [Re-balancing Deflate Compression Levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/)
-
The packages are drop-in replacements for standard libraries. Simply replace the import path to use them:
| old import | new import | Documentation
@@ -323,6 +447,8 @@ Memory usage is typically 1MB for a Writer. stdlib is in the same range.
If you expect to have a lot of concurrently allocated Writers consider using
the stateless compress described below.
+For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing).
+
# Stateless compression
This package offers stateless compression as a special option for gzip/deflate.
diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go
index b27f5a93bc2ba..07265ddede800 100644
--- a/vendor/github.com/klauspost/compress/flate/deflate.go
+++ b/vendor/github.com/klauspost/compress/flate/deflate.go
@@ -10,9 +10,6 @@ import (
"fmt"
"io"
"math"
- "math/bits"
-
- comp "github.com/klauspost/compress"
)
const (
@@ -76,8 +73,8 @@ var levels = []compressionLevel{
{0, 0, 0, 0, 0, 6},
// Levels 7-9 use increasingly more lazy matching
// and increasingly stringent conditions for "good enough".
- {6, 10, 12, 16, skipNever, 7},
- {10, 24, 32, 64, skipNever, 8},
+ {8, 12, 16, 24, skipNever, 7},
+ {16, 30, 40, 64, skipNever, 8},
{32, 258, 258, 1024, skipNever, 9},
}
@@ -87,29 +84,29 @@ type advancedState struct {
length int
offset int
maxInsertIndex int
+ chainHead int
+ hashOffset int
- // Input hash chains
- // hashHead[hashValue] contains the largest inputIndex with the specified hash value
- // If hashHead[hashValue] is within the current window, then
- // hashPrev[hashHead[hashValue] & windowMask] contains the previous index
- // with the same hash value.
- chainHead int
- hashHead [hashSize]uint32
- hashPrev [windowSize]uint32
- hashOffset int
+ ii uint16 // position of last match, intended to overflow to reset.
// input window: unprocessed data is window[index:windowEnd]
index int
estBitsPerByte int
hashMatch [maxMatchLength + minMatchLength]uint32
- hash uint32
- ii uint16 // position of last match, intended to overflow to reset.
+ // Input hash chains
+ // hashHead[hashValue] contains the largest inputIndex with the specified hash value
+ // If hashHead[hashValue] is within the current window, then
+ // hashPrev[hashHead[hashValue] & windowMask] contains the previous index
+ // with the same hash value.
+ hashHead [hashSize]uint32
+ hashPrev [windowSize]uint32
}
type compressor struct {
compressionLevel
+ h *huffmanEncoder
w *huffmanBitWriter
// compression algorithm
@@ -134,7 +131,8 @@ func (d *compressor) fillDeflate(b []byte) int {
s := d.state
if s.index >= 2*windowSize-(minMatchLength+maxMatchLength) {
// shift the window by windowSize
- copy(d.window[:], d.window[windowSize:2*windowSize])
+ //copy(d.window[:], d.window[windowSize:2*windowSize])
+ *(*[windowSize]byte)(d.window) = *(*[windowSize]byte)(d.window[windowSize:])
s.index -= windowSize
d.windowEnd -= windowSize
if d.blockStart >= windowSize {
@@ -261,7 +259,6 @@ func (d *compressor) fillWindow(b []byte) {
// Set the head of the hash chain to us.
s.hashHead[newH] = uint32(di + s.hashOffset)
}
- s.hash = newH
}
// Update window information.
d.windowEnd += n
@@ -271,7 +268,7 @@ func (d *compressor) fillWindow(b []byte) {
// Try to find a match starting at index whose length is greater than prevSize.
// We only look at chainCount possibilities before giving up.
// pos = s.index, prevHead = s.chainHead-s.hashOffset, prevLength=minMatchLength-1, lookahead
-func (d *compressor) findMatch(pos int, prevHead int, lookahead, bpb int) (length, offset int, ok bool) {
+func (d *compressor) findMatch(pos int, prevHead int, lookahead int) (length, offset int, ok bool) {
minMatchLook := maxMatchLength
if lookahead < minMatchLook {
minMatchLook = lookahead
@@ -297,14 +294,46 @@ func (d *compressor) findMatch(pos int, prevHead int, lookahead, bpb int) (lengt
}
offset = 0
+ cGain := 0
+ if d.chain < 100 {
+ for i := prevHead; tries > 0; tries-- {
+ if wEnd == win[i+length] {
+ n := matchLen(win[i:i+minMatchLook], wPos)
+ if n > length {
+ length = n
+ offset = pos - i
+ ok = true
+ if n >= nice {
+ // The match is good enough that we don't try to find a better one.
+ break
+ }
+ wEnd = win[pos+n]
+ }
+ }
+ if i <= minIndex {
+ // hashPrev[i & windowMask] has already been overwritten, so stop now.
+ break
+ }
+ i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset
+ if i < minIndex {
+ break
+ }
+ }
+ return
+ }
+
+ // Some like it higher (CSV), some like it lower (JSON)
+ const baseCost = 6
// Base is 4 bytes at with an additional cost.
// Matches must be better than this.
- cGain := minMatchLength*bpb - 12
for i := prevHead; tries > 0; tries-- {
if wEnd == win[i+length] {
n := matchLen(win[i:i+minMatchLook], wPos)
if n > length {
- newGain := n*bpb - bits.Len32(uint32(pos-i))
+ // Calculate gain. Estimate
+ newGain := d.h.bitLengthRaw(wPos[:n]) - int(offsetExtraBits[offsetCode(uint32(pos-i))]) - baseCost - int(lengthExtraBits[lengthCodes[(n-3)&255]])
+
+ //fmt.Println(n, "gain:", newGain, "prev:", cGain, "raw:", d.h.bitLengthRaw(wPos[:n]))
if newGain > cGain {
length = n
offset = pos - i
@@ -345,6 +374,12 @@ func hash4(b []byte) uint32 {
return hash4u(binary.LittleEndian.Uint32(b), hashBits)
}
+// hash4 returns the hash of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <32.
+func hash4u(u uint32, h uint8) uint32 {
+ return (u * prime4bytes) >> (32 - h)
+}
+
// bulkHash4 will compute hashes using the same
// algorithm as hash4
func bulkHash4(b []byte, dst []uint32) {
@@ -373,7 +408,6 @@ func (d *compressor) initDeflate() {
s.hashOffset = 1
s.length = minMatchLength - 1
s.offset = 0
- s.hash = 0
s.chainHead = -1
}
@@ -389,16 +423,19 @@ func (d *compressor) deflateLazy() {
if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync {
return
}
- s.estBitsPerByte = 8
- if !d.sync {
- s.estBitsPerByte = comp.ShannonEntropyBits(d.window[s.index:d.windowEnd])
- s.estBitsPerByte = int(1 + float64(s.estBitsPerByte)/float64(d.windowEnd-s.index))
+ if d.windowEnd != s.index && d.chain > 100 {
+ // Get literal huffman coder.
+ if d.h == nil {
+ d.h = newHuffmanEncoder(maxFlateBlockTokens)
+ }
+ var tmp [256]uint16
+ for _, v := range d.window[s.index:d.windowEnd] {
+ tmp[v]++
+ }
+ d.h.generate(tmp[:], 15)
}
s.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
- if s.index < s.maxInsertIndex {
- s.hash = hash4(d.window[s.index:])
- }
for {
if sanity && s.index > d.windowEnd {
@@ -430,11 +467,11 @@ func (d *compressor) deflateLazy() {
}
if s.index < s.maxInsertIndex {
// Update the hash
- s.hash = hash4(d.window[s.index:])
- ch := s.hashHead[s.hash&hashMask]
+ hash := hash4(d.window[s.index:])
+ ch := s.hashHead[hash]
s.chainHead = int(ch)
s.hashPrev[s.index&windowMask] = ch
- s.hashHead[s.hash&hashMask] = uint32(s.index + s.hashOffset)
+ s.hashHead[hash] = uint32(s.index + s.hashOffset)
}
prevLength := s.length
prevOffset := s.offset
@@ -446,7 +483,7 @@ func (d *compressor) deflateLazy() {
}
if s.chainHead-s.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy {
- if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, lookahead, s.estBitsPerByte); ok {
+ if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, lookahead); ok {
s.length = newLength
s.offset = newOffset
}
@@ -467,7 +504,7 @@ func (d *compressor) deflateLazy() {
end += prevIndex
idx := prevIndex + prevLength - (4 - checkOff)
h := hash4(d.window[idx:])
- ch2 := int(s.hashHead[h&hashMask]) - s.hashOffset - prevLength + (4 - checkOff)
+ ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength + (4 - checkOff)
if ch2 > minIndex {
length := matchLen(d.window[prevIndex:end], d.window[ch2:])
// It seems like a pure length metric is best.
@@ -511,7 +548,6 @@ func (d *compressor) deflateLazy() {
// Set the head of the hash chain to us.
s.hashHead[newH] = uint32(di + s.hashOffset)
}
- s.hash = newH
}
s.index = newIndex
@@ -757,7 +793,6 @@ func (d *compressor) reset(w io.Writer) {
d.tokens.Reset()
s.length = minMatchLength - 1
s.offset = 0
- s.hash = 0
s.ii = 0
s.maxInsertIndex = 0
}
diff --git a/vendor/github.com/klauspost/compress/flate/dict_decoder.go b/vendor/github.com/klauspost/compress/flate/dict_decoder.go
index 71c75a065ea7f..bb36351a5af39 100644
--- a/vendor/github.com/klauspost/compress/flate/dict_decoder.go
+++ b/vendor/github.com/klauspost/compress/flate/dict_decoder.go
@@ -7,19 +7,19 @@ package flate
// dictDecoder implements the LZ77 sliding dictionary as used in decompression.
// LZ77 decompresses data through sequences of two forms of commands:
//
-// * Literal insertions: Runs of one or more symbols are inserted into the data
-// stream as is. This is accomplished through the writeByte method for a
-// single symbol, or combinations of writeSlice/writeMark for multiple symbols.
-// Any valid stream must start with a literal insertion if no preset dictionary
-// is used.
+// - Literal insertions: Runs of one or more symbols are inserted into the data
+// stream as is. This is accomplished through the writeByte method for a
+// single symbol, or combinations of writeSlice/writeMark for multiple symbols.
+// Any valid stream must start with a literal insertion if no preset dictionary
+// is used.
//
-// * Backward copies: Runs of one or more symbols are copied from previously
-// emitted data. Backward copies come as the tuple (dist, length) where dist
-// determines how far back in the stream to copy from and length determines how
-// many bytes to copy. Note that it is valid for the length to be greater than
-// the distance. Since LZ77 uses forward copies, that situation is used to
-// perform a form of run-length encoding on repeated runs of symbols.
-// The writeCopy and tryWriteCopy are used to implement this command.
+// - Backward copies: Runs of one or more symbols are copied from previously
+// emitted data. Backward copies come as the tuple (dist, length) where dist
+// determines how far back in the stream to copy from and length determines how
+// many bytes to copy. Note that it is valid for the length to be greater than
+// the distance. Since LZ77 uses forward copies, that situation is used to
+// perform a form of run-length encoding on repeated runs of symbols.
+// The writeCopy and tryWriteCopy are used to implement this command.
//
// For performance reasons, this implementation performs little to no sanity
// checks about the arguments. As such, the invariants documented for each
diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go
index 0b2e54972cdd7..24caf5f70b004 100644
--- a/vendor/github.com/klauspost/compress/flate/fast_encoder.go
+++ b/vendor/github.com/klauspost/compress/flate/fast_encoder.go
@@ -58,17 +58,6 @@ const (
prime8bytes = 0xcf1bbcdcb7a56463
)
-func load32(b []byte, i int) uint32 {
- // Help the compiler eliminate bounds checks on the read so it can be done in a single read.
- b = b[i:]
- b = b[:4]
- return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
-}
-
-func load64(b []byte, i int) uint64 {
- return binary.LittleEndian.Uint64(b[i:])
-}
-
func load3232(b []byte, i int32) uint32 {
return binary.LittleEndian.Uint32(b[i:])
}
@@ -77,10 +66,6 @@ func load6432(b []byte, i int32) uint64 {
return binary.LittleEndian.Uint64(b[i:])
}
-func hash(u uint32) uint32 {
- return (u * 0x1e35a7bd) >> tableShift
-}
-
type tableEntry struct {
offset int32
}
@@ -104,7 +89,8 @@ func (e *fastGen) addBlock(src []byte) int32 {
}
// Move down
offset := int32(len(e.hist)) - maxMatchOffset
- copy(e.hist[0:maxMatchOffset], e.hist[offset:])
+ // copy(e.hist[0:maxMatchOffset], e.hist[offset:])
+ *(*[maxMatchOffset]byte)(e.hist) = *(*[maxMatchOffset]byte)(e.hist[offset:])
e.cur += offset
e.hist = e.hist[:maxMatchOffset]
}
@@ -114,39 +100,36 @@ func (e *fastGen) addBlock(src []byte) int32 {
return s
}
-// hash4 returns the hash of u to fit in a hash table with h bits.
-// Preferably h should be a constant and should always be <32.
-func hash4u(u uint32, h uint8) uint32 {
- return (u * prime4bytes) >> ((32 - h) & reg8SizeMask32)
-}
-
type tableEntryPrev struct {
Cur tableEntry
Prev tableEntry
}
-// hash4x64 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits.
-// Preferably h should be a constant and should always be <32.
-func hash4x64(u uint64, h uint8) uint32 {
- return (uint32(u) * prime4bytes) >> ((32 - h) & reg8SizeMask32)
-}
-
// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <64.
func hash7(u uint64, h uint8) uint32 {
return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & reg8SizeMask64))
}
-// hash8 returns the hash of u to fit in a hash table with h bits.
-// Preferably h should be a constant and should always be <64.
-func hash8(u uint64, h uint8) uint32 {
- return uint32((u * prime8bytes) >> ((64 - h) & reg8SizeMask64))
-}
-
-// hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits.
-// Preferably h should be a constant and should always be <64.
-func hash6(u uint64, h uint8) uint32 {
- return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & reg8SizeMask64))
+// hashLen returns a hash of the lowest mls bytes of with length output bits.
+// mls must be >=3 and <=8. Any other value will return hash for 4 bytes.
+// length should always be < 32.
+// Preferably length and mls should be a constant for inlining.
+func hashLen(u uint64, length, mls uint8) uint32 {
+ switch mls {
+ case 3:
+ return (uint32(u<<8) * prime3bytes) >> (32 - length)
+ case 5:
+ return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length))
+ case 6:
+ return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length))
+ case 7:
+ return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length))
+ case 8:
+ return uint32((u * prime8bytes) >> (64 - length))
+ default:
+ return (uint32(u) * prime4bytes) >> (32 - length)
+ }
}
// matchlen will return the match length between offsets and t in src.
@@ -179,7 +162,7 @@ func (e *fastGen) matchlen(s, t int32, src []byte) int32 {
// matchlenLong will return the match length between offsets and t in src.
// It is assumed that s > t, that t >=0 and s < len(src).
func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 {
- if debugDecode {
+ if debugDeflate {
if t >= s {
panic(fmt.Sprint("t >=s:", t, s))
}
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
index fb1701eeccea7..89a5dd89f9835 100644
--- a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
+++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
@@ -8,6 +8,7 @@ import (
"encoding/binary"
"fmt"
"io"
+ "math"
)
const (
@@ -24,6 +25,10 @@ const (
codegenCodeCount = 19
badCode = 255
+ // maxPredefinedTokens is the maximum number of tokens
+ // where we check if fixed size is smaller.
+ maxPredefinedTokens = 250
+
// bufferFlushSize indicates the buffer size
// after which bytes are flushed to the writer.
// Should preferably be a multiple of 6, since
@@ -36,8 +41,11 @@ const (
bufferSize = bufferFlushSize + 8
)
+// Minimum length code that emits bits.
+const lengthExtraBitsMinCode = 8
+
// The number of extra bits needed by length code X - LENGTH_CODES_START.
-var lengthExtraBits = [32]int8{
+var lengthExtraBits = [32]uint8{
/* 257 */ 0, 0, 0,
/* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2,
/* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4,
@@ -51,19 +59,22 @@ var lengthBase = [32]uint8{
64, 80, 96, 112, 128, 160, 192, 224, 255,
}
+// Minimum offset code that emits bits.
+const offsetExtraBitsMinCode = 4
+
// offset code word extra bits.
-var offsetExtraBits = [64]int8{
+var offsetExtraBits = [32]int8{
0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
9, 9, 10, 10, 11, 11, 12, 12, 13, 13,
/* extended window */
- 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20,
+ 14, 14,
}
var offsetCombined = [32]uint32{}
func init() {
- var offsetBase = [64]uint32{
+ var offsetBase = [32]uint32{
/* normal deflate */
0x000000, 0x000001, 0x000002, 0x000003, 0x000004,
0x000006, 0x000008, 0x00000c, 0x000010, 0x000018,
@@ -73,17 +84,15 @@ func init() {
0x001800, 0x002000, 0x003000, 0x004000, 0x006000,
/* extended window */
- 0x008000, 0x00c000, 0x010000, 0x018000, 0x020000,
- 0x030000, 0x040000, 0x060000, 0x080000, 0x0c0000,
- 0x100000, 0x180000, 0x200000, 0x300000,
+ 0x008000, 0x00c000,
}
for i := range offsetCombined[:] {
// Don't use extended window values...
- if offsetBase[i] > 0x006000 {
+ if offsetExtraBits[i] == 0 || offsetBase[i] > 0x006000 {
continue
}
- offsetCombined[i] = uint32(offsetExtraBits[i])<<16 | (offsetBase[i])
+ offsetCombined[i] = uint32(offsetExtraBits[i]) | (offsetBase[i] << 8)
}
}
@@ -99,7 +108,7 @@ type huffmanBitWriter struct {
// Data waiting to be written is bytes[0:nbytes]
// and then the low nbits of bits.
bits uint64
- nbits uint16
+ nbits uint8
nbytes uint8
lastHuffMan bool
literalEncoding *huffmanEncoder
@@ -160,7 +169,7 @@ func (w *huffmanBitWriter) canReuse(t *tokens) (ok bool) {
b := w.offsetEncoding.codes
b = b[:len(a)]
for i, v := range a {
- if v != 0 && b[i].len == 0 {
+ if v != 0 && b[i].zero() {
return false
}
}
@@ -169,7 +178,7 @@ func (w *huffmanBitWriter) canReuse(t *tokens) (ok bool) {
b = w.literalEncoding.codes[256:literalCount]
b = b[:len(a)]
for i, v := range a {
- if v != 0 && b[i].len == 0 {
+ if v != 0 && b[i].zero() {
return false
}
}
@@ -177,7 +186,7 @@ func (w *huffmanBitWriter) canReuse(t *tokens) (ok bool) {
a = t.litHist[:256]
b = w.literalEncoding.codes[:len(a)]
for i, v := range a {
- if v != 0 && b[i].len == 0 {
+ if v != 0 && b[i].zero() {
return false
}
}
@@ -217,7 +226,7 @@ func (w *huffmanBitWriter) write(b []byte) {
_, w.err = w.writer.Write(b)
}
-func (w *huffmanBitWriter) writeBits(b int32, nb uint16) {
+func (w *huffmanBitWriter) writeBits(b int32, nb uint8) {
w.bits |= uint64(b) << (w.nbits & 63)
w.nbits += nb
if w.nbits >= 48 {
@@ -256,9 +265,9 @@ func (w *huffmanBitWriter) writeBytes(bytes []byte) {
// Codes 0-15 are single byte codes. Codes 16-18 are followed by additional
// information. Code badCode is an end marker
//
-// numLiterals The number of literals in literalEncoding
-// numOffsets The number of offsets in offsetEncoding
-// litenc, offenc The literal and offset encoder to use
+// numLiterals The number of literals in literalEncoding
+// numOffsets The number of offsets in offsetEncoding
+// litenc, offenc The literal and offset encoder to use
func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litEnc, offEnc *huffmanEncoder) {
for i := range w.codegenFreq {
w.codegenFreq[i] = 0
@@ -271,12 +280,12 @@ func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litE
// Copy the concatenated code sizes to codegen. Put a marker at the end.
cgnl := codegen[:numLiterals]
for i := range cgnl {
- cgnl[i] = uint8(litEnc.codes[i].len)
+ cgnl[i] = litEnc.codes[i].len()
}
cgnl = codegen[numLiterals : numLiterals+numOffsets]
for i := range cgnl {
- cgnl[i] = uint8(offEnc.codes[i].len)
+ cgnl[i] = offEnc.codes[i].len()
}
codegen[numLiterals+numOffsets] = badCode
@@ -419,8 +428,8 @@ func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) {
func (w *huffmanBitWriter) writeCode(c hcode) {
// The function does not get inlined if we "& 63" the shift.
- w.bits |= uint64(c.code) << (w.nbits & 63)
- w.nbits += c.len
+ w.bits |= c.code64() << (w.nbits & 63)
+ w.nbits += c.len()
if w.nbits >= 48 {
w.writeOutBits()
}
@@ -451,9 +460,9 @@ func (w *huffmanBitWriter) writeOutBits() {
// Write the header of a dynamic Huffman block to the output stream.
//
-// numLiterals The number of literals specified in codegen
-// numOffsets The number of offsets specified in codegen
-// numCodegens The number of codegens used in codegen
+// numLiterals The number of literals specified in codegen
+// numOffsets The number of offsets specified in codegen
+// numCodegens The number of codegens used in codegen
func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, numCodegens int, isEof bool) {
if w.err != nil {
return
@@ -468,7 +477,7 @@ func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, n
w.writeBits(int32(numCodegens-4), 4)
for i := 0; i < numCodegens; i++ {
- value := uint(w.codegenEncoding.codes[codegenOrder[i]].len)
+ value := uint(w.codegenEncoding.codes[codegenOrder[i]].len())
w.writeBits(int32(value), 3)
}
@@ -573,7 +582,10 @@ func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) {
// Fixed Huffman baseline.
var literalEncoding = fixedLiteralEncoding
var offsetEncoding = fixedOffsetEncoding
- var size = w.fixedSize(extraBits)
+ var size = math.MaxInt32
+ if tokens.n < maxPredefinedTokens {
+ size = w.fixedSize(extraBits)
+ }
// Dynamic Huffman?
var numCodegens int
@@ -658,7 +670,7 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b
// Estimate size for using a new table.
// Use the previous header size as the best estimate.
newSize := w.lastHeader + tokens.EstimatedBits()
- newSize += int(w.literalEncoding.codes[endBlockMarker].len) + newSize>>w.logNewTablePenalty
+ newSize += int(w.literalEncoding.codes[endBlockMarker].len()) + newSize>>w.logNewTablePenalty
// The estimated size is calculated as an optimal table.
// We add a penalty to make it more realistic and re-use a bit more.
@@ -674,19 +686,21 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b
size = reuseSize
}
- if preSize := w.fixedSize(extraBits) + 7; usePrefs && preSize < size {
- // Check if we get a reasonable size decrease.
- if storable && ssize <= size {
- w.writeStoredHeader(len(input), eof)
- w.writeBytes(input)
+ if tokens.n < maxPredefinedTokens {
+ if preSize := w.fixedSize(extraBits) + 7; usePrefs && preSize < size {
+ // Check if we get a reasonable size decrease.
+ if storable && ssize <= size {
+ w.writeStoredHeader(len(input), eof)
+ w.writeBytes(input)
+ return
+ }
+ w.writeFixedHeader(eof)
+ if !sync {
+ tokens.AddEOB()
+ }
+ w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes)
return
}
- w.writeFixedHeader(eof)
- if !sync {
- tokens.AddEOB()
- }
- w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes)
- return
}
// Check if we get a reasonable size decrease.
if storable && ssize <= size {
@@ -719,19 +733,21 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b
size, numCodegens = w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits)
// Store predefined, if we don't get a reasonable improvement.
- if preSize := w.fixedSize(extraBits); usePrefs && preSize <= size {
- // Store bytes, if we don't get an improvement.
- if storable && ssize <= preSize {
- w.writeStoredHeader(len(input), eof)
- w.writeBytes(input)
+ if tokens.n < maxPredefinedTokens {
+ if preSize := w.fixedSize(extraBits); usePrefs && preSize <= size {
+ // Store bytes, if we don't get an improvement.
+ if storable && ssize <= preSize {
+ w.writeStoredHeader(len(input), eof)
+ w.writeBytes(input)
+ return
+ }
+ w.writeFixedHeader(eof)
+ if !sync {
+ tokens.AddEOB()
+ }
+ w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes)
return
}
- w.writeFixedHeader(eof)
- if !sync {
- tokens.AddEOB()
- }
- w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes)
- return
}
if storable && ssize <= size {
@@ -774,9 +790,11 @@ func (w *huffmanBitWriter) fillTokens() {
// and offsetEncoding.
// The number of literal and offset tokens is returned.
func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, numOffsets int) {
- copy(w.literalFreq[:], t.litHist[:])
- copy(w.literalFreq[256:], t.extraHist[:])
- copy(w.offsetFreq[:], t.offHist[:offsetCodeCount])
+ //copy(w.literalFreq[:], t.litHist[:])
+ *(*[256]uint16)(w.literalFreq[:]) = t.litHist
+ //copy(w.literalFreq[256:], t.extraHist[:])
+ *(*[32]uint16)(w.literalFreq[256:]) = t.extraHist
+ w.offsetFreq = t.offHist
if t.n == 0 {
return
@@ -835,11 +853,11 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
bits, nbits, nbytes := w.bits, w.nbits, w.nbytes
for _, t := range tokens {
- if t < matchType {
+ if t < 256 {
//w.writeCode(lits[t.literal()])
- c := lits[t.literal()]
- bits |= uint64(c.code) << (nbits & 63)
- nbits += c.len
+ c := lits[t]
+ bits |= c.code64() << (nbits & 63)
+ nbits += c.len()
if nbits >= 48 {
binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
@@ -860,14 +878,14 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
// Write the length
length := t.length()
- lengthCode := lengthCode(length)
+ lengthCode := lengthCode(length) & 31
if false {
- w.writeCode(lengths[lengthCode&31])
+ w.writeCode(lengths[lengthCode])
} else {
// inlined
- c := lengths[lengthCode&31]
- bits |= uint64(c.code) << (nbits & 63)
- nbits += c.len
+ c := lengths[lengthCode]
+ bits |= c.code64() << (nbits & 63)
+ nbits += c.len()
if nbits >= 48 {
binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
@@ -885,10 +903,10 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
}
}
- extraLengthBits := uint16(lengthExtraBits[lengthCode&31])
- if extraLengthBits > 0 {
+ if lengthCode >= lengthExtraBitsMinCode {
+ extraLengthBits := lengthExtraBits[lengthCode]
//w.writeBits(extraLength, extraLengthBits)
- extraLength := int32(length - lengthBase[lengthCode&31])
+ extraLength := int32(length - lengthBase[lengthCode])
bits |= uint64(extraLength) << (nbits & 63)
nbits += extraLengthBits
if nbits >= 48 {
@@ -909,15 +927,14 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
}
// Write the offset
offset := t.offset()
- offsetCode := offset >> 16
- offset &= matchOffsetOnlyMask
+ offsetCode := (offset >> 16) & 31
if false {
- w.writeCode(offs[offsetCode&31])
+ w.writeCode(offs[offsetCode])
} else {
// inlined
c := offs[offsetCode]
- bits |= uint64(c.code) << (nbits & 63)
- nbits += c.len
+ bits |= c.code64() << (nbits & 63)
+ nbits += c.len()
if nbits >= 48 {
binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
@@ -934,11 +951,12 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
}
}
}
- offsetComb := offsetCombined[offsetCode]
- if offsetComb > 1<<16 {
+
+ if offsetCode >= offsetExtraBitsMinCode {
+ offsetComb := offsetCombined[offsetCode]
//w.writeBits(extraOffset, extraOffsetBits)
- bits |= uint64(offset-(offsetComb&0xffff)) << (nbits & 63)
- nbits += uint16(offsetComb >> 16)
+ bits |= uint64((offset-(offsetComb>>8))&matchOffsetOnlyMask) << (nbits & 63)
+ nbits += uint8(offsetComb)
if nbits >= 48 {
binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
@@ -993,8 +1011,6 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
}
}
- // Fill is rarely better...
- const fill = false
const numLiterals = endBlockMarker + 1
const numOffsets = 1
@@ -1003,26 +1019,46 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
// Assume header is around 70 bytes:
// https://stackoverflow.com/a/25454430
const guessHeaderSizeBits = 70 * 8
- histogram(input, w.literalFreq[:numLiterals], fill)
- w.literalFreq[endBlockMarker] = 1
- w.tmpLitEncoding.generate(w.literalFreq[:numLiterals], 15)
- if fill {
- // Clear fill...
- for i := range w.literalFreq[:numLiterals] {
- w.literalFreq[i] = 0
+ histogram(input, w.literalFreq[:numLiterals])
+ ssize, storable := w.storedSize(input)
+ if storable && len(input) > 1024 {
+ // Quick check for incompressible content.
+ abs := float64(0)
+ avg := float64(len(input)) / 256
+ max := float64(len(input) * 2)
+ for _, v := range w.literalFreq[:256] {
+ diff := float64(v) - avg
+ abs += diff * diff
+ if abs > max {
+ break
+ }
+ }
+ if abs < max {
+ if debugDeflate {
+ fmt.Println("stored", abs, "<", max)
+ }
+ // No chance we can compress this...
+ w.writeStoredHeader(len(input), eof)
+ w.writeBytes(input)
+ return
}
- histogram(input, w.literalFreq[:numLiterals], false)
}
+ w.literalFreq[endBlockMarker] = 1
+ w.tmpLitEncoding.generate(w.literalFreq[:numLiterals], 15)
estBits := w.tmpLitEncoding.canReuseBits(w.literalFreq[:numLiterals])
- estBits += w.lastHeader
- if w.lastHeader == 0 {
- estBits += guessHeaderSizeBits
+ if estBits < math.MaxInt32 {
+ estBits += w.lastHeader
+ if w.lastHeader == 0 {
+ estBits += guessHeaderSizeBits
+ }
+ estBits += estBits >> w.logNewTablePenalty
}
- estBits += estBits >> w.logNewTablePenalty
// Store bytes, if we don't get a reasonable improvement.
- ssize, storable := w.storedSize(input)
if storable && ssize <= estBits {
+ if debugDeflate {
+ fmt.Println("stored,", ssize, "<=", estBits)
+ }
w.writeStoredHeader(len(input), eof)
w.writeBytes(input)
return
@@ -1033,7 +1069,7 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
if estBits < reuseSize {
if debugDeflate {
- //fmt.Println("not reusing, reuse:", reuseSize/8, "> new:", estBits/8, "- header est:", w.lastHeader/8)
+ fmt.Println("NOT reusing, reuse:", reuseSize/8, "> new:", estBits/8, "header est:", w.lastHeader/8, "bytes")
}
// We owe an EOB
w.writeCode(w.literalEncoding.codes[endBlockMarker])
@@ -1067,6 +1103,9 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
// Go 1.16 LOVES having these on stack. At least 1.5x the speed.
bits, nbits, nbytes := w.bits, w.nbits, w.nbytes
+ if debugDeflate {
+ count -= int(nbytes)*8 + int(nbits)
+ }
// Unroll, write 3 codes/loop.
// Fastest number of unrolls.
for len(input) > 3 {
@@ -1076,35 +1115,31 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
bits >>= (n * 8) & 63
nbits -= n * 8
- nbytes += uint8(n)
+ nbytes += n
}
if nbytes >= bufferFlushSize {
if w.err != nil {
nbytes = 0
return
}
+ if debugDeflate {
+ count += int(nbytes) * 8
+ }
_, w.err = w.writer.Write(w.bytes[:nbytes])
nbytes = 0
}
a, b := encoding[input[0]], encoding[input[1]]
- bits |= uint64(a.code) << (nbits & 63)
- bits |= uint64(b.code) << ((nbits + a.len) & 63)
+ bits |= a.code64() << (nbits & 63)
+ bits |= b.code64() << ((nbits + a.len()) & 63)
c := encoding[input[2]]
- nbits += b.len + a.len
- bits |= uint64(c.code) << (nbits & 63)
- nbits += c.len
+ nbits += b.len() + a.len()
+ bits |= c.code64() << (nbits & 63)
+ nbits += c.len()
input = input[3:]
}
// Remaining...
for _, t := range input {
- // Bitwriting inlined, ~30% speedup
- c := encoding[t]
- bits |= uint64(c.code) << (nbits & 63)
- nbits += c.len
- if debugDeflate {
- count += int(c.len)
- }
if nbits >= 48 {
binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
@@ -1116,17 +1151,34 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
nbytes = 0
return
}
+ if debugDeflate {
+ count += int(nbytes) * 8
+ }
_, w.err = w.writer.Write(w.bytes[:nbytes])
nbytes = 0
}
}
+ // Bitwriting inlined, ~30% speedup
+ c := encoding[t]
+ bits |= c.code64() << (nbits & 63)
+
+ nbits += c.len()
+ if debugDeflate {
+ count += int(c.len())
+ }
}
// Restore...
w.bits, w.nbits, w.nbytes = bits, nbits, nbytes
if debugDeflate {
- fmt.Println("wrote", count/8, "bytes")
+ nb := count + int(nbytes)*8 + int(nbits)
+ fmt.Println("wrote", nb, "bits,", nb/8, "bytes.")
+ }
+ // Flush if needed to have space.
+ if w.nbits >= 48 {
+ w.writeOutBits()
}
+
if eof || sync {
w.writeCode(w.literalEncoding.codes[endBlockMarker])
w.lastHeader = 0
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_code.go b/vendor/github.com/klauspost/compress/flate/huffman_code.go
index 67b2b38728434..be7b58b473f90 100644
--- a/vendor/github.com/klauspost/compress/flate/huffman_code.go
+++ b/vendor/github.com/klauspost/compress/flate/huffman_code.go
@@ -16,8 +16,18 @@ const (
)
// hcode is a huffman code with a bit code and bit length.
-type hcode struct {
- code, len uint16
+type hcode uint32
+
+func (h hcode) len() uint8 {
+ return uint8(h)
+}
+
+func (h hcode) code64() uint64 {
+ return uint64(h >> 8)
+}
+
+func (h hcode) zero() bool {
+ return h == 0
}
type huffmanEncoder struct {
@@ -56,9 +66,12 @@ type levelInfo struct {
}
// set sets the code and length of an hcode.
-func (h *hcode) set(code uint16, length uint16) {
- h.len = length
- h.code = code
+func (h *hcode) set(code uint16, length uint8) {
+ *h = hcode(length) | (hcode(code) << 8)
+}
+
+func newhcode(code uint16, length uint8) hcode {
+ return hcode(length) | (hcode(code) << 8)
}
func reverseBits(number uint16, bitLength byte) uint16 {
@@ -80,7 +93,7 @@ func generateFixedLiteralEncoding() *huffmanEncoder {
var ch uint16
for ch = 0; ch < literalCount; ch++ {
var bits uint16
- var size uint16
+ var size uint8
switch {
case ch < 144:
// size 8, 000110000 .. 10111111
@@ -99,7 +112,7 @@ func generateFixedLiteralEncoding() *huffmanEncoder {
bits = ch + 192 - 280
size = 8
}
- codes[ch] = hcode{code: reverseBits(bits, byte(size)), len: size}
+ codes[ch] = newhcode(reverseBits(bits, size), size)
}
return h
}
@@ -108,7 +121,7 @@ func generateFixedOffsetEncoding() *huffmanEncoder {
h := newHuffmanEncoder(30)
codes := h.codes
for ch := range codes {
- codes[ch] = hcode{code: reverseBits(uint16(ch), 5), len: 5}
+ codes[ch] = newhcode(reverseBits(uint16(ch), 5), 5)
}
return h
}
@@ -120,7 +133,7 @@ func (h *huffmanEncoder) bitLength(freq []uint16) int {
var total int
for i, f := range freq {
if f != 0 {
- total += int(f) * int(h.codes[i].len)
+ total += int(f) * int(h.codes[i].len())
}
}
return total
@@ -129,9 +142,7 @@ func (h *huffmanEncoder) bitLength(freq []uint16) int {
func (h *huffmanEncoder) bitLengthRaw(b []byte) int {
var total int
for _, f := range b {
- if f != 0 {
- total += int(h.codes[f].len)
- }
+ total += int(h.codes[f].len())
}
return total
}
@@ -142,10 +153,10 @@ func (h *huffmanEncoder) canReuseBits(freq []uint16) int {
for i, f := range freq {
if f != 0 {
code := h.codes[i]
- if code.len == 0 {
+ if code.zero() {
return math.MaxInt32
}
- total += int(f) * int(code.len)
+ total += int(f) * int(code.len())
}
}
return total
@@ -157,13 +168,18 @@ func (h *huffmanEncoder) canReuseBits(freq []uint16) int {
// The cases of 0, 1, and 2 literals are handled by special case code.
//
// list An array of the literals with non-zero frequencies
-// and their associated frequencies. The array is in order of increasing
-// frequency, and has as its last element a special element with frequency
-// MaxInt32
+//
+// and their associated frequencies. The array is in order of increasing
+// frequency, and has as its last element a special element with frequency
+// MaxInt32
+//
// maxBits The maximum number of bits that should be used to encode any literal.
-// Must be less than 16.
+//
+// Must be less than 16.
+//
// return An integer array in which array[i] indicates the number of literals
-// that should be encoded in i bits.
+//
+// that should be encoded in i bits.
func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
if maxBits >= maxBitsLimit {
panic("flate: maxBits too large")
@@ -189,14 +205,19 @@ func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
// of the level j ancestor.
var leafCounts [maxBitsLimit][maxBitsLimit]int32
+ // Descending to only have 1 bounds check.
+ l2f := int32(list[2].freq)
+ l1f := int32(list[1].freq)
+ l0f := int32(list[0].freq) + int32(list[1].freq)
+
for level := int32(1); level <= maxBits; level++ {
// For every level, the first two items are the first two characters.
// We initialize the levels as if we had already figured this out.
levels[level] = levelInfo{
level: level,
- lastFreq: int32(list[1].freq),
- nextCharFreq: int32(list[2].freq),
- nextPairFreq: int32(list[0].freq) + int32(list[1].freq),
+ lastFreq: l1f,
+ nextCharFreq: l2f,
+ nextPairFreq: l0f,
}
leafCounts[level][level] = 2
if level == 1 {
@@ -207,8 +228,8 @@ func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
// We need a total of 2*n - 2 items at top level and have already generated 2.
levels[maxBits].needed = 2*n - 4
- level := maxBits
- for {
+ level := uint32(maxBits)
+ for level < 16 {
l := &levels[level]
if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 {
// We've run out of both leafs and pairs.
@@ -240,7 +261,13 @@ func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
// more values in the level below
l.lastFreq = l.nextPairFreq
// Take leaf counts from the lower level, except counts[level] remains the same.
- copy(leafCounts[level][:level], leafCounts[level-1][:level])
+ if true {
+ save := leafCounts[level][level]
+ leafCounts[level] = leafCounts[level-1]
+ leafCounts[level][level] = save
+ } else {
+ copy(leafCounts[level][:level], leafCounts[level-1][:level])
+ }
levels[l.level-1].needed = 2
}
@@ -298,7 +325,7 @@ func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalN
sortByLiteral(chunk)
for _, node := range chunk {
- h.codes[node.literal] = hcode{code: reverseBits(code, uint8(n)), len: uint16(n)}
+ h.codes[node.literal] = newhcode(reverseBits(code, uint8(n)), uint8(n))
code++
}
list = list[0 : len(list)-int(bits)]
@@ -311,6 +338,7 @@ func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalN
// maxBits The maximum number of bits to use for any literal.
func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) {
list := h.freqcache[:len(freq)+1]
+ codes := h.codes[:len(freq)]
// Number of non-zero literals
count := 0
// Set list to be the set of all non-zero literals and their frequencies
@@ -319,11 +347,10 @@ func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) {
list[count] = literalNode{uint16(i), f}
count++
} else {
- list[count] = literalNode{}
- h.codes[i].len = 0
+ codes[i] = 0
}
}
- list[len(freq)] = literalNode{}
+ list[count] = literalNode{}
list = list[:count]
if count <= 2 {
@@ -354,21 +381,37 @@ func atLeastOne(v float32) float32 {
return v
}
-// Unassigned values are assigned '1' in the histogram.
-func fillHist(b []uint16) {
- for i, v := range b {
- if v == 0 {
- b[i] = 1
+func histogram(b []byte, h []uint16) {
+ if true && len(b) >= 8<<10 {
+ // Split for bigger inputs
+ histogramSplit(b, h)
+ } else {
+ h = h[:256]
+ for _, t := range b {
+ h[t]++
}
}
}
-func histogram(b []byte, h []uint16, fill bool) {
+func histogramSplit(b []byte, h []uint16) {
+ // Tested, and slightly faster than 2-way.
+ // Writing to separate arrays and combining is also slightly slower.
h = h[:256]
- for _, t := range b {
- h[t]++
+ for len(b)&3 != 0 {
+ h[b[0]]++
+ b = b[1:]
}
- if fill {
- fillHist(h)
+ n := len(b) / 4
+ x, y, z, w := b[:n], b[n:], b[n+n:], b[n+n+n:]
+ y, z, w = y[:len(x)], z[:len(x)], w[:len(x)]
+ for i, t := range x {
+ v0 := &h[t]
+ v1 := &h[y[i]]
+ v3 := &h[w[i]]
+ v2 := &h[z[i]]
+ *v0++
+ *v1++
+ *v2++
+ *v3++
}
}
diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go
index d5f62f6a2ca4f..414c0bea9fa91 100644
--- a/vendor/github.com/klauspost/compress/flate/inflate.go
+++ b/vendor/github.com/klauspost/compress/flate/inflate.go
@@ -36,6 +36,13 @@ type lengthExtra struct {
var decCodeToLen = [32]lengthExtra{{length: 0x0, extra: 0x0}, {length: 0x1, extra: 0x0}, {length: 0x2, extra: 0x0}, {length: 0x3, extra: 0x0}, {length: 0x4, extra: 0x0}, {length: 0x5, extra: 0x0}, {length: 0x6, extra: 0x0}, {length: 0x7, extra: 0x0}, {length: 0x8, extra: 0x1}, {length: 0xa, extra: 0x1}, {length: 0xc, extra: 0x1}, {length: 0xe, extra: 0x1}, {length: 0x10, extra: 0x2}, {length: 0x14, extra: 0x2}, {length: 0x18, extra: 0x2}, {length: 0x1c, extra: 0x2}, {length: 0x20, extra: 0x3}, {length: 0x28, extra: 0x3}, {length: 0x30, extra: 0x3}, {length: 0x38, extra: 0x3}, {length: 0x40, extra: 0x4}, {length: 0x50, extra: 0x4}, {length: 0x60, extra: 0x4}, {length: 0x70, extra: 0x4}, {length: 0x80, extra: 0x5}, {length: 0xa0, extra: 0x5}, {length: 0xc0, extra: 0x5}, {length: 0xe0, extra: 0x5}, {length: 0xff, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}}
+var bitMask32 = [32]uint32{
+ 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF,
+ 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF,
+ 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF,
+ 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF,
+} // up to 32 bits
+
// Initialize the fixedHuffmanDecoder only once upon first use.
var fixedOnce sync.Once
var fixedHuffmanDecoder huffmanDecoder
@@ -559,221 +566,6 @@ func (f *decompressor) readHuffman() error {
return nil
}
-// Decode a single Huffman block from f.
-// hl and hd are the Huffman states for the lit/length values
-// and the distance values, respectively. If hd == nil, using the
-// fixed distance encoding associated with fixed Huffman blocks.
-func (f *decompressor) huffmanBlockGeneric() {
- const (
- stateInit = iota // Zero value must be stateInit
- stateDict
- )
-
- switch f.stepState {
- case stateInit:
- goto readLiteral
- case stateDict:
- goto copyHistory
- }
-
-readLiteral:
- // Read literal and/or (length, distance) according to RFC section 3.2.3.
- {
- var v int
- {
- // Inlined v, err := f.huffSym(f.hl)
- // Since a huffmanDecoder can be empty or be composed of a degenerate tree
- // with single element, huffSym must error on these two edge cases. In both
- // cases, the chunks slice will be 0 for the invalid sequence, leading it
- // satisfy the n == 0 check below.
- n := uint(f.hl.maxRead)
- // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
- // but is smart enough to keep local variables in registers, so use nb and b,
- // inline call to moreBits and reassign b,nb back to f on return.
- nb, b := f.nb, f.b
- for {
- for nb < n {
- c, err := f.r.ReadByte()
- if err != nil {
- f.b = b
- f.nb = nb
- f.err = noEOF(err)
- return
- }
- f.roffset++
- b |= uint32(c) << (nb & regSizeMaskUint32)
- nb += 8
- }
- chunk := f.hl.chunks[b&(huffmanNumChunks-1)]
- n = uint(chunk & huffmanCountMask)
- if n > huffmanChunkBits {
- chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask]
- n = uint(chunk & huffmanCountMask)
- }
- if n <= nb {
- if n == 0 {
- f.b = b
- f.nb = nb
- if debugDecode {
- fmt.Println("huffsym: n==0")
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
- f.b = b >> (n & regSizeMaskUint32)
- f.nb = nb - n
- v = int(chunk >> huffmanValueShift)
- break
- }
- }
- }
-
- var n uint // number of bits extra
- var length int
- var err error
- switch {
- case v < 256:
- f.dict.writeByte(byte(v))
- if f.dict.availWrite() == 0 {
- f.toRead = f.dict.readFlush()
- f.step = (*decompressor).huffmanBlockGeneric
- f.stepState = stateInit
- return
- }
- goto readLiteral
- case v == 256:
- f.finishBlock()
- return
- // otherwise, reference to older data
- case v < 265:
- length = v - (257 - 3)
- n = 0
- case v < 269:
- length = v*2 - (265*2 - 11)
- n = 1
- case v < 273:
- length = v*4 - (269*4 - 19)
- n = 2
- case v < 277:
- length = v*8 - (273*8 - 35)
- n = 3
- case v < 281:
- length = v*16 - (277*16 - 67)
- n = 4
- case v < 285:
- length = v*32 - (281*32 - 131)
- n = 5
- case v < maxNumLit:
- length = 258
- n = 0
- default:
- if debugDecode {
- fmt.Println(v, ">= maxNumLit")
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
- if n > 0 {
- for f.nb < n {
- if err = f.moreBits(); err != nil {
- if debugDecode {
- fmt.Println("morebits n>0:", err)
- }
- f.err = err
- return
- }
- }
- length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1))
- f.b >>= n & regSizeMaskUint32
- f.nb -= n
- }
-
- var dist uint32
- if f.hd == nil {
- for f.nb < 5 {
- if err = f.moreBits(); err != nil {
- if debugDecode {
- fmt.Println("morebits f.nb<5:", err)
- }
- f.err = err
- return
- }
- }
- dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3)))
- f.b >>= 5
- f.nb -= 5
- } else {
- sym, err := f.huffSym(f.hd)
- if err != nil {
- if debugDecode {
- fmt.Println("huffsym:", err)
- }
- f.err = err
- return
- }
- dist = uint32(sym)
- }
-
- switch {
- case dist < 4:
- dist++
- case dist < maxNumDist:
- nb := uint(dist-2) >> 1
- // have 1 bit in bottom of dist, need nb more.
- extra := (dist & 1) << (nb & regSizeMaskUint32)
- for f.nb < nb {
- if err = f.moreBits(); err != nil {
- if debugDecode {
- fmt.Println("morebits f.nb<nb:", err)
- }
- f.err = err
- return
- }
- }
- extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1)
- f.b >>= nb & regSizeMaskUint32
- f.nb -= nb
- dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
- default:
- if debugDecode {
- fmt.Println("dist too big:", dist, maxNumDist)
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
-
- // No check on length; encoding can be prescient.
- if dist > uint32(f.dict.histSize()) {
- if debugDecode {
- fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
-
- f.copyLen, f.copyDist = length, int(dist)
- goto copyHistory
- }
-
-copyHistory:
- // Perform a backwards copy according to RFC section 3.2.3.
- {
- cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen)
- if cnt == 0 {
- cnt = f.dict.writeCopy(f.copyDist, f.copyLen)
- }
- f.copyLen -= cnt
-
- if f.dict.availWrite() == 0 || f.copyLen > 0 {
- f.toRead = f.dict.readFlush()
- f.step = (*decompressor).huffmanBlockGeneric // We need to continue this work
- f.stepState = stateDict
- return
- }
- goto readLiteral
- }
-}
-
// Copy a single uncompressed data block from input to output.
func (f *decompressor) dataBlock() {
// Uncompressed.
diff --git a/vendor/github.com/klauspost/compress/flate/inflate_gen.go b/vendor/github.com/klauspost/compress/flate/inflate_gen.go
index cc6db27925ce1..61342b6b88fbd 100644
--- a/vendor/github.com/klauspost/compress/flate/inflate_gen.go
+++ b/vendor/github.com/klauspost/compress/flate/inflate_gen.go
@@ -21,6 +21,11 @@ func (f *decompressor) huffmanBytesBuffer() {
)
fr := f.r.(*bytes.Buffer)
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ fnb, fb, dict := f.nb, f.b, &f.dict
+
switch f.stepState {
case stateInit:
goto readLiteral
@@ -39,41 +44,35 @@ readLiteral:
// cases, the chunks slice will be 0 for the invalid sequence, leading it
// satisfy the n == 0 check below.
n := uint(f.hl.maxRead)
- // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
- // but is smart enough to keep local variables in registers, so use nb and b,
- // inline call to moreBits and reassign b,nb back to f on return.
- nb, b := f.nb, f.b
for {
- for nb < n {
+ for fnb < n {
c, err := fr.ReadByte()
if err != nil {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
f.err = noEOF(err)
return
}
f.roffset++
- b |= uint32(c) << (nb & regSizeMaskUint32)
- nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- chunk := f.hl.chunks[b&(huffmanNumChunks-1)]
+ chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
n = uint(chunk & huffmanCountMask)
if n > huffmanChunkBits {
- chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask]
+ chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
n = uint(chunk & huffmanCountMask)
}
- if n <= nb {
+ if n <= fnb {
if n == 0 {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("huffsym: n==0")
}
f.err = CorruptInputError(f.roffset)
return
}
- f.b = b >> (n & regSizeMaskUint32)
- f.nb = nb - n
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
v = int(chunk >> huffmanValueShift)
break
}
@@ -83,15 +82,17 @@ readLiteral:
var length int
switch {
case v < 256:
- f.dict.writeByte(byte(v))
- if f.dict.availWrite() == 0 {
- f.toRead = f.dict.readFlush()
+ dict.writeByte(byte(v))
+ if dict.availWrite() == 0 {
+ f.toRead = dict.readFlush()
f.step = (*decompressor).huffmanBytesBuffer
f.stepState = stateInit
+ f.b, f.nb = fb, fnb
return
}
goto readLiteral
case v == 256:
+ f.b, f.nb = fb, fnb
f.finishBlock()
return
// otherwise, reference to older data
@@ -101,9 +102,10 @@ readLiteral:
val := decCodeToLen[(v - 257)]
length = int(val.length) + 3
n := uint(val.extra)
- for f.nb < n {
+ for fnb < n {
c, err := fr.ReadByte()
if err != nil {
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("morebits n>0:", err)
}
@@ -111,25 +113,27 @@ readLiteral:
return
}
f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1))
- f.b >>= n & regSizeMaskUint32
- f.nb -= n
+ length += int(fb & bitMask32[n])
+ fb >>= n & regSizeMaskUint32
+ fnb -= n
default:
if debugDecode {
fmt.Println(v, ">= maxNumLit")
}
f.err = CorruptInputError(f.roffset)
+ f.b, f.nb = fb, fnb
return
}
var dist uint32
if f.hd == nil {
- for f.nb < 5 {
+ for fnb < 5 {
c, err := fr.ReadByte()
if err != nil {
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("morebits f.nb<5:", err)
}
@@ -137,12 +141,12 @@ readLiteral:
return
}
f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3)))
- f.b >>= 5
- f.nb -= 5
+ dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
+ fb >>= 5
+ fnb -= 5
} else {
// Since a huffmanDecoder can be empty or be composed of a degenerate tree
// with single element, huffSym must error on these two edge cases. In both
@@ -152,38 +156,35 @@ readLiteral:
// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
// but is smart enough to keep local variables in registers, so use nb and b,
// inline call to moreBits and reassign b,nb back to f on return.
- nb, b := f.nb, f.b
for {
- for nb < n {
+ for fnb < n {
c, err := fr.ReadByte()
if err != nil {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
f.err = noEOF(err)
return
}
f.roffset++
- b |= uint32(c) << (nb & regSizeMaskUint32)
- nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- chunk := f.hd.chunks[b&(huffmanNumChunks-1)]
+ chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
n = uint(chunk & huffmanCountMask)
if n > huffmanChunkBits {
- chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask]
+ chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
n = uint(chunk & huffmanCountMask)
}
- if n <= nb {
+ if n <= fnb {
if n == 0 {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("huffsym: n==0")
}
f.err = CorruptInputError(f.roffset)
return
}
- f.b = b >> (n & regSizeMaskUint32)
- f.nb = nb - n
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
dist = uint32(chunk >> huffmanValueShift)
break
}
@@ -197,9 +198,10 @@ readLiteral:
nb := uint(dist-2) >> 1
// have 1 bit in bottom of dist, need nb more.
extra := (dist & 1) << (nb & regSizeMaskUint32)
- for f.nb < nb {
+ for fnb < nb {
c, err := fr.ReadByte()
if err != nil {
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("morebits f.nb<nb:", err)
}
@@ -207,14 +209,16 @@ readLiteral:
return
}
f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1)
- f.b >>= nb & regSizeMaskUint32
- f.nb -= nb
+ extra |= fb & bitMask32[nb]
+ fb >>= nb & regSizeMaskUint32
+ fnb -= nb
dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+ // slower: dist = bitMask32[nb+1] + 2 + extra
default:
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("dist too big:", dist, maxNumDist)
}
@@ -223,9 +227,10 @@ readLiteral:
}
// No check on length; encoding can be prescient.
- if dist > uint32(f.dict.histSize()) {
+ if dist > uint32(dict.histSize()) {
+ f.b, f.nb = fb, fnb
if debugDecode {
- fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
+ fmt.Println("dist > dict.histSize():", dist, dict.histSize())
}
f.err = CorruptInputError(f.roffset)
return
@@ -238,20 +243,22 @@ readLiteral:
copyHistory:
// Perform a backwards copy according to RFC section 3.2.3.
{
- cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen)
+ cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
if cnt == 0 {
- cnt = f.dict.writeCopy(f.copyDist, f.copyLen)
+ cnt = dict.writeCopy(f.copyDist, f.copyLen)
}
f.copyLen -= cnt
- if f.dict.availWrite() == 0 || f.copyLen > 0 {
- f.toRead = f.dict.readFlush()
+ if dict.availWrite() == 0 || f.copyLen > 0 {
+ f.toRead = dict.readFlush()
f.step = (*decompressor).huffmanBytesBuffer // We need to continue this work
f.stepState = stateDict
+ f.b, f.nb = fb, fnb
return
}
goto readLiteral
}
+ // Not reached
}
// Decode a single Huffman block from f.
@@ -265,6 +272,11 @@ func (f *decompressor) huffmanBytesReader() {
)
fr := f.r.(*bytes.Reader)
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ fnb, fb, dict := f.nb, f.b, &f.dict
+
switch f.stepState {
case stateInit:
goto readLiteral
@@ -283,41 +295,35 @@ readLiteral:
// cases, the chunks slice will be 0 for the invalid sequence, leading it
// satisfy the n == 0 check below.
n := uint(f.hl.maxRead)
- // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
- // but is smart enough to keep local variables in registers, so use nb and b,
- // inline call to moreBits and reassign b,nb back to f on return.
- nb, b := f.nb, f.b
for {
- for nb < n {
+ for fnb < n {
c, err := fr.ReadByte()
if err != nil {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
f.err = noEOF(err)
return
}
f.roffset++
- b |= uint32(c) << (nb & regSizeMaskUint32)
- nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- chunk := f.hl.chunks[b&(huffmanNumChunks-1)]
+ chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
n = uint(chunk & huffmanCountMask)
if n > huffmanChunkBits {
- chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask]
+ chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
n = uint(chunk & huffmanCountMask)
}
- if n <= nb {
+ if n <= fnb {
if n == 0 {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("huffsym: n==0")
}
f.err = CorruptInputError(f.roffset)
return
}
- f.b = b >> (n & regSizeMaskUint32)
- f.nb = nb - n
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
v = int(chunk >> huffmanValueShift)
break
}
@@ -327,15 +333,17 @@ readLiteral:
var length int
switch {
case v < 256:
- f.dict.writeByte(byte(v))
- if f.dict.availWrite() == 0 {
- f.toRead = f.dict.readFlush()
+ dict.writeByte(byte(v))
+ if dict.availWrite() == 0 {
+ f.toRead = dict.readFlush()
f.step = (*decompressor).huffmanBytesReader
f.stepState = stateInit
+ f.b, f.nb = fb, fnb
return
}
goto readLiteral
case v == 256:
+ f.b, f.nb = fb, fnb
f.finishBlock()
return
// otherwise, reference to older data
@@ -345,9 +353,10 @@ readLiteral:
val := decCodeToLen[(v - 257)]
length = int(val.length) + 3
n := uint(val.extra)
- for f.nb < n {
+ for fnb < n {
c, err := fr.ReadByte()
if err != nil {
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("morebits n>0:", err)
}
@@ -355,25 +364,27 @@ readLiteral:
return
}
f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1))
- f.b >>= n & regSizeMaskUint32
- f.nb -= n
+ length += int(fb & bitMask32[n])
+ fb >>= n & regSizeMaskUint32
+ fnb -= n
default:
if debugDecode {
fmt.Println(v, ">= maxNumLit")
}
f.err = CorruptInputError(f.roffset)
+ f.b, f.nb = fb, fnb
return
}
var dist uint32
if f.hd == nil {
- for f.nb < 5 {
+ for fnb < 5 {
c, err := fr.ReadByte()
if err != nil {
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("morebits f.nb<5:", err)
}
@@ -381,12 +392,12 @@ readLiteral:
return
}
f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3)))
- f.b >>= 5
- f.nb -= 5
+ dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
+ fb >>= 5
+ fnb -= 5
} else {
// Since a huffmanDecoder can be empty or be composed of a degenerate tree
// with single element, huffSym must error on these two edge cases. In both
@@ -396,38 +407,35 @@ readLiteral:
// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
// but is smart enough to keep local variables in registers, so use nb and b,
// inline call to moreBits and reassign b,nb back to f on return.
- nb, b := f.nb, f.b
for {
- for nb < n {
+ for fnb < n {
c, err := fr.ReadByte()
if err != nil {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
f.err = noEOF(err)
return
}
f.roffset++
- b |= uint32(c) << (nb & regSizeMaskUint32)
- nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- chunk := f.hd.chunks[b&(huffmanNumChunks-1)]
+ chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
n = uint(chunk & huffmanCountMask)
if n > huffmanChunkBits {
- chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask]
+ chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
n = uint(chunk & huffmanCountMask)
}
- if n <= nb {
+ if n <= fnb {
if n == 0 {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("huffsym: n==0")
}
f.err = CorruptInputError(f.roffset)
return
}
- f.b = b >> (n & regSizeMaskUint32)
- f.nb = nb - n
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
dist = uint32(chunk >> huffmanValueShift)
break
}
@@ -441,9 +449,10 @@ readLiteral:
nb := uint(dist-2) >> 1
// have 1 bit in bottom of dist, need nb more.
extra := (dist & 1) << (nb & regSizeMaskUint32)
- for f.nb < nb {
+ for fnb < nb {
c, err := fr.ReadByte()
if err != nil {
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("morebits f.nb<nb:", err)
}
@@ -451,14 +460,16 @@ readLiteral:
return
}
f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1)
- f.b >>= nb & regSizeMaskUint32
- f.nb -= nb
+ extra |= fb & bitMask32[nb]
+ fb >>= nb & regSizeMaskUint32
+ fnb -= nb
dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+ // slower: dist = bitMask32[nb+1] + 2 + extra
default:
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("dist too big:", dist, maxNumDist)
}
@@ -467,9 +478,10 @@ readLiteral:
}
// No check on length; encoding can be prescient.
- if dist > uint32(f.dict.histSize()) {
+ if dist > uint32(dict.histSize()) {
+ f.b, f.nb = fb, fnb
if debugDecode {
- fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
+ fmt.Println("dist > dict.histSize():", dist, dict.histSize())
}
f.err = CorruptInputError(f.roffset)
return
@@ -482,20 +494,22 @@ readLiteral:
copyHistory:
// Perform a backwards copy according to RFC section 3.2.3.
{
- cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen)
+ cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
if cnt == 0 {
- cnt = f.dict.writeCopy(f.copyDist, f.copyLen)
+ cnt = dict.writeCopy(f.copyDist, f.copyLen)
}
f.copyLen -= cnt
- if f.dict.availWrite() == 0 || f.copyLen > 0 {
- f.toRead = f.dict.readFlush()
+ if dict.availWrite() == 0 || f.copyLen > 0 {
+ f.toRead = dict.readFlush()
f.step = (*decompressor).huffmanBytesReader // We need to continue this work
f.stepState = stateDict
+ f.b, f.nb = fb, fnb
return
}
goto readLiteral
}
+ // Not reached
}
// Decode a single Huffman block from f.
@@ -509,6 +523,11 @@ func (f *decompressor) huffmanBufioReader() {
)
fr := f.r.(*bufio.Reader)
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ fnb, fb, dict := f.nb, f.b, &f.dict
+
switch f.stepState {
case stateInit:
goto readLiteral
@@ -527,41 +546,35 @@ readLiteral:
// cases, the chunks slice will be 0 for the invalid sequence, leading it
// satisfy the n == 0 check below.
n := uint(f.hl.maxRead)
- // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
- // but is smart enough to keep local variables in registers, so use nb and b,
- // inline call to moreBits and reassign b,nb back to f on return.
- nb, b := f.nb, f.b
for {
- for nb < n {
+ for fnb < n {
c, err := fr.ReadByte()
if err != nil {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
f.err = noEOF(err)
return
}
f.roffset++
- b |= uint32(c) << (nb & regSizeMaskUint32)
- nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- chunk := f.hl.chunks[b&(huffmanNumChunks-1)]
+ chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
n = uint(chunk & huffmanCountMask)
if n > huffmanChunkBits {
- chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask]
+ chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
n = uint(chunk & huffmanCountMask)
}
- if n <= nb {
+ if n <= fnb {
if n == 0 {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("huffsym: n==0")
}
f.err = CorruptInputError(f.roffset)
return
}
- f.b = b >> (n & regSizeMaskUint32)
- f.nb = nb - n
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
v = int(chunk >> huffmanValueShift)
break
}
@@ -571,15 +584,17 @@ readLiteral:
var length int
switch {
case v < 256:
- f.dict.writeByte(byte(v))
- if f.dict.availWrite() == 0 {
- f.toRead = f.dict.readFlush()
+ dict.writeByte(byte(v))
+ if dict.availWrite() == 0 {
+ f.toRead = dict.readFlush()
f.step = (*decompressor).huffmanBufioReader
f.stepState = stateInit
+ f.b, f.nb = fb, fnb
return
}
goto readLiteral
case v == 256:
+ f.b, f.nb = fb, fnb
f.finishBlock()
return
// otherwise, reference to older data
@@ -589,9 +604,10 @@ readLiteral:
val := decCodeToLen[(v - 257)]
length = int(val.length) + 3
n := uint(val.extra)
- for f.nb < n {
+ for fnb < n {
c, err := fr.ReadByte()
if err != nil {
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("morebits n>0:", err)
}
@@ -599,25 +615,27 @@ readLiteral:
return
}
f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1))
- f.b >>= n & regSizeMaskUint32
- f.nb -= n
+ length += int(fb & bitMask32[n])
+ fb >>= n & regSizeMaskUint32
+ fnb -= n
default:
if debugDecode {
fmt.Println(v, ">= maxNumLit")
}
f.err = CorruptInputError(f.roffset)
+ f.b, f.nb = fb, fnb
return
}
var dist uint32
if f.hd == nil {
- for f.nb < 5 {
+ for fnb < 5 {
c, err := fr.ReadByte()
if err != nil {
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("morebits f.nb<5:", err)
}
@@ -625,12 +643,12 @@ readLiteral:
return
}
f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3)))
- f.b >>= 5
- f.nb -= 5
+ dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
+ fb >>= 5
+ fnb -= 5
} else {
// Since a huffmanDecoder can be empty or be composed of a degenerate tree
// with single element, huffSym must error on these two edge cases. In both
@@ -640,38 +658,35 @@ readLiteral:
// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
// but is smart enough to keep local variables in registers, so use nb and b,
// inline call to moreBits and reassign b,nb back to f on return.
- nb, b := f.nb, f.b
for {
- for nb < n {
+ for fnb < n {
c, err := fr.ReadByte()
if err != nil {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
f.err = noEOF(err)
return
}
f.roffset++
- b |= uint32(c) << (nb & regSizeMaskUint32)
- nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- chunk := f.hd.chunks[b&(huffmanNumChunks-1)]
+ chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
n = uint(chunk & huffmanCountMask)
if n > huffmanChunkBits {
- chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask]
+ chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
n = uint(chunk & huffmanCountMask)
}
- if n <= nb {
+ if n <= fnb {
if n == 0 {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("huffsym: n==0")
}
f.err = CorruptInputError(f.roffset)
return
}
- f.b = b >> (n & regSizeMaskUint32)
- f.nb = nb - n
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
dist = uint32(chunk >> huffmanValueShift)
break
}
@@ -685,9 +700,10 @@ readLiteral:
nb := uint(dist-2) >> 1
// have 1 bit in bottom of dist, need nb more.
extra := (dist & 1) << (nb & regSizeMaskUint32)
- for f.nb < nb {
+ for fnb < nb {
c, err := fr.ReadByte()
if err != nil {
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("morebits f.nb<nb:", err)
}
@@ -695,14 +711,16 @@ readLiteral:
return
}
f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1)
- f.b >>= nb & regSizeMaskUint32
- f.nb -= nb
+ extra |= fb & bitMask32[nb]
+ fb >>= nb & regSizeMaskUint32
+ fnb -= nb
dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+ // slower: dist = bitMask32[nb+1] + 2 + extra
default:
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("dist too big:", dist, maxNumDist)
}
@@ -711,9 +729,10 @@ readLiteral:
}
// No check on length; encoding can be prescient.
- if dist > uint32(f.dict.histSize()) {
+ if dist > uint32(dict.histSize()) {
+ f.b, f.nb = fb, fnb
if debugDecode {
- fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
+ fmt.Println("dist > dict.histSize():", dist, dict.histSize())
}
f.err = CorruptInputError(f.roffset)
return
@@ -726,20 +745,22 @@ readLiteral:
copyHistory:
// Perform a backwards copy according to RFC section 3.2.3.
{
- cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen)
+ cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
if cnt == 0 {
- cnt = f.dict.writeCopy(f.copyDist, f.copyLen)
+ cnt = dict.writeCopy(f.copyDist, f.copyLen)
}
f.copyLen -= cnt
- if f.dict.availWrite() == 0 || f.copyLen > 0 {
- f.toRead = f.dict.readFlush()
+ if dict.availWrite() == 0 || f.copyLen > 0 {
+ f.toRead = dict.readFlush()
f.step = (*decompressor).huffmanBufioReader // We need to continue this work
f.stepState = stateDict
+ f.b, f.nb = fb, fnb
return
}
goto readLiteral
}
+ // Not reached
}
// Decode a single Huffman block from f.
@@ -753,6 +774,11 @@ func (f *decompressor) huffmanStringsReader() {
)
fr := f.r.(*strings.Reader)
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ fnb, fb, dict := f.nb, f.b, &f.dict
+
switch f.stepState {
case stateInit:
goto readLiteral
@@ -771,41 +797,286 @@ readLiteral:
// cases, the chunks slice will be 0 for the invalid sequence, leading it
// satisfy the n == 0 check below.
n := uint(f.hl.maxRead)
+ for {
+ for fnb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ f.err = noEOF(err)
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= fnb {
+ if n == 0 {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
+ v = int(chunk >> huffmanValueShift)
+ break
+ }
+ }
+ }
+
+ var length int
+ switch {
+ case v < 256:
+ dict.writeByte(byte(v))
+ if dict.availWrite() == 0 {
+ f.toRead = dict.readFlush()
+ f.step = (*decompressor).huffmanStringsReader
+ f.stepState = stateInit
+ f.b, f.nb = fb, fnb
+ return
+ }
+ goto readLiteral
+ case v == 256:
+ f.b, f.nb = fb, fnb
+ f.finishBlock()
+ return
+ // otherwise, reference to older data
+ case v < 265:
+ length = v - (257 - 3)
+ case v < maxNumLit:
+ val := decCodeToLen[(v - 257)]
+ length = int(val.length) + 3
+ n := uint(val.extra)
+ for fnb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("morebits n>0:", err)
+ }
+ f.err = err
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ length += int(fb & bitMask32[n])
+ fb >>= n & regSizeMaskUint32
+ fnb -= n
+ default:
+ if debugDecode {
+ fmt.Println(v, ">= maxNumLit")
+ }
+ f.err = CorruptInputError(f.roffset)
+ f.b, f.nb = fb, fnb
+ return
+ }
+
+ var dist uint32
+ if f.hd == nil {
+ for fnb < 5 {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("morebits f.nb<5:", err)
+ }
+ f.err = err
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
+ fb >>= 5
+ fnb -= 5
+ } else {
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+ // with single element, huffSym must error on these two edge cases. In both
+ // cases, the chunks slice will be 0 for the invalid sequence, leading it
+ // satisfy the n == 0 check below.
+ n := uint(f.hd.maxRead)
// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
// but is smart enough to keep local variables in registers, so use nb and b,
// inline call to moreBits and reassign b,nb back to f on return.
- nb, b := f.nb, f.b
for {
- for nb < n {
+ for fnb < n {
c, err := fr.ReadByte()
if err != nil {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
f.err = noEOF(err)
return
}
f.roffset++
- b |= uint32(c) << (nb & regSizeMaskUint32)
- nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- chunk := f.hl.chunks[b&(huffmanNumChunks-1)]
+ chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
n = uint(chunk & huffmanCountMask)
if n > huffmanChunkBits {
- chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask]
+ chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
n = uint(chunk & huffmanCountMask)
}
- if n <= nb {
+ if n <= fnb {
if n == 0 {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("huffsym: n==0")
}
f.err = CorruptInputError(f.roffset)
return
}
- f.b = b >> (n & regSizeMaskUint32)
- f.nb = nb - n
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
+ dist = uint32(chunk >> huffmanValueShift)
+ break
+ }
+ }
+ }
+
+ switch {
+ case dist < 4:
+ dist++
+ case dist < maxNumDist:
+ nb := uint(dist-2) >> 1
+ // have 1 bit in bottom of dist, need nb more.
+ extra := (dist & 1) << (nb & regSizeMaskUint32)
+ for fnb < nb {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("morebits f.nb<nb:", err)
+ }
+ f.err = err
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ extra |= fb & bitMask32[nb]
+ fb >>= nb & regSizeMaskUint32
+ fnb -= nb
+ dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+ // slower: dist = bitMask32[nb+1] + 2 + extra
+ default:
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("dist too big:", dist, maxNumDist)
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ // No check on length; encoding can be prescient.
+ if dist > uint32(dict.histSize()) {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("dist > dict.histSize():", dist, dict.histSize())
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ f.copyLen, f.copyDist = length, int(dist)
+ goto copyHistory
+ }
+
+copyHistory:
+ // Perform a backwards copy according to RFC section 3.2.3.
+ {
+ cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
+ if cnt == 0 {
+ cnt = dict.writeCopy(f.copyDist, f.copyLen)
+ }
+ f.copyLen -= cnt
+
+ if dict.availWrite() == 0 || f.copyLen > 0 {
+ f.toRead = dict.readFlush()
+ f.step = (*decompressor).huffmanStringsReader // We need to continue this work
+ f.stepState = stateDict
+ f.b, f.nb = fb, fnb
+ return
+ }
+ goto readLiteral
+ }
+ // Not reached
+}
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, using the
+// fixed distance encoding associated with fixed Huffman blocks.
+func (f *decompressor) huffmanGenericReader() {
+ const (
+ stateInit = iota // Zero value must be stateInit
+ stateDict
+ )
+ fr := f.r.(Reader)
+
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ fnb, fb, dict := f.nb, f.b, &f.dict
+
+ switch f.stepState {
+ case stateInit:
+ goto readLiteral
+ case stateDict:
+ goto copyHistory
+ }
+
+readLiteral:
+ // Read literal and/or (length, distance) according to RFC section 3.2.3.
+ {
+ var v int
+ {
+ // Inlined v, err := f.huffSym(f.hl)
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+ // with single element, huffSym must error on these two edge cases. In both
+ // cases, the chunks slice will be 0 for the invalid sequence, leading it
+ // satisfy the n == 0 check below.
+ n := uint(f.hl.maxRead)
+ for {
+ for fnb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ f.err = noEOF(err)
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= fnb {
+ if n == 0 {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
v = int(chunk >> huffmanValueShift)
break
}
@@ -815,15 +1086,17 @@ readLiteral:
var length int
switch {
case v < 256:
- f.dict.writeByte(byte(v))
- if f.dict.availWrite() == 0 {
- f.toRead = f.dict.readFlush()
- f.step = (*decompressor).huffmanStringsReader
+ dict.writeByte(byte(v))
+ if dict.availWrite() == 0 {
+ f.toRead = dict.readFlush()
+ f.step = (*decompressor).huffmanGenericReader
f.stepState = stateInit
+ f.b, f.nb = fb, fnb
return
}
goto readLiteral
case v == 256:
+ f.b, f.nb = fb, fnb
f.finishBlock()
return
// otherwise, reference to older data
@@ -833,9 +1106,10 @@ readLiteral:
val := decCodeToLen[(v - 257)]
length = int(val.length) + 3
n := uint(val.extra)
- for f.nb < n {
+ for fnb < n {
c, err := fr.ReadByte()
if err != nil {
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("morebits n>0:", err)
}
@@ -843,25 +1117,27 @@ readLiteral:
return
}
f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1))
- f.b >>= n & regSizeMaskUint32
- f.nb -= n
+ length += int(fb & bitMask32[n])
+ fb >>= n & regSizeMaskUint32
+ fnb -= n
default:
if debugDecode {
fmt.Println(v, ">= maxNumLit")
}
f.err = CorruptInputError(f.roffset)
+ f.b, f.nb = fb, fnb
return
}
var dist uint32
if f.hd == nil {
- for f.nb < 5 {
+ for fnb < 5 {
c, err := fr.ReadByte()
if err != nil {
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("morebits f.nb<5:", err)
}
@@ -869,12 +1145,12 @@ readLiteral:
return
}
f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3)))
- f.b >>= 5
- f.nb -= 5
+ dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
+ fb >>= 5
+ fnb -= 5
} else {
// Since a huffmanDecoder can be empty or be composed of a degenerate tree
// with single element, huffSym must error on these two edge cases. In both
@@ -884,38 +1160,35 @@ readLiteral:
// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
// but is smart enough to keep local variables in registers, so use nb and b,
// inline call to moreBits and reassign b,nb back to f on return.
- nb, b := f.nb, f.b
for {
- for nb < n {
+ for fnb < n {
c, err := fr.ReadByte()
if err != nil {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
f.err = noEOF(err)
return
}
f.roffset++
- b |= uint32(c) << (nb & regSizeMaskUint32)
- nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- chunk := f.hd.chunks[b&(huffmanNumChunks-1)]
+ chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
n = uint(chunk & huffmanCountMask)
if n > huffmanChunkBits {
- chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask]
+ chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
n = uint(chunk & huffmanCountMask)
}
- if n <= nb {
+ if n <= fnb {
if n == 0 {
- f.b = b
- f.nb = nb
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("huffsym: n==0")
}
f.err = CorruptInputError(f.roffset)
return
}
- f.b = b >> (n & regSizeMaskUint32)
- f.nb = nb - n
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
dist = uint32(chunk >> huffmanValueShift)
break
}
@@ -929,9 +1202,10 @@ readLiteral:
nb := uint(dist-2) >> 1
// have 1 bit in bottom of dist, need nb more.
extra := (dist & 1) << (nb & regSizeMaskUint32)
- for f.nb < nb {
+ for fnb < nb {
c, err := fr.ReadByte()
if err != nil {
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("morebits f.nb<nb:", err)
}
@@ -939,14 +1213,16 @@ readLiteral:
return
}
f.roffset++
- f.b |= uint32(c) << f.nb
- f.nb += 8
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
}
- extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1)
- f.b >>= nb & regSizeMaskUint32
- f.nb -= nb
+ extra |= fb & bitMask32[nb]
+ fb >>= nb & regSizeMaskUint32
+ fnb -= nb
dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+ // slower: dist = bitMask32[nb+1] + 2 + extra
default:
+ f.b, f.nb = fb, fnb
if debugDecode {
fmt.Println("dist too big:", dist, maxNumDist)
}
@@ -955,9 +1231,10 @@ readLiteral:
}
// No check on length; encoding can be prescient.
- if dist > uint32(f.dict.histSize()) {
+ if dist > uint32(dict.histSize()) {
+ f.b, f.nb = fb, fnb
if debugDecode {
- fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
+ fmt.Println("dist > dict.histSize():", dist, dict.histSize())
}
f.err = CorruptInputError(f.roffset)
return
@@ -970,20 +1247,22 @@ readLiteral:
copyHistory:
// Perform a backwards copy according to RFC section 3.2.3.
{
- cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen)
+ cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
if cnt == 0 {
- cnt = f.dict.writeCopy(f.copyDist, f.copyLen)
+ cnt = dict.writeCopy(f.copyDist, f.copyLen)
}
f.copyLen -= cnt
- if f.dict.availWrite() == 0 || f.copyLen > 0 {
- f.toRead = f.dict.readFlush()
- f.step = (*decompressor).huffmanStringsReader // We need to continue this work
+ if dict.availWrite() == 0 || f.copyLen > 0 {
+ f.toRead = dict.readFlush()
+ f.step = (*decompressor).huffmanGenericReader // We need to continue this work
f.stepState = stateDict
+ f.b, f.nb = fb, fnb
return
}
goto readLiteral
}
+ // Not reached
}
func (f *decompressor) huffmanBlockDecoder() func() {
@@ -996,7 +1275,9 @@ func (f *decompressor) huffmanBlockDecoder() func() {
return f.huffmanBufioReader
case *strings.Reader:
return f.huffmanStringsReader
+ case Reader:
+ return f.huffmanGenericReader
default:
- return f.huffmanBlockGeneric
+ return f.huffmanGenericReader
}
}
diff --git a/vendor/github.com/klauspost/compress/flate/level1.go b/vendor/github.com/klauspost/compress/flate/level1.go
index 1e5eea3968aa6..703b9a89aa394 100644
--- a/vendor/github.com/klauspost/compress/flate/level1.go
+++ b/vendor/github.com/klauspost/compress/flate/level1.go
@@ -1,6 +1,10 @@
package flate
-import "fmt"
+import (
+ "encoding/binary"
+ "fmt"
+ "math/bits"
+)
// fastGen maintains the table for matches,
// and the previous byte block for level 2.
@@ -15,6 +19,7 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {
const (
inputMargin = 12 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin
+ hashBytes = 5
)
if debugDeflate && e.cur < 0 {
panic(fmt.Sprint("e.cur < 0: ", e.cur))
@@ -64,7 +69,7 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {
sLimit := int32(len(src) - inputMargin)
// nextEmit is where in src the next emitLiteral should start from.
- cv := load3232(src, s)
+ cv := load6432(src, s)
for {
const skipLog = 5
@@ -73,7 +78,7 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {
nextS := s
var candidate tableEntry
for {
- nextHash := hash(cv)
+ nextHash := hashLen(cv, tableBits, hashBytes)
candidate = e.table[nextHash]
nextS = s + doEvery + (s-nextEmit)>>skipLog
if nextS > sLimit {
@@ -82,16 +87,16 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {
now := load6432(src, nextS)
e.table[nextHash] = tableEntry{offset: s + e.cur}
- nextHash = hash(uint32(now))
+ nextHash = hashLen(now, tableBits, hashBytes)
offset := s - (candidate.offset - e.cur)
- if offset < maxMatchOffset && cv == load3232(src, candidate.offset-e.cur) {
+ if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
e.table[nextHash] = tableEntry{offset: nextS + e.cur}
break
}
// Do one right away...
- cv = uint32(now)
+ cv = now
s = nextS
nextS++
candidate = e.table[nextHash]
@@ -99,11 +104,11 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {
e.table[nextHash] = tableEntry{offset: s + e.cur}
offset = s - (candidate.offset - e.cur)
- if offset < maxMatchOffset && cv == load3232(src, candidate.offset-e.cur) {
+ if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
e.table[nextHash] = tableEntry{offset: nextS + e.cur}
break
}
- cv = uint32(now)
+ cv = now
s = nextS
}
@@ -116,7 +121,32 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {
// Extend the 4-byte match as long as possible.
t := candidate.offset - e.cur
- l := e.matchlenLong(s+4, t+4, src) + 4
+ var l = int32(4)
+ if false {
+ l = e.matchlenLong(s+4, t+4, src) + 4
+ } else {
+ // inlined:
+ a := src[s+4:]
+ b := src[t+4:]
+ for len(a) >= 8 {
+ if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
+ l += int32(bits.TrailingZeros64(diff) >> 3)
+ break
+ }
+ l += 8
+ a = a[8:]
+ b = b[8:]
+ }
+ if len(a) < 8 {
+ b = b[:len(a)]
+ for i := range a {
+ if a[i] != b[i] {
+ break
+ }
+ l++
+ }
+ }
+ }
// Extend backwards
for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
@@ -125,11 +155,43 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {
l++
}
if nextEmit < s {
- emitLiteral(dst, src[nextEmit:s])
+ if false {
+ emitLiteral(dst, src[nextEmit:s])
+ } else {
+ for _, v := range src[nextEmit:s] {
+ dst.tokens[dst.n] = token(v)
+ dst.litHist[v]++
+ dst.n++
+ }
+ }
}
// Save the match found
- dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+ if false {
+ dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+ } else {
+ // Inlined...
+ xoffset := uint32(s - t - baseMatchOffset)
+ xlength := l
+ oc := offsetCode(xoffset)
+ xoffset |= oc << 16
+ for xlength > 0 {
+ xl := xlength
+ if xl > 258 {
+ if xl > 258+baseMatchLength {
+ xl = 258
+ } else {
+ xl = 258 - baseMatchLength
+ }
+ }
+ xlength -= xl
+ xl -= baseMatchLength
+ dst.extraHist[lengthCodes1[uint8(xl)]]++
+ dst.offHist[oc]++
+ dst.tokens[dst.n] = token(matchType | uint32(xl)<<lengthShift | xoffset)
+ dst.n++
+ }
+ }
s += l
nextEmit = s
if nextS >= s {
@@ -137,9 +199,9 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {
}
if s >= sLimit {
// Index first pair after match end.
- if int(s+l+4) < len(src) {
- cv := load3232(src, s)
- e.table[hash(cv)] = tableEntry{offset: s + e.cur}
+ if int(s+l+8) < len(src) {
+ cv := load6432(src, s)
+ e.table[hashLen(cv, tableBits, hashBytes)] = tableEntry{offset: s + e.cur}
}
goto emitRemainder
}
@@ -152,16 +214,16 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {
// three load32 calls.
x := load6432(src, s-2)
o := e.cur + s - 2
- prevHash := hash(uint32(x))
+ prevHash := hashLen(x, tableBits, hashBytes)
e.table[prevHash] = tableEntry{offset: o}
x >>= 16
- currHash := hash(uint32(x))
+ currHash := hashLen(x, tableBits, hashBytes)
candidate = e.table[currHash]
e.table[currHash] = tableEntry{offset: o + 2}
offset := s - (candidate.offset - e.cur)
if offset > maxMatchOffset || uint32(x) != load3232(src, candidate.offset-e.cur) {
- cv = uint32(x >> 8)
+ cv = x >> 8
s++
break
}
diff --git a/vendor/github.com/klauspost/compress/flate/level2.go b/vendor/github.com/klauspost/compress/flate/level2.go
index 234c4389ab39f..876dfbe30543f 100644
--- a/vendor/github.com/klauspost/compress/flate/level2.go
+++ b/vendor/github.com/klauspost/compress/flate/level2.go
@@ -16,6 +16,7 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) {
const (
inputMargin = 12 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin
+ hashBytes = 5
)
if debugDeflate && e.cur < 0 {
@@ -66,7 +67,7 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) {
sLimit := int32(len(src) - inputMargin)
// nextEmit is where in src the next emitLiteral should start from.
- cv := load3232(src, s)
+ cv := load6432(src, s)
for {
// When should we start skipping if we haven't found matches in a long while.
const skipLog = 5
@@ -75,7 +76,7 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) {
nextS := s
var candidate tableEntry
for {
- nextHash := hash4u(cv, bTableBits)
+ nextHash := hashLen(cv, bTableBits, hashBytes)
s = nextS
nextS = s + doEvery + (s-nextEmit)>>skipLog
if nextS > sLimit {
@@ -84,16 +85,16 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) {
candidate = e.table[nextHash]
now := load6432(src, nextS)
e.table[nextHash] = tableEntry{offset: s + e.cur}
- nextHash = hash4u(uint32(now), bTableBits)
+ nextHash = hashLen(now, bTableBits, hashBytes)
offset := s - (candidate.offset - e.cur)
- if offset < maxMatchOffset && cv == load3232(src, candidate.offset-e.cur) {
+ if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
e.table[nextHash] = tableEntry{offset: nextS + e.cur}
break
}
// Do one right away...
- cv = uint32(now)
+ cv = now
s = nextS
nextS++
candidate = e.table[nextHash]
@@ -101,10 +102,10 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) {
e.table[nextHash] = tableEntry{offset: s + e.cur}
offset = s - (candidate.offset - e.cur)
- if offset < maxMatchOffset && cv == load3232(src, candidate.offset-e.cur) {
+ if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
break
}
- cv = uint32(now)
+ cv = now
}
// A 4-byte match has been found. We'll later see if more than 4 bytes
@@ -134,7 +135,15 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) {
l++
}
if nextEmit < s {
- emitLiteral(dst, src[nextEmit:s])
+ if false {
+ emitLiteral(dst, src[nextEmit:s])
+ } else {
+ for _, v := range src[nextEmit:s] {
+ dst.tokens[dst.n] = token(v)
+ dst.litHist[v]++
+ dst.n++
+ }
+ }
}
dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
@@ -146,9 +155,9 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) {
if s >= sLimit {
// Index first pair after match end.
- if int(s+l+4) < len(src) {
- cv := load3232(src, s)
- e.table[hash4u(cv, bTableBits)] = tableEntry{offset: s + e.cur}
+ if int(s+l+8) < len(src) {
+ cv := load6432(src, s)
+ e.table[hashLen(cv, bTableBits, hashBytes)] = tableEntry{offset: s + e.cur}
}
goto emitRemainder
}
@@ -156,15 +165,15 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) {
// Store every second hash in-between, but offset by 1.
for i := s - l + 2; i < s-5; i += 7 {
x := load6432(src, i)
- nextHash := hash4u(uint32(x), bTableBits)
+ nextHash := hashLen(x, bTableBits, hashBytes)
e.table[nextHash] = tableEntry{offset: e.cur + i}
// Skip one
x >>= 16
- nextHash = hash4u(uint32(x), bTableBits)
+ nextHash = hashLen(x, bTableBits, hashBytes)
e.table[nextHash] = tableEntry{offset: e.cur + i + 2}
// Skip one
x >>= 16
- nextHash = hash4u(uint32(x), bTableBits)
+ nextHash = hashLen(x, bTableBits, hashBytes)
e.table[nextHash] = tableEntry{offset: e.cur + i + 4}
}
@@ -176,17 +185,17 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) {
// three load32 calls.
x := load6432(src, s-2)
o := e.cur + s - 2
- prevHash := hash4u(uint32(x), bTableBits)
- prevHash2 := hash4u(uint32(x>>8), bTableBits)
+ prevHash := hashLen(x, bTableBits, hashBytes)
+ prevHash2 := hashLen(x>>8, bTableBits, hashBytes)
e.table[prevHash] = tableEntry{offset: o}
e.table[prevHash2] = tableEntry{offset: o + 1}
- currHash := hash4u(uint32(x>>16), bTableBits)
+ currHash := hashLen(x>>16, bTableBits, hashBytes)
candidate = e.table[currHash]
e.table[currHash] = tableEntry{offset: o + 2}
offset := s - (candidate.offset - e.cur)
if offset > maxMatchOffset || uint32(x>>16) != load3232(src, candidate.offset-e.cur) {
- cv = uint32(x >> 24)
+ cv = x >> 24
s++
break
}
diff --git a/vendor/github.com/klauspost/compress/flate/level3.go b/vendor/github.com/klauspost/compress/flate/level3.go
index c22b4244a5c04..7aa2b72a129f4 100644
--- a/vendor/github.com/klauspost/compress/flate/level3.go
+++ b/vendor/github.com/klauspost/compress/flate/level3.go
@@ -5,14 +5,17 @@ import "fmt"
// fastEncL3
type fastEncL3 struct {
fastGen
- table [tableSize]tableEntryPrev
+ table [1 << 16]tableEntryPrev
}
// Encode uses a similar algorithm to level 2, will check up to two candidates.
func (e *fastEncL3) Encode(dst *tokens, src []byte) {
const (
- inputMargin = 8 - 1
+ inputMargin = 12 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin
+ tableBits = 16
+ tableSize = 1 << tableBits
+ hashBytes = 5
)
if debugDeflate && e.cur < 0 {
@@ -67,20 +70,20 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) {
sLimit := int32(len(src) - inputMargin)
// nextEmit is where in src the next emitLiteral should start from.
- cv := load3232(src, s)
+ cv := load6432(src, s)
for {
- const skipLog = 6
+ const skipLog = 7
nextS := s
var candidate tableEntry
for {
- nextHash := hash(cv)
+ nextHash := hashLen(cv, tableBits, hashBytes)
s = nextS
nextS = s + 1 + (s-nextEmit)>>skipLog
if nextS > sLimit {
goto emitRemainder
}
candidates := e.table[nextHash]
- now := load3232(src, nextS)
+ now := load6432(src, nextS)
// Safe offset distance until s + 4...
minOffset := e.cur + s - (maxMatchOffset - 4)
@@ -94,8 +97,8 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) {
continue
}
- if cv == load3232(src, candidate.offset-e.cur) {
- if candidates.Prev.offset < minOffset || cv != load3232(src, candidates.Prev.offset-e.cur) {
+ if uint32(cv) == load3232(src, candidate.offset-e.cur) {
+ if candidates.Prev.offset < minOffset || uint32(cv) != load3232(src, candidates.Prev.offset-e.cur) {
break
}
// Both match and are valid, pick longest.
@@ -110,7 +113,7 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) {
// We only check if value mismatches.
// Offset will always be invalid in other cases.
candidate = candidates.Prev
- if candidate.offset > minOffset && cv == load3232(src, candidate.offset-e.cur) {
+ if candidate.offset > minOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
break
}
}
@@ -141,7 +144,15 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) {
l++
}
if nextEmit < s {
- emitLiteral(dst, src[nextEmit:s])
+ if false {
+ emitLiteral(dst, src[nextEmit:s])
+ } else {
+ for _, v := range src[nextEmit:s] {
+ dst.tokens[dst.n] = token(v)
+ dst.litHist[v]++
+ dst.n++
+ }
+ }
}
dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
@@ -154,9 +165,9 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) {
if s >= sLimit {
t += l
// Index first pair after match end.
- if int(t+4) < len(src) && t > 0 {
- cv := load3232(src, t)
- nextHash := hash(cv)
+ if int(t+8) < len(src) && t > 0 {
+ cv = load6432(src, t)
+ nextHash := hashLen(cv, tableBits, hashBytes)
e.table[nextHash] = tableEntryPrev{
Prev: e.table[nextHash].Cur,
Cur: tableEntry{offset: e.cur + t},
@@ -165,32 +176,33 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) {
goto emitRemainder
}
- // We could immediately start working at s now, but to improve
- // compression we first update the hash table at s-3 to s.
- x := load6432(src, s-3)
- prevHash := hash(uint32(x))
- e.table[prevHash] = tableEntryPrev{
- Prev: e.table[prevHash].Cur,
- Cur: tableEntry{offset: e.cur + s - 3},
+ // Store every 5th hash in-between.
+ for i := s - l + 2; i < s-5; i += 6 {
+ nextHash := hashLen(load6432(src, i), tableBits, hashBytes)
+ e.table[nextHash] = tableEntryPrev{
+ Prev: e.table[nextHash].Cur,
+ Cur: tableEntry{offset: e.cur + i}}
}
- x >>= 8
- prevHash = hash(uint32(x))
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-2 to s.
+ x := load6432(src, s-2)
+ prevHash := hashLen(x, tableBits, hashBytes)
e.table[prevHash] = tableEntryPrev{
Prev: e.table[prevHash].Cur,
Cur: tableEntry{offset: e.cur + s - 2},
}
x >>= 8
- prevHash = hash(uint32(x))
+ prevHash = hashLen(x, tableBits, hashBytes)
e.table[prevHash] = tableEntryPrev{
Prev: e.table[prevHash].Cur,
Cur: tableEntry{offset: e.cur + s - 1},
}
x >>= 8
- currHash := hash(uint32(x))
+ currHash := hashLen(x, tableBits, hashBytes)
candidates := e.table[currHash]
- cv = uint32(x)
+ cv = x
e.table[currHash] = tableEntryPrev{
Prev: candidates.Cur,
Cur: tableEntry{offset: s + e.cur},
@@ -200,18 +212,18 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) {
candidate = candidates.Cur
minOffset := e.cur + s - (maxMatchOffset - 4)
- if candidate.offset > minOffset && cv != load3232(src, candidate.offset-e.cur) {
- // We only check if value mismatches.
- // Offset will always be invalid in other cases.
+ if candidate.offset > minOffset {
+ if uint32(cv) == load3232(src, candidate.offset-e.cur) {
+ // Found a match...
+ continue
+ }
candidate = candidates.Prev
- if candidate.offset > minOffset && cv == load3232(src, candidate.offset-e.cur) {
- offset := s - (candidate.offset - e.cur)
- if offset <= maxMatchOffset {
- continue
- }
+ if candidate.offset > minOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
+ // Match at prev...
+ continue
}
}
- cv = uint32(x >> 8)
+ cv = x >> 8
s++
break
}
diff --git a/vendor/github.com/klauspost/compress/flate/level4.go b/vendor/github.com/klauspost/compress/flate/level4.go
index e62f0c02b1e76..23c08b325cf7b 100644
--- a/vendor/github.com/klauspost/compress/flate/level4.go
+++ b/vendor/github.com/klauspost/compress/flate/level4.go
@@ -12,6 +12,7 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) {
const (
inputMargin = 12 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin
+ hashShortBytes = 4
)
if debugDeflate && e.cur < 0 {
panic(fmt.Sprint("e.cur < 0: ", e.cur))
@@ -80,7 +81,7 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) {
nextS := s
var t int32
for {
- nextHashS := hash4x64(cv, tableBits)
+ nextHashS := hashLen(cv, tableBits, hashShortBytes)
nextHashL := hash7(cv, tableBits)
s = nextS
@@ -135,7 +136,15 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) {
l++
}
if nextEmit < s {
- emitLiteral(dst, src[nextEmit:s])
+ if false {
+ emitLiteral(dst, src[nextEmit:s])
+ } else {
+ for _, v := range src[nextEmit:s] {
+ dst.tokens[dst.n] = token(v)
+ dst.litHist[v]++
+ dst.n++
+ }
+ }
}
if debugDeflate {
if t >= s {
@@ -160,7 +169,7 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) {
// Index first pair after match end.
if int(s+8) < len(src) {
cv := load6432(src, s)
- e.table[hash4x64(cv, tableBits)] = tableEntry{offset: s + e.cur}
+ e.table[hashLen(cv, tableBits, hashShortBytes)] = tableEntry{offset: s + e.cur}
e.bTable[hash7(cv, tableBits)] = tableEntry{offset: s + e.cur}
}
goto emitRemainder
@@ -175,7 +184,7 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) {
t2 := tableEntry{offset: t.offset + 1}
e.bTable[hash7(cv, tableBits)] = t
e.bTable[hash7(cv>>8, tableBits)] = t2
- e.table[hash4u(uint32(cv>>8), tableBits)] = t2
+ e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2
i += 3
for ; i < s-1; i += 3 {
@@ -184,7 +193,7 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) {
t2 := tableEntry{offset: t.offset + 1}
e.bTable[hash7(cv, tableBits)] = t
e.bTable[hash7(cv>>8, tableBits)] = t2
- e.table[hash4u(uint32(cv>>8), tableBits)] = t2
+ e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2
}
}
}
@@ -193,7 +202,7 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) {
// compression we first update the hash table at s-1 and at s.
x := load6432(src, s-1)
o := e.cur + s - 1
- prevHashS := hash4x64(x, tableBits)
+ prevHashS := hashLen(x, tableBits, hashShortBytes)
prevHashL := hash7(x, tableBits)
e.table[prevHashS] = tableEntry{offset: o}
e.bTable[prevHashL] = tableEntry{offset: o}
diff --git a/vendor/github.com/klauspost/compress/flate/level5.go b/vendor/github.com/klauspost/compress/flate/level5.go
index 293a3a320b7cc..83ef50ba45f09 100644
--- a/vendor/github.com/klauspost/compress/flate/level5.go
+++ b/vendor/github.com/klauspost/compress/flate/level5.go
@@ -12,6 +12,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) {
const (
inputMargin = 12 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin
+ hashShortBytes = 4
)
if debugDeflate && e.cur < 0 {
panic(fmt.Sprint("e.cur < 0: ", e.cur))
@@ -88,7 +89,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) {
var l int32
var t int32
for {
- nextHashS := hash4x64(cv, tableBits)
+ nextHashS := hashLen(cv, tableBits, hashShortBytes)
nextHashL := hash7(cv, tableBits)
s = nextS
@@ -105,7 +106,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) {
eLong := &e.bTable[nextHashL]
eLong.Cur, eLong.Prev = entry, eLong.Cur
- nextHashS = hash4x64(next, tableBits)
+ nextHashS = hashLen(next, tableBits, hashShortBytes)
nextHashL = hash7(next, tableBits)
t = lCandidate.Cur.offset - e.cur
@@ -191,14 +192,21 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) {
// Try to locate a better match by checking the end of best match...
if sAt := s + l; l < 30 && sAt < sLimit {
+ // Allow some bytes at the beginning to mismatch.
+ // Sweet spot is 2/3 bytes depending on input.
+ // 3 is only a little better when it is but sometimes a lot worse.
+ // The skipped bytes are tested in Extend backwards,
+ // and still picked up as part of the match if they do.
+ const skipBeginning = 2
eLong := e.bTable[hash7(load6432(src, sAt), tableBits)].Cur.offset
- // Test current
- t2 := eLong - e.cur - l
- off := s - t2
+ t2 := eLong - e.cur - l + skipBeginning
+ s2 := s + skipBeginning
+ off := s2 - t2
if t2 >= 0 && off < maxMatchOffset && off > 0 {
- if l2 := e.matchlenLong(s, t2, src); l2 > l {
+ if l2 := e.matchlenLong(s2, t2, src); l2 > l {
t = t2
l = l2
+ s = s2
}
}
}
@@ -210,7 +218,15 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) {
l++
}
if nextEmit < s {
- emitLiteral(dst, src[nextEmit:s])
+ if false {
+ emitLiteral(dst, src[nextEmit:s])
+ } else {
+ for _, v := range src[nextEmit:s] {
+ dst.tokens[dst.n] = token(v)
+ dst.litHist[v]++
+ dst.n++
+ }
+ }
}
if debugDeflate {
if t >= s {
@@ -242,7 +258,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) {
if i < s-1 {
cv := load6432(src, i)
t := tableEntry{offset: i + e.cur}
- e.table[hash4x64(cv, tableBits)] = t
+ e.table[hashLen(cv, tableBits, hashShortBytes)] = t
eLong := &e.bTable[hash7(cv, tableBits)]
eLong.Cur, eLong.Prev = t, eLong.Cur
@@ -255,7 +271,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) {
// We only have enough bits for a short entry at i+2
cv >>= 8
t = tableEntry{offset: t.offset + 1}
- e.table[hash4x64(cv, tableBits)] = t
+ e.table[hashLen(cv, tableBits, hashShortBytes)] = t
// Skip one - otherwise we risk hitting 's'
i += 4
@@ -265,7 +281,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) {
t2 := tableEntry{offset: t.offset + 1}
eLong := &e.bTable[hash7(cv, tableBits)]
eLong.Cur, eLong.Prev = t, eLong.Cur
- e.table[hash4u(uint32(cv>>8), tableBits)] = t2
+ e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2
}
}
}
@@ -274,7 +290,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) {
// compression we first update the hash table at s-1 and at s.
x := load6432(src, s-1)
o := e.cur + s - 1
- prevHashS := hash4x64(x, tableBits)
+ prevHashS := hashLen(x, tableBits, hashShortBytes)
prevHashL := hash7(x, tableBits)
e.table[prevHashS] = tableEntry{offset: o}
eLong := &e.bTable[prevHashL]
diff --git a/vendor/github.com/klauspost/compress/flate/level6.go b/vendor/github.com/klauspost/compress/flate/level6.go
index a709977ec49ae..f1e9d98fa5059 100644
--- a/vendor/github.com/klauspost/compress/flate/level6.go
+++ b/vendor/github.com/klauspost/compress/flate/level6.go
@@ -12,6 +12,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) {
const (
inputMargin = 12 - 1
minNonLiteralBlockSize = 1 + 1 + inputMargin
+ hashShortBytes = 4
)
if debugDeflate && e.cur < 0 {
panic(fmt.Sprint("e.cur < 0: ", e.cur))
@@ -90,7 +91,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) {
var l int32
var t int32
for {
- nextHashS := hash4x64(cv, tableBits)
+ nextHashS := hashLen(cv, tableBits, hashShortBytes)
nextHashL := hash7(cv, tableBits)
s = nextS
nextS = s + doEvery + (s-nextEmit)>>skipLog
@@ -107,7 +108,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) {
eLong.Cur, eLong.Prev = entry, eLong.Cur
// Calculate hashes of 'next'
- nextHashS = hash4x64(next, tableBits)
+ nextHashS = hashLen(next, tableBits, hashShortBytes)
nextHashL = hash7(next, tableBits)
t = lCandidate.Cur.offset - e.cur
@@ -213,24 +214,33 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) {
// Try to locate a better match by checking the end-of-match...
if sAt := s + l; sAt < sLimit {
+ // Allow some bytes at the beginning to mismatch.
+ // Sweet spot is 2/3 bytes depending on input.
+ // 3 is only a little better when it is but sometimes a lot worse.
+ // The skipped bytes are tested in Extend backwards,
+ // and still picked up as part of the match if they do.
+ const skipBeginning = 2
eLong := &e.bTable[hash7(load6432(src, sAt), tableBits)]
// Test current
- t2 := eLong.Cur.offset - e.cur - l
- off := s - t2
+ t2 := eLong.Cur.offset - e.cur - l + skipBeginning
+ s2 := s + skipBeginning
+ off := s2 - t2
if off < maxMatchOffset {
if off > 0 && t2 >= 0 {
- if l2 := e.matchlenLong(s, t2, src); l2 > l {
+ if l2 := e.matchlenLong(s2, t2, src); l2 > l {
t = t2
l = l2
+ s = s2
}
}
// Test next:
- t2 = eLong.Prev.offset - e.cur - l
- off := s - t2
+ t2 = eLong.Prev.offset - e.cur - l + skipBeginning
+ off := s2 - t2
if off > 0 && off < maxMatchOffset && t2 >= 0 {
- if l2 := e.matchlenLong(s, t2, src); l2 > l {
+ if l2 := e.matchlenLong(s2, t2, src); l2 > l {
t = t2
l = l2
+ s = s2
}
}
}
@@ -243,7 +253,15 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) {
l++
}
if nextEmit < s {
- emitLiteral(dst, src[nextEmit:s])
+ if false {
+ emitLiteral(dst, src[nextEmit:s])
+ } else {
+ for _, v := range src[nextEmit:s] {
+ dst.tokens[dst.n] = token(v)
+ dst.litHist[v]++
+ dst.n++
+ }
+ }
}
if false {
if t >= s {
@@ -269,7 +287,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) {
// Index after match end.
for i := nextS + 1; i < int32(len(src))-8; i += 2 {
cv := load6432(src, i)
- e.table[hash4x64(cv, tableBits)] = tableEntry{offset: i + e.cur}
+ e.table[hashLen(cv, tableBits, hashShortBytes)] = tableEntry{offset: i + e.cur}
eLong := &e.bTable[hash7(cv, tableBits)]
eLong.Cur, eLong.Prev = tableEntry{offset: i + e.cur}, eLong.Cur
}
@@ -284,7 +302,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) {
t2 := tableEntry{offset: t.offset + 1}
eLong := &e.bTable[hash7(cv, tableBits)]
eLong2 := &e.bTable[hash7(cv>>8, tableBits)]
- e.table[hash4x64(cv, tableBits)] = t
+ e.table[hashLen(cv, tableBits, hashShortBytes)] = t
eLong.Cur, eLong.Prev = t, eLong.Cur
eLong2.Cur, eLong2.Prev = t2, eLong2.Cur
}
diff --git a/vendor/github.com/klauspost/compress/flate/stateless.go b/vendor/github.com/klauspost/compress/flate/stateless.go
index 53e899124639e..93a1d150312e0 100644
--- a/vendor/github.com/klauspost/compress/flate/stateless.go
+++ b/vendor/github.com/klauspost/compress/flate/stateless.go
@@ -59,9 +59,9 @@ var bitWriterPool = sync.Pool{
},
}
-// StatelessDeflate allows to compress directly to a Writer without retaining state.
+// StatelessDeflate allows compressing directly to a Writer without retaining state.
// When returning everything will be flushed.
-// Up to 8KB of an optional dictionary can be given which is presumed to presumed to precede the block.
+// Up to 8KB of an optional dictionary can be given which is presumed to precede the block.
// Longer dictionaries will be truncated and will still produce valid output.
// Sending nil dictionary is perfectly fine.
func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error {
@@ -249,7 +249,15 @@ func statelessEnc(dst *tokens, src []byte, startAt int16) {
l++
}
if nextEmit < s {
- emitLiteral(dst, src[nextEmit:s])
+ if false {
+ emitLiteral(dst, src[nextEmit:s])
+ } else {
+ for _, v := range src[nextEmit:s] {
+ dst.tokens[dst.n] = token(v)
+ dst.litHist[v]++
+ dst.n++
+ }
+ }
}
// Save the match found
diff --git a/vendor/github.com/klauspost/compress/flate/token.go b/vendor/github.com/klauspost/compress/flate/token.go
index eb862d7a920d2..d818790c13236 100644
--- a/vendor/github.com/klauspost/compress/flate/token.go
+++ b/vendor/github.com/klauspost/compress/flate/token.go
@@ -13,11 +13,10 @@ import (
)
const (
- // From top
- // 2 bits: type 0 = literal 1=EOF 2=Match 3=Unused
- // 8 bits: xlength = length - MIN_MATCH_LENGTH
- // 5 bits offsetcode
- // 16 bits xoffset = offset - MIN_OFFSET_SIZE, or literal
+ // bits 0-16 xoffset = offset - MIN_OFFSET_SIZE, or literal - 16 bits
+ // bits 16-22 offsetcode - 5 bits
+ // bits 22-30 xlength = length - MIN_MATCH_LENGTH - 8 bits
+ // bits 30-32 type 0 = literal 1=EOF 2=Match 3=Unused - 2 bits
lengthShift = 22
offsetMask = 1<<lengthShift - 1
typeMask = 3 << 30
@@ -129,11 +128,11 @@ var offsetCodes14 = [256]uint32{
type token uint32
type tokens struct {
- nLits int
extraHist [32]uint16 // codes 256->maxnumlit
offHist [32]uint16 // offset codes
litHist [256]uint16 // codes 0->255
- n uint16 // Must be able to contain maxStoreBlockSize
+ nFilled int
+ n uint16 // Must be able to contain maxStoreBlockSize
tokens [maxStoreBlockSize + 1]token
}
@@ -142,7 +141,7 @@ func (t *tokens) Reset() {
return
}
t.n = 0
- t.nLits = 0
+ t.nFilled = 0
for i := range t.litHist[:] {
t.litHist[i] = 0
}
@@ -161,12 +160,12 @@ func (t *tokens) Fill() {
for i, v := range t.litHist[:] {
if v == 0 {
t.litHist[i] = 1
- t.nLits++
+ t.nFilled++
}
}
for i, v := range t.extraHist[:literalCount-256] {
if v == 0 {
- t.nLits++
+ t.nFilled++
t.extraHist[i] = 1
}
}
@@ -196,20 +195,17 @@ func (t *tokens) indexTokens(in []token) {
// emitLiteral writes a literal chunk and returns the number of bytes written.
func emitLiteral(dst *tokens, lit []byte) {
- ol := int(dst.n)
- for i, v := range lit {
- dst.tokens[(i+ol)&maxStoreBlockSize] = token(v)
+ for _, v := range lit {
+ dst.tokens[dst.n] = token(v)
dst.litHist[v]++
+ dst.n++
}
- dst.n += uint16(len(lit))
- dst.nLits += len(lit)
}
func (t *tokens) AddLiteral(lit byte) {
t.tokens[t.n] = token(lit)
t.litHist[lit]++
t.n++
- t.nLits++
}
// from https://stackoverflow.com/a/28730362
@@ -230,8 +226,9 @@ func (t *tokens) EstimatedBits() int {
shannon := float32(0)
bits := int(0)
nMatches := 0
- if t.nLits > 0 {
- invTotal := 1.0 / float32(t.nLits)
+ total := int(t.n) + t.nFilled
+ if total > 0 {
+ invTotal := 1.0 / float32(total)
for _, v := range t.litHist[:] {
if v > 0 {
n := float32(v)
@@ -275,10 +272,9 @@ func (t *tokens) AddMatch(xlength uint32, xoffset uint32) {
}
oCode := offsetCode(xoffset)
xoffset |= oCode << 16
- t.nLits++
t.extraHist[lengthCodes1[uint8(xlength)]]++
- t.offHist[oCode]++
+ t.offHist[oCode&31]++
t.tokens[t.n] = token(matchType | xlength<<lengthShift | xoffset)
t.n++
}
@@ -297,13 +293,16 @@ func (t *tokens) AddMatchLong(xlength int32, xoffset uint32) {
xl := xlength
if xl > 258 {
// We need to have at least baseMatchLength left over for next loop.
- xl = 258 - baseMatchLength
+ if xl > 258+baseMatchLength {
+ xl = 258
+ } else {
+ xl = 258 - baseMatchLength
+ }
}
xlength -= xl
xl -= baseMatchLength
- t.nLits++
t.extraHist[lengthCodes1[uint8(xl)]]++
- t.offHist[oc]++
+ t.offHist[oc&31]++
t.tokens[t.n] = token(matchType | uint32(xl)<<lengthShift | xoffset)
t.n++
}
@@ -359,8 +358,8 @@ func (t token) offset() uint32 { return uint32(t) & offsetMask }
func (t token) length() uint8 { return uint8(t >> lengthShift) }
-// The code is never more than 8 bits, but is returned as uint32 for convenience.
-func lengthCode(len uint8) uint32 { return uint32(lengthCodes[len]) }
+// Convert length to code.
+func lengthCode(len uint8) uint8 { return lengthCodes[len] }
// Returns the offset code corresponding to a specific offset
func offsetCode(off uint32) uint32 {
diff --git a/vendor/github.com/klauspost/compress/gzip/gunzip.go b/vendor/github.com/klauspost/compress/gzip/gunzip.go
index 4d7018913e297..66fe5ddf72ce7 100644
--- a/vendor/github.com/klauspost/compress/gzip/gunzip.go
+++ b/vendor/github.com/klauspost/compress/gzip/gunzip.go
@@ -252,42 +252,40 @@ func (z *Reader) Read(p []byte) (n int, err error) {
return 0, z.err
}
- n, z.err = z.decompressor.Read(p)
- z.digest = crc32.Update(z.digest, crc32.IEEETable, p[:n])
- z.size += uint32(n)
- if z.err != io.EOF {
- // In the normal case we return here.
- return n, z.err
- }
+ for n == 0 {
+ n, z.err = z.decompressor.Read(p)
+ z.digest = crc32.Update(z.digest, crc32.IEEETable, p[:n])
+ z.size += uint32(n)
+ if z.err != io.EOF {
+ // In the normal case we return here.
+ return n, z.err
+ }
- // Finished file; check checksum and size.
- if _, err := io.ReadFull(z.r, z.buf[:8]); err != nil {
- z.err = noEOF(err)
- return n, z.err
- }
- digest := le.Uint32(z.buf[:4])
- size := le.Uint32(z.buf[4:8])
- if digest != z.digest || size != z.size {
- z.err = ErrChecksum
- return n, z.err
- }
- z.digest, z.size = 0, 0
+ // Finished file; check checksum and size.
+ if _, err := io.ReadFull(z.r, z.buf[:8]); err != nil {
+ z.err = noEOF(err)
+ return n, z.err
+ }
+ digest := le.Uint32(z.buf[:4])
+ size := le.Uint32(z.buf[4:8])
+ if digest != z.digest || size != z.size {
+ z.err = ErrChecksum
+ return n, z.err
+ }
+ z.digest, z.size = 0, 0
- // File is ok; check if there is another.
- if !z.multistream {
- return n, io.EOF
- }
- z.err = nil // Remove io.EOF
+ // File is ok; check if there is another.
+ if !z.multistream {
+ return n, io.EOF
+ }
+ z.err = nil // Remove io.EOF
- if _, z.err = z.readHeader(); z.err != nil {
- return n, z.err
+ if _, z.err = z.readHeader(); z.err != nil {
+ return n, z.err
+ }
}
- // Read from next file, if necessary.
- if n > 0 {
- return n, nil
- }
- return z.Read(p)
+ return n, nil
}
// Support the io.WriteTo interface for io.Copy and friends.
diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go
index a4979e8868a52..504a7be9dae3d 100644
--- a/vendor/github.com/klauspost/compress/huff0/bitreader.go
+++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go
@@ -8,115 +8,10 @@ package huff0
import (
"encoding/binary"
"errors"
+ "fmt"
"io"
)
-// bitReader reads a bitstream in reverse.
-// The last set bit indicates the start of the stream and is used
-// for aligning the input.
-type bitReader struct {
- in []byte
- off uint // next byte to read is at in[off - 1]
- value uint64
- bitsRead uint8
-}
-
-// init initializes and resets the bit reader.
-func (b *bitReader) init(in []byte) error {
- if len(in) < 1 {
- return errors.New("corrupt stream: too short")
- }
- b.in = in
- b.off = uint(len(in))
- // The highest bit of the last byte indicates where to start
- v := in[len(in)-1]
- if v == 0 {
- return errors.New("corrupt stream, did not find end of stream")
- }
- b.bitsRead = 64
- b.value = 0
- if len(in) >= 8 {
- b.fillFastStart()
- } else {
- b.fill()
- b.fill()
- }
- b.bitsRead += 8 - uint8(highBit32(uint32(v)))
- return nil
-}
-
-// peekBitsFast requires that at least one bit is requested every time.
-// There are no checks if the buffer is filled.
-func (b *bitReader) peekBitsFast(n uint8) uint16 {
- const regMask = 64 - 1
- v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
- return v
-}
-
-// fillFast() will make sure at least 32 bits are available.
-// There must be at least 4 bytes available.
-func (b *bitReader) fillFast() {
- if b.bitsRead < 32 {
- return
- }
-
- // 2 bounds checks.
- v := b.in[b.off-4 : b.off]
- v = v[:4]
- low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
- b.value = (b.value << 32) | uint64(low)
- b.bitsRead -= 32
- b.off -= 4
-}
-
-func (b *bitReader) advance(n uint8) {
- b.bitsRead += n
-}
-
-// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read.
-func (b *bitReader) fillFastStart() {
- // Do single re-slice to avoid bounds checks.
- b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
- b.bitsRead = 0
- b.off -= 8
-}
-
-// fill() will make sure at least 32 bits are available.
-func (b *bitReader) fill() {
- if b.bitsRead < 32 {
- return
- }
- if b.off > 4 {
- v := b.in[b.off-4:]
- v = v[:4]
- low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
- b.value = (b.value << 32) | uint64(low)
- b.bitsRead -= 32
- b.off -= 4
- return
- }
- for b.off > 0 {
- b.value = (b.value << 8) | uint64(b.in[b.off-1])
- b.bitsRead -= 8
- b.off--
- }
-}
-
-// finished returns true if all bits have been read from the bit stream.
-func (b *bitReader) finished() bool {
- return b.off == 0 && b.bitsRead >= 64
-}
-
-// close the bitstream and returns an error if out-of-buffer reads occurred.
-func (b *bitReader) close() error {
- // Release reference.
- b.in = nil
- if b.bitsRead > 64 {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-
// bitReader reads a bitstream in reverse.
// The last set bit indicates the start of the stream and is used
// for aligning the input.
@@ -213,10 +108,17 @@ func (b *bitReaderBytes) finished() bool {
return b.off == 0 && b.bitsRead >= 64
}
+func (b *bitReaderBytes) remaining() uint {
+ return b.off*8 + uint(64-b.bitsRead)
+}
+
// close the bitstream and returns an error if out-of-buffer reads occurred.
func (b *bitReaderBytes) close() error {
// Release reference.
b.in = nil
+ if b.remaining() > 0 {
+ return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining())
+ }
if b.bitsRead > 64 {
return io.ErrUnexpectedEOF
}
@@ -313,15 +215,17 @@ func (b *bitReaderShifted) fill() {
}
}
-// finished returns true if all bits have been read from the bit stream.
-func (b *bitReaderShifted) finished() bool {
- return b.off == 0 && b.bitsRead >= 64
+func (b *bitReaderShifted) remaining() uint {
+ return b.off*8 + uint(64-b.bitsRead)
}
// close the bitstream and returns an error if out-of-buffer reads occurred.
func (b *bitReaderShifted) close() error {
// Release reference.
b.in = nil
+ if b.remaining() > 0 {
+ return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining())
+ }
if b.bitsRead > 64 {
return io.ErrUnexpectedEOF
}
diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go
index 6bce4e87d4ff6..ec71f7a349a14 100644
--- a/vendor/github.com/klauspost/compress/huff0/bitwriter.go
+++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go
@@ -5,8 +5,6 @@
package huff0
-import "fmt"
-
// bitWriter will write bits.
// First bit will be LSB of the first byte of output.
type bitWriter struct {
@@ -23,14 +21,6 @@ var bitMask16 = [32]uint16{
0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF,
0xFFFF, 0xFFFF} /* up to 16 bits */
-// addBits16NC will add up to 16 bits.
-// It will not check if there is space for them,
-// so the caller must ensure that it has flushed recently.
-func (b *bitWriter) addBits16NC(value uint16, bits uint8) {
- b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63)
- b.nBits += bits
-}
-
// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated.
// It will not check if there is space for them, so the caller must ensure that it has flushed recently.
func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
@@ -70,104 +60,6 @@ func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) {
b.nBits += encA.nBits + encB.nBits
}
-// addBits16ZeroNC will add up to 16 bits.
-// It will not check if there is space for them,
-// so the caller must ensure that it has flushed recently.
-// This is fastest if bits can be zero.
-func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) {
- if bits == 0 {
- return
- }
- value <<= (16 - bits) & 15
- value >>= (16 - bits) & 15
- b.bitContainer |= uint64(value) << (b.nBits & 63)
- b.nBits += bits
-}
-
-// flush will flush all pending full bytes.
-// There will be at least 56 bits available for writing when this has been called.
-// Using flush32 is faster, but leaves less space for writing.
-func (b *bitWriter) flush() {
- v := b.nBits >> 3
- switch v {
- case 0:
- return
- case 1:
- b.out = append(b.out,
- byte(b.bitContainer),
- )
- b.bitContainer >>= 1 << 3
- case 2:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- )
- b.bitContainer >>= 2 << 3
- case 3:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- byte(b.bitContainer>>16),
- )
- b.bitContainer >>= 3 << 3
- case 4:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- byte(b.bitContainer>>16),
- byte(b.bitContainer>>24),
- )
- b.bitContainer >>= 4 << 3
- case 5:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- byte(b.bitContainer>>16),
- byte(b.bitContainer>>24),
- byte(b.bitContainer>>32),
- )
- b.bitContainer >>= 5 << 3
- case 6:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- byte(b.bitContainer>>16),
- byte(b.bitContainer>>24),
- byte(b.bitContainer>>32),
- byte(b.bitContainer>>40),
- )
- b.bitContainer >>= 6 << 3
- case 7:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- byte(b.bitContainer>>16),
- byte(b.bitContainer>>24),
- byte(b.bitContainer>>32),
- byte(b.bitContainer>>40),
- byte(b.bitContainer>>48),
- )
- b.bitContainer >>= 7 << 3
- case 8:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- byte(b.bitContainer>>16),
- byte(b.bitContainer>>24),
- byte(b.bitContainer>>32),
- byte(b.bitContainer>>40),
- byte(b.bitContainer>>48),
- byte(b.bitContainer>>56),
- )
- b.bitContainer = 0
- b.nBits = 0
- return
- default:
- panic(fmt.Errorf("bits (%d) > 64", b.nBits))
- }
- b.nBits &= 7
-}
-
// flush32 will flush out, so there are at least 32 bits available for writing.
func (b *bitWriter) flush32() {
if b.nBits < 32 {
@@ -201,10 +93,3 @@ func (b *bitWriter) close() error {
b.flushAlign()
return nil
}
-
-// reset and continue writing by appending to out.
-func (b *bitWriter) reset(out []byte) {
- b.bitContainer = 0
- b.nBits = 0
- b.out = out
-}
diff --git a/vendor/github.com/klauspost/compress/huff0/bytereader.go b/vendor/github.com/klauspost/compress/huff0/bytereader.go
index 50bcdf6ea99ce..4dcab8d232775 100644
--- a/vendor/github.com/klauspost/compress/huff0/bytereader.go
+++ b/vendor/github.com/klauspost/compress/huff0/bytereader.go
@@ -20,11 +20,6 @@ func (b *byteReader) init(in []byte) {
b.off = 0
}
-// advance the stream b n bytes.
-func (b *byteReader) advance(n uint) {
- b.off += int(n)
-}
-
// Int32 returns a little endian int32 starting at current offset.
func (b byteReader) Int32() int32 {
v3 := int32(b.b[b.off+3])
@@ -43,11 +38,6 @@ func (b byteReader) Uint32() uint32 {
return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0
}
-// unread returns the unread portion of the input.
-func (b byteReader) unread() []byte {
- return b.b[b.off:]
-}
-
// remain will return the number of bytes remaining.
func (b byteReader) remain() int {
return len(b.b) - b.off
diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go
index 8323dc053890b..4d14542facf38 100644
--- a/vendor/github.com/klauspost/compress/huff0/compress.go
+++ b/vendor/github.com/klauspost/compress/huff0/compress.go
@@ -2,6 +2,7 @@ package huff0
import (
"fmt"
+ "math"
"runtime"
"sync"
)
@@ -289,6 +290,10 @@ func (s *Scratch) compress4X(src []byte) ([]byte, error) {
if err != nil {
return nil, err
}
+ if len(s.Out)-idx > math.MaxUint16 {
+ // We cannot store the size in the jump table
+ return nil, ErrIncompressible
+ }
// Write compressed length as little endian before block.
if i < 3 {
// Last length is not written.
@@ -332,6 +337,10 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) {
return nil, errs[i]
}
o := s.tmpOut[i]
+ if len(o) > math.MaxUint16 {
+ // We cannot store the size in the jump table
+ return nil, ErrIncompressible
+ }
// Write compressed length as little endian before block.
if i < 3 {
// Last length is not written.
@@ -395,6 +404,7 @@ func (s *Scratch) canUseTable(c cTable) bool {
return true
}
+//lint:ignore U1000 used for debugging
func (s *Scratch) validateTable(c cTable) bool {
if len(c) < int(s.symbolLen) {
return false
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go
index 2a06bd1a7e524..42a237eac4abd 100644
--- a/vendor/github.com/klauspost/compress/huff0/decompress.go
+++ b/vendor/github.com/klauspost/compress/huff0/decompress.go
@@ -4,13 +4,13 @@ import (
"errors"
"fmt"
"io"
+ "sync"
"github.com/klauspost/compress/fse"
)
type dTable struct {
single []dEntrySingle
- double []dEntryDouble
}
// single-symbols decoding
@@ -18,13 +18,6 @@ type dEntrySingle struct {
entry uint16
}
-// double-symbols decoding
-type dEntryDouble struct {
- seq [4]byte
- nBits uint8
- len uint8
-}
-
// Uses special code for all tables that are < 8 bits.
const use8BitTables = true
@@ -34,7 +27,7 @@ const use8BitTables = true
// If no Scratch is provided a new one is allocated.
// The returned Scratch can be used for encoding or decoding input using this table.
func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) {
- s, err = s.prepare(in)
+ s, err = s.prepare(nil)
if err != nil {
return s, nil, err
}
@@ -216,6 +209,7 @@ func (s *Scratch) Decoder() *Decoder {
return &Decoder{
dt: s.dt,
actualTableLog: s.actualTableLog,
+ bufs: &s.decPool,
}
}
@@ -223,103 +217,15 @@ func (s *Scratch) Decoder() *Decoder {
type Decoder struct {
dt dTable
actualTableLog uint8
+ bufs *sync.Pool
}
-// Decompress1X will decompress a 1X encoded stream.
-// The cap of the output buffer will be the maximum decompressed size.
-// The length of the supplied input must match the end of a block exactly.
-func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
- if len(d.dt.single) == 0 {
- return nil, errors.New("no table loaded")
- }
- if use8BitTables && d.actualTableLog <= 8 {
- return d.decompress1X8Bit(dst, src)
- }
- var br bitReaderShifted
- err := br.init(src)
- if err != nil {
- return dst, err
- }
- maxDecodedSize := cap(dst)
- dst = dst[:0]
-
- // Avoid bounds check by always having full sized table.
- const tlSize = 1 << tableLogMax
- const tlMask = tlSize - 1
- dt := d.dt.single[:tlSize]
-
- // Use temp table to avoid bound checks/append penalty.
- var buf [256]byte
- var off uint8
-
- for br.off >= 8 {
- br.fillFast()
- v := dt[br.peekBitsFast(d.actualTableLog)&tlMask]
- br.advance(uint8(v.entry))
- buf[off+0] = uint8(v.entry >> 8)
-
- v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
- br.advance(uint8(v.entry))
- buf[off+1] = uint8(v.entry >> 8)
-
- // Refill
- br.fillFast()
-
- v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
- br.advance(uint8(v.entry))
- buf[off+2] = uint8(v.entry >> 8)
-
- v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
- br.advance(uint8(v.entry))
- buf[off+3] = uint8(v.entry >> 8)
-
- off += 4
- if off == 0 {
- if len(dst)+256 > maxDecodedSize {
- br.close()
- return nil, ErrMaxDecodedSizeExceeded
- }
- dst = append(dst, buf[:]...)
- }
- }
-
- if len(dst)+int(off) > maxDecodedSize {
- br.close()
- return nil, ErrMaxDecodedSizeExceeded
- }
- dst = append(dst, buf[:off]...)
-
- // br < 8, so uint8 is fine
- bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead
- for bitsLeft > 0 {
- br.fill()
- if false && br.bitsRead >= 32 {
- if br.off >= 4 {
- v := br.in[br.off-4:]
- v = v[:4]
- low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
- br.value = (br.value << 32) | uint64(low)
- br.bitsRead -= 32
- br.off -= 4
- } else {
- for br.off > 0 {
- br.value = (br.value << 8) | uint64(br.in[br.off-1])
- br.bitsRead -= 8
- br.off--
- }
- }
- }
- if len(dst) >= maxDecodedSize {
- br.close()
- return nil, ErrMaxDecodedSizeExceeded
- }
- v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask]
- nBits := uint8(v.entry)
- br.advance(nBits)
- bitsLeft -= nBits
- dst = append(dst, uint8(v.entry>>8))
+func (d *Decoder) buffer() *[4][256]byte {
+ buf, ok := d.bufs.Get().(*[4][256]byte)
+ if ok {
+ return buf
}
- return dst, br.close()
+ return &[4][256]byte{}
}
// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8.
@@ -341,7 +247,8 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
dt := d.dt.single[:256]
// Use temp table to avoid bound checks/append penalty.
- var buf [256]byte
+ bufs := d.buffer()
+ buf := &bufs[0]
var off uint8
switch d.actualTableLog {
@@ -369,6 +276,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
if off == 0 {
if len(dst)+256 > maxDecodedSize {
br.close()
+ d.bufs.Put(bufs)
return nil, ErrMaxDecodedSizeExceeded
}
dst = append(dst, buf[:]...)
@@ -398,6 +306,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
if off == 0 {
if len(dst)+256 > maxDecodedSize {
br.close()
+ d.bufs.Put(bufs)
return nil, ErrMaxDecodedSizeExceeded
}
dst = append(dst, buf[:]...)
@@ -426,6 +335,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
off += 4
if off == 0 {
if len(dst)+256 > maxDecodedSize {
+ d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
@@ -455,6 +365,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
off += 4
if off == 0 {
if len(dst)+256 > maxDecodedSize {
+ d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
@@ -484,6 +395,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
off += 4
if off == 0 {
if len(dst)+256 > maxDecodedSize {
+ d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
@@ -513,6 +425,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
off += 4
if off == 0 {
if len(dst)+256 > maxDecodedSize {
+ d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
@@ -542,6 +455,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
off += 4
if off == 0 {
if len(dst)+256 > maxDecodedSize {
+ d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
@@ -571,6 +485,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
off += 4
if off == 0 {
if len(dst)+256 > maxDecodedSize {
+ d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
@@ -578,10 +493,12 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
}
}
default:
+ d.bufs.Put(bufs)
return nil, fmt.Errorf("invalid tablelog: %d", d.actualTableLog)
}
if len(dst)+int(off) > maxDecodedSize {
+ d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
@@ -601,6 +518,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
}
if len(dst) >= maxDecodedSize {
br.close()
+ d.bufs.Put(bufs)
return nil, ErrMaxDecodedSizeExceeded
}
v := dt[br.peekByteFast()>>shift]
@@ -609,6 +527,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
bitsLeft -= int8(nBits)
dst = append(dst, uint8(v.entry>>8))
}
+ d.bufs.Put(bufs)
return dst, br.close()
}
@@ -628,7 +547,8 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
dt := d.dt.single[:256]
// Use temp table to avoid bound checks/append penalty.
- var buf [256]byte
+ bufs := d.buffer()
+ buf := &bufs[0]
var off uint8
const shift = 56
@@ -655,6 +575,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
off += 4
if off == 0 {
if len(dst)+256 > maxDecodedSize {
+ d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
@@ -663,6 +584,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
}
if len(dst)+int(off) > maxDecodedSize {
+ d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
@@ -679,6 +601,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
}
}
if len(dst) >= maxDecodedSize {
+ d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
@@ -688,195 +611,10 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
bitsLeft -= int8(nBits)
dst = append(dst, uint8(v.entry>>8))
}
+ d.bufs.Put(bufs)
return dst, br.close()
}
-// Decompress4X will decompress a 4X encoded stream.
-// The length of the supplied input must match the end of a block exactly.
-// The *capacity* of the dst slice must match the destination size of
-// the uncompressed data exactly.
-func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
- if len(d.dt.single) == 0 {
- return nil, errors.New("no table loaded")
- }
- if len(src) < 6+(4*1) {
- return nil, errors.New("input too small")
- }
- if use8BitTables && d.actualTableLog <= 8 {
- return d.decompress4X8bit(dst, src)
- }
-
- var br [4]bitReaderShifted
- start := 6
- for i := 0; i < 3; i++ {
- length := int(src[i*2]) | (int(src[i*2+1]) << 8)
- if start+length >= len(src) {
- return nil, errors.New("truncated input (or invalid offset)")
- }
- err := br[i].init(src[start : start+length])
- if err != nil {
- return nil, err
- }
- start += length
- }
- err := br[3].init(src[start:])
- if err != nil {
- return nil, err
- }
-
- // destination, offset to match first output
- dstSize := cap(dst)
- dst = dst[:dstSize]
- out := dst
- dstEvery := (dstSize + 3) / 4
-
- const tlSize = 1 << tableLogMax
- const tlMask = tlSize - 1
- single := d.dt.single[:tlSize]
-
- // Use temp table to avoid bound checks/append penalty.
- var buf [256]byte
- var off uint8
- var decoded int
-
- // Decode 2 values from each decoder/loop.
- const bufoff = 256 / 4
- for {
- if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
- break
- }
-
- {
- const stream = 0
- const stream2 = 1
- br[stream].fillFast()
- br[stream2].fillFast()
-
- val := br[stream].peekBitsFast(d.actualTableLog)
- val2 := br[stream2].peekBitsFast(d.actualTableLog)
- v := single[val&tlMask]
- v2 := single[val2&tlMask]
- br[stream].advance(uint8(v.entry))
- br[stream2].advance(uint8(v2.entry))
- buf[off+bufoff*stream] = uint8(v.entry >> 8)
- buf[off+bufoff*stream2] = uint8(v2.entry >> 8)
-
- val = br[stream].peekBitsFast(d.actualTableLog)
- val2 = br[stream2].peekBitsFast(d.actualTableLog)
- v = single[val&tlMask]
- v2 = single[val2&tlMask]
- br[stream].advance(uint8(v.entry))
- br[stream2].advance(uint8(v2.entry))
- buf[off+bufoff*stream+1] = uint8(v.entry >> 8)
- buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8)
- }
-
- {
- const stream = 2
- const stream2 = 3
- br[stream].fillFast()
- br[stream2].fillFast()
-
- val := br[stream].peekBitsFast(d.actualTableLog)
- val2 := br[stream2].peekBitsFast(d.actualTableLog)
- v := single[val&tlMask]
- v2 := single[val2&tlMask]
- br[stream].advance(uint8(v.entry))
- br[stream2].advance(uint8(v2.entry))
- buf[off+bufoff*stream] = uint8(v.entry >> 8)
- buf[off+bufoff*stream2] = uint8(v2.entry >> 8)
-
- val = br[stream].peekBitsFast(d.actualTableLog)
- val2 = br[stream2].peekBitsFast(d.actualTableLog)
- v = single[val&tlMask]
- v2 = single[val2&tlMask]
- br[stream].advance(uint8(v.entry))
- br[stream2].advance(uint8(v2.entry))
- buf[off+bufoff*stream+1] = uint8(v.entry >> 8)
- buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8)
- }
-
- off += 2
-
- if off == bufoff {
- if bufoff > dstEvery {
- return nil, errors.New("corruption detected: stream overrun 1")
- }
- copy(out, buf[:bufoff])
- copy(out[dstEvery:], buf[bufoff:bufoff*2])
- copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3])
- copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4])
- off = 0
- out = out[bufoff:]
- decoded += 256
- // There must at least be 3 buffers left.
- if len(out) < dstEvery*3 {
- return nil, errors.New("corruption detected: stream overrun 2")
- }
- }
- }
- if off > 0 {
- ioff := int(off)
- if len(out) < dstEvery*3+ioff {
- return nil, errors.New("corruption detected: stream overrun 3")
- }
- copy(out, buf[:off])
- copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2])
- copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3])
- copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4])
- decoded += int(off) * 4
- out = out[off:]
- }
-
- // Decode remaining.
- for i := range br {
- offset := dstEvery * i
- br := &br[i]
- bitsLeft := br.off*8 + uint(64-br.bitsRead)
- for bitsLeft > 0 {
- br.fill()
- if false && br.bitsRead >= 32 {
- if br.off >= 4 {
- v := br.in[br.off-4:]
- v = v[:4]
- low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
- br.value = (br.value << 32) | uint64(low)
- br.bitsRead -= 32
- br.off -= 4
- } else {
- for br.off > 0 {
- br.value = (br.value << 8) | uint64(br.in[br.off-1])
- br.bitsRead -= 8
- br.off--
- }
- }
- }
- // end inline...
- if offset >= len(out) {
- return nil, errors.New("corruption detected: stream overrun 4")
- }
-
- // Read value and increment offset.
- val := br.peekBitsFast(d.actualTableLog)
- v := single[val&tlMask].entry
- nBits := uint8(v)
- br.advance(nBits)
- bitsLeft -= uint(nBits)
- out[offset] = uint8(v >> 8)
- offset++
- }
- decoded += offset - dstEvery*i
- err = br.close()
- if err != nil {
- return nil, err
- }
- }
- if dstSize != decoded {
- return nil, errors.New("corruption detected: short output block")
- }
- return dst, nil
-}
-
// Decompress4X will decompress a 4X encoded stream.
// The length of the supplied input must match the end of a block exactly.
// The *capacity* of the dst slice must match the destination size of
@@ -916,12 +654,12 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
single := d.dt.single[:tlSize]
// Use temp table to avoid bound checks/append penalty.
- var buf [256]byte
+ buf := d.buffer()
var off uint8
var decoded int
// Decode 4 values from each decoder/loop.
- const bufoff = 256 / 4
+ const bufoff = 256
for {
if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
break
@@ -942,8 +680,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
br1.value <<= v & 63
br2.bitsRead += uint8(v2)
br2.value <<= v2 & 63
- buf[off+bufoff*stream] = uint8(v >> 8)
- buf[off+bufoff*stream2] = uint8(v2 >> 8)
+ buf[stream][off] = uint8(v >> 8)
+ buf[stream2][off] = uint8(v2 >> 8)
v = single[uint8(br1.value>>shift)].entry
v2 = single[uint8(br2.value>>shift)].entry
@@ -951,8 +689,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
br1.value <<= v & 63
br2.bitsRead += uint8(v2)
br2.value <<= v2 & 63
- buf[off+bufoff*stream+1] = uint8(v >> 8)
- buf[off+bufoff*stream2+1] = uint8(v2 >> 8)
+ buf[stream][off+1] = uint8(v >> 8)
+ buf[stream2][off+1] = uint8(v2 >> 8)
v = single[uint8(br1.value>>shift)].entry
v2 = single[uint8(br2.value>>shift)].entry
@@ -960,8 +698,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
br1.value <<= v & 63
br2.bitsRead += uint8(v2)
br2.value <<= v2 & 63
- buf[off+bufoff*stream+2] = uint8(v >> 8)
- buf[off+bufoff*stream2+2] = uint8(v2 >> 8)
+ buf[stream][off+2] = uint8(v >> 8)
+ buf[stream2][off+2] = uint8(v2 >> 8)
v = single[uint8(br1.value>>shift)].entry
v2 = single[uint8(br2.value>>shift)].entry
@@ -969,8 +707,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
br1.value <<= v & 63
br2.bitsRead += uint8(v2)
br2.value <<= v2 & 63
- buf[off+bufoff*stream2+3] = uint8(v2 >> 8)
- buf[off+bufoff*stream+3] = uint8(v >> 8)
+ buf[stream][off+3] = uint8(v >> 8)
+ buf[stream2][off+3] = uint8(v2 >> 8)
}
{
@@ -987,8 +725,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
br1.value <<= v & 63
br2.bitsRead += uint8(v2)
br2.value <<= v2 & 63
- buf[off+bufoff*stream] = uint8(v >> 8)
- buf[off+bufoff*stream2] = uint8(v2 >> 8)
+ buf[stream][off] = uint8(v >> 8)
+ buf[stream2][off] = uint8(v2 >> 8)
v = single[uint8(br1.value>>shift)].entry
v2 = single[uint8(br2.value>>shift)].entry
@@ -996,8 +734,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
br1.value <<= v & 63
br2.bitsRead += uint8(v2)
br2.value <<= v2 & 63
- buf[off+bufoff*stream+1] = uint8(v >> 8)
- buf[off+bufoff*stream2+1] = uint8(v2 >> 8)
+ buf[stream][off+1] = uint8(v >> 8)
+ buf[stream2][off+1] = uint8(v2 >> 8)
v = single[uint8(br1.value>>shift)].entry
v2 = single[uint8(br2.value>>shift)].entry
@@ -1005,8 +743,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
br1.value <<= v & 63
br2.bitsRead += uint8(v2)
br2.value <<= v2 & 63
- buf[off+bufoff*stream+2] = uint8(v >> 8)
- buf[off+bufoff*stream2+2] = uint8(v2 >> 8)
+ buf[stream][off+2] = uint8(v >> 8)
+ buf[stream2][off+2] = uint8(v2 >> 8)
v = single[uint8(br1.value>>shift)].entry
v2 = single[uint8(br2.value>>shift)].entry
@@ -1014,49 +752,61 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
br1.value <<= v & 63
br2.bitsRead += uint8(v2)
br2.value <<= v2 & 63
- buf[off+bufoff*stream2+3] = uint8(v2 >> 8)
- buf[off+bufoff*stream+3] = uint8(v >> 8)
+ buf[stream][off+3] = uint8(v >> 8)
+ buf[stream2][off+3] = uint8(v2 >> 8)
}
off += 4
- if off == bufoff {
+ if off == 0 {
if bufoff > dstEvery {
+ d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 1")
}
- copy(out, buf[:bufoff])
- copy(out[dstEvery:], buf[bufoff:bufoff*2])
- copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3])
- copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4])
- off = 0
- out = out[bufoff:]
- decoded += 256
// There must at least be 3 buffers left.
- if len(out) < dstEvery*3 {
+ if len(out)-bufoff < dstEvery*3 {
+ d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 2")
}
+ //copy(out, buf[0][:])
+ //copy(out[dstEvery:], buf[1][:])
+ //copy(out[dstEvery*2:], buf[2][:])
+ *(*[bufoff]byte)(out) = buf[0]
+ *(*[bufoff]byte)(out[dstEvery:]) = buf[1]
+ *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2]
+ *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3]
+ out = out[bufoff:]
+ decoded += bufoff * 4
}
}
if off > 0 {
ioff := int(off)
if len(out) < dstEvery*3+ioff {
+ d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 3")
}
- copy(out, buf[:off])
- copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2])
- copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3])
- copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4])
+ copy(out, buf[0][:off])
+ copy(out[dstEvery:], buf[1][:off])
+ copy(out[dstEvery*2:], buf[2][:off])
+ copy(out[dstEvery*3:], buf[3][:off])
decoded += int(off) * 4
out = out[off:]
}
// Decode remaining.
+ remainBytes := dstEvery - (decoded / 4)
for i := range br {
offset := dstEvery * i
+ endsAt := offset + remainBytes
+ if endsAt > len(out) {
+ endsAt = len(out)
+ }
br := &br[i]
- bitsLeft := int(br.off*8) + int(64-br.bitsRead)
+ bitsLeft := br.remaining()
for bitsLeft > 0 {
if br.finished() {
+ d.bufs.Put(buf)
return nil, io.ErrUnexpectedEOF
}
if br.bitsRead >= 56 {
@@ -1076,7 +826,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
}
}
// end inline...
- if offset >= len(out) {
+ if offset >= endsAt {
+ d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 4")
}
@@ -1084,16 +835,22 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
v := single[uint8(br.value>>shift)].entry
nBits := uint8(v)
br.advance(nBits)
- bitsLeft -= int(nBits)
+ bitsLeft -= uint(nBits)
out[offset] = uint8(v >> 8)
offset++
}
+ if offset != endsAt {
+ d.bufs.Put(buf)
+ return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
+ }
decoded += offset - dstEvery*i
err = br.close()
if err != nil {
+ d.bufs.Put(buf)
return nil, err
}
}
+ d.bufs.Put(buf)
if dstSize != decoded {
return nil, errors.New("corruption detected: short output block")
}
@@ -1131,16 +888,15 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
const shift = 56
const tlSize = 1 << 8
- const tlMask = tlSize - 1
single := d.dt.single[:tlSize]
// Use temp table to avoid bound checks/append penalty.
- var buf [256]byte
+ buf := d.buffer()
var off uint8
var decoded int
// Decode 4 values from each decoder/loop.
- const bufoff = 256 / 4
+ const bufoff = 256
for {
if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
break
@@ -1150,106 +906,116 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
// Interleave 2 decodes.
const stream = 0
const stream2 = 1
- br[stream].fillFast()
- br[stream2].fillFast()
-
- v := single[uint8(br[stream].value>>shift)].entry
- v2 := single[uint8(br[stream2].value>>shift)].entry
- br[stream].bitsRead += uint8(v)
- br[stream].value <<= v & 63
- br[stream2].bitsRead += uint8(v2)
- br[stream2].value <<= v2 & 63
- buf[off+bufoff*stream] = uint8(v >> 8)
- buf[off+bufoff*stream2] = uint8(v2 >> 8)
-
- v = single[uint8(br[stream].value>>shift)].entry
- v2 = single[uint8(br[stream2].value>>shift)].entry
- br[stream].bitsRead += uint8(v)
- br[stream].value <<= v & 63
- br[stream2].bitsRead += uint8(v2)
- br[stream2].value <<= v2 & 63
- buf[off+bufoff*stream+1] = uint8(v >> 8)
- buf[off+bufoff*stream2+1] = uint8(v2 >> 8)
-
- v = single[uint8(br[stream].value>>shift)].entry
- v2 = single[uint8(br[stream2].value>>shift)].entry
- br[stream].bitsRead += uint8(v)
- br[stream].value <<= v & 63
- br[stream2].bitsRead += uint8(v2)
- br[stream2].value <<= v2 & 63
- buf[off+bufoff*stream+2] = uint8(v >> 8)
- buf[off+bufoff*stream2+2] = uint8(v2 >> 8)
-
- v = single[uint8(br[stream].value>>shift)].entry
- v2 = single[uint8(br[stream2].value>>shift)].entry
- br[stream].bitsRead += uint8(v)
- br[stream].value <<= v & 63
- br[stream2].bitsRead += uint8(v2)
- br[stream2].value <<= v2 & 63
- buf[off+bufoff*stream+3] = uint8(v >> 8)
- buf[off+bufoff*stream2+3] = uint8(v2 >> 8)
+ br1 := &br[stream]
+ br2 := &br[stream2]
+ br1.fillFast()
+ br2.fillFast()
+
+ v := single[uint8(br1.value>>shift)].entry
+ v2 := single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off] = uint8(v >> 8)
+ buf[stream2][off] = uint8(v2 >> 8)
+
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off+1] = uint8(v >> 8)
+ buf[stream2][off+1] = uint8(v2 >> 8)
+
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off+2] = uint8(v >> 8)
+ buf[stream2][off+2] = uint8(v2 >> 8)
+
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off+3] = uint8(v >> 8)
+ buf[stream2][off+3] = uint8(v2 >> 8)
}
{
const stream = 2
const stream2 = 3
- br[stream].fillFast()
- br[stream2].fillFast()
-
- v := single[uint8(br[stream].value>>shift)].entry
- v2 := single[uint8(br[stream2].value>>shift)].entry
- br[stream].bitsRead += uint8(v)
- br[stream].value <<= v & 63
- br[stream2].bitsRead += uint8(v2)
- br[stream2].value <<= v2 & 63
- buf[off+bufoff*stream] = uint8(v >> 8)
- buf[off+bufoff*stream2] = uint8(v2 >> 8)
-
- v = single[uint8(br[stream].value>>shift)].entry
- v2 = single[uint8(br[stream2].value>>shift)].entry
- br[stream].bitsRead += uint8(v)
- br[stream].value <<= v & 63
- br[stream2].bitsRead += uint8(v2)
- br[stream2].value <<= v2 & 63
- buf[off+bufoff*stream+1] = uint8(v >> 8)
- buf[off+bufoff*stream2+1] = uint8(v2 >> 8)
-
- v = single[uint8(br[stream].value>>shift)].entry
- v2 = single[uint8(br[stream2].value>>shift)].entry
- br[stream].bitsRead += uint8(v)
- br[stream].value <<= v & 63
- br[stream2].bitsRead += uint8(v2)
- br[stream2].value <<= v2 & 63
- buf[off+bufoff*stream+2] = uint8(v >> 8)
- buf[off+bufoff*stream2+2] = uint8(v2 >> 8)
-
- v = single[uint8(br[stream].value>>shift)].entry
- v2 = single[uint8(br[stream2].value>>shift)].entry
- br[stream].bitsRead += uint8(v)
- br[stream].value <<= v & 63
- br[stream2].bitsRead += uint8(v2)
- br[stream2].value <<= v2 & 63
- buf[off+bufoff*stream+3] = uint8(v >> 8)
- buf[off+bufoff*stream2+3] = uint8(v2 >> 8)
+ br1 := &br[stream]
+ br2 := &br[stream2]
+ br1.fillFast()
+ br2.fillFast()
+
+ v := single[uint8(br1.value>>shift)].entry
+ v2 := single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off] = uint8(v >> 8)
+ buf[stream2][off] = uint8(v2 >> 8)
+
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off+1] = uint8(v >> 8)
+ buf[stream2][off+1] = uint8(v2 >> 8)
+
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off+2] = uint8(v >> 8)
+ buf[stream2][off+2] = uint8(v2 >> 8)
+
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off+3] = uint8(v >> 8)
+ buf[stream2][off+3] = uint8(v2 >> 8)
}
off += 4
- if off == bufoff {
+ if off == 0 {
if bufoff > dstEvery {
+ d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 1")
}
- copy(out, buf[:bufoff])
- copy(out[dstEvery:], buf[bufoff:bufoff*2])
- copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3])
- copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4])
- off = 0
- out = out[bufoff:]
- decoded += 256
// There must at least be 3 buffers left.
- if len(out) < dstEvery*3 {
+ if len(out)-bufoff < dstEvery*3 {
+ d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 2")
}
+
+ //copy(out, buf[0][:])
+ //copy(out[dstEvery:], buf[1][:])
+ //copy(out[dstEvery*2:], buf[2][:])
+ // copy(out[dstEvery*3:], buf[3][:])
+ *(*[bufoff]byte)(out) = buf[0]
+ *(*[bufoff]byte)(out[dstEvery:]) = buf[1]
+ *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2]
+ *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3]
+ out = out[bufoff:]
+ decoded += bufoff * 4
}
}
if off > 0 {
@@ -1257,21 +1023,27 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
if len(out) < dstEvery*3+ioff {
return nil, errors.New("corruption detected: stream overrun 3")
}
- copy(out, buf[:off])
- copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2])
- copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3])
- copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4])
+ copy(out, buf[0][:off])
+ copy(out[dstEvery:], buf[1][:off])
+ copy(out[dstEvery*2:], buf[2][:off])
+ copy(out[dstEvery*3:], buf[3][:off])
decoded += int(off) * 4
out = out[off:]
}
// Decode remaining.
+ remainBytes := dstEvery - (decoded / 4)
for i := range br {
offset := dstEvery * i
+ endsAt := offset + remainBytes
+ if endsAt > len(out) {
+ endsAt = len(out)
+ }
br := &br[i]
- bitsLeft := int(br.off*8) + int(64-br.bitsRead)
+ bitsLeft := br.remaining()
for bitsLeft > 0 {
if br.finished() {
+ d.bufs.Put(buf)
return nil, io.ErrUnexpectedEOF
}
if br.bitsRead >= 56 {
@@ -1291,7 +1063,8 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
}
}
// end inline...
- if offset >= len(out) {
+ if offset >= endsAt {
+ d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 4")
}
@@ -1299,16 +1072,23 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
v := single[br.peekByteFast()].entry
nBits := uint8(v)
br.advance(nBits)
- bitsLeft -= int(nBits)
+ bitsLeft -= uint(nBits)
out[offset] = uint8(v >> 8)
offset++
}
+ if offset != endsAt {
+ d.bufs.Put(buf)
+ return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
+ }
+
decoded += offset - dstEvery*i
err = br.close()
if err != nil {
+ d.bufs.Put(buf)
return nil, err
}
}
+ d.bufs.Put(buf)
if dstSize != decoded {
return nil, errors.New("corruption detected: short output block")
}
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
new file mode 100644
index 0000000000000..ba7e8e6b02768
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
@@ -0,0 +1,226 @@
+//go:build amd64 && !appengine && !noasm && gc
+// +build amd64,!appengine,!noasm,gc
+
+// This file contains the specialisation of Decoder.Decompress4X
+// and Decoder.Decompress1X that use an asm implementation of their main loops.
+package huff0
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/klauspost/compress/internal/cpuinfo"
+)
+
+// decompress4x_main_loop_amd64 is an x86 assembler implementation
+// of Decompress4X when tablelog > 8.
+//
+//go:noescape
+func decompress4x_main_loop_amd64(ctx *decompress4xContext)
+
+// decompress4x_8b_main_loop_amd64 is an x86 assembler implementation
+// of Decompress4X when tablelog <= 8 which decodes 4 entries
+// per loop.
+//
+//go:noescape
+func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext)
+
+// fallback8BitSize is the size where using Go version is faster.
+const fallback8BitSize = 800
+
+type decompress4xContext struct {
+ pbr *[4]bitReaderShifted
+ peekBits uint8
+ out *byte
+ dstEvery int
+ tbl *dEntrySingle
+ decoded int
+ limit *byte
+}
+
+// Decompress4X will decompress a 4X encoded stream.
+// The length of the supplied input must match the end of a block exactly.
+// The *capacity* of the dst slice must match the destination size of
+// the uncompressed data exactly.
+func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
+ if len(d.dt.single) == 0 {
+ return nil, errors.New("no table loaded")
+ }
+ if len(src) < 6+(4*1) {
+ return nil, errors.New("input too small")
+ }
+
+ use8BitTables := d.actualTableLog <= 8
+ if cap(dst) < fallback8BitSize && use8BitTables {
+ return d.decompress4X8bit(dst, src)
+ }
+
+ var br [4]bitReaderShifted
+ // Decode "jump table"
+ start := 6
+ for i := 0; i < 3; i++ {
+ length := int(src[i*2]) | (int(src[i*2+1]) << 8)
+ if start+length >= len(src) {
+ return nil, errors.New("truncated input (or invalid offset)")
+ }
+ err := br[i].init(src[start : start+length])
+ if err != nil {
+ return nil, err
+ }
+ start += length
+ }
+ err := br[3].init(src[start:])
+ if err != nil {
+ return nil, err
+ }
+
+ // destination, offset to match first output
+ dstSize := cap(dst)
+ dst = dst[:dstSize]
+ out := dst
+ dstEvery := (dstSize + 3) / 4
+
+ const tlSize = 1 << tableLogMax
+ const tlMask = tlSize - 1
+ single := d.dt.single[:tlSize]
+
+ var decoded int
+
+ if len(out) > 4*4 && !(br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4) {
+ ctx := decompress4xContext{
+ pbr: &br,
+ peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast()
+ out: &out[0],
+ dstEvery: dstEvery,
+ tbl: &single[0],
+ limit: &out[dstEvery-4], // Always stop decoding when first buffer gets here to avoid writing OOB on last.
+ }
+ if use8BitTables {
+ decompress4x_8b_main_loop_amd64(&ctx)
+ } else {
+ decompress4x_main_loop_amd64(&ctx)
+ }
+
+ decoded = ctx.decoded
+ out = out[decoded/4:]
+ }
+
+ // Decode remaining.
+ remainBytes := dstEvery - (decoded / 4)
+ for i := range br {
+ offset := dstEvery * i
+ endsAt := offset + remainBytes
+ if endsAt > len(out) {
+ endsAt = len(out)
+ }
+ br := &br[i]
+ bitsLeft := br.remaining()
+ for bitsLeft > 0 {
+ br.fill()
+ if offset >= endsAt {
+ return nil, errors.New("corruption detected: stream overrun 4")
+ }
+
+ // Read value and increment offset.
+ val := br.peekBitsFast(d.actualTableLog)
+ v := single[val&tlMask].entry
+ nBits := uint8(v)
+ br.advance(nBits)
+ bitsLeft -= uint(nBits)
+ out[offset] = uint8(v >> 8)
+ offset++
+ }
+ if offset != endsAt {
+ return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
+ }
+ decoded += offset - dstEvery*i
+ err = br.close()
+ if err != nil {
+ return nil, err
+ }
+ }
+ if dstSize != decoded {
+ return nil, errors.New("corruption detected: short output block")
+ }
+ return dst, nil
+}
+
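The jump-table decode in Decompress4X above reflects the 4X container layout: src[0:6] stores three little-endian uint16 lengths for streams 0-2, those streams follow back to back starting at offset 6, and stream 3 runs to the end of the block. A standalone sketch of just that split, assuming the standard errors package; the helper name and error strings are illustrative and not part of huff0:

func splitJumpTable(src []byte) (streams [4][]byte, err error) {
	if len(src) < 6+(4*1) { // 6-byte jump table plus at least 1 byte per stream
		return streams, errors.New("input too small")
	}
	start := 6
	for i := 0; i < 3; i++ {
		length := int(src[i*2]) | (int(src[i*2+1]) << 8) // little-endian uint16
		if start+length >= len(src) {
			return streams, errors.New("truncated input (or invalid offset)")
		}
		streams[i] = src[start : start+length]
		start += length
	}
	streams[3] = src[start:] // the last stream is whatever remains
	return streams, nil
}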
+// decompress1x_main_loop_amd64 is an x86 assembler implementation
+// of Decompress1X when tablelog > 8.
+//
+//go:noescape
+func decompress1x_main_loop_amd64(ctx *decompress1xContext)
+
+// decompress1x_main_loop_bmi2 is an x86 with BMI2 assembler implementation
+// of Decompress1X when tablelog > 8.
+//
+//go:noescape
+func decompress1x_main_loop_bmi2(ctx *decompress1xContext)
+
+type decompress1xContext struct {
+ pbr *bitReaderShifted
+ peekBits uint8
+ out *byte
+ outCap int
+ tbl *dEntrySingle
+ decoded int
+}
+
+// Error reported by asm implementations
+const error_max_decoded_size_exeeded = -1
+
+// Decompress1X will decompress a 1X encoded stream.
+// The cap of the output buffer will be the maximum decompressed size.
+// The length of the supplied input must match the end of a block exactly.
+func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
+ if len(d.dt.single) == 0 {
+ return nil, errors.New("no table loaded")
+ }
+ var br bitReaderShifted
+ err := br.init(src)
+ if err != nil {
+ return dst, err
+ }
+ maxDecodedSize := cap(dst)
+ dst = dst[:maxDecodedSize]
+
+ const tlSize = 1 << tableLogMax
+ const tlMask = tlSize - 1
+
+ if maxDecodedSize >= 4 {
+ ctx := decompress1xContext{
+ pbr: &br,
+ out: &dst[0],
+ outCap: maxDecodedSize,
+ peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast()
+ tbl: &d.dt.single[0],
+ }
+
+ if cpuinfo.HasBMI2() {
+ decompress1x_main_loop_bmi2(&ctx)
+ } else {
+ decompress1x_main_loop_amd64(&ctx)
+ }
+ if ctx.decoded == error_max_decoded_size_exeeded {
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+
+ dst = dst[:ctx.decoded]
+ }
+
+ // br < 8, so uint8 is fine
+ bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead
+ for bitsLeft > 0 {
+ br.fill()
+ if len(dst) >= maxDecodedSize {
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask]
+ nBits := uint8(v.entry)
+ br.advance(nBits)
+ bitsLeft -= nBits
+ dst = append(dst, uint8(v.entry>>8))
+ }
+ return dst, br.close()
+}
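Note how both asm entry points size their output from capacity rather than length: Decompress4X requires cap(dst) to equal the decompressed size exactly, while Decompress1X treats cap(dst) as the maximum it may produce. A minimal caller sketch, assuming a *Decoder whose table has already been loaded (in huff0 that normally comes from a Scratch via ReadTable, which is outside this diff) and a decompressed size supplied by the caller's framing:

// decodeBlock is an illustrative wrapper, not part of the package.
func decodeBlock(d *huff0.Decoder, src []byte, decompressedSize int, fourStreams bool) ([]byte, error) {
	dst := make([]byte, 0, decompressedSize) // the capacity carries the size hint
	if fourStreams {
		return d.Decompress4X(dst, src)
	}
	return d.Decompress1X(dst, src)
}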
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
new file mode 100644
index 0000000000000..8d2187a2ce6a3
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
@@ -0,0 +1,846 @@
+// Code generated by command: go run gen.go -out ../decompress_amd64.s -pkg=huff0. DO NOT EDIT.
+
+//go:build amd64 && !appengine && !noasm && gc
+
+// func decompress4x_main_loop_amd64(ctx *decompress4xContext)
+TEXT ·decompress4x_main_loop_amd64(SB), $0-8
+ XORQ DX, DX
+
+ // Preload values
+ MOVQ ctx+0(FP), AX
+ MOVBQZX 8(AX), DI
+ MOVQ 16(AX), SI
+ MOVQ 48(AX), BX
+ MOVQ 24(AX), R9
+ MOVQ 32(AX), R10
+ MOVQ (AX), R11
+
+ // Main loop
+main_loop:
+ MOVQ SI, R8
+ CMPQ R8, BX
+ SETGE DL
+
+ // br0.fillFast32()
+ MOVQ 32(R11), R12
+ MOVBQZX 40(R11), R13
+ CMPQ R13, $0x20
+ JBE skip_fill0
+ MOVQ 24(R11), AX
+ SUBQ $0x20, R13
+ SUBQ $0x04, AX
+ MOVQ (R11), R14
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+ MOVL (AX)(R14*1), R14
+ MOVQ R13, CX
+ SHLQ CL, R14
+ MOVQ AX, 24(R11)
+ ORQ R14, R12
+
+ // exhausted = exhausted || (br0.off < 4)
+ CMPQ AX, $0x04
+ SETLT AL
+ ORB AL, DL
+
+skip_fill0:
+ // val0 := br0.peekTopBits(peekBits)
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
+
+ // v0 := table[val0&mask]
+ MOVW (R10)(R14*2), CX
+
+ // br0.advance(uint8(v0.entry)
+ MOVB CH, AL
+ SHLQ CL, R12
+ ADDB CL, R13
+
+ // val1 := br0.peekTopBits(peekBits)
+ MOVQ DI, CX
+ MOVQ R12, R14
+ SHRQ CL, R14
+
+ // v1 := table[val1&mask]
+ MOVW (R10)(R14*2), CX
+
+ // br0.advance(uint8(v1.entry))
+ MOVB CH, AH
+ SHLQ CL, R12
+ ADDB CL, R13
+
+ // these two writes get coalesced
+ // out[id * dstEvery + 0] = uint8(v0.entry >> 8)
+ // out[id * dstEvery + 1] = uint8(v1.entry >> 8)
+ MOVW AX, (R8)
+
+ // update the bitreader structure
+ MOVQ R12, 32(R11)
+ MOVB R13, 40(R11)
+ ADDQ R9, R8
+
+ // br1.fillFast32()
+ MOVQ 80(R11), R12
+ MOVBQZX 88(R11), R13
+ CMPQ R13, $0x20
+ JBE skip_fill1
+ MOVQ 72(R11), AX
+ SUBQ $0x20, R13
+ SUBQ $0x04, AX
+ MOVQ 48(R11), R14
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+ MOVL (AX)(R14*1), R14
+ MOVQ R13, CX
+ SHLQ CL, R14
+ MOVQ AX, 72(R11)
+ ORQ R14, R12
+
+ // exhausted = exhausted || (br1.off < 4)
+ CMPQ AX, $0x04
+ SETLT AL
+ ORB AL, DL
+
+skip_fill1:
+ // val0 := br1.peekTopBits(peekBits)
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
+
+ // v0 := table[val0&mask]
+ MOVW (R10)(R14*2), CX
+
+ // br1.advance(uint8(v0.entry)
+ MOVB CH, AL
+ SHLQ CL, R12
+ ADDB CL, R13
+
+ // val1 := br1.peekTopBits(peekBits)
+ MOVQ DI, CX
+ MOVQ R12, R14
+ SHRQ CL, R14
+
+ // v1 := table[val1&mask]
+ MOVW (R10)(R14*2), CX
+
+ // br1.advance(uint8(v1.entry))
+ MOVB CH, AH
+ SHLQ CL, R12
+ ADDB CL, R13
+
+ // these two writes get coalesced
+ // out[id * dstEvery + 0] = uint8(v0.entry >> 8)
+ // out[id * dstEvery + 1] = uint8(v1.entry >> 8)
+ MOVW AX, (R8)
+
+ // update the bitreader structure
+ MOVQ R12, 80(R11)
+ MOVB R13, 88(R11)
+ ADDQ R9, R8
+
+ // br2.fillFast32()
+ MOVQ 128(R11), R12
+ MOVBQZX 136(R11), R13
+ CMPQ R13, $0x20
+ JBE skip_fill2
+ MOVQ 120(R11), AX
+ SUBQ $0x20, R13
+ SUBQ $0x04, AX
+ MOVQ 96(R11), R14
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+ MOVL (AX)(R14*1), R14
+ MOVQ R13, CX
+ SHLQ CL, R14
+ MOVQ AX, 120(R11)
+ ORQ R14, R12
+
+ // exhausted = exhausted || (br2.off < 4)
+ CMPQ AX, $0x04
+ SETLT AL
+ ORB AL, DL
+
+skip_fill2:
+ // val0 := br2.peekTopBits(peekBits)
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
+
+ // v0 := table[val0&mask]
+ MOVW (R10)(R14*2), CX
+
+ // br2.advance(uint8(v0.entry)
+ MOVB CH, AL
+ SHLQ CL, R12
+ ADDB CL, R13
+
+ // val1 := br2.peekTopBits(peekBits)
+ MOVQ DI, CX
+ MOVQ R12, R14
+ SHRQ CL, R14
+
+ // v1 := table[val1&mask]
+ MOVW (R10)(R14*2), CX
+
+ // br2.advance(uint8(v1.entry))
+ MOVB CH, AH
+ SHLQ CL, R12
+ ADDB CL, R13
+
+ // these two writes get coalesced
+ // out[id * dstEvery + 0] = uint8(v0.entry >> 8)
+ // out[id * dstEvery + 1] = uint8(v1.entry >> 8)
+ MOVW AX, (R8)
+
+ // update the bitreader structure
+ MOVQ R12, 128(R11)
+ MOVB R13, 136(R11)
+ ADDQ R9, R8
+
+ // br3.fillFast32()
+ MOVQ 176(R11), R12
+ MOVBQZX 184(R11), R13
+ CMPQ R13, $0x20
+ JBE skip_fill3
+ MOVQ 168(R11), AX
+ SUBQ $0x20, R13
+ SUBQ $0x04, AX
+ MOVQ 144(R11), R14
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+ MOVL (AX)(R14*1), R14
+ MOVQ R13, CX
+ SHLQ CL, R14
+ MOVQ AX, 168(R11)
+ ORQ R14, R12
+
+ // exhausted = exhausted || (br3.off < 4)
+ CMPQ AX, $0x04
+ SETLT AL
+ ORB AL, DL
+
+skip_fill3:
+ // val0 := br3.peekTopBits(peekBits)
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
+
+ // v0 := table[val0&mask]
+ MOVW (R10)(R14*2), CX
+
+ // br3.advance(uint8(v0.entry)
+ MOVB CH, AL
+ SHLQ CL, R12
+ ADDB CL, R13
+
+ // val1 := br3.peekTopBits(peekBits)
+ MOVQ DI, CX
+ MOVQ R12, R14
+ SHRQ CL, R14
+
+ // v1 := table[val1&mask]
+ MOVW (R10)(R14*2), CX
+
+ // br3.advance(uint8(v1.entry))
+ MOVB CH, AH
+ SHLQ CL, R12
+ ADDB CL, R13
+
+ // these two writes get coalesced
+ // out[id * dstEvery + 0] = uint8(v0.entry >> 8)
+ // out[id * dstEvery + 1] = uint8(v1.entry >> 8)
+ MOVW AX, (R8)
+
+ // update the bitreader structure
+ MOVQ R12, 176(R11)
+ MOVB R13, 184(R11)
+ ADDQ $0x02, SI
+ TESTB DL, DL
+ JZ main_loop
+ MOVQ ctx+0(FP), AX
+ SUBQ 16(AX), SI
+ SHLQ $0x02, SI
+ MOVQ SI, 40(AX)
+ RET
+
+// func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext)
+TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8
+ XORQ DX, DX
+
+ // Preload values
+ MOVQ ctx+0(FP), CX
+ MOVBQZX 8(CX), DI
+ MOVQ 16(CX), BX
+ MOVQ 48(CX), SI
+ MOVQ 24(CX), R9
+ MOVQ 32(CX), R10
+ MOVQ (CX), R11
+
+ // Main loop
+main_loop:
+ MOVQ BX, R8
+ CMPQ R8, SI
+ SETGE DL
+
+ // br0.fillFast32()
+ MOVQ 32(R11), R12
+ MOVBQZX 40(R11), R13
+ CMPQ R13, $0x20
+ JBE skip_fill0
+ MOVQ 24(R11), R14
+ SUBQ $0x20, R13
+ SUBQ $0x04, R14
+ MOVQ (R11), R15
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+ MOVL (R14)(R15*1), R15
+ MOVQ R13, CX
+ SHLQ CL, R15
+ MOVQ R14, 24(R11)
+ ORQ R15, R12
+
+ // exhausted = exhausted || (br0.off < 4)
+ CMPQ R14, $0x04
+ SETLT AL
+ ORB AL, DL
+
+skip_fill0:
+ // val0 := br0.peekTopBits(peekBits)
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
+
+ // v0 := table[val0&mask]
+ MOVW (R10)(R14*2), CX
+
+ // br0.advance(uint8(v0.entry)
+ MOVB CH, AL
+ SHLQ CL, R12
+ ADDB CL, R13
+
+ // val1 := br0.peekTopBits(peekBits)
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
+
+ // v1 := table[val0&mask]
+ MOVW (R10)(R14*2), CX
+
+ // br0.advance(uint8(v1.entry)
+ MOVB CH, AH
+ SHLQ CL, R12
+ ADDB CL, R13
+ BSWAPL AX
+
+ // val2 := br0.peekTopBits(peekBits)
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
+
+ // v2 := table[val0&mask]
+ MOVW (R10)(R14*2), CX
+
+ // br0.advance(uint8(v2.entry)
+ MOVB CH, AH
+ SHLQ CL, R12
+ ADDB CL, R13
+
+ // val3 := br0.peekTopBits(peekBits)
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
+
+ // v3 := table[val0&mask]
+ MOVW (R10)(R14*2), CX
+
+ // br0.advance(uint8(v3.entry)
+ MOVB CH, AL
+ SHLQ CL, R12
+ ADDB CL, R13
+ BSWAPL AX
+
+ // these four writes get coalesced
+ // out[id * dstEvery + 0] = uint8(v0.entry >> 8)
+ // out[id * dstEvery + 1] = uint8(v1.entry >> 8)
+ // out[id * dstEvery + 3] = uint8(v2.entry >> 8)
+ // out[id * dstEvery + 4] = uint8(v3.entry >> 8)
+ MOVL AX, (R8)
+
+ // update the bitreader structure
+ MOVQ R12, 32(R11)
+ MOVB R13, 40(R11)
+ ADDQ R9, R8
+
+ // br1.fillFast32()
+ MOVQ 80(R11), R12
+ MOVBQZX 88(R11), R13
+ CMPQ R13, $0x20
+ JBE skip_fill1
+ MOVQ 72(R11), R14
+ SUBQ $0x20, R13
+ SUBQ $0x04, R14
+ MOVQ 48(R11), R15
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+ MOVL (R14)(R15*1), R15
+ MOVQ R13, CX
+ SHLQ CL, R15
+ MOVQ R14, 72(R11)
+ ORQ R15, R12
+
+ // exhausted = exhausted || (br1.off < 4)
+ CMPQ R14, $0x04
+ SETLT AL
+ ORB AL, DL
+
+skip_fill1:
+ // val0 := br1.peekTopBits(peekBits)
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
+
+ // v0 := table[val0&mask]
+ MOVW (R10)(R14*2), CX
+
+ // br1.advance(uint8(v0.entry)
+ MOVB CH, AL
+ SHLQ CL, R12
+ ADDB CL, R13
+
+ // val1 := br1.peekTopBits(peekBits)
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
+
+ // v1 := table[val0&mask]
+ MOVW (R10)(R14*2), CX
+
+ // br1.advance(uint8(v1.entry)
+ MOVB CH, AH
+ SHLQ CL, R12
+ ADDB CL, R13
+ BSWAPL AX
+
+ // val2 := br1.peekTopBits(peekBits)
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
+
+ // v2 := table[val0&mask]
+ MOVW (R10)(R14*2), CX
+
+ // br1.advance(uint8(v2.entry)
+ MOVB CH, AH
+ SHLQ CL, R12
+ ADDB CL, R13
+
+ // val3 := br1.peekTopBits(peekBits)
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
+
+ // v3 := table[val0&mask]
+ MOVW (R10)(R14*2), CX
+
+ // br1.advance(uint8(v3.entry)
+ MOVB CH, AL
+ SHLQ CL, R12
+ ADDB CL, R13
+ BSWAPL AX
+
+ // these four writes get coalesced
+ // out[id * dstEvery + 0] = uint8(v0.entry >> 8)
+ // out[id * dstEvery + 1] = uint8(v1.entry >> 8)
+ // out[id * dstEvery + 3] = uint8(v2.entry >> 8)
+ // out[id * dstEvery + 4] = uint8(v3.entry >> 8)
+ MOVL AX, (R8)
+
+ // update the bitreader structure
+ MOVQ R12, 80(R11)
+ MOVB R13, 88(R11)
+ ADDQ R9, R8
+
+ // br2.fillFast32()
+ MOVQ 128(R11), R12
+ MOVBQZX 136(R11), R13
+ CMPQ R13, $0x20
+ JBE skip_fill2
+ MOVQ 120(R11), R14
+ SUBQ $0x20, R13
+ SUBQ $0x04, R14
+ MOVQ 96(R11), R15
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+ MOVL (R14)(R15*1), R15
+ MOVQ R13, CX
+ SHLQ CL, R15
+ MOVQ R14, 120(R11)
+ ORQ R15, R12
+
+ // exhausted = exhausted || (br2.off < 4)
+ CMPQ R14, $0x04
+ SETLT AL
+ ORB AL, DL
+
+skip_fill2:
+ // val0 := br2.peekTopBits(peekBits)
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
+
+ // v0 := table[val0&mask]
+ MOVW (R10)(R14*2), CX
+
+ // br2.advance(uint8(v0.entry)
+ MOVB CH, AL
+ SHLQ CL, R12
+ ADDB CL, R13
+
+ // val1 := br2.peekTopBits(peekBits)
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
+
+ // v1 := table[val0&mask]
+ MOVW (R10)(R14*2), CX
+
+ // br2.advance(uint8(v1.entry)
+ MOVB CH, AH
+ SHLQ CL, R12
+ ADDB CL, R13
+ BSWAPL AX
+
+ // val2 := br2.peekTopBits(peekBits)
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
+
+ // v2 := table[val0&mask]
+ MOVW (R10)(R14*2), CX
+
+ // br2.advance(uint8(v2.entry)
+ MOVB CH, AH
+ SHLQ CL, R12
+ ADDB CL, R13
+
+ // val3 := br2.peekTopBits(peekBits)
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
+
+ // v3 := table[val0&mask]
+ MOVW (R10)(R14*2), CX
+
+ // br2.advance(uint8(v3.entry)
+ MOVB CH, AL
+ SHLQ CL, R12
+ ADDB CL, R13
+ BSWAPL AX
+
+ // these four writes get coalesced
+ // out[id * dstEvery + 0] = uint8(v0.entry >> 8)
+ // out[id * dstEvery + 1] = uint8(v1.entry >> 8)
+ // out[id * dstEvery + 3] = uint8(v2.entry >> 8)
+ // out[id * dstEvery + 4] = uint8(v3.entry >> 8)
+ MOVL AX, (R8)
+
+ // update the bitreader structure
+ MOVQ R12, 128(R11)
+ MOVB R13, 136(R11)
+ ADDQ R9, R8
+
+ // br3.fillFast32()
+ MOVQ 176(R11), R12
+ MOVBQZX 184(R11), R13
+ CMPQ R13, $0x20
+ JBE skip_fill3
+ MOVQ 168(R11), R14
+ SUBQ $0x20, R13
+ SUBQ $0x04, R14
+ MOVQ 144(R11), R15
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+ MOVL (R14)(R15*1), R15
+ MOVQ R13, CX
+ SHLQ CL, R15
+ MOVQ R14, 168(R11)
+ ORQ R15, R12
+
+ // exhausted = exhausted || (br3.off < 4)
+ CMPQ R14, $0x04
+ SETLT AL
+ ORB AL, DL
+
+skip_fill3:
+ // val0 := br3.peekTopBits(peekBits)
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
+
+ // v0 := table[val0&mask]
+ MOVW (R10)(R14*2), CX
+
+ // br3.advance(uint8(v0.entry)
+ MOVB CH, AL
+ SHLQ CL, R12
+ ADDB CL, R13
+
+ // val1 := br3.peekTopBits(peekBits)
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
+
+ // v1 := table[val0&mask]
+ MOVW (R10)(R14*2), CX
+
+ // br3.advance(uint8(v1.entry)
+ MOVB CH, AH
+ SHLQ CL, R12
+ ADDB CL, R13
+ BSWAPL AX
+
+ // val2 := br3.peekTopBits(peekBits)
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
+
+ // v2 := table[val0&mask]
+ MOVW (R10)(R14*2), CX
+
+ // br3.advance(uint8(v2.entry)
+ MOVB CH, AH
+ SHLQ CL, R12
+ ADDB CL, R13
+
+ // val3 := br3.peekTopBits(peekBits)
+ MOVQ R12, R14
+ MOVQ DI, CX
+ SHRQ CL, R14
+
+ // v3 := table[val0&mask]
+ MOVW (R10)(R14*2), CX
+
+ // br3.advance(uint8(v3.entry)
+ MOVB CH, AL
+ SHLQ CL, R12
+ ADDB CL, R13
+ BSWAPL AX
+
+ // these four writes get coalesced
+ // out[id * dstEvery + 0] = uint8(v0.entry >> 8)
+ // out[id * dstEvery + 1] = uint8(v1.entry >> 8)
+ // out[id * dstEvery + 3] = uint8(v2.entry >> 8)
+ // out[id * dstEvery + 4] = uint8(v3.entry >> 8)
+ MOVL AX, (R8)
+
+ // update the bitreader structure
+ MOVQ R12, 176(R11)
+ MOVB R13, 184(R11)
+ ADDQ $0x04, BX
+ TESTB DL, DL
+ JZ main_loop
+ MOVQ ctx+0(FP), AX
+ SUBQ 16(AX), BX
+ SHLQ $0x02, BX
+ MOVQ BX, 40(AX)
+ RET
+
+// func decompress1x_main_loop_amd64(ctx *decompress1xContext)
+TEXT ·decompress1x_main_loop_amd64(SB), $0-8
+ MOVQ ctx+0(FP), CX
+ MOVQ 16(CX), DX
+ MOVQ 24(CX), BX
+ CMPQ BX, $0x04
+ JB error_max_decoded_size_exeeded
+ LEAQ (DX)(BX*1), BX
+ MOVQ (CX), SI
+ MOVQ (SI), R8
+ MOVQ 24(SI), R9
+ MOVQ 32(SI), R10
+ MOVBQZX 40(SI), R11
+ MOVQ 32(CX), SI
+ MOVBQZX 8(CX), DI
+ JMP loop_condition
+
+main_loop:
+ // Check if we have room for 4 bytes in the output buffer
+ LEAQ 4(DX), CX
+ CMPQ CX, BX
+ JGE error_max_decoded_size_exeeded
+
+ // Decode 4 values
+ CMPQ R11, $0x20
+ JL bitReader_fillFast_1_end
+ SUBQ $0x20, R11
+ SUBQ $0x04, R9
+ MOVL (R8)(R9*1), R12
+ MOVQ R11, CX
+ SHLQ CL, R12
+ ORQ R12, R10
+
+bitReader_fillFast_1_end:
+ MOVQ DI, CX
+ MOVQ R10, R12
+ SHRQ CL, R12
+ MOVW (SI)(R12*2), CX
+ MOVB CH, AL
+ MOVBQZX CL, CX
+ ADDQ CX, R11
+ SHLQ CL, R10
+ MOVQ DI, CX
+ MOVQ R10, R12
+ SHRQ CL, R12
+ MOVW (SI)(R12*2), CX
+ MOVB CH, AH
+ MOVBQZX CL, CX
+ ADDQ CX, R11
+ SHLQ CL, R10
+ BSWAPL AX
+ CMPQ R11, $0x20
+ JL bitReader_fillFast_2_end
+ SUBQ $0x20, R11
+ SUBQ $0x04, R9
+ MOVL (R8)(R9*1), R12
+ MOVQ R11, CX
+ SHLQ CL, R12
+ ORQ R12, R10
+
+bitReader_fillFast_2_end:
+ MOVQ DI, CX
+ MOVQ R10, R12
+ SHRQ CL, R12
+ MOVW (SI)(R12*2), CX
+ MOVB CH, AH
+ MOVBQZX CL, CX
+ ADDQ CX, R11
+ SHLQ CL, R10
+ MOVQ DI, CX
+ MOVQ R10, R12
+ SHRQ CL, R12
+ MOVW (SI)(R12*2), CX
+ MOVB CH, AL
+ MOVBQZX CL, CX
+ ADDQ CX, R11
+ SHLQ CL, R10
+ BSWAPL AX
+
+ // Store the decoded values
+ MOVL AX, (DX)
+ ADDQ $0x04, DX
+
+loop_condition:
+ CMPQ R9, $0x08
+ JGE main_loop
+
+ // Update ctx structure
+ MOVQ ctx+0(FP), AX
+ SUBQ 16(AX), DX
+ MOVQ DX, 40(AX)
+ MOVQ (AX), AX
+ MOVQ R9, 24(AX)
+ MOVQ R10, 32(AX)
+ MOVB R11, 40(AX)
+ RET
+
+ // Report error
+error_max_decoded_size_exeeded:
+ MOVQ ctx+0(FP), AX
+ MOVQ $-1, CX
+ MOVQ CX, 40(AX)
+ RET
+
+// func decompress1x_main_loop_bmi2(ctx *decompress1xContext)
+// Requires: BMI2
+TEXT ·decompress1x_main_loop_bmi2(SB), $0-8
+ MOVQ ctx+0(FP), CX
+ MOVQ 16(CX), DX
+ MOVQ 24(CX), BX
+ CMPQ BX, $0x04
+ JB error_max_decoded_size_exeeded
+ LEAQ (DX)(BX*1), BX
+ MOVQ (CX), SI
+ MOVQ (SI), R8
+ MOVQ 24(SI), R9
+ MOVQ 32(SI), R10
+ MOVBQZX 40(SI), R11
+ MOVQ 32(CX), SI
+ MOVBQZX 8(CX), DI
+ JMP loop_condition
+
+main_loop:
+ // Check if we have room for 4 bytes in the output buffer
+ LEAQ 4(DX), CX
+ CMPQ CX, BX
+ JGE error_max_decoded_size_exeeded
+
+ // Decode 4 values
+ CMPQ R11, $0x20
+ JL bitReader_fillFast_1_end
+ SUBQ $0x20, R11
+ SUBQ $0x04, R9
+ MOVL (R8)(R9*1), CX
+ SHLXQ R11, CX, CX
+ ORQ CX, R10
+
+bitReader_fillFast_1_end:
+ SHRXQ DI, R10, CX
+ MOVW (SI)(CX*2), CX
+ MOVB CH, AL
+ MOVBQZX CL, CX
+ ADDQ CX, R11
+ SHLXQ CX, R10, R10
+ SHRXQ DI, R10, CX
+ MOVW (SI)(CX*2), CX
+ MOVB CH, AH
+ MOVBQZX CL, CX
+ ADDQ CX, R11
+ SHLXQ CX, R10, R10
+ BSWAPL AX
+ CMPQ R11, $0x20
+ JL bitReader_fillFast_2_end
+ SUBQ $0x20, R11
+ SUBQ $0x04, R9
+ MOVL (R8)(R9*1), CX
+ SHLXQ R11, CX, CX
+ ORQ CX, R10
+
+bitReader_fillFast_2_end:
+ SHRXQ DI, R10, CX
+ MOVW (SI)(CX*2), CX
+ MOVB CH, AH
+ MOVBQZX CL, CX
+ ADDQ CX, R11
+ SHLXQ CX, R10, R10
+ SHRXQ DI, R10, CX
+ MOVW (SI)(CX*2), CX
+ MOVB CH, AL
+ MOVBQZX CL, CX
+ ADDQ CX, R11
+ SHLXQ CX, R10, R10
+ BSWAPL AX
+
+ // Store the decoded values
+ MOVL AX, (DX)
+ ADDQ $0x04, DX
+
+loop_condition:
+ CMPQ R9, $0x08
+ JGE main_loop
+
+ // Update ctx structure
+ MOVQ ctx+0(FP), AX
+ SUBQ 16(AX), DX
+ MOVQ DX, 40(AX)
+ MOVQ (AX), AX
+ MOVQ R9, 24(AX)
+ MOVQ R10, 32(AX)
+ MOVB R11, 40(AX)
+ RET
+
+ // Report error
+error_max_decoded_size_exeeded:
+ MOVQ ctx+0(FP), AX
+ MOVQ $-1, CX
+ MOVQ CX, 40(AX)
+ RET
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go
new file mode 100644
index 0000000000000..908c17de63fc8
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go
@@ -0,0 +1,299 @@
+//go:build !amd64 || appengine || !gc || noasm
+// +build !amd64 appengine !gc noasm
+
+// This file contains a generic implementation of Decoder.Decompress4X
+// and Decoder.Decompress1X.
+package huff0
+
+import (
+ "errors"
+ "fmt"
+)
+
+// Decompress4X will decompress a 4X encoded stream.
+// The length of the supplied input must match the end of a block exactly.
+// The *capacity* of the dst slice must match the destination size of
+// the uncompressed data exactly.
+func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
+ if len(d.dt.single) == 0 {
+ return nil, errors.New("no table loaded")
+ }
+ if len(src) < 6+(4*1) {
+ return nil, errors.New("input too small")
+ }
+ if use8BitTables && d.actualTableLog <= 8 {
+ return d.decompress4X8bit(dst, src)
+ }
+
+ var br [4]bitReaderShifted
+ // Decode "jump table"
+ start := 6
+ for i := 0; i < 3; i++ {
+ length := int(src[i*2]) | (int(src[i*2+1]) << 8)
+ if start+length >= len(src) {
+ return nil, errors.New("truncated input (or invalid offset)")
+ }
+ err := br[i].init(src[start : start+length])
+ if err != nil {
+ return nil, err
+ }
+ start += length
+ }
+ err := br[3].init(src[start:])
+ if err != nil {
+ return nil, err
+ }
+
+ // destination, offset to match first output
+ dstSize := cap(dst)
+ dst = dst[:dstSize]
+ out := dst
+ dstEvery := (dstSize + 3) / 4
+
+ const tlSize = 1 << tableLogMax
+ const tlMask = tlSize - 1
+ single := d.dt.single[:tlSize]
+
+ // Use temp table to avoid bound checks/append penalty.
+ buf := d.buffer()
+ var off uint8
+ var decoded int
+
+ // Decode 2 values from each decoder/loop.
+ const bufoff = 256
+ for {
+ if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
+ break
+ }
+
+ {
+ const stream = 0
+ const stream2 = 1
+ br[stream].fillFast()
+ br[stream2].fillFast()
+
+ val := br[stream].peekBitsFast(d.actualTableLog)
+ val2 := br[stream2].peekBitsFast(d.actualTableLog)
+ v := single[val&tlMask]
+ v2 := single[val2&tlMask]
+ br[stream].advance(uint8(v.entry))
+ br[stream2].advance(uint8(v2.entry))
+ buf[stream][off] = uint8(v.entry >> 8)
+ buf[stream2][off] = uint8(v2.entry >> 8)
+
+ val = br[stream].peekBitsFast(d.actualTableLog)
+ val2 = br[stream2].peekBitsFast(d.actualTableLog)
+ v = single[val&tlMask]
+ v2 = single[val2&tlMask]
+ br[stream].advance(uint8(v.entry))
+ br[stream2].advance(uint8(v2.entry))
+ buf[stream][off+1] = uint8(v.entry >> 8)
+ buf[stream2][off+1] = uint8(v2.entry >> 8)
+ }
+
+ {
+ const stream = 2
+ const stream2 = 3
+ br[stream].fillFast()
+ br[stream2].fillFast()
+
+ val := br[stream].peekBitsFast(d.actualTableLog)
+ val2 := br[stream2].peekBitsFast(d.actualTableLog)
+ v := single[val&tlMask]
+ v2 := single[val2&tlMask]
+ br[stream].advance(uint8(v.entry))
+ br[stream2].advance(uint8(v2.entry))
+ buf[stream][off] = uint8(v.entry >> 8)
+ buf[stream2][off] = uint8(v2.entry >> 8)
+
+ val = br[stream].peekBitsFast(d.actualTableLog)
+ val2 = br[stream2].peekBitsFast(d.actualTableLog)
+ v = single[val&tlMask]
+ v2 = single[val2&tlMask]
+ br[stream].advance(uint8(v.entry))
+ br[stream2].advance(uint8(v2.entry))
+ buf[stream][off+1] = uint8(v.entry >> 8)
+ buf[stream2][off+1] = uint8(v2.entry >> 8)
+ }
+
+ off += 2
+
+ if off == 0 {
+ if bufoff > dstEvery {
+ d.bufs.Put(buf)
+ return nil, errors.New("corruption detected: stream overrun 1")
+ }
+ // There must at least be 3 buffers left.
+ if len(out)-bufoff < dstEvery*3 {
+ d.bufs.Put(buf)
+ return nil, errors.New("corruption detected: stream overrun 2")
+ }
+ //copy(out, buf[0][:])
+ //copy(out[dstEvery:], buf[1][:])
+ //copy(out[dstEvery*2:], buf[2][:])
+ //copy(out[dstEvery*3:], buf[3][:])
+ *(*[bufoff]byte)(out) = buf[0]
+ *(*[bufoff]byte)(out[dstEvery:]) = buf[1]
+ *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2]
+ *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3]
+ out = out[bufoff:]
+ decoded += bufoff * 4
+ }
+ }
+ if off > 0 {
+ ioff := int(off)
+ if len(out) < dstEvery*3+ioff {
+ d.bufs.Put(buf)
+ return nil, errors.New("corruption detected: stream overrun 3")
+ }
+ copy(out, buf[0][:off])
+ copy(out[dstEvery:], buf[1][:off])
+ copy(out[dstEvery*2:], buf[2][:off])
+ copy(out[dstEvery*3:], buf[3][:off])
+ decoded += int(off) * 4
+ out = out[off:]
+ }
+
+ // Decode remaining.
+ remainBytes := dstEvery - (decoded / 4)
+ for i := range br {
+ offset := dstEvery * i
+ endsAt := offset + remainBytes
+ if endsAt > len(out) {
+ endsAt = len(out)
+ }
+ br := &br[i]
+ bitsLeft := br.remaining()
+ for bitsLeft > 0 {
+ br.fill()
+ if offset >= endsAt {
+ d.bufs.Put(buf)
+ return nil, errors.New("corruption detected: stream overrun 4")
+ }
+
+ // Read value and increment offset.
+ val := br.peekBitsFast(d.actualTableLog)
+ v := single[val&tlMask].entry
+ nBits := uint8(v)
+ br.advance(nBits)
+ bitsLeft -= uint(nBits)
+ out[offset] = uint8(v >> 8)
+ offset++
+ }
+ if offset != endsAt {
+ d.bufs.Put(buf)
+ return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
+ }
+ decoded += offset - dstEvery*i
+ err = br.close()
+ if err != nil {
+ return nil, err
+ }
+ }
+ d.bufs.Put(buf)
+ if dstSize != decoded {
+ return nil, errors.New("corruption detected: short output block")
+ }
+ return dst, nil
+}
+
+// Decompress1X will decompress a 1X encoded stream.
+// The cap of the output buffer will be the maximum decompressed size.
+// The length of the supplied input must match the end of a block exactly.
+func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
+ if len(d.dt.single) == 0 {
+ return nil, errors.New("no table loaded")
+ }
+ if use8BitTables && d.actualTableLog <= 8 {
+ return d.decompress1X8Bit(dst, src)
+ }
+ var br bitReaderShifted
+ err := br.init(src)
+ if err != nil {
+ return dst, err
+ }
+ maxDecodedSize := cap(dst)
+ dst = dst[:0]
+
+ // Avoid bounds check by always having full sized table.
+ const tlSize = 1 << tableLogMax
+ const tlMask = tlSize - 1
+ dt := d.dt.single[:tlSize]
+
+ // Use temp table to avoid bound checks/append penalty.
+ bufs := d.buffer()
+ buf := &bufs[0]
+ var off uint8
+
+ for br.off >= 8 {
+ br.fillFast()
+ v := dt[br.peekBitsFast(d.actualTableLog)&tlMask]
+ br.advance(uint8(v.entry))
+ buf[off+0] = uint8(v.entry >> 8)
+
+ v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
+ br.advance(uint8(v.entry))
+ buf[off+1] = uint8(v.entry >> 8)
+
+ // Refill
+ br.fillFast()
+
+ v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
+ br.advance(uint8(v.entry))
+ buf[off+2] = uint8(v.entry >> 8)
+
+ v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
+ br.advance(uint8(v.entry))
+ buf[off+3] = uint8(v.entry >> 8)
+
+ off += 4
+ if off == 0 {
+ if len(dst)+256 > maxDecodedSize {
+ br.close()
+ d.bufs.Put(bufs)
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ dst = append(dst, buf[:]...)
+ }
+ }
+
+ if len(dst)+int(off) > maxDecodedSize {
+ d.bufs.Put(bufs)
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ dst = append(dst, buf[:off]...)
+
+ // br < 8, so uint8 is fine
+ bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead
+ for bitsLeft > 0 {
+ br.fill()
+ if false && br.bitsRead >= 32 {
+ if br.off >= 4 {
+ v := br.in[br.off-4:]
+ v = v[:4]
+ low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ br.value = (br.value << 32) | uint64(low)
+ br.bitsRead -= 32
+ br.off -= 4
+ } else {
+ for br.off > 0 {
+ br.value = (br.value << 8) | uint64(br.in[br.off-1])
+ br.bitsRead -= 8
+ br.off--
+ }
+ }
+ }
+ if len(dst) >= maxDecodedSize {
+ d.bufs.Put(bufs)
+ br.close()
+ return nil, ErrMaxDecodedSizeExceeded
+ }
+ v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask]
+ nBits := uint8(v.entry)
+ br.advance(nBits)
+ bitsLeft -= nBits
+ dst = append(dst, uint8(v.entry>>8))
+ }
+ d.bufs.Put(bufs)
+ return dst, br.close()
+}
diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go
index 3ee00ecb470ab..e8ad17ad08ef1 100644
--- a/vendor/github.com/klauspost/compress/huff0/huff0.go
+++ b/vendor/github.com/klauspost/compress/huff0/huff0.go
@@ -8,6 +8,7 @@ import (
"fmt"
"math"
"math/bits"
+ "sync"
"github.com/klauspost/compress/fse"
)
@@ -116,6 +117,7 @@ type Scratch struct {
nodes []nodeElt
tmpOut [4][]byte
fse *fse.Scratch
+ decPool sync.Pool // *[4][256]byte buffers.
huffWeight [maxSymbolValue + 1]byte
}
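The new decPool field is what backs the d.buffer() / d.bufs.Put(buf) pairs threaded through the decode loops above: each call borrows a *[4][256]byte scratch block and returns it when done, so the hot path allocates nothing. The accessor itself is not part of this hunk; a minimal sketch of the pattern (the method name here is illustrative) would be:

func (s *Scratch) decodeBuffer() *[4][256]byte {
	if v := s.decPool.Get(); v != nil {
		return v.(*[4][256]byte)
	}
	return new([4][256]byte)
}

// After decoding, the caller hands the block back for reuse, e.g. s.decPool.Put(buf).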
diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go
new file mode 100644
index 0000000000000..3954c51219b2b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go
@@ -0,0 +1,34 @@
+// Package cpuinfo gives runtime info about the current CPU.
+//
+// This is a very limited module meant for use internally
+// in this project. For a more versatile solution check
+// https://github.com/klauspost/cpuid.
+package cpuinfo
+
+// HasBMI1 checks whether an x86 CPU supports the BMI1 extension.
+func HasBMI1() bool {
+ return hasBMI1
+}
+
+// HasBMI2 checks whether an x86 CPU supports the BMI2 extension.
+func HasBMI2() bool {
+ return hasBMI2
+}
+
+// DisableBMI2 will disable BMI2, for testing purposes.
+// Call returned function to restore previous state.
+func DisableBMI2() func() {
+ old := hasBMI2
+ hasBMI2 = false
+ return func() {
+ hasBMI2 = old
+ }
+}
+
+// HasBMI checks whether an x86 CPU supports both BMI1 and BMI2 extensions.
+func HasBMI() bool {
+ return HasBMI1() && HasBMI2()
+}
+
+var hasBMI1 bool
+var hasBMI2 bool
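DisableBMI2 exists so tests can force the non-BMI2 code path on machines that do support BMI2; Decompress1X above, for example, picks its main loop based on HasBMI2. A sketch of how a test inside this module might use it, relying only on the functions defined here (the test name and body are illustrative):

func TestDecompress1XNoBMI2(t *testing.T) {
	defer cpuinfo.DisableBMI2()() // restore the real value when the test ends

	if cpuinfo.HasBMI2() {
		t.Fatal("BMI2 should be reported as disabled")
	}
	// ... run the same decode cases as the BMI2 variant here ...
}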
diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go
new file mode 100644
index 0000000000000..e802579c4f967
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go
@@ -0,0 +1,11 @@
+//go:build amd64 && !appengine && !noasm && gc
+// +build amd64,!appengine,!noasm,gc
+
+package cpuinfo
+
+//go:noescape
+func x86extensions() (bmi1, bmi2 bool)
+
+func init() {
+ hasBMI1, hasBMI2 = x86extensions()
+}
diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s
new file mode 100644
index 0000000000000..4465fbe9e905b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s
@@ -0,0 +1,36 @@
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+#include "funcdata.h"
+#include "go_asm.h"
+
+TEXT ·x86extensions(SB), NOSPLIT, $0
+ // 1. determine max EAX value
+ XORQ AX, AX
+ CPUID
+
+ CMPQ AX, $7
+ JB unsupported
+
+ // 2. EAX = 7, ECX = 0 --- see Table 3-8 "Information Returned by CPUID Instruction"
+ MOVQ $7, AX
+ MOVQ $0, CX
+ CPUID
+
+ BTQ $3, BX // bit 3 = BMI1
+ SETCS AL
+
+ BTQ $8, BX // bit 8 = BMI2
+ SETCS AH
+
+ MOVB AL, bmi1+0(FP)
+ MOVB AH, bmi2+1(FP)
+ RET
+
+unsupported:
+ XORQ AX, AX
+ MOVB AL, bmi1+0(FP)
+ MOVB AL, bmi2+1(FP)
+ RET
diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
index 511bba65db8f6..298c4f8e97da8 100644
--- a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
+++ b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
@@ -18,6 +18,7 @@ func load64(b []byte, i int) uint64 {
// emitLiteral writes a literal chunk and returns the number of bytes written.
//
// It assumes that:
+//
// dst is long enough to hold the encoded bytes
// 1 <= len(lit) && len(lit) <= 65536
func emitLiteral(dst, lit []byte) int {
@@ -42,6 +43,7 @@ func emitLiteral(dst, lit []byte) int {
// emitCopy writes a copy chunk and returns the number of bytes written.
//
// It assumes that:
+//
// dst is long enough to hold the encoded bytes
// 1 <= offset && offset <= 65535
// 4 <= length && length <= 65535
@@ -89,6 +91,7 @@ func emitCopy(dst []byte, offset, length int) int {
// src[i:i+k-j] and src[j:k] have the same contents.
//
// It assumes that:
+//
// 0 <= i && i < j && j <= len(src)
func extendMatch(src []byte, i, j int) int {
for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
@@ -105,8 +108,9 @@ func hash(u, shift uint32) uint32 {
// been written.
//
// It also assumes that:
+//
// len(dst) >= MaxEncodedLen(len(src)) &&
-// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlock(dst, src []byte) (d int) {
// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
// The table element type is uint16, as s < sLimit and sLimit < len(src)
diff --git a/vendor/github.com/klauspost/compress/s2/README.md b/vendor/github.com/klauspost/compress/s2/README.md
index e6716aeaee998..1d80c42a530b3 100644
--- a/vendor/github.com/klauspost/compress/s2/README.md
+++ b/vendor/github.com/klauspost/compress/s2/README.md
@@ -19,6 +19,7 @@ This is important, so you don't have to worry about spending CPU cycles on alrea
* Adjustable compression (3 levels)
* Concurrent stream compression
* Faster decompression, even for Snappy compatible content
+* Concurrent Snappy/S2 stream decompression
* Ability to quickly skip forward in compressed stream
* Random seeking with indexes
* Compatible with reading Snappy compressed content
@@ -324,35 +325,35 @@ The content compressed in this mode is fully compatible with the standard decode
Snappy vs S2 **compression** speed on 16 core (32 thread) computer, using all threads and a single thread (1 CPU):
-| File | S2 speed | S2 Throughput | S2 % smaller | S2 "better" | "better" throughput | "better" % smaller |
-|-----------------------------------------------------------------------------------------------------|----------|---------------|--------------|-------------|---------------------|--------------------|
-| [rawstudio-mint14.tar](https://files.klauspost.com/compress/rawstudio-mint14.7z) | 12.70x | 10556 MB/s | 7.35% | 4.15x | 3455 MB/s | 12.79% |
-| (1 CPU) | 1.14x | 948 MB/s | - | 0.42x | 349 MB/s | - |
-| [github-june-2days-2019.json](https://files.klauspost.com/compress/github-june-2days-2019.json.zst) | 17.13x | 14484 MB/s | 31.60% | 10.09x | 8533 MB/s | 37.71% |
-| (1 CPU) | 1.33x | 1127 MB/s | - | 0.70x | 589 MB/s | - |
-| [github-ranks-backup.bin](https://files.klauspost.com/compress/github-ranks-backup.bin.zst) | 15.14x | 12000 MB/s | -5.79% | 6.59x | 5223 MB/s | 5.80% |
-| (1 CPU) | 1.11x | 877 MB/s | - | 0.47x | 370 MB/s | - |
-| [consensus.db.10gb](https://files.klauspost.com/compress/consensus.db.10gb.zst) | 14.62x | 12116 MB/s | 15.90% | 5.35x | 4430 MB/s | 16.08% |
-| (1 CPU) | 1.38x | 1146 MB/s | - | 0.38x | 312 MB/s | - |
-| [adresser.json](https://files.klauspost.com/compress/adresser.json.zst) | 8.83x | 17579 MB/s | 43.86% | 6.54x | 13011 MB/s | 47.23% |
-| (1 CPU) | 1.14x | 2259 MB/s | - | 0.74x | 1475 MB/s | - |
-| [gob-stream](https://files.klauspost.com/compress/gob-stream.7z) | 16.72x | 14019 MB/s | 24.02% | 10.11x | 8477 MB/s | 30.48% |
-| (1 CPU) | 1.24x | 1043 MB/s | - | 0.70x | 586 MB/s | - |
-| [10gb.tar](http://mattmahoney.net/dc/10gb.html) | 13.33x | 9254 MB/s | 1.84% | 6.75x | 4686 MB/s | 6.72% |
-| (1 CPU) | 0.97x | 672 MB/s | - | 0.53x | 366 MB/s | - |
-| sharnd.out.2gb | 2.11x | 12639 MB/s | 0.01% | 1.98x | 11833 MB/s | 0.01% |
-| (1 CPU) | 0.93x | 5594 MB/s | - | 1.34x | 8030 MB/s | - |
-| [enwik9](http://mattmahoney.net/dc/textdata.html) | 19.34x | 8220 MB/s | 3.98% | 7.87x | 3345 MB/s | 15.82% |
-| (1 CPU) | 1.06x | 452 MB/s | - | 0.50x | 213 MB/s | - |
-| [silesia.tar](http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip) | 10.48x | 6124 MB/s | 5.67% | 3.76x | 2197 MB/s | 12.60% |
-| (1 CPU) | 0.97x | 568 MB/s | - | 0.46x | 271 MB/s | - |
-| [enwik10](https://encode.su/threads/3315-enwik10-benchmark-results) | 21.07x | 9020 MB/s | 6.36% | 6.91x | 2959 MB/s | 16.95% |
-| (1 CPU) | 1.07x | 460 MB/s | - | 0.51x | 220 MB/s | - |
+| File | S2 Speed | S2 Throughput | S2 % smaller | S2 "better" | "better" throughput | "better" % smaller |
+|---------------------------------------------------------------------------------------------------------|----------|---------------|--------------|-------------|---------------------|--------------------|
+| [rawstudio-mint14.tar](https://files.klauspost.com/compress/rawstudio-mint14.7z) | 16.33x | 10556 MB/s | 8.0% | 6.04x | 5252 MB/s | 14.7% |
+| (1 CPU) | 1.08x | 940 MB/s | - | 0.46x | 400 MB/s | - |
+| [github-june-2days-2019.json](https://files.klauspost.com/compress/github-june-2days-2019.json.zst) | 16.51x | 15224 MB/s | 31.70% | 9.47x | 8734 MB/s | 37.71% |
+| (1 CPU) | 1.26x | 1157 MB/s | - | 0.60x | 556 MB/s | - |
+| [github-ranks-backup.bin](https://files.klauspost.com/compress/github-ranks-backup.bin.zst) | 15.14x | 12598 MB/s | -5.76% | 6.23x | 5675 MB/s | 3.62% |
+| (1 CPU) | 1.02x | 932 MB/s | - | 0.47x | 432 MB/s | - |
+| [consensus.db.10gb](https://files.klauspost.com/compress/consensus.db.10gb.zst) | 11.21x | 12116 MB/s | 15.95% | 3.24x | 3500 MB/s | 18.00% |
+| (1 CPU) | 1.05x | 1135 MB/s | - | 0.27x | 292 MB/s | - |
+| [apache.log](https://files.klauspost.com/compress/apache.log.zst) | 8.55x | 16673 MB/s | 20.54% | 5.85x | 11420 MB/s | 24.97% |
+| (1 CPU) | 1.91x | 1771 MB/s | - | 0.53x | 1041 MB/s | - |
+| [gob-stream](https://files.klauspost.com/compress/gob-stream.7z) | 15.76x | 14357 MB/s | 24.01% | 8.67x | 7891 MB/s | 33.68% |
+| (1 CPU) | 1.17x | 1064 MB/s | - | 0.65x | 595 MB/s | - |
+| [10gb.tar](http://mattmahoney.net/dc/10gb.html) | 13.33x | 9835 MB/s | 2.34% | 6.85x | 4863 MB/s | 9.96% |
+| (1 CPU) | 0.97x | 689 MB/s | - | 0.55x | 387 MB/s | - |
+| sharnd.out.2gb | 9.11x | 13213 MB/s | 0.01% | 1.49x | 9184 MB/s | 0.01% |
+| (1 CPU) | 0.88x | 5418 MB/s | - | 0.77x | 5417 MB/s | - |
+| [sofia-air-quality-dataset csv](https://files.klauspost.com/compress/sofia-air-quality-dataset.tar.zst) | 22.00x | 11477 MB/s | 18.73% | 11.15x | 5817 MB/s | 27.88% |
+| (1 CPU) | 1.23x | 642 MB/s | - | 0.71x | 642 MB/s | - |
+| [silesia.tar](http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip) | 11.23x | 6520 MB/s | 5.9% | 5.35x | 3109 MB/s | 15.88% |
+| (1 CPU) | 1.05x | 607 MB/s | - | 0.52x | 304 MB/s | - |
+| [enwik9](https://files.klauspost.com/compress/enwik9.zst) | 19.28x | 8440 MB/s | 4.04% | 9.31x | 4076 MB/s | 18.04% |
+| (1 CPU) | 1.12x | 488 MB/s | - | 0.57x | 250 MB/s | - |
### Legend
-* `S2 speed`: Speed of S2 compared to Snappy, using 16 cores and 1 core.
-* `S2 throughput`: Throughput of S2 in MB/s.
+* `S2 Speed`: Speed of S2 compared to Snappy, using 16 cores and 1 core.
+* `S2 Throughput`: Throughput of S2 in MB/s.
* `S2 % smaller`: How many percent of the Snappy output size is S2 better.
* `S2 "better"`: Speed when enabling "better" compression mode in S2 compared to Snappy.
* `"better" throughput`: Speed when enabling "better" compression mode in S2 compared to Snappy.
@@ -360,7 +361,7 @@ Snappy vs S2 **compression** speed on 16 core (32 thread) computer, using all th
There is a good speedup across the board when using a single thread and a significant speedup when using multiple threads.
-Machine generated data gets by far the biggest compression boost, with size being being reduced by up to 45% of Snappy size.
+Machine generated data gets by far the biggest compression boost, with size being reduced by up to 35% of Snappy size.
The "better" compression mode sees a good improvement in all cases, but usually at a performance cost.
@@ -403,18 +404,37 @@ The "better" compression mode will actively look for shorter matches, which is w
Without assembly decompression is also very fast; single goroutine decompression speed. No assembly:
 | File                           | Speedup vs Snappy | S2 throughput |
-|--------------------------------|--------------|---------------|
-| consensus.db.10gb.s2 | 1.84x | 2289.8 MB/s |
-| 10gb.tar.s2 | 1.30x | 867.07 MB/s |
-| rawstudio-mint14.tar.s2 | 1.66x | 1329.65 MB/s |
-| github-june-2days-2019.json.s2 | 2.36x | 1831.59 MB/s |
-| github-ranks-backup.bin.s2 | 1.73x | 1390.7 MB/s |
-| enwik9.s2 | 1.67x | 681.53 MB/s |
-| adresser.json.s2 | 3.41x | 4230.53 MB/s |
-| silesia.tar.s2 | 1.52x | 811.58 |
+|--------------------------------|---------------|---------------|
+| consensus.db.10gb.s2 | 1.84x | 2289.8 MB/s |
+| 10gb.tar.s2 | 1.30x | 867.07 MB/s |
+| rawstudio-mint14.tar.s2 | 1.66x | 1329.65 MB/s |
+| github-june-2days-2019.json.s2 | 2.36x | 1831.59 MB/s |
+| github-ranks-backup.bin.s2 | 1.73x | 1390.7 MB/s |
+| enwik9.s2 | 1.67x | 681.53 MB/s |
+| adresser.json.s2 | 3.41x | 4230.53 MB/s |
+| silesia.tar.s2                 | 1.52x         | 811.58 MB/s   |
Even though S2 typically compresses better than Snappy, decompression speed is always better.
+### Concurrent Stream Decompression
+
+For full stream decompression S2 offers a [DecodeConcurrent](https://pkg.go.dev/github.com/klauspost/compress/s2#Reader.DecodeConcurrent)
+that will decode a full stream using multiple goroutines.
+
+Example scaling, AMD Ryzen 3950X, 16 cores, decompression using `s2d -bench=3 <input>`, best of 3:
+
+| Input | `-cpu=1` | `-cpu=2` | `-cpu=4` | `-cpu=8` | `-cpu=16` |
+|-------------------------------------------|------------|------------|------------|------------|-------------|
+| enwik10.snappy | 1098.6MB/s | 1819.8MB/s | 3625.6MB/s | 6910.6MB/s | 10818.2MB/s |
+| enwik10.s2 | 1303.5MB/s | 2606.1MB/s | 4847.9MB/s | 8878.4MB/s | 9592.1MB/s |
+| sofia-air-quality-dataset.tar.snappy | 1302.0MB/s | 2165.0MB/s | 4244.5MB/s | 8241.0MB/s | 12920.5MB/s |
+| sofia-air-quality-dataset.tar.s2 | 1399.2MB/s | 2463.2MB/s | 5196.5MB/s | 9639.8MB/s | 11439.5MB/s |
+| sofia-air-quality-dataset.tar.s2 (no asm) | 837.5MB/s | 1652.6MB/s | 3183.6MB/s | 5945.0MB/s | 9620.7MB/s |
+
+Scaling can be expected to be pretty linear until memory bandwidth is saturated.
+
+For now DecodeConcurrent can only be used for full streams without seeking or combining with regular reads.
+
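A minimal usage sketch, assuming the `DecodeConcurrent(w io.Writer, concurrent int) (int64, error)` shape described in the linked godoc (error handling trimmed):

```go
func decompressStream(dst io.Writer, src io.Reader) (int64, error) {
	r := s2.NewReader(src)
	// One decoding goroutine per CPU; the stream must be consumed in full,
	// and DecodeConcurrent cannot be mixed with Read or Skip on the same Reader.
	return r.DecodeConcurrent(dst, runtime.GOMAXPROCS(0))
}
```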
## Block compression
@@ -430,14 +450,14 @@ The most reliable is a wide dataset.
For this we use [`webdevdata.org-2015-01-07-subset`](https://files.klauspost.com/compress/webdevdata.org-2015-01-07-4GB-subset.7z),
53927 files, total input size: 4,014,735,833 bytes. Single goroutine used.
-| * | Input | Output | Reduction | MB/s |
-|-------------------|------------|------------|-----------|--------|
-| S2 | 4014735833 | 1059723369 | 73.60% | **934.34** |
-| S2 Better | 4014735833 | 969670507 | 75.85% | 532.70 |
-| S2 Best | 4014735833 | 906625668 | **77.85%** | 46.84 |
-| Snappy | 4014735833 | 1128706759 | 71.89% | 762.59 |
-| S2, Snappy Output | 4014735833 | 1093821420 | 72.75% | 908.60 |
-| LZ4 | 4014735833 | 1079259294 | 73.12% | 526.94 |
+| * | Input | Output | Reduction | MB/s |
+|-------------------|------------|------------|------------|------------|
+| S2 | 4014735833 | 1059723369 | 73.60% | **936.73** |
+| S2 Better | 4014735833 | 961580539 | 76.05% | 451.10 |
+| S2 Best | 4014735833 | 899182886 | **77.60%** | 46.84 |
+| Snappy | 4014735833 | 1128706759 | 71.89% | 790.15 |
+| S2, Snappy Output | 4014735833 | 1093823291 | 72.75% | 936.60 |
+| LZ4 | 4014735833 | 1063768713 | 73.50% | 452.02 |
S2 delivers both the best single threaded throughput with regular mode and the best compression rate with "best".
"Better" mode provides the same compression speed as LZ4 with better compression ratio.
@@ -469,42 +489,23 @@ AMD64 assembly is use for both S2 and Snappy.
| Absolute Perf | Snappy size | S2 Size | Snappy Speed | S2 Speed | Snappy dec | S2 dec |
|-----------------------|-------------|---------|--------------|-------------|-------------|-------------|
-| html | 22843 | 21111 | 16246 MB/s | 17438 MB/s | 40972 MB/s | 49263 MB/s |
-| urls.10K | 335492 | 287326 | 7943 MB/s | 9693 MB/s | 22523 MB/s | 26484 MB/s |
-| fireworks.jpeg | 123034 | 123100 | 349544 MB/s | 273889 MB/s | 718321 MB/s | 827552 MB/s |
-| fireworks.jpeg (200B) | 146 | 155 | 8869 MB/s | 17773 MB/s | 33691 MB/s | 52421 MB/s |
-| paper-100k.pdf | 85304 | 84459 | 167546 MB/s | 101263 MB/s | 326905 MB/s | 291944 MB/s |
-| html_x_4 | 92234 | 21113 | 15194 MB/s | 50670 MB/s | 30843 MB/s | 32217 MB/s |
-| alice29.txt | 88034 | 85975 | 5936 MB/s | 6139 MB/s | 12882 MB/s | 20044 MB/s |
-| asyoulik.txt | 77503 | 79650 | 5517 MB/s | 6366 MB/s | 12735 MB/s | 22806 MB/s |
-| lcet10.txt | 234661 | 220670 | 6235 MB/s | 6067 MB/s | 14519 MB/s | 18697 MB/s |
-| plrabn12.txt | 319267 | 317985 | 5159 MB/s | 5726 MB/s | 11923 MB/s | 19901 MB/s |
-| geo.protodata | 23335 | 18690 | 21220 MB/s | 26529 MB/s | 56271 MB/s | 62540 MB/s |
-| kppkn.gtb | 69526 | 65312 | 9732 MB/s | 8559 MB/s | 18491 MB/s | 18969 MB/s |
-| alice29.txt (128B) | 80 | 82 | 6691 MB/s | 15489 MB/s | 31883 MB/s | 38874 MB/s |
-| alice29.txt (1000B) | 774 | 774 | 12204 MB/s | 13000 MB/s | 48056 MB/s | 52341 MB/s |
-| alice29.txt (10000B) | 6648 | 6933 | 10044 MB/s | 12806 MB/s | 32378 MB/s | 46322 MB/s |
-| alice29.txt (20000B) | 12686 | 13574 | 7733 MB/s | 11210 MB/s | 30566 MB/s | 58969 MB/s |
-
-
-| Relative Perf | Snappy size | S2 size improved | S2 Speed | S2 Dec Speed |
-|-----------------------|-------------|------------------|----------|--------------|
-| html | 22.31% | 7.58% | 1.07x | 1.20x |
-| urls.10K | 47.78% | 14.36% | 1.22x | 1.18x |
-| fireworks.jpeg | 99.95% | -0.05% | 0.78x | 1.15x |
-| fireworks.jpeg (200B) | 73.00% | -6.16% | 2.00x | 1.56x |
-| paper-100k.pdf | 83.30% | 0.99% | 0.60x | 0.89x |
-| html_x_4 | 22.52% | 77.11% | 3.33x | 1.04x |
-| alice29.txt | 57.88% | 2.34% | 1.03x | 1.56x |
-| asyoulik.txt | 61.91% | -2.77% | 1.15x | 1.79x |
-| lcet10.txt | 54.99% | 5.96% | 0.97x | 1.29x |
-| plrabn12.txt | 66.26% | 0.40% | 1.11x | 1.67x |
-| geo.protodata | 19.68% | 19.91% | 1.25x | 1.11x |
-| kppkn.gtb | 37.72% | 6.06% | 0.88x | 1.03x |
-| alice29.txt (128B) | 62.50% | -2.50% | 2.31x | 1.22x |
-| alice29.txt (1000B) | 77.40% | 0.00% | 1.07x | 1.09x |
-| alice29.txt (10000B) | 66.48% | -4.29% | 1.27x | 1.43x |
-| alice29.txt (20000B) | 63.43% | -7.00% | 1.45x | 1.93x |
+| html | 22843 | 20868 | 16246 MB/s | 18617 MB/s | 40972 MB/s | 49263 MB/s |
+| urls.10K | 335492 | 286541 | 7943 MB/s | 10201 MB/s | 22523 MB/s | 26484 MB/s |
+| fireworks.jpeg | 123034 | 123100 | 349544 MB/s | 303228 MB/s | 718321 MB/s | 827552 MB/s |
+| fireworks.jpeg (200B) | 146 | 155 | 8869 MB/s | 20180 MB/s | 33691 MB/s | 52421 MB/s |
+| paper-100k.pdf | 85304 | 84202 | 167546 MB/s | 112988 MB/s | 326905 MB/s | 291944 MB/s |
+| html_x_4 | 92234 | 20870 | 15194 MB/s | 54457 MB/s | 30843 MB/s | 32217 MB/s |
+| alice29.txt | 88034 | 85934 | 5936 MB/s | 6540 MB/s | 12882 MB/s | 20044 MB/s |
+| asyoulik.txt | 77503 | 79575 | 5517 MB/s | 6657 MB/s | 12735 MB/s | 22806 MB/s |
+| lcet10.txt | 234661 | 220383 | 6235 MB/s | 6303 MB/s | 14519 MB/s | 18697 MB/s |
+| plrabn12.txt | 319267 | 318196 | 5159 MB/s | 6074 MB/s | 11923 MB/s | 19901 MB/s |
+| geo.protodata | 23335 | 18606 | 21220 MB/s | 25432 MB/s | 56271 MB/s | 62540 MB/s |
+| kppkn.gtb | 69526 | 65019 | 9732 MB/s | 8905 MB/s | 18491 MB/s | 18969 MB/s |
+| alice29.txt (128B) | 80 | 82 | 6691 MB/s | 17179 MB/s | 31883 MB/s | 38874 MB/s |
+| alice29.txt (1000B) | 774 | 774 | 12204 MB/s | 13273 MB/s | 48056 MB/s | 52341 MB/s |
+| alice29.txt (10000B) | 6648 | 6933 | 10044 MB/s | 12824 MB/s | 32378 MB/s | 46322 MB/s |
+| alice29.txt (20000B) | 12686 | 13516 | 7733 MB/s | 12160 MB/s | 30566 MB/s | 58969 MB/s |
+
Speed is generally at or above Snappy. Small blocks get a significant speedup, although at the expense of size.
@@ -523,42 +524,23 @@ So individual benchmarks should only be seen as a guideline and the overall pict
| Absolute Perf | Snappy size | Better Size | Snappy Speed | Better Speed | Snappy dec | Better dec |
|-----------------------|-------------|-------------|--------------|--------------|-------------|-------------|
-| html | 22843 | 19833 | 16246 MB/s | 7731 MB/s | 40972 MB/s | 40292 MB/s |
-| urls.10K | 335492 | 253529 | 7943 MB/s | 3980 MB/s | 22523 MB/s | 20981 MB/s |
-| fireworks.jpeg | 123034 | 123100 | 349544 MB/s | 9760 MB/s | 718321 MB/s | 823698 MB/s |
-| fireworks.jpeg (200B) | 146 | 142 | 8869 MB/s | 594 MB/s | 33691 MB/s | 30101 MB/s |
-| paper-100k.pdf | 85304 | 82915 | 167546 MB/s | 7470 MB/s | 326905 MB/s | 198869 MB/s |
-| html_x_4 | 92234 | 19841 | 15194 MB/s | 23403 MB/s | 30843 MB/s | 30937 MB/s |
-| alice29.txt | 88034 | 73218 | 5936 MB/s | 2945 MB/s | 12882 MB/s | 16611 MB/s |
-| asyoulik.txt | 77503 | 66844 | 5517 MB/s | 2739 MB/s | 12735 MB/s | 14975 MB/s |
-| lcet10.txt | 234661 | 190589 | 6235 MB/s | 3099 MB/s | 14519 MB/s | 16634 MB/s |
-| plrabn12.txt | 319267 | 270828 | 5159 MB/s | 2600 MB/s | 11923 MB/s | 13382 MB/s |
-| geo.protodata | 23335 | 18278 | 21220 MB/s | 11208 MB/s | 56271 MB/s | 57961 MB/s |
-| kppkn.gtb | 69526 | 61851 | 9732 MB/s | 4556 MB/s | 18491 MB/s | 16524 MB/s |
-| alice29.txt (128B) | 80 | 81 | 6691 MB/s | 529 MB/s | 31883 MB/s | 34225 MB/s |
-| alice29.txt (1000B) | 774 | 748 | 12204 MB/s | 1943 MB/s | 48056 MB/s | 42068 MB/s |
-| alice29.txt (10000B) | 6648 | 6234 | 10044 MB/s | 2949 MB/s | 32378 MB/s | 28813 MB/s |
-| alice29.txt (20000B) | 12686 | 11584 | 7733 MB/s | 2822 MB/s | 30566 MB/s | 27315 MB/s |
-
-
-| Relative Perf | Snappy size | Better size | Better Speed | Better dec |
-|-----------------------|-------------|-------------|--------------|------------|
-| html | 22.31% | 13.18% | 0.48x | 0.98x |
-| urls.10K | 47.78% | 24.43% | 0.50x | 0.93x |
-| fireworks.jpeg | 99.95% | -0.05% | 0.03x | 1.15x |
-| fireworks.jpeg (200B) | 73.00% | 2.74% | 0.07x | 0.89x |
-| paper-100k.pdf | 83.30% | 2.80% | 0.07x | 0.61x |
-| html_x_4 | 22.52% | 78.49% | 0.04x | 1.00x |
-| alice29.txt | 57.88% | 16.83% | 1.54x | 1.29x |
-| asyoulik.txt | 61.91% | 13.75% | 0.50x | 1.18x |
-| lcet10.txt | 54.99% | 18.78% | 0.50x | 1.15x |
-| plrabn12.txt | 66.26% | 15.17% | 0.50x | 1.12x |
-| geo.protodata | 19.68% | 21.67% | 0.50x | 1.03x |
-| kppkn.gtb | 37.72% | 11.04% | 0.53x | 0.89x |
-| alice29.txt (128B) | 62.50% | -1.25% | 0.47x | 1.07x |
-| alice29.txt (1000B) | 77.40% | 3.36% | 0.08x | 0.88x |
-| alice29.txt (10000B) | 66.48% | 6.23% | 0.16x | 0.89x |
-| alice29.txt (20000B) | 63.43% | 8.69% | 0.29x | 0.89x |
+| html | 22843 | 18972 | 16246 MB/s | 8621 MB/s | 40972 MB/s | 40292 MB/s |
+| urls.10K | 335492 | 248079 | 7943 MB/s | 5104 MB/s | 22523 MB/s | 20981 MB/s |
+| fireworks.jpeg | 123034 | 123100 | 349544 MB/s | 84429 MB/s | 718321 MB/s | 823698 MB/s |
+| fireworks.jpeg (200B) | 146 | 149 | 8869 MB/s | 7125 MB/s | 33691 MB/s | 30101 MB/s |
+| paper-100k.pdf | 85304 | 82887 | 167546 MB/s | 11087 MB/s | 326905 MB/s | 198869 MB/s |
+| html_x_4 | 92234 | 18982 | 15194 MB/s | 29316 MB/s | 30843 MB/s | 30937 MB/s |
+| alice29.txt | 88034 | 71611 | 5936 MB/s | 3709 MB/s | 12882 MB/s | 16611 MB/s |
+| asyoulik.txt | 77503 | 65941 | 5517 MB/s | 3380 MB/s | 12735 MB/s | 14975 MB/s |
+| lcet10.txt | 234661 | 184939 | 6235 MB/s | 3537 MB/s | 14519 MB/s | 16634 MB/s |
+| plrabn12.txt | 319267 | 264990 | 5159 MB/s | 2960 MB/s | 11923 MB/s | 13382 MB/s |
+| geo.protodata | 23335 | 17689 | 21220 MB/s | 10859 MB/s | 56271 MB/s | 57961 MB/s |
+| kppkn.gtb | 69526 | 55398 | 9732 MB/s | 5206 MB/s | 18491 MB/s | 16524 MB/s |
+| alice29.txt (128B) | 80 | 78 | 6691 MB/s | 7422 MB/s | 31883 MB/s | 34225 MB/s |
+| alice29.txt (1000B) | 774 | 746 | 12204 MB/s | 5734 MB/s | 48056 MB/s | 42068 MB/s |
+| alice29.txt (10000B) | 6648 | 6218 | 10044 MB/s | 6055 MB/s | 32378 MB/s | 28813 MB/s |
+| alice29.txt (20000B) | 12686 | 11492 | 7733 MB/s | 3143 MB/s | 30566 MB/s | 27315 MB/s |
+
Except for the mostly incompressible JPEG image, compression is better and usually in the
double digits in terms of percentage reduction over Snappy.
@@ -585,29 +567,29 @@ Some examples compared on 16 core CPU, amd64 assembly used:
```
* enwik10
-Default... 10000000000 -> 4761467548 [47.61%]; 1.098s, 8685.6MB/s
-Better... 10000000000 -> 4219438251 [42.19%]; 1.925s, 4954.2MB/s
-Best... 10000000000 -> 3627364337 [36.27%]; 43.051s, 221.5MB/s
+Default... 10000000000 -> 4759950115 [47.60%]; 1.03s, 9263.0MB/s
+Better... 10000000000 -> 4084706676 [40.85%]; 2.16s, 4415.4MB/s
+Best... 10000000000 -> 3615520079 [36.16%]; 42.259s, 225.7MB/s
* github-june-2days-2019.json
-Default... 6273951764 -> 1043196283 [16.63%]; 431ms, 13882.3MB/s
-Better... 6273951764 -> 949146808 [15.13%]; 547ms, 10938.4MB/s
-Best... 6273951764 -> 832855506 [13.27%]; 9.455s, 632.8MB/s
+Default... 6273951764 -> 1041700255 [16.60%]; 431ms, 13882.3MB/s
+Better... 6273951764 -> 945841238 [15.08%]; 547ms, 10938.4MB/s
+Best... 6273951764 -> 826392576 [13.17%]; 9.455s, 632.8MB/s
* nyc-taxi-data-10M.csv
-Default... 3325605752 -> 1095998837 [32.96%]; 324ms, 9788.7MB/s
-Better... 3325605752 -> 954776589 [28.71%]; 491ms, 6459.4MB/s
-Best... 3325605752 -> 779098746 [23.43%]; 8.29s, 382.6MB/s
+Default... 3325605752 -> 1093516949 [32.88%]; 324ms, 9788.7MB/s
+Better... 3325605752 -> 885394158 [26.62%]; 491ms, 6459.4MB/s
+Best... 3325605752 -> 773681257 [23.26%]; 8.29s, 412.0MB/s
* 10gb.tar
-Default... 10065157632 -> 5916578242 [58.78%]; 1.028s, 9337.4MB/s
-Better... 10065157632 -> 5649207485 [56.13%]; 1.597s, 6010.6MB/s
-Best... 10065157632 -> 5208719802 [51.75%]; 32.78s, 292.8MB/
+Default... 10065157632 -> 5915541066 [58.77%]; 1.028s, 9337.4MB/s
+Better... 10065157632 -> 5453844650 [54.19%]; 1.597s, 4862.7MB/s
+Best... 10065157632 -> 5192495021 [51.59%]; 32.78s, 308.2MB/s
* consensus.db.10gb
-Default... 10737418240 -> 4562648848 [42.49%]; 882ms, 11610.0MB/s
-Better... 10737418240 -> 4542428129 [42.30%]; 1.533s, 6679.7MB/s
-Best... 10737418240 -> 4244773384 [39.53%]; 42.96s, 238.4MB/s
+Default... 10737418240 -> 4549762344 [42.37%]; 882ms, 12118.4MB/s
+Better... 10737418240 -> 4438535064 [41.34%]; 1.533s, 3500.9MB/s
+Best... 10737418240 -> 4210602774 [39.21%]; 42.96s, 254.4MB/s
```
Decompression speed should be around the same as using the 'better' compression mode.
@@ -628,10 +610,10 @@ If you would like more control, you can use the s2 package as described below:
Snappy compatible blocks can be generated with the S2 encoder.
Compression and speed are typically a bit better. `MaxEncodedLen` is also smaller, for lower memory usage. Replace:
-| Snappy | S2 replacement |
-|----------------------------|-------------------------|
-| snappy.Encode(...) | s2.EncodeSnappy(...) |
-| snappy.MaxEncodedLen(...) | s2.MaxEncodedLen(...) |
+| Snappy | S2 replacement |
+|---------------------------|-----------------------|
+| snappy.Encode(...) | s2.EncodeSnappy(...) |
+| snappy.MaxEncodedLen(...) | s2.MaxEncodedLen(...) |
`s2.EncodeSnappy` can be replaced with `s2.EncodeSnappyBetter` or `s2.EncodeSnappyBest` to get more efficiently compressed snappy compatible output.
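+
+A minimal sketch (illustrative; `src` is assumed to hold the data to compress):
+
+```
+	// Allocate the worst case destination size and emit a Snappy compatible block.
+	dst := make([]byte, s2.MaxEncodedLen(len(src)))
+	dst = s2.EncodeSnappy(dst, src)
+
+	// The output can be read by any Snappy decoder, including s2.Decode.
+	decoded, err := s2.Decode(nil, dst)
+	if err != nil {
+		// handle corrupt input
+	}
+	_ = decoded
+```
+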
@@ -640,12 +622,12 @@ Compression and speed is typically a bit better `MaxEncodedLen` is also smaller
Comparison of [`webdevdata.org-2015-01-07-subset`](https://files.klauspost.com/compress/webdevdata.org-2015-01-07-4GB-subset.7z),
53927 files, total input size: 4,014,735,833 bytes. amd64, single goroutine used:
-| Encoder | Size | MB/s | Reduction |
-|-----------------------|------------|------------|------------
-| snappy.Encode | 1128706759 | 725.59 | 71.89% |
-| s2.EncodeSnappy | 1093823291 | **899.16** | 72.75% |
-| s2.EncodeSnappyBetter | 1001158548 | 578.49 | 75.06% |
-| s2.EncodeSnappyBest | 944507998 | 66.00 | **76.47%**|
+| Encoder | Size | MB/s | Reduction |
+|-----------------------|------------|------------|------------|
+| snappy.Encode | 1128706759 | 725.59 | 71.89% |
+| s2.EncodeSnappy | 1093823291 | **899.16** | 72.75% |
+| s2.EncodeSnappyBetter | 1001158548 | 578.49 | 75.06% |
+| s2.EncodeSnappyBest | 944507998 | 66.00 | **76.47%** |
## Streams
@@ -704,7 +686,7 @@ To automatically add an index to a stream, add `WriterAddIndex()` option to your
Then the index will be added to the stream when `Close()` is called.
```
- // Add Index to stream...
+ // Add Index to stream...
enc := s2.NewWriter(w, s2.WriterAddIndex())
io.Copy(enc, r)
enc.Close()
@@ -714,7 +696,7 @@ If you want to store the index separately, you can use `CloseIndex()` instead of
This will return the index. Note that `CloseIndex()` should only be called once, and you shouldn't call `Close()`.
```
- // Get index for separate storage...
+ // Get index for separate storage...
enc := s2.NewWriter(w)
io.Copy(enc, r)
index, err := enc.CloseIndex()
@@ -815,6 +797,13 @@ This is done using the regular "Skip" function:
This will ensure that we are at exactly the offset we want, and reading from `dec` will start at the requested offset.
+# Compact storage
+
+For compact storage [RemoveIndexHeaders](https://pkg.go.dev/github.com/klauspost/compress/s2#RemoveIndexHeaders) can be used to remove any redundant info from
+a serialized index. If you remove the header, it must be restored before [Loading](https://pkg.go.dev/github.com/klauspost/compress/s2#Index.Load).
+
+This is expected to save 20 bytes. The headers can be restored using [RestoreIndexHeaders](https://pkg.go.dev/github.com/klauspost/compress/s2#RestoreIndexHeaders). This removes a layer of security, but is the most compact representation. Returns nil if the headers contain errors.
+
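+A rough sketch (illustrative; `index` is assumed to hold a serialized index, e.g. returned by `CloseIndex()`):
+
+```
+	// Strip the redundant header and trailer before storing the index...
+	compact := s2.RemoveIndexHeaders(index)
+
+	// ...and restore them before loading it again.
+	restored := s2.RestoreIndexHeaders(compact)
+	if restored == nil {
+		// input could not be restored to a valid index
+	}
+	var idx s2.Index
+	if _, err := idx.Load(restored); err != nil {
+		// handle invalid index
+	}
+```
+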
## Index Format:
Each block is structured as a snappy skippable block, with the chunk ID 0x99.
@@ -824,20 +813,20 @@ The block can be read from the front, but contains information so it can be read
Numbers are stored as fixed size little endian values or [zigzag encoded](https://developers.google.com/protocol-buffers/docs/encoding#signed_integers) [base 128 varints](https://developers.google.com/protocol-buffers/docs/encoding),
with un-encoded value length of 64 bits, unless other limits are specified.
-| Content | Format |
-|---------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------|
-| ID, `[1]byte` | Always 0x99. |
-| Data Length, `[3]byte` | 3 byte little-endian length of the chunk in bytes, following this. |
-| Header `[6]byte` | Header, must be `[115, 50, 105, 100, 120, 0]` or in text: "s2idx\x00". |
-| UncompressedSize, Varint | Total Uncompressed size. |
-| CompressedSize, Varint | Total Compressed size if known. Should be -1 if unknown. |
-| EstBlockSize, Varint | Block Size, used for guessing uncompressed offsets. Must be >= 0. |
-| Entries, Varint | Number of Entries in index, must be < 65536 and >=0. |
-| HasUncompressedOffsets `byte` | 0 if no uncompressed offsets are present, 1 if present. Other values are invalid. |
-| UncompressedOffsets, [Entries]VarInt | Uncompressed offsets. See below how to decode. |
-| CompressedOffsets, [Entries]VarInt | Compressed offsets. See below how to decode. |
-| Block Size, `[4]byte` | Little Endian total encoded size (including header and trailer). Can be used for searching backwards to start of block. |
-| Trailer `[6]byte` | Trailer, must be `[0, 120, 100, 105, 50, 115]` or in text: "\x00xdi2s". Can be used for identifying block from end of stream. |
+| Content | Format |
+|--------------------------------------|-------------------------------------------------------------------------------------------------------------------------------|
+| ID, `[1]byte` | Always 0x99. |
+| Data Length, `[3]byte` | 3 byte little-endian length of the chunk in bytes, following this. |
+| Header `[6]byte` | Header, must be `[115, 50, 105, 100, 120, 0]` or in text: "s2idx\x00". |
+| UncompressedSize, Varint | Total Uncompressed size. |
+| CompressedSize, Varint | Total Compressed size if known. Should be -1 if unknown. |
+| EstBlockSize, Varint | Block Size, used for guessing uncompressed offsets. Must be >= 0. |
+| Entries, Varint | Number of Entries in index, must be < 65536 and >=0. |
+| HasUncompressedOffsets `byte` | 0 if no uncompressed offsets are present, 1 if present. Other values are invalid. |
+| UncompressedOffsets, [Entries]VarInt | Uncompressed offsets. See below how to decode. |
+| CompressedOffsets, [Entries]VarInt | Compressed offsets. See below how to decode. |
+| Block Size, `[4]byte` | Little Endian total encoded size (including header and trailer). Can be used for searching backwards to start of block. |
+| Trailer `[6]byte` | Trailer, must be `[0, 120, 100, 105, 50, 115]` or in text: "\x00xdi2s". Can be used for identifying block from end of stream. |
For regular streams the uncompressed offsets are fully predictable,
so `HasUncompressedOffsets` allows specifying that compressed blocks all have
@@ -873,7 +862,7 @@ for each entry {
}
// Uncompressed uses previous offset and adds EstBlockSize
- entry[entryNum].UncompressedOffset = entry[entryNum-1].UncompressedOffset + EstBlockSize
+ entry[entryNum].UncompressedOffset = entry[entryNum-1].UncompressedOffset + EstBlockSize + uOff
}
@@ -894,13 +883,22 @@ for each entry {
}
// Compressed uses previous and our estimate.
- entry[entryNum].CompressedOffset = entry[entryNum-1].CompressedOffset + CompressGuess
+ entry[entryNum].CompressedOffset = entry[entryNum-1].CompressedOffset + CompressGuess + cOff
// Adjust compressed offset for next loop, integer truncating division must be used.
CompressGuess += cOff/2
}
```
+To decode from any given uncompressed offset `(wantOffset)`:
+
+* Iterate entries until `entry[n].UncompressedOffset > wantOffset`.
+* Start decoding from `entry[n-1].CompressedOffset`.
+* Discard `wantOffset - entry[n-1].UncompressedOffset` bytes from the decoded stream.
+
+See [using indexes](https://github.com/klauspost/compress/tree/master/s2#using-indexes) for functions that perform the operations with a simpler interface.
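+
+A sketch using the package helpers instead of iterating entries by hand (illustrative; `rs` is assumed to be an `io.ReadSeeker` with an indexed stream and `wantOffset` the uncompressed offset to reach):
+
+```
+	var idx s2.Index
+	if err := idx.LoadStream(rs); err != nil {
+		// stream has no usable index
+	}
+	// Find the block containing wantOffset.
+	compOff, uncompOff, err := idx.Find(wantOffset)
+	if err != nil {
+		// offset is outside the indexed range
+	}
+	// Start decoding at the block start and discard the bytes before wantOffset.
+	if _, err := rs.Seek(compOff, io.SeekStart); err != nil {
+		// handle seek error
+	}
+	dec := s2.NewReader(rs)
+	if err := dec.Skip(wantOffset - uncompOff); err != nil {
+		// handle skip error
+	}
+```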
+
+
# Format Extensions
* Frame [Stream identifier](https://github.com/google/snappy/blob/master/framing_format.txt#L68) changed from `sNaPpY` to `S2sTwO`.
@@ -923,10 +921,11 @@ The length is specified by reading the 3-bit length specified in the tag and dec
| 7 | 65540 + read 3 bytes |
This allows any repeat offset + length to be represented by 2 to 5 bytes.
+It also allows emitting matches longer than 64 bytes with one copy + one repeat instead of several 64 byte copies.
Lengths are stored as little endian values.
-The first copy of a block cannot be a repeat offset and the offset is not carried across blocks in streams.
+The first copy of a block cannot be a repeat offset and the offset is reset on every block in streams.
Default streaming block size is 1MB.
diff --git a/vendor/github.com/klauspost/compress/s2/decode.go b/vendor/github.com/klauspost/compress/s2/decode.go
index 9e7fce8856e06..27c0f3c2c45f7 100644
--- a/vendor/github.com/klauspost/compress/s2/decode.go
+++ b/vendor/github.com/klauspost/compress/s2/decode.go
@@ -11,6 +11,9 @@ import (
"fmt"
"io"
"io/ioutil"
+ "math"
+ "runtime"
+ "sync"
)
var (
@@ -169,6 +172,14 @@ func ReaderSkippableCB(id uint8, fn func(r io.Reader) error) ReaderOption {
}
}
+// ReaderIgnoreCRC will make the reader skip CRC calculation and checks.
+func ReaderIgnoreCRC() ReaderOption {
+ return func(r *Reader) error {
+ r.ignoreCRC = true
+ return nil
+ }
+}
+
// Reader is an io.Reader that can read Snappy-compressed bytes.
type Reader struct {
r io.Reader
@@ -191,18 +202,19 @@ type Reader struct {
paramsOK bool
snappyFrame bool
ignoreStreamID bool
+ ignoreCRC bool
}
// ensureBufferSize will ensure that the buffer can take at least n bytes.
// If false is returned the buffer exceeds maximum allowed size.
func (r *Reader) ensureBufferSize(n int) bool {
- if len(r.buf) >= n {
- return true
- }
if n > r.maxBufSize {
r.err = ErrCorrupt
return false
}
+ if cap(r.buf) >= n {
+ return true
+ }
// Realloc buffer.
r.buf = make([]byte, n)
return true
@@ -220,6 +232,7 @@ func (r *Reader) Reset(reader io.Reader) {
r.err = nil
r.i = 0
r.j = 0
+ r.blockStart = 0
r.readHeader = r.ignoreStreamID
}
@@ -344,7 +357,7 @@ func (r *Reader) Read(p []byte) (int, error) {
r.err = err
return 0, r.err
}
- if crc(r.decoded[:n]) != checksum {
+ if !r.ignoreCRC && crc(r.decoded[:n]) != checksum {
r.err = ErrCRC
return 0, r.err
}
@@ -385,7 +398,7 @@ func (r *Reader) Read(p []byte) (int, error) {
if !r.readFull(r.decoded[:n], false) {
return 0, r.err
}
- if crc(r.decoded[:n]) != checksum {
+ if !r.ignoreCRC && crc(r.decoded[:n]) != checksum {
r.err = ErrCRC
return 0, r.err
}
@@ -435,6 +448,259 @@ func (r *Reader) Read(p []byte) (int, error) {
}
}
+// DecodeConcurrent will decode the full stream to w.
+// This function should not be combined with reading, seeking or other operations.
+// Up to 'concurrent' goroutines will be used.
+// If <= 0, runtime.NumCPU will be used.
+// On success, the number of decompressed bytes and a nil error are returned.
+// This is mainly intended for bigger streams.
+func (r *Reader) DecodeConcurrent(w io.Writer, concurrent int) (written int64, err error) {
+ if r.i > 0 || r.j > 0 || r.blockStart > 0 {
+		return 0, errors.New("DecodeConcurrent called after other read operations")
+ }
+ if concurrent <= 0 {
+ concurrent = runtime.NumCPU()
+ }
+
+ // Write to output
+ var errMu sync.Mutex
+ var aErr error
+ setErr := func(e error) (ok bool) {
+ errMu.Lock()
+ defer errMu.Unlock()
+ if e == nil {
+ return aErr == nil
+ }
+ if aErr == nil {
+ aErr = e
+ }
+ return false
+ }
+ hasErr := func() (ok bool) {
+ errMu.Lock()
+ v := aErr != nil
+ errMu.Unlock()
+ return v
+ }
+
+ var aWritten int64
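+	// Buffer and channel pools: toRead holds buffers for compressed chunks,
+	// writtenBlocks holds buffers for decoded output, and queue carries one
+	// channel per in-flight block so the writer emits blocks in stream order.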
+ toRead := make(chan []byte, concurrent)
+ writtenBlocks := make(chan []byte, concurrent)
+ queue := make(chan chan []byte, concurrent)
+ reUse := make(chan chan []byte, concurrent)
+ for i := 0; i < concurrent; i++ {
+ toRead <- make([]byte, 0, r.maxBufSize)
+ writtenBlocks <- make([]byte, 0, r.maxBufSize)
+ reUse <- make(chan []byte, 1)
+ }
+ // Writer
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for toWrite := range queue {
+ entry := <-toWrite
+ reUse <- toWrite
+ if hasErr() {
+ writtenBlocks <- entry
+ continue
+ }
+ n, err := w.Write(entry)
+ want := len(entry)
+ writtenBlocks <- entry
+ if err != nil {
+ setErr(err)
+ continue
+ }
+ if n != want {
+ setErr(io.ErrShortWrite)
+ continue
+ }
+ aWritten += int64(n)
+ }
+ }()
+
+ // Reader
+ defer func() {
+ close(queue)
+ if r.err != nil {
+ err = r.err
+ setErr(r.err)
+ }
+ wg.Wait()
+ if err == nil {
+ err = aErr
+ }
+ written = aWritten
+ }()
+
+ for !hasErr() {
+ if !r.readFull(r.buf[:4], true) {
+ if r.err == io.EOF {
+ r.err = nil
+ }
+ return 0, r.err
+ }
+ chunkType := r.buf[0]
+ if !r.readHeader {
+ if chunkType != chunkTypeStreamIdentifier {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ r.readHeader = true
+ }
+ chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
+
+ // The chunk types are specified at
+ // https://github.com/google/snappy/blob/master/framing_format.txt
+ switch chunkType {
+ case chunkTypeCompressedData:
+ r.blockStart += int64(r.j)
+ // Section 4.2. Compressed data (chunk type 0x00).
+ if chunkLen < checksumSize {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ if chunkLen > r.maxBufSize {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ orgBuf := <-toRead
+ buf := orgBuf[:chunkLen]
+
+ if !r.readFull(buf, false) {
+ return 0, r.err
+ }
+
+ checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+ buf = buf[checksumSize:]
+
+ n, err := DecodedLen(buf)
+ if err != nil {
+ r.err = err
+ return 0, r.err
+ }
+ if r.snappyFrame && n > maxSnappyBlockSize {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+
+ if n > r.maxBlock {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ wg.Add(1)
+
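+			// Take an output buffer and a per-block channel. The channel is
+			// queued to the writer right away, so blocks are written in stream
+			// order even though they are decoded concurrently below.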
+ decoded := <-writtenBlocks
+ entry := <-reUse
+ queue <- entry
+ go func() {
+ defer wg.Done()
+ decoded = decoded[:n]
+ _, err := Decode(decoded, buf)
+ toRead <- orgBuf
+ if err != nil {
+ writtenBlocks <- decoded
+ setErr(err)
+ return
+ }
+ if !r.ignoreCRC && crc(decoded) != checksum {
+ writtenBlocks <- decoded
+ setErr(ErrCRC)
+ return
+ }
+ entry <- decoded
+ }()
+ continue
+
+ case chunkTypeUncompressedData:
+
+ // Section 4.3. Uncompressed data (chunk type 0x01).
+ if chunkLen < checksumSize {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ if chunkLen > r.maxBufSize {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ // Grab write buffer
+ orgBuf := <-writtenBlocks
+ buf := orgBuf[:checksumSize]
+ if !r.readFull(buf, false) {
+ return 0, r.err
+ }
+ checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+ // Read content.
+ n := chunkLen - checksumSize
+
+ if r.snappyFrame && n > maxSnappyBlockSize {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ if n > r.maxBlock {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ // Read uncompressed
+ buf = orgBuf[:n]
+ if !r.readFull(buf, false) {
+ return 0, r.err
+ }
+
+ if !r.ignoreCRC && crc(buf) != checksum {
+ r.err = ErrCRC
+ return 0, r.err
+ }
+ entry := <-reUse
+ queue <- entry
+ entry <- buf
+ continue
+
+ case chunkTypeStreamIdentifier:
+ // Section 4.1. Stream identifier (chunk type 0xff).
+ if chunkLen != len(magicBody) {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ if !r.readFull(r.buf[:len(magicBody)], false) {
+ return 0, r.err
+ }
+ if string(r.buf[:len(magicBody)]) != magicBody {
+ if string(r.buf[:len(magicBody)]) != magicBodySnappy {
+ r.err = ErrCorrupt
+ return 0, r.err
+ } else {
+ r.snappyFrame = true
+ }
+ } else {
+ r.snappyFrame = false
+ }
+ continue
+ }
+
+ if chunkType <= 0x7f {
+ // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
+ // fmt.Printf("ERR chunktype: 0x%x\n", chunkType)
+ r.err = ErrUnsupported
+ return 0, r.err
+ }
+ // Section 4.4 Padding (chunk type 0xfe).
+ // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
+ if chunkLen > maxChunkSize {
+ // fmt.Printf("ERR chunkLen: 0x%x\n", chunkLen)
+ r.err = ErrUnsupported
+ return 0, r.err
+ }
+
+ // fmt.Printf("skippable: ID: 0x%x, len: 0x%x\n", chunkType, chunkLen)
+ if !r.skippable(r.buf, chunkLen, false, chunkType) {
+ return 0, r.err
+ }
+ }
+ return 0, r.err
+}
+
// Skip will skip n bytes forward in the decompressed output.
// For larger skips this consumes less CPU and is faster than reading output and discarding it.
// CRC is not checked on skipped blocks.
@@ -454,7 +720,11 @@ func (r *Reader) Skip(n int64) error {
// decoded[i:j] contains decoded bytes that have not yet been passed on.
left := int64(r.j - r.i)
if left >= n {
- r.i += int(n)
+ tmp := int64(r.i) + n
+ if tmp > math.MaxInt32 {
+ return errors.New("s2: internal overflow in skip")
+ }
+ r.i = int(tmp)
return nil
}
n -= int64(r.j - r.i)
@@ -526,6 +796,7 @@ func (r *Reader) Skip(n int64) error {
} else {
// Skip block completely
n -= int64(dLen)
+ r.blockStart += int64(dLen)
dLen = 0
}
r.i, r.j = 0, dLen
@@ -656,6 +927,15 @@ func (r *Reader) ReadSeeker(random bool, index []byte) (*ReadSeeker, error) {
err = r.index.LoadStream(rs)
if err != nil {
if err == ErrUnsupported {
+ // If we don't require random seeking, reset input and return.
+ if !random {
+ _, err = rs.Seek(pos, io.SeekStart)
+ if err != nil {
+ return nil, ErrCantSeek{Reason: "resetting stream returned: " + err.Error()}
+ }
+ r.index = nil
+ return &ReadSeeker{Reader: r}, nil
+ }
return nil, ErrCantSeek{Reason: "input stream does not contain an index"}
}
return nil, ErrCantSeek{Reason: "reading index returned: " + err.Error()}
@@ -699,8 +979,16 @@ func (r *ReadSeeker) Seek(offset int64, whence int) (int64, error) {
case io.SeekCurrent:
offset += r.blockStart + int64(r.i)
case io.SeekEnd:
- offset = -offset
+ if offset > 0 {
+ return 0, errors.New("seek after end of file")
+ }
+ offset = r.index.TotalUncompressed + offset
+ }
+
+ if offset < 0 {
+ return 0, errors.New("seek before start of file")
}
+
c, u, err := r.index.Find(offset)
if err != nil {
return r.blockStart + int64(r.i), err
@@ -712,10 +1000,6 @@ func (r *ReadSeeker) Seek(offset int64, whence int) (int64, error) {
return 0, err
}
- if offset < 0 {
- offset = r.index.TotalUncompressed + offset
- }
-
r.i = r.j // Remove rest of current block.
if u < offset {
// Forward inside block
diff --git a/vendor/github.com/klauspost/compress/s2/decode_other.go b/vendor/github.com/klauspost/compress/s2/decode_other.go
index 1074ebd215e1b..11300c3a81041 100644
--- a/vendor/github.com/klauspost/compress/s2/decode_other.go
+++ b/vendor/github.com/klauspost/compress/s2/decode_other.go
@@ -28,6 +28,9 @@ func s2Decode(dst, src []byte) int {
// As long as we can read at least 5 bytes...
for s < len(src)-5 {
+ // Removing bounds checks is SLOWER, when if doing
+ // in := src[s:s+5]
+ // Checked on Go 1.18
switch src[s] & 0x03 {
case tagLiteral:
x := uint32(src[s] >> 2)
@@ -38,14 +41,19 @@ func s2Decode(dst, src []byte) int {
s += 2
x = uint32(src[s-1])
case x == 61:
+ in := src[s : s+3]
+ x = uint32(in[1]) | uint32(in[2])<<8
s += 3
- x = uint32(src[s-2]) | uint32(src[s-1])<<8
case x == 62:
+ in := src[s : s+4]
+ // Load as 32 bit and shift down.
+ x = uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24
+ x >>= 8
s += 4
- x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
case x == 63:
+ in := src[s : s+5]
+ x = uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24
s += 5
- x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
}
length = int(x) + 1
if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) {
@@ -62,8 +70,8 @@ func s2Decode(dst, src []byte) int {
case tagCopy1:
s += 2
- length = int(src[s-2]) >> 2 & 0x7
toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+ length = int(src[s-2]) >> 2 & 0x7
if toffset == 0 {
if debug {
fmt.Print("(repeat) ")
@@ -71,14 +79,16 @@ func s2Decode(dst, src []byte) int {
// keep last offset
switch length {
case 5:
+ length = int(src[s]) + 4
s += 1
- length = int(uint32(src[s-1])) + 4
case 6:
+ in := src[s : s+2]
+ length = int(uint32(in[0])|(uint32(in[1])<<8)) + (1 << 8)
s += 2
- length = int(uint32(src[s-2])|(uint32(src[s-1])<<8)) + (1 << 8)
case 7:
+ in := src[s : s+3]
+ length = int((uint32(in[2])<<16)|(uint32(in[1])<<8)|uint32(in[0])) + (1 << 16)
s += 3
- length = int(uint32(src[s-3])|(uint32(src[s-2])<<8)|(uint32(src[s-1])<<16)) + (1 << 16)
default: // 0-> 4
}
} else {
@@ -86,14 +96,16 @@ func s2Decode(dst, src []byte) int {
}
length += 4
case tagCopy2:
+ in := src[s : s+3]
+ offset = int(uint32(in[1]) | uint32(in[2])<<8)
+ length = 1 + int(in[0])>>2
s += 3
- length = 1 + int(src[s-3])>>2
- offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
case tagCopy4:
+ in := src[s : s+5]
+ offset = int(uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24)
+ length = 1 + int(in[0])>>2
s += 5
- length = 1 + int(src[s-5])>>2
- offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
}
if offset <= 0 || d < offset || length > len(dst)-d {
diff --git a/vendor/github.com/klauspost/compress/s2/encode.go b/vendor/github.com/klauspost/compress/s2/encode.go
index 59f992ca6ebea..1aefabf313623 100644
--- a/vendor/github.com/klauspost/compress/s2/encode.go
+++ b/vendor/github.com/klauspost/compress/s2/encode.go
@@ -1119,12 +1119,6 @@ func (w *Writer) closeIndex(idx bool) ([]byte, error) {
if w.appendIndex {
w.written += int64(len(index))
}
- if true {
- _, err := w.index.Load(index)
- if err != nil {
- panic(err)
- }
- }
}
if w.pad > 1 {
diff --git a/vendor/github.com/klauspost/compress/s2/encode_all.go b/vendor/github.com/klauspost/compress/s2/encode_all.go
index 8b16c38a68f63..54c71d3b5d90a 100644
--- a/vendor/github.com/klauspost/compress/s2/encode_all.go
+++ b/vendor/github.com/klauspost/compress/s2/encode_all.go
@@ -58,8 +58,9 @@ func encodeGo(dst, src []byte) []byte {
// been written.
//
// It also assumes that:
+//
// len(dst) >= MaxEncodedLen(len(src)) &&
-// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlockGo(dst, src []byte) (d int) {
// Initialize the hash table.
const (
diff --git a/vendor/github.com/klauspost/compress/s2/encode_amd64.go b/vendor/github.com/klauspost/compress/s2/encode_amd64.go
index e612225f4d358..6b93daa5ae684 100644
--- a/vendor/github.com/klauspost/compress/s2/encode_amd64.go
+++ b/vendor/github.com/klauspost/compress/s2/encode_amd64.go
@@ -8,8 +8,9 @@ package s2
// been written.
//
// It also assumes that:
+//
// len(dst) >= MaxEncodedLen(len(src)) &&
-// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlock(dst, src []byte) (d int) {
const (
// Use 12 bit table when less than...
@@ -43,8 +44,9 @@ func encodeBlock(dst, src []byte) (d int) {
// been written.
//
// It also assumes that:
+//
// len(dst) >= MaxEncodedLen(len(src)) &&
-// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlockBetter(dst, src []byte) (d int) {
const (
// Use 12 bit table when less than...
@@ -78,8 +80,9 @@ func encodeBlockBetter(dst, src []byte) (d int) {
// been written.
//
// It also assumes that:
+//
// len(dst) >= MaxEncodedLen(len(src)) &&
-// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlockSnappy(dst, src []byte) (d int) {
const (
// Use 12 bit table when less than...
@@ -112,8 +115,9 @@ func encodeBlockSnappy(dst, src []byte) (d int) {
// been written.
//
// It also assumes that:
+//
// len(dst) >= MaxEncodedLen(len(src)) &&
-// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlockBetterSnappy(dst, src []byte) (d int) {
const (
// Use 12 bit table when less than...
diff --git a/vendor/github.com/klauspost/compress/s2/encode_best.go b/vendor/github.com/klauspost/compress/s2/encode_best.go
index 4480347769a04..1b7ea394fae93 100644
--- a/vendor/github.com/klauspost/compress/s2/encode_best.go
+++ b/vendor/github.com/klauspost/compress/s2/encode_best.go
@@ -15,8 +15,9 @@ import (
// been written.
//
// It also assumes that:
+//
// len(dst) >= MaxEncodedLen(len(src)) &&
-// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlockBest(dst, src []byte) (d int) {
// Initialize the hash tables.
const (
@@ -176,14 +177,21 @@ func encodeBlockBest(dst, src []byte) (d int) {
best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv), false))
}
// Search for a match at best match end, see if that is better.
- if sAt := best.s + best.length; sAt < sLimit {
- sBack := best.s
- backL := best.length
+ // Allow some bytes at the beginning to mismatch.
+ // Sweet spot is around 1-2 bytes, but depends on input.
+ // The skipped bytes are tested in Extend backwards,
+ // and still picked up as part of the match if they do.
+ const skipBeginning = 2
+ const skipEnd = 1
+ if sAt := best.s + best.length - skipEnd; sAt < sLimit {
+
+ sBack := best.s + skipBeginning - skipEnd
+ backL := best.length - skipBeginning
// Load initial values
cv = load64(src, sBack)
- // Search for mismatch
+
+ // Grab candidates...
next := lTable[hash8(load64(src, sAt), lTableBits)]
- //next := sTable[hash4(load64(src, sAt), sTableBits)]
if checkAt := getCur(next) - backL; checkAt > 0 {
best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false))
@@ -191,6 +199,16 @@ func encodeBlockBest(dst, src []byte) (d int) {
if checkAt := getPrev(next) - backL; checkAt > 0 {
best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false))
}
+ // Disabled: Extremely small gain
+ if false {
+ next = sTable[hash4(load64(src, sAt), sTableBits)]
+ if checkAt := getCur(next) - backL; checkAt > 0 {
+ best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false))
+ }
+ if checkAt := getPrev(next) - backL; checkAt > 0 {
+ best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false))
+ }
+ }
}
}
}
@@ -288,8 +306,9 @@ emitRemainder:
// been written.
//
// It also assumes that:
+//
// len(dst) >= MaxEncodedLen(len(src)) &&
-// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlockBestSnappy(dst, src []byte) (d int) {
// Initialize the hash tables.
const (
@@ -370,7 +389,7 @@ func encodeBlockBestSnappy(dst, src []byte) (d int) {
}
offset := m.s - m.offset
- return score - emitCopySize(offset, m.length)
+ return score - emitCopyNoRepeatSize(offset, m.length)
}
matchAt := func(offset, s int, first uint32) match {
@@ -546,6 +565,7 @@ emitRemainder:
// emitCopySize returns the size to encode the offset+length
//
// It assumes that:
+//
// 1 <= offset && offset <= math.MaxUint32
// 4 <= length && length <= 1 << 24
func emitCopySize(offset, length int) int {
@@ -567,6 +587,10 @@ func emitCopySize(offset, length int) int {
// Offset no more than 2 bytes.
if length > 64 {
+ if offset < 2048 {
+ // Emit 8 bytes, then rest as repeats...
+ return 2 + emitRepeatSize(offset, length-8)
+ }
// Emit remaining as repeats, at least 4 bytes remain.
return 3 + emitRepeatSize(offset, length-60)
}
@@ -577,6 +601,29 @@ func emitCopySize(offset, length int) int {
return 2
}
+// emitCopyNoRepeatSize returns the size to encode the offset+length
+//
+// It assumes that:
+//
+// 1 <= offset && offset <= math.MaxUint32
+// 4 <= length && length <= 1 << 24
+func emitCopyNoRepeatSize(offset, length int) int {
+ if offset >= 65536 {
+ return 5 + 5*(length/64)
+ }
+
+ // Offset no more than 2 bytes.
+ if length > 64 {
+ // Emit remaining as repeats, at least 4 bytes remain.
+ return 3 + 3*(length/60)
+ }
+ if length >= 12 || offset >= 2048 {
+ return 3
+ }
+ // Emit the remaining copy, encoded as 2 bytes.
+ return 2
+}
+
// emitRepeatSize returns the number of bytes required to encode a repeat.
// Length must be at least 4 and < 1<<24
func emitRepeatSize(offset, length int) int {
diff --git a/vendor/github.com/klauspost/compress/s2/encode_better.go b/vendor/github.com/klauspost/compress/s2/encode_better.go
index 943215b8ae86b..3b66ba42bf392 100644
--- a/vendor/github.com/klauspost/compress/s2/encode_better.go
+++ b/vendor/github.com/klauspost/compress/s2/encode_better.go
@@ -42,8 +42,9 @@ func hash8(u uint64, h uint8) uint32 {
// been written.
//
// It also assumes that:
+//
// len(dst) >= MaxEncodedLen(len(src)) &&
-// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlockBetterGo(dst, src []byte) (d int) {
// sLimit is when to stop looking for offset/length copies. The inputMargin
// lets us use a fast path for emitLiteral in the main loop, while we are
@@ -56,7 +57,7 @@ func encodeBlockBetterGo(dst, src []byte) (d int) {
// Initialize the hash tables.
const (
// Long hash matches.
- lTableBits = 16
+ lTableBits = 17
maxLTableSize = 1 << lTableBits
// Short hash matches.
@@ -97,9 +98,26 @@ func encodeBlockBetterGo(dst, src []byte) (d int) {
lTable[hashL] = uint32(s)
sTable[hashS] = uint32(s)
+ valLong := load64(src, candidateL)
+ valShort := load64(src, candidateS)
+
+ // If long matches at least 8 bytes, use that.
+ if cv == valLong {
+ break
+ }
+ if cv == valShort {
+ candidateL = candidateS
+ break
+ }
+
// Check repeat at offset checkRep.
const checkRep = 1
- if false && uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) {
+ // Minimum length of a repeat. Tested with various values.
+ // While 4-5 offers improvements in some, 6 reduces
+ // regressions significantly.
+ const wantRepeatBytes = 6
+ const repeatMask = ((1 << (wantRepeatBytes * 8)) - 1) << (8 * checkRep)
+ if false && repeat > 0 && cv&repeatMask == load64(src, s-repeat)&repeatMask {
base := s + checkRep
// Extend back
for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
@@ -109,8 +127,8 @@ func encodeBlockBetterGo(dst, src []byte) (d int) {
d += emitLiteral(dst[d:], src[nextEmit:base])
// Extend forward
- candidate := s - repeat + 4 + checkRep
- s += 4 + checkRep
+ candidate := s - repeat + wantRepeatBytes + checkRep
+ s += wantRepeatBytes + checkRep
for s < len(src) {
if len(src)-s < 8 {
if src[s] == src[candidate] {
@@ -127,28 +145,40 @@ func encodeBlockBetterGo(dst, src []byte) (d int) {
s += 8
candidate += 8
}
- if nextEmit > 0 {
- // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset.
- d += emitRepeat(dst[d:], repeat, s-base)
- } else {
- // First match, cannot be repeat.
- d += emitCopy(dst[d:], repeat, s-base)
- }
+ // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset.
+ d += emitRepeat(dst[d:], repeat, s-base)
nextEmit = s
if s >= sLimit {
goto emitRemainder
}
+ // Index in-between
+ index0 := base + 1
+ index1 := s - 2
+
+ cv = load64(src, s)
+ for index0 < index1 {
+ cv0 := load64(src, index0)
+ cv1 := load64(src, index1)
+ lTable[hash7(cv0, lTableBits)] = uint32(index0)
+ sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
+
+ lTable[hash7(cv1, lTableBits)] = uint32(index1)
+ sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
+ index0 += 2
+ index1 -= 2
+ }
cv = load64(src, s)
continue
}
- if uint32(cv) == load32(src, candidateL) {
+ // Long likely matches 7, so take that.
+ if uint32(cv) == uint32(valLong) {
break
}
// Check our short candidate
- if uint32(cv) == load32(src, candidateS) {
+ if uint32(cv) == uint32(valShort) {
// Try a long candidate at s+1
hashL = hash7(cv>>8, lTableBits)
candidateL = int(lTable[hashL])
@@ -227,21 +257,29 @@ func encodeBlockBetterGo(dst, src []byte) (d int) {
// Do we have space for more, if not bail.
return 0
}
- // Index match start+1 (long) and start+2 (short)
+
+ // Index short & long
index0 := base + 1
- // Index match end-2 (long) and end-1 (short)
index1 := s - 2
cv0 := load64(src, index0)
cv1 := load64(src, index1)
- cv = load64(src, s)
lTable[hash7(cv0, lTableBits)] = uint32(index0)
- lTable[hash7(cv0>>8, lTableBits)] = uint32(index0 + 1)
- lTable[hash7(cv1, lTableBits)] = uint32(index1)
- lTable[hash7(cv1>>8, lTableBits)] = uint32(index1 + 1)
sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
- sTable[hash4(cv0>>16, sTableBits)] = uint32(index0 + 2)
+
+ lTable[hash7(cv1, lTableBits)] = uint32(index1)
sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
+ index0 += 1
+ index1 -= 1
+ cv = load64(src, s)
+
+ // index every second long in between.
+ for index0 < index1 {
+ lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0)
+ lTable[hash7(load64(src, index1), lTableBits)] = uint32(index1)
+ index0 += 2
+ index1 -= 2
+ }
}
emitRemainder:
@@ -260,8 +298,9 @@ emitRemainder:
// been written.
//
// It also assumes that:
+//
// len(dst) >= MaxEncodedLen(len(src)) &&
-// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlockBetterSnappyGo(dst, src []byte) (d int) {
// sLimit is when to stop looking for offset/length copies. The inputMargin
// lets us use a fast path for emitLiteral in the main loop, while we are
@@ -402,21 +441,29 @@ func encodeBlockBetterSnappyGo(dst, src []byte) (d int) {
// Do we have space for more, if not bail.
return 0
}
- // Index match start+1 (long) and start+2 (short)
+
+ // Index short & long
index0 := base + 1
- // Index match end-2 (long) and end-1 (short)
index1 := s - 2
cv0 := load64(src, index0)
cv1 := load64(src, index1)
- cv = load64(src, s)
lTable[hash7(cv0, lTableBits)] = uint32(index0)
- lTable[hash7(cv0>>8, lTableBits)] = uint32(index0 + 1)
- lTable[hash7(cv1, lTableBits)] = uint32(index1)
- lTable[hash7(cv1>>8, lTableBits)] = uint32(index1 + 1)
sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
- sTable[hash4(cv0>>16, sTableBits)] = uint32(index0 + 2)
+
+ lTable[hash7(cv1, lTableBits)] = uint32(index1)
sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
+ index0 += 1
+ index1 -= 1
+ cv = load64(src, s)
+
+ // index every second long in between.
+ for index0 < index1 {
+ lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0)
+ lTable[hash7(load64(src, index1), lTableBits)] = uint32(index1)
+ index0 += 2
+ index1 -= 2
+ }
}
emitRemainder:
diff --git a/vendor/github.com/klauspost/compress/s2/encode_go.go b/vendor/github.com/klauspost/compress/s2/encode_go.go
index 43d43534e4f4a..db08fc355e1e8 100644
--- a/vendor/github.com/klauspost/compress/s2/encode_go.go
+++ b/vendor/github.com/klauspost/compress/s2/encode_go.go
@@ -12,6 +12,7 @@ import (
// been written.
//
// It also assumes that:
+//
// len(dst) >= MaxEncodedLen(len(src))
func encodeBlock(dst, src []byte) (d int) {
if len(src) < minNonLiteralBlockSize {
@@ -25,6 +26,7 @@ func encodeBlock(dst, src []byte) (d int) {
// been written.
//
// It also assumes that:
+//
// len(dst) >= MaxEncodedLen(len(src))
func encodeBlockBetter(dst, src []byte) (d int) {
return encodeBlockBetterGo(dst, src)
@@ -35,6 +37,7 @@ func encodeBlockBetter(dst, src []byte) (d int) {
// been written.
//
// It also assumes that:
+//
// len(dst) >= MaxEncodedLen(len(src))
func encodeBlockBetterSnappy(dst, src []byte) (d int) {
return encodeBlockBetterSnappyGo(dst, src)
@@ -45,6 +48,7 @@ func encodeBlockBetterSnappy(dst, src []byte) (d int) {
// been written.
//
// It also assumes that:
+//
// len(dst) >= MaxEncodedLen(len(src))
func encodeBlockSnappy(dst, src []byte) (d int) {
if len(src) < minNonLiteralBlockSize {
@@ -56,6 +60,7 @@ func encodeBlockSnappy(dst, src []byte) (d int) {
// emitLiteral writes a literal chunk and returns the number of bytes written.
//
// It assumes that:
+//
// dst is long enough to hold the encoded bytes
// 0 <= len(lit) && len(lit) <= math.MaxUint32
func emitLiteral(dst, lit []byte) int {
@@ -146,6 +151,7 @@ func emitRepeat(dst []byte, offset, length int) int {
// emitCopy writes a copy chunk and returns the number of bytes written.
//
// It assumes that:
+//
// dst is long enough to hold the encoded bytes
// 1 <= offset && offset <= math.MaxUint32
// 4 <= length && length <= 1 << 24
@@ -180,14 +186,23 @@ func emitCopy(dst []byte, offset, length int) int {
// Offset no more than 2 bytes.
if length > 64 {
- // Emit a length 60 copy, encoded as 3 bytes.
- // Emit remaining as repeat value (minimum 4 bytes).
- dst[2] = uint8(offset >> 8)
- dst[1] = uint8(offset)
- dst[0] = 59<<2 | tagCopy2
- length -= 60
+ off := 3
+ if offset < 2048 {
+ // emit 8 bytes as tagCopy1, rest as repeats.
+ dst[1] = uint8(offset)
+ dst[0] = uint8(offset>>8)<<5 | uint8(8-4)<<2 | tagCopy1
+ length -= 8
+ off = 2
+ } else {
+ // Emit a length 60 copy, encoded as 3 bytes.
+ // Emit remaining as repeat value (minimum 4 bytes).
+ dst[2] = uint8(offset >> 8)
+ dst[1] = uint8(offset)
+ dst[0] = 59<<2 | tagCopy2
+ length -= 60
+ }
// Emit remaining as repeats, at least 4 bytes remain.
- return 3 + emitRepeat(dst[3:], offset, length)
+ return off + emitRepeat(dst[off:], offset, length)
}
if length >= 12 || offset >= 2048 {
// Emit the remaining copy, encoded as 3 bytes.
@@ -205,6 +220,7 @@ func emitCopy(dst []byte, offset, length int) int {
// emitCopyNoRepeat writes a copy chunk and returns the number of bytes written.
//
// It assumes that:
+//
// dst is long enough to hold the encoded bytes
// 1 <= offset && offset <= math.MaxUint32
// 4 <= length && length <= 1 << 24
@@ -264,8 +280,8 @@ func emitCopyNoRepeat(dst []byte, offset, length int) int {
// matchLen returns how many bytes match in a and b
//
// It assumes that:
-// len(a) <= len(b)
//
+// len(a) <= len(b)
func matchLen(a []byte, b []byte) int {
b = b[:len(a)]
var checked int
diff --git a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go
index c8cf7b69e81d6..7e00bac3eae5f 100644
--- a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go
+++ b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go
@@ -1,10 +1,11 @@
// Code generated by command: go run gen.go -out ../encodeblock_amd64.s -stubs ../encodeblock_amd64.go -pkg=s2. DO NOT EDIT.
-//go:build !appengine && !noasm && gc
-// +build !appengine,!noasm,gc
+//go:build !appengine && !noasm && gc && !noasm
package s2
+func _dummy_()
+
// encodeBlockAsm encodes a non-empty src to a guaranteed-large-enough dst.
// Maximum input 4294967295 bytes.
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
@@ -148,8 +149,9 @@ func encodeSnappyBetterBlockAsm8B(dst []byte, src []byte) int
// emitLiteral writes a literal chunk and returns the number of bytes written.
//
// It assumes that:
-// dst is long enough to hold the encoded bytes with margin of 0 bytes
-// 0 <= len(lit) && len(lit) <= math.MaxUint32
+//
+// dst is long enough to hold the encoded bytes with margin of 0 bytes
+// 0 <= len(lit) && len(lit) <= math.MaxUint32
//
//go:noescape
func emitLiteral(dst []byte, lit []byte) int
@@ -163,9 +165,10 @@ func emitRepeat(dst []byte, offset int, length int) int
// emitCopy writes a copy chunk and returns the number of bytes written.
//
// It assumes that:
-// dst is long enough to hold the encoded bytes
-// 1 <= offset && offset <= math.MaxUint32
-// 4 <= length && length <= 1 << 24
+//
+// dst is long enough to hold the encoded bytes
+// 1 <= offset && offset <= math.MaxUint32
+// 4 <= length && length <= 1 << 24
//
//go:noescape
func emitCopy(dst []byte, offset int, length int) int
@@ -173,9 +176,10 @@ func emitCopy(dst []byte, offset int, length int) int
// emitCopyNoRepeat writes a copy chunk and returns the number of bytes written.
//
// It assumes that:
-// dst is long enough to hold the encoded bytes
-// 1 <= offset && offset <= math.MaxUint32
-// 4 <= length && length <= 1 << 24
+//
+// dst is long enough to hold the encoded bytes
+// 1 <= offset && offset <= math.MaxUint32
+// 4 <= length && length <= 1 << 24
//
//go:noescape
func emitCopyNoRepeat(dst []byte, offset int, length int) int
@@ -183,7 +187,8 @@ func emitCopyNoRepeat(dst []byte, offset int, length int) int
// matchLen returns how many bytes match in a and b
//
// It assumes that:
-// len(a) <= len(b)
+//
+// len(a) <= len(b)
//
//go:noescape
func matchLen(a []byte, b []byte) int
diff --git a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s
index 1ac65a0e352fa..81a487d6defac 100644
--- a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s
+++ b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s
@@ -1,13 +1,20 @@
// Code generated by command: go run gen.go -out ../encodeblock_amd64.s -stubs ../encodeblock_amd64.go -pkg=s2. DO NOT EDIT.
-// +build !appengine
-// +build !noasm
-// +build gc
+//go:build !appengine && !noasm && gc && !noasm
#include "textflag.h"
+// func _dummy_()
+TEXT ·_dummy_(SB), $0
+#ifdef GOAMD64_v4
+#ifndef GOAMD64_v3
+#define GOAMD64_v3
+#endif
+#endif
+ RET
+
// func encodeBlockAsm(dst []byte, src []byte) int
-// Requires: SSE2
+// Requires: BMI, SSE2
TEXT ·encodeBlockAsm(SB), $65560-56
MOVQ dst_base+0(FP), AX
MOVQ $0x00000200, CX
@@ -243,35 +250,57 @@ emit_literal_done_repeat_emit_encodeBlockAsm:
// matchLen
XORL R12, R12
CMPL R9, $0x08
- JL matchlen_single_repeat_extend_encodeBlockAsm
+ JL matchlen_match4_repeat_extend_encodeBlockAsm
matchlen_loopback_repeat_extend_encodeBlockAsm:
MOVQ (R10)(R12*1), R11
XORQ (SI)(R12*1), R11
TESTQ R11, R11
JZ matchlen_loop_repeat_extend_encodeBlockAsm
- BSFQ R11, R11
- SARQ $0x03, R11
- LEAL (R12)(R11*1), R12
- JMP repeat_extend_forward_end_encodeBlockAsm
+
+#ifdef GOAMD64_v3
+ TZCNTQ R11, R11
+
+#else
+ BSFQ R11, R11
+
+#endif
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
+ JMP repeat_extend_forward_end_encodeBlockAsm
matchlen_loop_repeat_extend_encodeBlockAsm:
LEAL -8(R9), R9
LEAL 8(R12), R12
CMPL R9, $0x08
JGE matchlen_loopback_repeat_extend_encodeBlockAsm
-
-matchlen_single_repeat_extend_encodeBlockAsm:
- TESTL R9, R9
- JZ repeat_extend_forward_end_encodeBlockAsm
-
-matchlen_single_loopback_repeat_extend_encodeBlockAsm:
+ JZ repeat_extend_forward_end_encodeBlockAsm
+
+matchlen_match4_repeat_extend_encodeBlockAsm:
+ CMPL R9, $0x04
+ JL matchlen_match2_repeat_extend_encodeBlockAsm
+ MOVL (R10)(R12*1), R11
+ CMPL (SI)(R12*1), R11
+ JNE matchlen_match2_repeat_extend_encodeBlockAsm
+ SUBL $0x04, R9
+ LEAL 4(R12), R12
+
+matchlen_match2_repeat_extend_encodeBlockAsm:
+ CMPL R9, $0x02
+ JL matchlen_match1_repeat_extend_encodeBlockAsm
+ MOVW (R10)(R12*1), R11
+ CMPW (SI)(R12*1), R11
+ JNE matchlen_match1_repeat_extend_encodeBlockAsm
+ SUBL $0x02, R9
+ LEAL 2(R12), R12
+
+matchlen_match1_repeat_extend_encodeBlockAsm:
+ CMPL R9, $0x01
+ JL repeat_extend_forward_end_encodeBlockAsm
MOVB (R10)(R12*1), R11
CMPB (SI)(R12*1), R11
JNE repeat_extend_forward_end_encodeBlockAsm
LEAL 1(R12), R12
- DECL R9
- JNZ matchlen_single_loopback_repeat_extend_encodeBlockAsm
repeat_extend_forward_end_encodeBlockAsm:
ADDL R12, CX
@@ -444,6 +473,90 @@ four_bytes_remain_repeat_as_copy_encodeBlockAsm:
two_byte_offset_repeat_as_copy_encodeBlockAsm:
CMPL SI, $0x40
JLE two_byte_offset_short_repeat_as_copy_encodeBlockAsm
+ CMPL DI, $0x00000800
+ JAE long_offset_short_repeat_as_copy_encodeBlockAsm
+ MOVL $0x00000001, R8
+ LEAL 16(R8), R8
+ MOVB DI, 1(AX)
+ MOVL DI, R9
+ SHRL $0x08, R9
+ SHLL $0x05, R9
+ ORL R9, R8
+ MOVB R8, (AX)
+ ADDQ $0x02, AX
+ SUBL $0x08, SI
+
+ // emitRepeat
+ LEAL -4(SI), SI
+ JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
+
+emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b:
+ MOVL SI, R8
+ LEAL -4(SI), SI
+ CMPL R8, $0x08
+ JLE repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
+ CMPL R8, $0x0c
+ JGE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
+ CMPL DI, $0x00000800
+ JLT repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
+
+cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b:
+ CMPL SI, $0x00000104
+ JLT repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
+ CMPL SI, $0x00010100
+ JLT repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
+ CMPL SI, $0x0100ffff
+ JLT repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
+ LEAL -16842747(SI), SI
+ MOVW $0x001d, (AX)
+ MOVW $0xfffb, 2(AX)
+ MOVB $0xff, 4(AX)
+ ADDQ $0x05, AX
+ JMP emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b
+
+repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b:
+ LEAL -65536(SI), SI
+ MOVL SI, DI
+ MOVW $0x001d, (AX)
+ MOVW SI, 2(AX)
+ SARL $0x10, DI
+ MOVB DI, 4(AX)
+ ADDQ $0x05, AX
+ JMP repeat_end_emit_encodeBlockAsm
+
+repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b:
+ LEAL -256(SI), SI
+ MOVW $0x0019, (AX)
+ MOVW SI, 2(AX)
+ ADDQ $0x04, AX
+ JMP repeat_end_emit_encodeBlockAsm
+
+repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b:
+ LEAL -4(SI), SI
+ MOVW $0x0015, (AX)
+ MOVB SI, 2(AX)
+ ADDQ $0x03, AX
+ JMP repeat_end_emit_encodeBlockAsm
+
+repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b:
+ SHLL $0x02, SI
+ ORL $0x01, SI
+ MOVW SI, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm
+
+repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b:
+ XORQ R8, R8
+ LEAL 1(R8)(SI*4), SI
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, SI
+ MOVB SI, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm
+
+long_offset_short_repeat_as_copy_encodeBlockAsm:
MOVB $0xee, (AX)
MOVW DI, 1(AX)
LEAL -60(SI), SI
@@ -748,35 +861,57 @@ match_nolit_loop_encodeBlockAsm:
// matchLen
XORL R10, R10
CMPL DI, $0x08
- JL matchlen_single_match_nolit_encodeBlockAsm
+ JL matchlen_match4_match_nolit_encodeBlockAsm
matchlen_loopback_match_nolit_encodeBlockAsm:
MOVQ (R8)(R10*1), R9
XORQ (SI)(R10*1), R9
TESTQ R9, R9
JZ matchlen_loop_match_nolit_encodeBlockAsm
- BSFQ R9, R9
- SARQ $0x03, R9
- LEAL (R10)(R9*1), R10
- JMP match_nolit_end_encodeBlockAsm
+
+#ifdef GOAMD64_v3
+ TZCNTQ R9, R9
+
+#else
+ BSFQ R9, R9
+
+#endif
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
+ JMP match_nolit_end_encodeBlockAsm
matchlen_loop_match_nolit_encodeBlockAsm:
LEAL -8(DI), DI
LEAL 8(R10), R10
CMPL DI, $0x08
JGE matchlen_loopback_match_nolit_encodeBlockAsm
-
-matchlen_single_match_nolit_encodeBlockAsm:
- TESTL DI, DI
- JZ match_nolit_end_encodeBlockAsm
-
-matchlen_single_loopback_match_nolit_encodeBlockAsm:
+ JZ match_nolit_end_encodeBlockAsm
+
+matchlen_match4_match_nolit_encodeBlockAsm:
+ CMPL DI, $0x04
+ JL matchlen_match2_match_nolit_encodeBlockAsm
+ MOVL (R8)(R10*1), R9
+ CMPL (SI)(R10*1), R9
+ JNE matchlen_match2_match_nolit_encodeBlockAsm
+ SUBL $0x04, DI
+ LEAL 4(R10), R10
+
+matchlen_match2_match_nolit_encodeBlockAsm:
+ CMPL DI, $0x02
+ JL matchlen_match1_match_nolit_encodeBlockAsm
+ MOVW (R8)(R10*1), R9
+ CMPW (SI)(R10*1), R9
+ JNE matchlen_match1_match_nolit_encodeBlockAsm
+ SUBL $0x02, DI
+ LEAL 2(R10), R10
+
+matchlen_match1_match_nolit_encodeBlockAsm:
+ CMPL DI, $0x01
+ JL match_nolit_end_encodeBlockAsm
MOVB (R8)(R10*1), R9
CMPB (SI)(R10*1), R9
JNE match_nolit_end_encodeBlockAsm
LEAL 1(R10), R10
- DECL DI
- JNZ matchlen_single_loopback_match_nolit_encodeBlockAsm
match_nolit_end_encodeBlockAsm:
ADDL R10, CX
@@ -879,6 +1014,90 @@ four_bytes_remain_match_nolit_encodeBlockAsm:
two_byte_offset_match_nolit_encodeBlockAsm:
CMPL R10, $0x40
JLE two_byte_offset_short_match_nolit_encodeBlockAsm
+ CMPL SI, $0x00000800
+ JAE long_offset_short_match_nolit_encodeBlockAsm
+ MOVL $0x00000001, DI
+ LEAL 16(DI), DI
+ MOVB SI, 1(AX)
+ MOVL SI, R8
+ SHRL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, DI
+ MOVB DI, (AX)
+ ADDQ $0x02, AX
+ SUBL $0x08, R10
+
+ // emitRepeat
+ LEAL -4(R10), R10
+ JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b
+
+emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short_2b:
+ MOVL R10, DI
+ LEAL -4(R10), R10
+ CMPL DI, $0x08
+ JLE repeat_two_match_nolit_encodeBlockAsm_emit_copy_short_2b
+ CMPL DI, $0x0c
+ JGE cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b
+ CMPL SI, $0x00000800
+ JLT repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b
+
+cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b:
+ CMPL R10, $0x00000104
+ JLT repeat_three_match_nolit_encodeBlockAsm_emit_copy_short_2b
+ CMPL R10, $0x00010100
+ JLT repeat_four_match_nolit_encodeBlockAsm_emit_copy_short_2b
+ CMPL R10, $0x0100ffff
+ JLT repeat_five_match_nolit_encodeBlockAsm_emit_copy_short_2b
+ LEAL -16842747(R10), R10
+ MOVW $0x001d, (AX)
+ MOVW $0xfffb, 2(AX)
+ MOVB $0xff, 4(AX)
+ ADDQ $0x05, AX
+ JMP emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short_2b
+
+repeat_five_match_nolit_encodeBlockAsm_emit_copy_short_2b:
+ LEAL -65536(R10), R10
+ MOVL R10, SI
+ MOVW $0x001d, (AX)
+ MOVW R10, 2(AX)
+ SARL $0x10, SI
+ MOVB SI, 4(AX)
+ ADDQ $0x05, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm
+
+repeat_four_match_nolit_encodeBlockAsm_emit_copy_short_2b:
+ LEAL -256(R10), R10
+ MOVW $0x0019, (AX)
+ MOVW R10, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm
+
+repeat_three_match_nolit_encodeBlockAsm_emit_copy_short_2b:
+ LEAL -4(R10), R10
+ MOVW $0x0015, (AX)
+ MOVB R10, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm
+
+repeat_two_match_nolit_encodeBlockAsm_emit_copy_short_2b:
+ SHLL $0x02, R10
+ ORL $0x01, R10
+ MOVW R10, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm
+
+repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b:
+ XORQ DI, DI
+ LEAL 1(DI)(R10*4), R10
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, R10
+ MOVB R10, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm
+
+long_offset_short_match_nolit_encodeBlockAsm:
MOVB $0xee, (AX)
MOVW SI, 1(AX)
LEAL -60(R10), R10
@@ -1069,17 +1288,36 @@ memmove_emit_remainder_encodeBlockAsm:
MOVL SI, BX
// genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_3
CMPQ BX, $0x08
- JLE emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_8
+ JB emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_4through7
CMPQ BX, $0x10
JBE emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_8through16
CMPQ BX, $0x20
JBE emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_17through32
JMP emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_33through64
-emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_8:
- MOVQ (CX), SI
- MOVQ SI, (AX)
+emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm
emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_8through16:
@@ -1162,7 +1400,7 @@ emit_literal_done_emit_remainder_encodeBlockAsm:
RET
// func encodeBlockAsm4MB(dst []byte, src []byte) int
-// Requires: SSE2
+// Requires: BMI, SSE2
TEXT ·encodeBlockAsm4MB(SB), $65560-56
MOVQ dst_base+0(FP), AX
MOVQ $0x00000200, CX
@@ -1390,35 +1628,57 @@ emit_literal_done_repeat_emit_encodeBlockAsm4MB:
// matchLen
XORL R12, R12
CMPL R9, $0x08
- JL matchlen_single_repeat_extend_encodeBlockAsm4MB
+ JL matchlen_match4_repeat_extend_encodeBlockAsm4MB
matchlen_loopback_repeat_extend_encodeBlockAsm4MB:
MOVQ (R10)(R12*1), R11
XORQ (SI)(R12*1), R11
TESTQ R11, R11
JZ matchlen_loop_repeat_extend_encodeBlockAsm4MB
- BSFQ R11, R11
- SARQ $0x03, R11
- LEAL (R12)(R11*1), R12
- JMP repeat_extend_forward_end_encodeBlockAsm4MB
+
+#ifdef GOAMD64_v3
+ TZCNTQ R11, R11
+
+#else
+ BSFQ R11, R11
+
+#endif
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
+ JMP repeat_extend_forward_end_encodeBlockAsm4MB
matchlen_loop_repeat_extend_encodeBlockAsm4MB:
LEAL -8(R9), R9
LEAL 8(R12), R12
CMPL R9, $0x08
JGE matchlen_loopback_repeat_extend_encodeBlockAsm4MB
-
-matchlen_single_repeat_extend_encodeBlockAsm4MB:
- TESTL R9, R9
- JZ repeat_extend_forward_end_encodeBlockAsm4MB
-
-matchlen_single_loopback_repeat_extend_encodeBlockAsm4MB:
+ JZ repeat_extend_forward_end_encodeBlockAsm4MB
+
+matchlen_match4_repeat_extend_encodeBlockAsm4MB:
+ CMPL R9, $0x04
+ JL matchlen_match2_repeat_extend_encodeBlockAsm4MB
+ MOVL (R10)(R12*1), R11
+ CMPL (SI)(R12*1), R11
+ JNE matchlen_match2_repeat_extend_encodeBlockAsm4MB
+ SUBL $0x04, R9
+ LEAL 4(R12), R12
+
+matchlen_match2_repeat_extend_encodeBlockAsm4MB:
+ CMPL R9, $0x02
+ JL matchlen_match1_repeat_extend_encodeBlockAsm4MB
+ MOVW (R10)(R12*1), R11
+ CMPW (SI)(R12*1), R11
+ JNE matchlen_match1_repeat_extend_encodeBlockAsm4MB
+ SUBL $0x02, R9
+ LEAL 2(R12), R12
+
+matchlen_match1_repeat_extend_encodeBlockAsm4MB:
+ CMPL R9, $0x01
+ JL repeat_extend_forward_end_encodeBlockAsm4MB
MOVB (R10)(R12*1), R11
CMPB (SI)(R12*1), R11
JNE repeat_extend_forward_end_encodeBlockAsm4MB
LEAL 1(R12), R12
- DECL R9
- JNZ matchlen_single_loopback_repeat_extend_encodeBlockAsm4MB
repeat_extend_forward_end_encodeBlockAsm4MB:
ADDL R12, CX
@@ -1569,6 +1829,77 @@ four_bytes_remain_repeat_as_copy_encodeBlockAsm4MB:
two_byte_offset_repeat_as_copy_encodeBlockAsm4MB:
CMPL SI, $0x40
JLE two_byte_offset_short_repeat_as_copy_encodeBlockAsm4MB
+ CMPL DI, $0x00000800
+ JAE long_offset_short_repeat_as_copy_encodeBlockAsm4MB
+ MOVL $0x00000001, R8
+ LEAL 16(R8), R8
+ MOVB DI, 1(AX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R8
+ MOVB R8, (AX)
+ ADDQ $0x02, AX
+ SUBL $0x08, SI
+
+ // emitRepeat
+ LEAL -4(SI), SI
+ JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b
+ MOVL SI, R8
+ LEAL -4(SI), SI
+ CMPL R8, $0x08
+ JLE repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b
+ CMPL R8, $0x0c
+ JGE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b
+ CMPL DI, $0x00000800
+ JLT repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b
+
+cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b:
+ CMPL SI, $0x00000104
+ JLT repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b
+ CMPL SI, $0x00010100
+ JLT repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b
+ LEAL -65536(SI), SI
+ MOVL SI, DI
+ MOVW $0x001d, (AX)
+ MOVW SI, 2(AX)
+ SARL $0x10, DI
+ MOVB DI, 4(AX)
+ ADDQ $0x05, AX
+ JMP repeat_end_emit_encodeBlockAsm4MB
+
+repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b:
+ LEAL -256(SI), SI
+ MOVW $0x0019, (AX)
+ MOVW SI, 2(AX)
+ ADDQ $0x04, AX
+ JMP repeat_end_emit_encodeBlockAsm4MB
+
+repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b:
+ LEAL -4(SI), SI
+ MOVW $0x0015, (AX)
+ MOVB SI, 2(AX)
+ ADDQ $0x03, AX
+ JMP repeat_end_emit_encodeBlockAsm4MB
+
+repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b:
+ SHLL $0x02, SI
+ ORL $0x01, SI
+ MOVW SI, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm4MB
+
+repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b:
+ XORQ R8, R8
+ LEAL 1(R8)(SI*4), SI
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, SI
+ MOVB SI, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm4MB
+
+long_offset_short_repeat_as_copy_encodeBlockAsm4MB:
MOVB $0xee, (AX)
MOVW DI, 1(AX)
LEAL -60(SI), SI
@@ -1854,35 +2185,57 @@ match_nolit_loop_encodeBlockAsm4MB:
// matchLen
XORL R10, R10
CMPL DI, $0x08
- JL matchlen_single_match_nolit_encodeBlockAsm4MB
+ JL matchlen_match4_match_nolit_encodeBlockAsm4MB
matchlen_loopback_match_nolit_encodeBlockAsm4MB:
MOVQ (R8)(R10*1), R9
XORQ (SI)(R10*1), R9
TESTQ R9, R9
JZ matchlen_loop_match_nolit_encodeBlockAsm4MB
- BSFQ R9, R9
- SARQ $0x03, R9
- LEAL (R10)(R9*1), R10
- JMP match_nolit_end_encodeBlockAsm4MB
+
+#ifdef GOAMD64_v3
+ TZCNTQ R9, R9
+
+#else
+ BSFQ R9, R9
+
+#endif
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
+ JMP match_nolit_end_encodeBlockAsm4MB
matchlen_loop_match_nolit_encodeBlockAsm4MB:
LEAL -8(DI), DI
LEAL 8(R10), R10
CMPL DI, $0x08
JGE matchlen_loopback_match_nolit_encodeBlockAsm4MB
-
-matchlen_single_match_nolit_encodeBlockAsm4MB:
- TESTL DI, DI
- JZ match_nolit_end_encodeBlockAsm4MB
-
-matchlen_single_loopback_match_nolit_encodeBlockAsm4MB:
+ JZ match_nolit_end_encodeBlockAsm4MB
+
+matchlen_match4_match_nolit_encodeBlockAsm4MB:
+ CMPL DI, $0x04
+ JL matchlen_match2_match_nolit_encodeBlockAsm4MB
+ MOVL (R8)(R10*1), R9
+ CMPL (SI)(R10*1), R9
+ JNE matchlen_match2_match_nolit_encodeBlockAsm4MB
+ SUBL $0x04, DI
+ LEAL 4(R10), R10
+
+matchlen_match2_match_nolit_encodeBlockAsm4MB:
+ CMPL DI, $0x02
+ JL matchlen_match1_match_nolit_encodeBlockAsm4MB
+ MOVW (R8)(R10*1), R9
+ CMPW (SI)(R10*1), R9
+ JNE matchlen_match1_match_nolit_encodeBlockAsm4MB
+ SUBL $0x02, DI
+ LEAL 2(R10), R10
+
+matchlen_match1_match_nolit_encodeBlockAsm4MB:
+ CMPL DI, $0x01
+ JL match_nolit_end_encodeBlockAsm4MB
MOVB (R8)(R10*1), R9
CMPB (SI)(R10*1), R9
JNE match_nolit_end_encodeBlockAsm4MB
LEAL 1(R10), R10
- DECL DI
- JNZ matchlen_single_loopback_match_nolit_encodeBlockAsm4MB
match_nolit_end_encodeBlockAsm4MB:
ADDL R10, CX
@@ -1974,6 +2327,77 @@ four_bytes_remain_match_nolit_encodeBlockAsm4MB:
two_byte_offset_match_nolit_encodeBlockAsm4MB:
CMPL R10, $0x40
JLE two_byte_offset_short_match_nolit_encodeBlockAsm4MB
+ CMPL SI, $0x00000800
+ JAE long_offset_short_match_nolit_encodeBlockAsm4MB
+ MOVL $0x00000001, DI
+ LEAL 16(DI), DI
+ MOVB SI, 1(AX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (AX)
+ ADDQ $0x02, AX
+ SUBL $0x08, R10
+
+ // emitRepeat
+ LEAL -4(R10), R10
+ JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b
+ MOVL R10, DI
+ LEAL -4(R10), R10
+ CMPL DI, $0x08
+ JLE repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b
+ CMPL DI, $0x0c
+ JGE cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b
+ CMPL SI, $0x00000800
+ JLT repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b
+
+cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b:
+ CMPL R10, $0x00000104
+ JLT repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b
+ CMPL R10, $0x00010100
+ JLT repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b
+ LEAL -65536(R10), R10
+ MOVL R10, SI
+ MOVW $0x001d, (AX)
+ MOVW R10, 2(AX)
+ SARL $0x10, SI
+ MOVB SI, 4(AX)
+ ADDQ $0x05, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b:
+ LEAL -256(R10), R10
+ MOVW $0x0019, (AX)
+ MOVW R10, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b:
+ LEAL -4(R10), R10
+ MOVW $0x0015, (AX)
+ MOVB R10, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b:
+ SHLL $0x02, R10
+ ORL $0x01, R10
+ MOVW R10, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b:
+ XORQ DI, DI
+ LEAL 1(DI)(R10*4), R10
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, R10
+ MOVB R10, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+long_offset_short_match_nolit_encodeBlockAsm4MB:
MOVB $0xee, (AX)
MOVW SI, 1(AX)
LEAL -60(R10), R10
@@ -2145,17 +2569,36 @@ memmove_emit_remainder_encodeBlockAsm4MB:
MOVL SI, BX
// genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_3
CMPQ BX, $0x08
- JLE emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_8
+ JB emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_4through7
CMPQ BX, $0x10
JBE emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_8through16
CMPQ BX, $0x20
JBE emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_17through32
JMP emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_33through64
-emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_8:
- MOVQ (CX), SI
- MOVQ SI, (AX)
+emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB
emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_8through16:
@@ -2238,7 +2681,7 @@ emit_literal_done_emit_remainder_encodeBlockAsm4MB:
RET
// func encodeBlockAsm12B(dst []byte, src []byte) int
-// Requires: SSE2
+// Requires: BMI, SSE2
TEXT ·encodeBlockAsm12B(SB), $16408-56
MOVQ dst_base+0(FP), AX
MOVQ $0x00000080, CX
@@ -2455,35 +2898,57 @@ emit_literal_done_repeat_emit_encodeBlockAsm12B:
// matchLen
XORL R12, R12
CMPL R9, $0x08
- JL matchlen_single_repeat_extend_encodeBlockAsm12B
+ JL matchlen_match4_repeat_extend_encodeBlockAsm12B
matchlen_loopback_repeat_extend_encodeBlockAsm12B:
MOVQ (R10)(R12*1), R11
XORQ (SI)(R12*1), R11
TESTQ R11, R11
JZ matchlen_loop_repeat_extend_encodeBlockAsm12B
- BSFQ R11, R11
- SARQ $0x03, R11
- LEAL (R12)(R11*1), R12
- JMP repeat_extend_forward_end_encodeBlockAsm12B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R11, R11
+
+#else
+ BSFQ R11, R11
+
+#endif
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
+ JMP repeat_extend_forward_end_encodeBlockAsm12B
matchlen_loop_repeat_extend_encodeBlockAsm12B:
LEAL -8(R9), R9
LEAL 8(R12), R12
CMPL R9, $0x08
JGE matchlen_loopback_repeat_extend_encodeBlockAsm12B
-
-matchlen_single_repeat_extend_encodeBlockAsm12B:
- TESTL R9, R9
- JZ repeat_extend_forward_end_encodeBlockAsm12B
-
-matchlen_single_loopback_repeat_extend_encodeBlockAsm12B:
+ JZ repeat_extend_forward_end_encodeBlockAsm12B
+
+matchlen_match4_repeat_extend_encodeBlockAsm12B:
+ CMPL R9, $0x04
+ JL matchlen_match2_repeat_extend_encodeBlockAsm12B
+ MOVL (R10)(R12*1), R11
+ CMPL (SI)(R12*1), R11
+ JNE matchlen_match2_repeat_extend_encodeBlockAsm12B
+ SUBL $0x04, R9
+ LEAL 4(R12), R12
+
+matchlen_match2_repeat_extend_encodeBlockAsm12B:
+ CMPL R9, $0x02
+ JL matchlen_match1_repeat_extend_encodeBlockAsm12B
+ MOVW (R10)(R12*1), R11
+ CMPW (SI)(R12*1), R11
+ JNE matchlen_match1_repeat_extend_encodeBlockAsm12B
+ SUBL $0x02, R9
+ LEAL 2(R12), R12
+
+matchlen_match1_repeat_extend_encodeBlockAsm12B:
+ CMPL R9, $0x01
+ JL repeat_extend_forward_end_encodeBlockAsm12B
MOVB (R10)(R12*1), R11
CMPB (SI)(R12*1), R11
JNE repeat_extend_forward_end_encodeBlockAsm12B
LEAL 1(R12), R12
- DECL R9
- JNZ matchlen_single_loopback_repeat_extend_encodeBlockAsm12B
repeat_extend_forward_end_encodeBlockAsm12B:
ADDL R12, CX
@@ -2542,6 +3007,65 @@ repeat_as_copy_encodeBlockAsm12B:
two_byte_offset_repeat_as_copy_encodeBlockAsm12B:
CMPL SI, $0x40
JLE two_byte_offset_short_repeat_as_copy_encodeBlockAsm12B
+ CMPL DI, $0x00000800
+ JAE long_offset_short_repeat_as_copy_encodeBlockAsm12B
+ MOVL $0x00000001, R8
+ LEAL 16(R8), R8
+ MOVB DI, 1(AX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R8
+ MOVB R8, (AX)
+ ADDQ $0x02, AX
+ SUBL $0x08, SI
+
+ // emitRepeat
+ LEAL -4(SI), SI
+ JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b
+ MOVL SI, R8
+ LEAL -4(SI), SI
+ CMPL R8, $0x08
+ JLE repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b
+ CMPL R8, $0x0c
+ JGE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b
+ CMPL DI, $0x00000800
+ JLT repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b
+
+cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b:
+ CMPL SI, $0x00000104
+ JLT repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b
+ LEAL -256(SI), SI
+ MOVW $0x0019, (AX)
+ MOVW SI, 2(AX)
+ ADDQ $0x04, AX
+ JMP repeat_end_emit_encodeBlockAsm12B
+
+repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b:
+ LEAL -4(SI), SI
+ MOVW $0x0015, (AX)
+ MOVB SI, 2(AX)
+ ADDQ $0x03, AX
+ JMP repeat_end_emit_encodeBlockAsm12B
+
+repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b:
+ SHLL $0x02, SI
+ ORL $0x01, SI
+ MOVW SI, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm12B
+
+repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b:
+ XORQ R8, R8
+ LEAL 1(R8)(SI*4), SI
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, SI
+ MOVB SI, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm12B
+
+long_offset_short_repeat_as_copy_encodeBlockAsm12B:
MOVB $0xee, (AX)
MOVW DI, 1(AX)
LEAL -60(SI), SI
@@ -2804,35 +3328,57 @@ match_nolit_loop_encodeBlockAsm12B:
// matchLen
XORL R10, R10
CMPL DI, $0x08
- JL matchlen_single_match_nolit_encodeBlockAsm12B
+ JL matchlen_match4_match_nolit_encodeBlockAsm12B
matchlen_loopback_match_nolit_encodeBlockAsm12B:
MOVQ (R8)(R10*1), R9
XORQ (SI)(R10*1), R9
TESTQ R9, R9
JZ matchlen_loop_match_nolit_encodeBlockAsm12B
- BSFQ R9, R9
- SARQ $0x03, R9
- LEAL (R10)(R9*1), R10
- JMP match_nolit_end_encodeBlockAsm12B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R9, R9
+
+#else
+ BSFQ R9, R9
+
+#endif
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
+ JMP match_nolit_end_encodeBlockAsm12B
matchlen_loop_match_nolit_encodeBlockAsm12B:
LEAL -8(DI), DI
LEAL 8(R10), R10
CMPL DI, $0x08
JGE matchlen_loopback_match_nolit_encodeBlockAsm12B
-
-matchlen_single_match_nolit_encodeBlockAsm12B:
- TESTL DI, DI
- JZ match_nolit_end_encodeBlockAsm12B
-
-matchlen_single_loopback_match_nolit_encodeBlockAsm12B:
+ JZ match_nolit_end_encodeBlockAsm12B
+
+matchlen_match4_match_nolit_encodeBlockAsm12B:
+ CMPL DI, $0x04
+ JL matchlen_match2_match_nolit_encodeBlockAsm12B
+ MOVL (R8)(R10*1), R9
+ CMPL (SI)(R10*1), R9
+ JNE matchlen_match2_match_nolit_encodeBlockAsm12B
+ SUBL $0x04, DI
+ LEAL 4(R10), R10
+
+matchlen_match2_match_nolit_encodeBlockAsm12B:
+ CMPL DI, $0x02
+ JL matchlen_match1_match_nolit_encodeBlockAsm12B
+ MOVW (R8)(R10*1), R9
+ CMPW (SI)(R10*1), R9
+ JNE matchlen_match1_match_nolit_encodeBlockAsm12B
+ SUBL $0x02, DI
+ LEAL 2(R10), R10
+
+matchlen_match1_match_nolit_encodeBlockAsm12B:
+ CMPL DI, $0x01
+ JL match_nolit_end_encodeBlockAsm12B
MOVB (R8)(R10*1), R9
CMPB (SI)(R10*1), R9
JNE match_nolit_end_encodeBlockAsm12B
LEAL 1(R10), R10
- DECL DI
- JNZ matchlen_single_loopback_match_nolit_encodeBlockAsm12B
match_nolit_end_encodeBlockAsm12B:
ADDL R10, CX
@@ -2844,6 +3390,65 @@ match_nolit_end_encodeBlockAsm12B:
two_byte_offset_match_nolit_encodeBlockAsm12B:
CMPL R10, $0x40
JLE two_byte_offset_short_match_nolit_encodeBlockAsm12B
+ CMPL SI, $0x00000800
+ JAE long_offset_short_match_nolit_encodeBlockAsm12B
+ MOVL $0x00000001, DI
+ LEAL 16(DI), DI
+ MOVB SI, 1(AX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (AX)
+ ADDQ $0x02, AX
+ SUBL $0x08, R10
+
+ // emitRepeat
+ LEAL -4(R10), R10
+ JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b
+ MOVL R10, DI
+ LEAL -4(R10), R10
+ CMPL DI, $0x08
+ JLE repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short_2b
+ CMPL DI, $0x0c
+ JGE cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b
+ CMPL SI, $0x00000800
+ JLT repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b
+
+cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b:
+ CMPL R10, $0x00000104
+ JLT repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short_2b
+ LEAL -256(R10), R10
+ MOVW $0x0019, (AX)
+ MOVW R10, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm12B
+
+repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short_2b:
+ LEAL -4(R10), R10
+ MOVW $0x0015, (AX)
+ MOVB R10, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm12B
+
+repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short_2b:
+ SHLL $0x02, R10
+ ORL $0x01, R10
+ MOVW R10, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm12B
+
+repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b:
+ XORQ DI, DI
+ LEAL 1(DI)(R10*4), R10
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, R10
+ MOVB R10, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm12B
+
+long_offset_short_match_nolit_encodeBlockAsm12B:
MOVB $0xee, (AX)
MOVW SI, 1(AX)
LEAL -60(R10), R10
@@ -2992,17 +3597,36 @@ memmove_emit_remainder_encodeBlockAsm12B:
MOVL SI, BX
// genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_3
CMPQ BX, $0x08
- JLE emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_8
+ JB emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_4through7
CMPQ BX, $0x10
JBE emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_8through16
CMPQ BX, $0x20
JBE emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_17through32
JMP emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_33through64
-emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_8:
- MOVQ (CX), SI
- MOVQ SI, (AX)
+emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B
emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_8through16:
@@ -3085,7 +3709,7 @@ emit_literal_done_emit_remainder_encodeBlockAsm12B:
RET
// func encodeBlockAsm10B(dst []byte, src []byte) int
-// Requires: SSE2
+// Requires: BMI, SSE2
TEXT ·encodeBlockAsm10B(SB), $4120-56
MOVQ dst_base+0(FP), AX
MOVQ $0x00000020, CX
@@ -3302,35 +3926,57 @@ emit_literal_done_repeat_emit_encodeBlockAsm10B:
// matchLen
XORL R12, R12
CMPL R9, $0x08
- JL matchlen_single_repeat_extend_encodeBlockAsm10B
+ JL matchlen_match4_repeat_extend_encodeBlockAsm10B
matchlen_loopback_repeat_extend_encodeBlockAsm10B:
MOVQ (R10)(R12*1), R11
XORQ (SI)(R12*1), R11
TESTQ R11, R11
JZ matchlen_loop_repeat_extend_encodeBlockAsm10B
- BSFQ R11, R11
- SARQ $0x03, R11
- LEAL (R12)(R11*1), R12
- JMP repeat_extend_forward_end_encodeBlockAsm10B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R11, R11
+
+#else
+ BSFQ R11, R11
+
+#endif
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
+ JMP repeat_extend_forward_end_encodeBlockAsm10B
matchlen_loop_repeat_extend_encodeBlockAsm10B:
LEAL -8(R9), R9
LEAL 8(R12), R12
CMPL R9, $0x08
JGE matchlen_loopback_repeat_extend_encodeBlockAsm10B
-
-matchlen_single_repeat_extend_encodeBlockAsm10B:
- TESTL R9, R9
- JZ repeat_extend_forward_end_encodeBlockAsm10B
-
-matchlen_single_loopback_repeat_extend_encodeBlockAsm10B:
+ JZ repeat_extend_forward_end_encodeBlockAsm10B
+
+matchlen_match4_repeat_extend_encodeBlockAsm10B:
+ CMPL R9, $0x04
+ JL matchlen_match2_repeat_extend_encodeBlockAsm10B
+ MOVL (R10)(R12*1), R11
+ CMPL (SI)(R12*1), R11
+ JNE matchlen_match2_repeat_extend_encodeBlockAsm10B
+ SUBL $0x04, R9
+ LEAL 4(R12), R12
+
+matchlen_match2_repeat_extend_encodeBlockAsm10B:
+ CMPL R9, $0x02
+ JL matchlen_match1_repeat_extend_encodeBlockAsm10B
+ MOVW (R10)(R12*1), R11
+ CMPW (SI)(R12*1), R11
+ JNE matchlen_match1_repeat_extend_encodeBlockAsm10B
+ SUBL $0x02, R9
+ LEAL 2(R12), R12
+
+matchlen_match1_repeat_extend_encodeBlockAsm10B:
+ CMPL R9, $0x01
+ JL repeat_extend_forward_end_encodeBlockAsm10B
MOVB (R10)(R12*1), R11
CMPB (SI)(R12*1), R11
JNE repeat_extend_forward_end_encodeBlockAsm10B
LEAL 1(R12), R12
- DECL R9
- JNZ matchlen_single_loopback_repeat_extend_encodeBlockAsm10B
repeat_extend_forward_end_encodeBlockAsm10B:
ADDL R12, CX
@@ -3389,6 +4035,65 @@ repeat_as_copy_encodeBlockAsm10B:
two_byte_offset_repeat_as_copy_encodeBlockAsm10B:
CMPL SI, $0x40
JLE two_byte_offset_short_repeat_as_copy_encodeBlockAsm10B
+ CMPL DI, $0x00000800
+ JAE long_offset_short_repeat_as_copy_encodeBlockAsm10B
+ MOVL $0x00000001, R8
+ LEAL 16(R8), R8
+ MOVB DI, 1(AX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R8
+ MOVB R8, (AX)
+ ADDQ $0x02, AX
+ SUBL $0x08, SI
+
+ // emitRepeat
+ LEAL -4(SI), SI
+ JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b
+ MOVL SI, R8
+ LEAL -4(SI), SI
+ CMPL R8, $0x08
+ JLE repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b
+ CMPL R8, $0x0c
+ JGE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b
+ CMPL DI, $0x00000800
+ JLT repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b
+
+cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b:
+ CMPL SI, $0x00000104
+ JLT repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b
+ LEAL -256(SI), SI
+ MOVW $0x0019, (AX)
+ MOVW SI, 2(AX)
+ ADDQ $0x04, AX
+ JMP repeat_end_emit_encodeBlockAsm10B
+
+repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b:
+ LEAL -4(SI), SI
+ MOVW $0x0015, (AX)
+ MOVB SI, 2(AX)
+ ADDQ $0x03, AX
+ JMP repeat_end_emit_encodeBlockAsm10B
+
+repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b:
+ SHLL $0x02, SI
+ ORL $0x01, SI
+ MOVW SI, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm10B
+
+repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b:
+ XORQ R8, R8
+ LEAL 1(R8)(SI*4), SI
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, SI
+ MOVB SI, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm10B
+
+long_offset_short_repeat_as_copy_encodeBlockAsm10B:
MOVB $0xee, (AX)
MOVW DI, 1(AX)
LEAL -60(SI), SI
@@ -3651,46 +4356,127 @@ match_nolit_loop_encodeBlockAsm10B:
// matchLen
XORL R10, R10
CMPL DI, $0x08
- JL matchlen_single_match_nolit_encodeBlockAsm10B
+ JL matchlen_match4_match_nolit_encodeBlockAsm10B
matchlen_loopback_match_nolit_encodeBlockAsm10B:
MOVQ (R8)(R10*1), R9
XORQ (SI)(R10*1), R9
TESTQ R9, R9
JZ matchlen_loop_match_nolit_encodeBlockAsm10B
- BSFQ R9, R9
- SARQ $0x03, R9
- LEAL (R10)(R9*1), R10
- JMP match_nolit_end_encodeBlockAsm10B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R9, R9
+
+#else
+ BSFQ R9, R9
+
+#endif
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
+ JMP match_nolit_end_encodeBlockAsm10B
matchlen_loop_match_nolit_encodeBlockAsm10B:
LEAL -8(DI), DI
LEAL 8(R10), R10
CMPL DI, $0x08
JGE matchlen_loopback_match_nolit_encodeBlockAsm10B
-
-matchlen_single_match_nolit_encodeBlockAsm10B:
- TESTL DI, DI
- JZ match_nolit_end_encodeBlockAsm10B
-
-matchlen_single_loopback_match_nolit_encodeBlockAsm10B:
+ JZ match_nolit_end_encodeBlockAsm10B
+
+matchlen_match4_match_nolit_encodeBlockAsm10B:
+ CMPL DI, $0x04
+ JL matchlen_match2_match_nolit_encodeBlockAsm10B
+ MOVL (R8)(R10*1), R9
+ CMPL (SI)(R10*1), R9
+ JNE matchlen_match2_match_nolit_encodeBlockAsm10B
+ SUBL $0x04, DI
+ LEAL 4(R10), R10
+
+matchlen_match2_match_nolit_encodeBlockAsm10B:
+ CMPL DI, $0x02
+ JL matchlen_match1_match_nolit_encodeBlockAsm10B
+ MOVW (R8)(R10*1), R9
+ CMPW (SI)(R10*1), R9
+ JNE matchlen_match1_match_nolit_encodeBlockAsm10B
+ SUBL $0x02, DI
+ LEAL 2(R10), R10
+
+matchlen_match1_match_nolit_encodeBlockAsm10B:
+ CMPL DI, $0x01
+ JL match_nolit_end_encodeBlockAsm10B
MOVB (R8)(R10*1), R9
CMPB (SI)(R10*1), R9
JNE match_nolit_end_encodeBlockAsm10B
LEAL 1(R10), R10
- DECL DI
- JNZ matchlen_single_loopback_match_nolit_encodeBlockAsm10B
-match_nolit_end_encodeBlockAsm10B:
- ADDL R10, CX
- MOVL 16(SP), SI
- ADDL $0x04, R10
- MOVL CX, 12(SP)
+match_nolit_end_encodeBlockAsm10B:
+ ADDL R10, CX
+ MOVL 16(SP), SI
+ ADDL $0x04, R10
+ MOVL CX, 12(SP)
+
+ // emitCopy
+two_byte_offset_match_nolit_encodeBlockAsm10B:
+ CMPL R10, $0x40
+ JLE two_byte_offset_short_match_nolit_encodeBlockAsm10B
+ CMPL SI, $0x00000800
+ JAE long_offset_short_match_nolit_encodeBlockAsm10B
+ MOVL $0x00000001, DI
+ LEAL 16(DI), DI
+ MOVB SI, 1(AX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (AX)
+ ADDQ $0x02, AX
+ SUBL $0x08, R10
+
+ // emitRepeat
+ LEAL -4(R10), R10
+ JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b
+ MOVL R10, DI
+ LEAL -4(R10), R10
+ CMPL DI, $0x08
+ JLE repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short_2b
+ CMPL DI, $0x0c
+ JGE cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b
+ CMPL SI, $0x00000800
+ JLT repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b
+
+cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b:
+ CMPL R10, $0x00000104
+ JLT repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short_2b
+ LEAL -256(R10), R10
+ MOVW $0x0019, (AX)
+ MOVW R10, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm10B
+
+repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short_2b:
+ LEAL -4(R10), R10
+ MOVW $0x0015, (AX)
+ MOVB R10, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm10B
+
+repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short_2b:
+ SHLL $0x02, R10
+ ORL $0x01, R10
+ MOVW R10, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm10B
+
+repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b:
+ XORQ DI, DI
+ LEAL 1(DI)(R10*4), R10
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, R10
+ MOVB R10, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm10B
- // emitCopy
-two_byte_offset_match_nolit_encodeBlockAsm10B:
- CMPL R10, $0x40
- JLE two_byte_offset_short_match_nolit_encodeBlockAsm10B
+long_offset_short_match_nolit_encodeBlockAsm10B:
MOVB $0xee, (AX)
MOVW SI, 1(AX)
LEAL -60(R10), R10
@@ -3839,17 +4625,36 @@ memmove_emit_remainder_encodeBlockAsm10B:
MOVL SI, BX
// genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_3
CMPQ BX, $0x08
- JLE emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_8
+ JB emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_4through7
CMPQ BX, $0x10
JBE emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_8through16
CMPQ BX, $0x20
JBE emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_17through32
JMP emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_33through64
-emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_8:
- MOVQ (CX), SI
- MOVQ SI, (AX)
+emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B
emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_8through16:
@@ -3932,7 +4737,7 @@ emit_literal_done_emit_remainder_encodeBlockAsm10B:
RET
// func encodeBlockAsm8B(dst []byte, src []byte) int
-// Requires: SSE2
+// Requires: BMI, SSE2
TEXT ·encodeBlockAsm8B(SB), $1048-56
MOVQ dst_base+0(FP), AX
MOVQ $0x00000008, CX
@@ -4149,35 +4954,57 @@ emit_literal_done_repeat_emit_encodeBlockAsm8B:
// matchLen
XORL R12, R12
CMPL R9, $0x08
- JL matchlen_single_repeat_extend_encodeBlockAsm8B
+ JL matchlen_match4_repeat_extend_encodeBlockAsm8B
matchlen_loopback_repeat_extend_encodeBlockAsm8B:
MOVQ (R10)(R12*1), R11
XORQ (SI)(R12*1), R11
TESTQ R11, R11
JZ matchlen_loop_repeat_extend_encodeBlockAsm8B
- BSFQ R11, R11
- SARQ $0x03, R11
- LEAL (R12)(R11*1), R12
- JMP repeat_extend_forward_end_encodeBlockAsm8B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R11, R11
+
+#else
+ BSFQ R11, R11
+
+#endif
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
+ JMP repeat_extend_forward_end_encodeBlockAsm8B
matchlen_loop_repeat_extend_encodeBlockAsm8B:
LEAL -8(R9), R9
LEAL 8(R12), R12
CMPL R9, $0x08
JGE matchlen_loopback_repeat_extend_encodeBlockAsm8B
-
-matchlen_single_repeat_extend_encodeBlockAsm8B:
- TESTL R9, R9
- JZ repeat_extend_forward_end_encodeBlockAsm8B
-
-matchlen_single_loopback_repeat_extend_encodeBlockAsm8B:
+ JZ repeat_extend_forward_end_encodeBlockAsm8B
+
+matchlen_match4_repeat_extend_encodeBlockAsm8B:
+ CMPL R9, $0x04
+ JL matchlen_match2_repeat_extend_encodeBlockAsm8B
+ MOVL (R10)(R12*1), R11
+ CMPL (SI)(R12*1), R11
+ JNE matchlen_match2_repeat_extend_encodeBlockAsm8B
+ SUBL $0x04, R9
+ LEAL 4(R12), R12
+
+matchlen_match2_repeat_extend_encodeBlockAsm8B:
+ CMPL R9, $0x02
+ JL matchlen_match1_repeat_extend_encodeBlockAsm8B
+ MOVW (R10)(R12*1), R11
+ CMPW (SI)(R12*1), R11
+ JNE matchlen_match1_repeat_extend_encodeBlockAsm8B
+ SUBL $0x02, R9
+ LEAL 2(R12), R12
+
+matchlen_match1_repeat_extend_encodeBlockAsm8B:
+ CMPL R9, $0x01
+ JL repeat_extend_forward_end_encodeBlockAsm8B
MOVB (R10)(R12*1), R11
CMPB (SI)(R12*1), R11
JNE repeat_extend_forward_end_encodeBlockAsm8B
LEAL 1(R12), R12
- DECL R9
- JNZ matchlen_single_loopback_repeat_extend_encodeBlockAsm8B
repeat_extend_forward_end_encodeBlockAsm8B:
ADDL R12, CX
@@ -4232,6 +5059,61 @@ repeat_as_copy_encodeBlockAsm8B:
two_byte_offset_repeat_as_copy_encodeBlockAsm8B:
CMPL SI, $0x40
JLE two_byte_offset_short_repeat_as_copy_encodeBlockAsm8B
+ CMPL DI, $0x00000800
+ JAE long_offset_short_repeat_as_copy_encodeBlockAsm8B
+ MOVL $0x00000001, R8
+ LEAL 16(R8), R8
+ MOVB DI, 1(AX)
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, R8
+ MOVB R8, (AX)
+ ADDQ $0x02, AX
+ SUBL $0x08, SI
+
+ // emitRepeat
+ LEAL -4(SI), SI
+ JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b
+ MOVL SI, DI
+ LEAL -4(SI), SI
+ CMPL DI, $0x08
+ JLE repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b
+ CMPL DI, $0x0c
+ JGE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b
+
+cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b:
+ CMPL SI, $0x00000104
+ JLT repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b
+ LEAL -256(SI), SI
+ MOVW $0x0019, (AX)
+ MOVW SI, 2(AX)
+ ADDQ $0x04, AX
+ JMP repeat_end_emit_encodeBlockAsm8B
+
+repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b:
+ LEAL -4(SI), SI
+ MOVW $0x0015, (AX)
+ MOVB SI, 2(AX)
+ ADDQ $0x03, AX
+ JMP repeat_end_emit_encodeBlockAsm8B
+
+repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b:
+ SHLL $0x02, SI
+ ORL $0x01, SI
+ MOVW SI, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm8B
+ XORQ R8, R8
+ LEAL 1(R8)(SI*4), SI
+ MOVB DI, 1(AX)
+ SARL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, SI
+ MOVB SI, (AX)
+ ADDQ $0x02, AX
+ JMP repeat_end_emit_encodeBlockAsm8B
+
+long_offset_short_repeat_as_copy_encodeBlockAsm8B:
MOVB $0xee, (AX)
MOVW DI, 1(AX)
LEAL -60(SI), SI
@@ -4488,35 +5370,57 @@ match_nolit_loop_encodeBlockAsm8B:
// matchLen
XORL R10, R10
CMPL DI, $0x08
- JL matchlen_single_match_nolit_encodeBlockAsm8B
+ JL matchlen_match4_match_nolit_encodeBlockAsm8B
matchlen_loopback_match_nolit_encodeBlockAsm8B:
MOVQ (R8)(R10*1), R9
XORQ (SI)(R10*1), R9
TESTQ R9, R9
JZ matchlen_loop_match_nolit_encodeBlockAsm8B
- BSFQ R9, R9
- SARQ $0x03, R9
- LEAL (R10)(R9*1), R10
- JMP match_nolit_end_encodeBlockAsm8B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R9, R9
+
+#else
+ BSFQ R9, R9
+
+#endif
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
+ JMP match_nolit_end_encodeBlockAsm8B
matchlen_loop_match_nolit_encodeBlockAsm8B:
LEAL -8(DI), DI
LEAL 8(R10), R10
CMPL DI, $0x08
JGE matchlen_loopback_match_nolit_encodeBlockAsm8B
-
-matchlen_single_match_nolit_encodeBlockAsm8B:
- TESTL DI, DI
- JZ match_nolit_end_encodeBlockAsm8B
-
-matchlen_single_loopback_match_nolit_encodeBlockAsm8B:
+ JZ match_nolit_end_encodeBlockAsm8B
+
+matchlen_match4_match_nolit_encodeBlockAsm8B:
+ CMPL DI, $0x04
+ JL matchlen_match2_match_nolit_encodeBlockAsm8B
+ MOVL (R8)(R10*1), R9
+ CMPL (SI)(R10*1), R9
+ JNE matchlen_match2_match_nolit_encodeBlockAsm8B
+ SUBL $0x04, DI
+ LEAL 4(R10), R10
+
+matchlen_match2_match_nolit_encodeBlockAsm8B:
+ CMPL DI, $0x02
+ JL matchlen_match1_match_nolit_encodeBlockAsm8B
+ MOVW (R8)(R10*1), R9
+ CMPW (SI)(R10*1), R9
+ JNE matchlen_match1_match_nolit_encodeBlockAsm8B
+ SUBL $0x02, DI
+ LEAL 2(R10), R10
+
+matchlen_match1_match_nolit_encodeBlockAsm8B:
+ CMPL DI, $0x01
+ JL match_nolit_end_encodeBlockAsm8B
MOVB (R8)(R10*1), R9
CMPB (SI)(R10*1), R9
JNE match_nolit_end_encodeBlockAsm8B
LEAL 1(R10), R10
- DECL DI
- JNZ matchlen_single_loopback_match_nolit_encodeBlockAsm8B
match_nolit_end_encodeBlockAsm8B:
ADDL R10, CX
@@ -4528,6 +5432,61 @@ match_nolit_end_encodeBlockAsm8B:
two_byte_offset_match_nolit_encodeBlockAsm8B:
CMPL R10, $0x40
JLE two_byte_offset_short_match_nolit_encodeBlockAsm8B
+ CMPL SI, $0x00000800
+ JAE long_offset_short_match_nolit_encodeBlockAsm8B
+ MOVL $0x00000001, DI
+ LEAL 16(DI), DI
+ MOVB SI, 1(AX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (AX)
+ ADDQ $0x02, AX
+ SUBL $0x08, R10
+
+ // emitRepeat
+ LEAL -4(R10), R10
+ JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short_2b
+ MOVL R10, SI
+ LEAL -4(R10), R10
+ CMPL SI, $0x08
+ JLE repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short_2b
+ CMPL SI, $0x0c
+ JGE cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short_2b
+
+cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short_2b:
+ CMPL R10, $0x00000104
+ JLT repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short_2b
+ LEAL -256(R10), R10
+ MOVW $0x0019, (AX)
+ MOVW R10, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm8B
+
+repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short_2b:
+ LEAL -4(R10), R10
+ MOVW $0x0015, (AX)
+ MOVB R10, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm8B
+
+repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short_2b:
+ SHLL $0x02, R10
+ ORL $0x01, R10
+ MOVW R10, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm8B
+ XORQ DI, DI
+ LEAL 1(DI)(R10*4), R10
+ MOVB SI, 1(AX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, R10
+ MOVB R10, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm8B
+
+long_offset_short_match_nolit_encodeBlockAsm8B:
MOVB $0xee, (AX)
MOVW SI, 1(AX)
LEAL -60(R10), R10
@@ -4670,17 +5629,36 @@ memmove_emit_remainder_encodeBlockAsm8B:
MOVL SI, BX
// genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_3
CMPQ BX, $0x08
- JLE emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_8
+ JB emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_4through7
CMPQ BX, $0x10
JBE emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_8through16
CMPQ BX, $0x20
JBE emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_17through32
JMP emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_33through64
-emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_8:
- MOVQ (CX), SI
- MOVQ SI, (AX)
+emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B
emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_8through16:
@@ -4763,10 +5741,10 @@ emit_literal_done_emit_remainder_encodeBlockAsm8B:
RET
// func encodeBetterBlockAsm(dst []byte, src []byte) int
-// Requires: SSE2
-TEXT ·encodeBetterBlockAsm(SB), $327704-56
+// Requires: BMI, SSE2
+TEXT ·encodeBetterBlockAsm(SB), $589848-56
MOVQ dst_base+0(FP), AX
- MOVQ $0x00000a00, CX
+ MOVQ $0x00001200, CX
LEAQ 24(SP), DX
PXOR X0, X0
@@ -4818,27 +5796,37 @@ check_maxskip_cont_encodeBetterBlockAsm:
MOVQ DI, R11
SHLQ $0x08, R10
IMULQ R9, R10
- SHRQ $0x30, R10
+ SHRQ $0x2f, R10
SHLQ $0x20, R11
IMULQ SI, R11
SHRQ $0x32, R11
MOVL 24(SP)(R10*4), SI
- MOVL 262168(SP)(R11*4), R8
+ MOVL 524312(SP)(R11*4), R8
MOVL CX, 24(SP)(R10*4)
- MOVL CX, 262168(SP)(R11*4)
- CMPL (DX)(SI*1), DI
+ MOVL CX, 524312(SP)(R11*4)
+ MOVQ (DX)(SI*1), R10
+ MOVQ (DX)(R8*1), R11
+ CMPQ R10, DI
JEQ candidate_match_encodeBetterBlockAsm
- CMPL (DX)(R8*1), DI
- JEQ candidateS_match_encodeBetterBlockAsm
- MOVL 20(SP), CX
- JMP search_loop_encodeBetterBlockAsm
+ CMPQ R11, DI
+ JNE no_short_found_encodeBetterBlockAsm
+ MOVL R8, SI
+ JMP candidate_match_encodeBetterBlockAsm
+
+no_short_found_encodeBetterBlockAsm:
+ CMPL R10, DI
+ JEQ candidate_match_encodeBetterBlockAsm
+ CMPL R11, DI
+ JEQ candidateS_match_encodeBetterBlockAsm
+ MOVL 20(SP), CX
+ JMP search_loop_encodeBetterBlockAsm
candidateS_match_encodeBetterBlockAsm:
SHRQ $0x08, DI
MOVQ DI, R10
SHLQ $0x08, R10
IMULQ R9, R10
- SHRQ $0x30, R10
+ SHRQ $0x2f, R10
MOVL 24(SP)(R10*4), SI
INCL CX
MOVL CX, 24(SP)(R10*4)
@@ -4885,35 +5873,57 @@ match_dst_size_check_encodeBetterBlockAsm:
// matchLen
XORL R12, R12
CMPL R8, $0x08
- JL matchlen_single_match_nolit_encodeBetterBlockAsm
+ JL matchlen_match4_match_nolit_encodeBetterBlockAsm
matchlen_loopback_match_nolit_encodeBetterBlockAsm:
MOVQ (R9)(R12*1), R11
XORQ (R10)(R12*1), R11
TESTQ R11, R11
JZ matchlen_loop_match_nolit_encodeBetterBlockAsm
- BSFQ R11, R11
- SARQ $0x03, R11
- LEAL (R12)(R11*1), R12
- JMP match_nolit_end_encodeBetterBlockAsm
+
+#ifdef GOAMD64_v3
+ TZCNTQ R11, R11
+
+#else
+ BSFQ R11, R11
+
+#endif
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
+ JMP match_nolit_end_encodeBetterBlockAsm
matchlen_loop_match_nolit_encodeBetterBlockAsm:
LEAL -8(R8), R8
LEAL 8(R12), R12
CMPL R8, $0x08
JGE matchlen_loopback_match_nolit_encodeBetterBlockAsm
-
-matchlen_single_match_nolit_encodeBetterBlockAsm:
- TESTL R8, R8
- JZ match_nolit_end_encodeBetterBlockAsm
-
-matchlen_single_loopback_match_nolit_encodeBetterBlockAsm:
+ JZ match_nolit_end_encodeBetterBlockAsm
+
+matchlen_match4_match_nolit_encodeBetterBlockAsm:
+ CMPL R8, $0x04
+ JL matchlen_match2_match_nolit_encodeBetterBlockAsm
+ MOVL (R9)(R12*1), R11
+ CMPL (R10)(R12*1), R11
+ JNE matchlen_match2_match_nolit_encodeBetterBlockAsm
+ SUBL $0x04, R8
+ LEAL 4(R12), R12
+
+matchlen_match2_match_nolit_encodeBetterBlockAsm:
+ CMPL R8, $0x02
+ JL matchlen_match1_match_nolit_encodeBetterBlockAsm
+ MOVW (R9)(R12*1), R11
+ CMPW (R10)(R12*1), R11
+ JNE matchlen_match1_match_nolit_encodeBetterBlockAsm
+ SUBL $0x02, R8
+ LEAL 2(R12), R12
+
+matchlen_match1_match_nolit_encodeBetterBlockAsm:
+ CMPL R8, $0x01
+ JL match_nolit_end_encodeBetterBlockAsm
MOVB (R9)(R12*1), R11
CMPB (R10)(R12*1), R11
JNE match_nolit_end_encodeBetterBlockAsm
LEAL 1(R12), R12
- DECL R8
- JNZ matchlen_single_loopback_match_nolit_encodeBetterBlockAsm
match_nolit_end_encodeBetterBlockAsm:
MOVL CX, R8
@@ -5179,6 +6189,90 @@ four_bytes_remain_match_nolit_encodeBetterBlockAsm:
two_byte_offset_match_nolit_encodeBetterBlockAsm:
CMPL R12, $0x40
JLE two_byte_offset_short_match_nolit_encodeBetterBlockAsm
+ CMPL R8, $0x00000800
+ JAE long_offset_short_match_nolit_encodeBetterBlockAsm
+ MOVL $0x00000001, SI
+ LEAL 16(SI), SI
+ MOVB R8, 1(AX)
+ MOVL R8, R9
+ SHRL $0x08, R9
+ SHLL $0x05, R9
+ ORL R9, SI
+ MOVB SI, (AX)
+ ADDQ $0x02, AX
+ SUBL $0x08, R12
+
+ // emitRepeat
+ LEAL -4(R12), R12
+ JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
+
+emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
+ MOVL R12, SI
+ LEAL -4(R12), R12
+ CMPL SI, $0x08
+ JLE repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
+ CMPL SI, $0x0c
+ JGE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
+ CMPL R8, $0x00000800
+ JLT repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
+
+cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
+ CMPL R12, $0x00000104
+ JLT repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
+ CMPL R12, $0x00010100
+ JLT repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
+ CMPL R12, $0x0100ffff
+ JLT repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
+ LEAL -16842747(R12), R12
+ MOVW $0x001d, (AX)
+ MOVW $0xfffb, 2(AX)
+ MOVB $0xff, 4(AX)
+ ADDQ $0x05, AX
+ JMP emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
+
+repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
+ LEAL -65536(R12), R12
+ MOVL R12, R8
+ MOVW $0x001d, (AX)
+ MOVW R12, 2(AX)
+ SARL $0x10, R8
+ MOVB R8, 4(AX)
+ ADDQ $0x05, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
+ LEAL -256(R12), R12
+ MOVW $0x0019, (AX)
+ MOVW R12, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
+ LEAL -4(R12), R12
+ MOVW $0x0015, (AX)
+ MOVB R12, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
+ SHLL $0x02, R12
+ ORL $0x01, R12
+ MOVW R12, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
+ XORQ SI, SI
+ LEAL 1(SI)(R12*4), R12
+ MOVB R8, 1(AX)
+ SARL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, R12
+ MOVB R12, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+long_offset_short_match_nolit_encodeBetterBlockAsm:
MOVB $0xee, (AX)
MOVW R8, 1(AX)
LEAL -60(R12), R12
@@ -5505,52 +6599,49 @@ match_nolit_emitcopy_end_encodeBetterBlockAsm:
match_nolit_dst_ok_encodeBetterBlockAsm:
MOVQ $0x00cf1bbcdcbfa563, SI
MOVQ $0x9e3779b1, R8
- INCL DI
- MOVQ (DX)(DI*1), R9
- MOVQ R9, R10
- MOVQ R9, R11
- MOVQ R9, R12
- SHRQ $0x08, R11
- MOVQ R11, R13
- SHRQ $0x10, R12
- LEAL 1(DI), R14
- LEAL 2(DI), R15
- MOVQ -2(DX)(CX*1), R9
+ LEAQ 1(DI), DI
+ LEAQ -2(CX), R9
+ MOVQ (DX)(DI*1), R10
+ MOVQ 1(DX)(DI*1), R11
+ MOVQ (DX)(R9*1), R12
+ MOVQ 1(DX)(R9*1), R13
SHLQ $0x08, R10
IMULQ SI, R10
- SHRQ $0x30, R10
- SHLQ $0x08, R13
- IMULQ SI, R13
- SHRQ $0x30, R13
+ SHRQ $0x2f, R10
SHLQ $0x20, R11
IMULQ R8, R11
SHRQ $0x32, R11
- SHLQ $0x20, R12
- IMULQ R8, R12
- SHRQ $0x32, R12
+ SHLQ $0x08, R12
+ IMULQ SI, R12
+ SHRQ $0x2f, R12
+ SHLQ $0x20, R13
+ IMULQ R8, R13
+ SHRQ $0x32, R13
+ LEAQ 1(DI), R8
+ LEAQ 1(R9), R14
MOVL DI, 24(SP)(R10*4)
- MOVL R14, 24(SP)(R13*4)
- MOVL R14, 262168(SP)(R11*4)
- MOVL R15, 262168(SP)(R12*4)
- MOVQ R9, R10
- MOVQ R9, R11
- SHRQ $0x08, R11
- MOVQ R11, R13
- LEAL -2(CX), R9
- LEAL -1(CX), DI
+ MOVL R9, 24(SP)(R12*4)
+ MOVL R8, 524312(SP)(R11*4)
+ MOVL R14, 524312(SP)(R13*4)
+ ADDQ $0x01, DI
+ SUBQ $0x01, R9
+
+index_loop_encodeBetterBlockAsm:
+ CMPQ DI, R9
+ JAE search_loop_encodeBetterBlockAsm
+ MOVQ (DX)(DI*1), R8
+ MOVQ (DX)(R9*1), R10
+ SHLQ $0x08, R8
+ IMULQ SI, R8
+ SHRQ $0x2f, R8
SHLQ $0x08, R10
IMULQ SI, R10
- SHRQ $0x30, R10
- SHLQ $0x20, R11
- IMULQ R8, R11
- SHRQ $0x32, R11
- SHLQ $0x08, R13
- IMULQ SI, R13
- SHRQ $0x30, R13
+ SHRQ $0x2f, R10
+ MOVL DI, 24(SP)(R8*4)
MOVL R9, 24(SP)(R10*4)
- MOVL DI, 262168(SP)(R11*4)
- MOVL DI, 24(SP)(R13*4)
- JMP search_loop_encodeBetterBlockAsm
+ ADDQ $0x02, DI
+ SUBQ $0x02, R9
+ JMP index_loop_encodeBetterBlockAsm
emit_remainder_encodeBetterBlockAsm:
MOVQ src_len+32(FP), CX
@@ -5617,8 +6708,9 @@ memmove_emit_remainder_encodeBetterBlockAsm:
MOVL SI, BX
// genMemMoveShort
- CMPQ BX, $0x04
- JLE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_4
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_3
CMPQ BX, $0x08
JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_4through7
CMPQ BX, $0x10
@@ -5627,9 +6719,18 @@ memmove_emit_remainder_encodeBetterBlockAsm:
JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_17through32
JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_33through64
-emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_4:
- MOVL (CX), SI
- MOVL SI, (AX)
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_4through7:
@@ -5719,10 +6820,10 @@ emit_literal_done_emit_remainder_encodeBetterBlockAsm:
RET
// func encodeBetterBlockAsm4MB(dst []byte, src []byte) int
-// Requires: SSE2
-TEXT ·encodeBetterBlockAsm4MB(SB), $327704-56
+// Requires: BMI, SSE2
+TEXT ·encodeBetterBlockAsm4MB(SB), $589848-56
MOVQ dst_base+0(FP), AX
- MOVQ $0x00000a00, CX
+ MOVQ $0x00001200, CX
LEAQ 24(SP), DX
PXOR X0, X0
@@ -5774,27 +6875,37 @@ check_maxskip_cont_encodeBetterBlockAsm4MB:
MOVQ DI, R11
SHLQ $0x08, R10
IMULQ R9, R10
- SHRQ $0x30, R10
+ SHRQ $0x2f, R10
SHLQ $0x20, R11
IMULQ SI, R11
SHRQ $0x32, R11
MOVL 24(SP)(R10*4), SI
- MOVL 262168(SP)(R11*4), R8
+ MOVL 524312(SP)(R11*4), R8
MOVL CX, 24(SP)(R10*4)
- MOVL CX, 262168(SP)(R11*4)
- CMPL (DX)(SI*1), DI
+ MOVL CX, 524312(SP)(R11*4)
+ MOVQ (DX)(SI*1), R10
+ MOVQ (DX)(R8*1), R11
+ CMPQ R10, DI
JEQ candidate_match_encodeBetterBlockAsm4MB
- CMPL (DX)(R8*1), DI
- JEQ candidateS_match_encodeBetterBlockAsm4MB
- MOVL 20(SP), CX
- JMP search_loop_encodeBetterBlockAsm4MB
+ CMPQ R11, DI
+ JNE no_short_found_encodeBetterBlockAsm4MB
+ MOVL R8, SI
+ JMP candidate_match_encodeBetterBlockAsm4MB
+
+no_short_found_encodeBetterBlockAsm4MB:
+ CMPL R10, DI
+ JEQ candidate_match_encodeBetterBlockAsm4MB
+ CMPL R11, DI
+ JEQ candidateS_match_encodeBetterBlockAsm4MB
+ MOVL 20(SP), CX
+ JMP search_loop_encodeBetterBlockAsm4MB
candidateS_match_encodeBetterBlockAsm4MB:
SHRQ $0x08, DI
MOVQ DI, R10
SHLQ $0x08, R10
IMULQ R9, R10
- SHRQ $0x30, R10
+ SHRQ $0x2f, R10
MOVL 24(SP)(R10*4), SI
INCL CX
MOVL CX, 24(SP)(R10*4)
@@ -5841,35 +6952,57 @@ match_dst_size_check_encodeBetterBlockAsm4MB:
// matchLen
XORL R12, R12
CMPL R8, $0x08
- JL matchlen_single_match_nolit_encodeBetterBlockAsm4MB
+ JL matchlen_match4_match_nolit_encodeBetterBlockAsm4MB
matchlen_loopback_match_nolit_encodeBetterBlockAsm4MB:
MOVQ (R9)(R12*1), R11
XORQ (R10)(R12*1), R11
TESTQ R11, R11
JZ matchlen_loop_match_nolit_encodeBetterBlockAsm4MB
- BSFQ R11, R11
- SARQ $0x03, R11
- LEAL (R12)(R11*1), R12
- JMP match_nolit_end_encodeBetterBlockAsm4MB
+
+#ifdef GOAMD64_v3
+ TZCNTQ R11, R11
+
+#else
+ BSFQ R11, R11
+
+#endif
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
+ JMP match_nolit_end_encodeBetterBlockAsm4MB
matchlen_loop_match_nolit_encodeBetterBlockAsm4MB:
LEAL -8(R8), R8
LEAL 8(R12), R12
CMPL R8, $0x08
JGE matchlen_loopback_match_nolit_encodeBetterBlockAsm4MB
-
-matchlen_single_match_nolit_encodeBetterBlockAsm4MB:
- TESTL R8, R8
- JZ match_nolit_end_encodeBetterBlockAsm4MB
-
-matchlen_single_loopback_match_nolit_encodeBetterBlockAsm4MB:
+ JZ match_nolit_end_encodeBetterBlockAsm4MB
+
+matchlen_match4_match_nolit_encodeBetterBlockAsm4MB:
+ CMPL R8, $0x04
+ JL matchlen_match2_match_nolit_encodeBetterBlockAsm4MB
+ MOVL (R9)(R12*1), R11
+ CMPL (R10)(R12*1), R11
+ JNE matchlen_match2_match_nolit_encodeBetterBlockAsm4MB
+ SUBL $0x04, R8
+ LEAL 4(R12), R12
+
+matchlen_match2_match_nolit_encodeBetterBlockAsm4MB:
+ CMPL R8, $0x02
+ JL matchlen_match1_match_nolit_encodeBetterBlockAsm4MB
+ MOVW (R9)(R12*1), R11
+ CMPW (R10)(R12*1), R11
+ JNE matchlen_match1_match_nolit_encodeBetterBlockAsm4MB
+ SUBL $0x02, R8
+ LEAL 2(R12), R12
+
+matchlen_match1_match_nolit_encodeBetterBlockAsm4MB:
+ CMPL R8, $0x01
+ JL match_nolit_end_encodeBetterBlockAsm4MB
MOVB (R9)(R12*1), R11
CMPB (R10)(R12*1), R11
JNE match_nolit_end_encodeBetterBlockAsm4MB
LEAL 1(R12), R12
- DECL R8
- JNZ matchlen_single_loopback_match_nolit_encodeBetterBlockAsm4MB
match_nolit_end_encodeBetterBlockAsm4MB:
MOVL CX, R8
@@ -6116,6 +7249,77 @@ four_bytes_remain_match_nolit_encodeBetterBlockAsm4MB:
two_byte_offset_match_nolit_encodeBetterBlockAsm4MB:
CMPL R12, $0x40
JLE two_byte_offset_short_match_nolit_encodeBetterBlockAsm4MB
+ CMPL R8, $0x00000800
+ JAE long_offset_short_match_nolit_encodeBetterBlockAsm4MB
+ MOVL $0x00000001, SI
+ LEAL 16(SI), SI
+ MOVB R8, 1(AX)
+ SHRL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, SI
+ MOVB SI, (AX)
+ ADDQ $0x02, AX
+ SUBL $0x08, R12
+
+ // emitRepeat
+ LEAL -4(R12), R12
+ JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b
+ MOVL R12, SI
+ LEAL -4(R12), R12
+ CMPL SI, $0x08
+ JLE repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b
+ CMPL SI, $0x0c
+ JGE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b
+ CMPL R8, $0x00000800
+ JLT repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b
+
+cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b:
+ CMPL R12, $0x00000104
+ JLT repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b
+ CMPL R12, $0x00010100
+ JLT repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b
+ LEAL -65536(R12), R12
+ MOVL R12, R8
+ MOVW $0x001d, (AX)
+ MOVW R12, 2(AX)
+ SARL $0x10, R8
+ MOVB R8, 4(AX)
+ ADDQ $0x05, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b:
+ LEAL -256(R12), R12
+ MOVW $0x0019, (AX)
+ MOVW R12, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b:
+ LEAL -4(R12), R12
+ MOVW $0x0015, (AX)
+ MOVB R12, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b:
+ SHLL $0x02, R12
+ ORL $0x01, R12
+ MOVW R12, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b:
+ XORQ SI, SI
+ LEAL 1(SI)(R12*4), R12
+ MOVB R8, 1(AX)
+ SARL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, R12
+ MOVB R12, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+long_offset_short_match_nolit_encodeBetterBlockAsm4MB:
MOVB $0xee, (AX)
MOVW R8, 1(AX)
LEAL -60(R12), R12
@@ -6412,52 +7616,49 @@ match_nolit_emitcopy_end_encodeBetterBlockAsm4MB:
match_nolit_dst_ok_encodeBetterBlockAsm4MB:
MOVQ $0x00cf1bbcdcbfa563, SI
MOVQ $0x9e3779b1, R8
- INCL DI
- MOVQ (DX)(DI*1), R9
- MOVQ R9, R10
- MOVQ R9, R11
- MOVQ R9, R12
- SHRQ $0x08, R11
- MOVQ R11, R13
- SHRQ $0x10, R12
- LEAL 1(DI), R14
- LEAL 2(DI), R15
- MOVQ -2(DX)(CX*1), R9
+ LEAQ 1(DI), DI
+ LEAQ -2(CX), R9
+ MOVQ (DX)(DI*1), R10
+ MOVQ 1(DX)(DI*1), R11
+ MOVQ (DX)(R9*1), R12
+ MOVQ 1(DX)(R9*1), R13
SHLQ $0x08, R10
IMULQ SI, R10
- SHRQ $0x30, R10
- SHLQ $0x08, R13
- IMULQ SI, R13
- SHRQ $0x30, R13
+ SHRQ $0x2f, R10
SHLQ $0x20, R11
IMULQ R8, R11
SHRQ $0x32, R11
- SHLQ $0x20, R12
- IMULQ R8, R12
- SHRQ $0x32, R12
+ SHLQ $0x08, R12
+ IMULQ SI, R12
+ SHRQ $0x2f, R12
+ SHLQ $0x20, R13
+ IMULQ R8, R13
+ SHRQ $0x32, R13
+ LEAQ 1(DI), R8
+ LEAQ 1(R9), R14
MOVL DI, 24(SP)(R10*4)
- MOVL R14, 24(SP)(R13*4)
- MOVL R14, 262168(SP)(R11*4)
- MOVL R15, 262168(SP)(R12*4)
- MOVQ R9, R10
- MOVQ R9, R11
- SHRQ $0x08, R11
- MOVQ R11, R13
- LEAL -2(CX), R9
- LEAL -1(CX), DI
+ MOVL R9, 24(SP)(R12*4)
+ MOVL R8, 524312(SP)(R11*4)
+ MOVL R14, 524312(SP)(R13*4)
+ ADDQ $0x01, DI
+ SUBQ $0x01, R9
+
+index_loop_encodeBetterBlockAsm4MB:
+ CMPQ DI, R9
+ JAE search_loop_encodeBetterBlockAsm4MB
+ MOVQ (DX)(DI*1), R8
+ MOVQ (DX)(R9*1), R10
+ SHLQ $0x08, R8
+ IMULQ SI, R8
+ SHRQ $0x2f, R8
SHLQ $0x08, R10
IMULQ SI, R10
- SHRQ $0x30, R10
- SHLQ $0x20, R11
- IMULQ R8, R11
- SHRQ $0x32, R11
- SHLQ $0x08, R13
- IMULQ SI, R13
- SHRQ $0x30, R13
+ SHRQ $0x2f, R10
+ MOVL DI, 24(SP)(R8*4)
MOVL R9, 24(SP)(R10*4)
- MOVL DI, 262168(SP)(R11*4)
- MOVL DI, 24(SP)(R13*4)
- JMP search_loop_encodeBetterBlockAsm4MB
+ ADDQ $0x02, DI
+ SUBQ $0x02, R9
+ JMP index_loop_encodeBetterBlockAsm4MB
emit_remainder_encodeBetterBlockAsm4MB:
MOVQ src_len+32(FP), CX
@@ -6516,8 +7717,9 @@ memmove_emit_remainder_encodeBetterBlockAsm4MB:
MOVL SI, BX
// genMemMoveShort
- CMPQ BX, $0x04
- JLE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_4
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_3
CMPQ BX, $0x08
JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_4through7
CMPQ BX, $0x10
@@ -6526,9 +7728,18 @@ memmove_emit_remainder_encodeBetterBlockAsm4MB:
JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_17through32
JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_33through64
-emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_4:
- MOVL (CX), SI
- MOVL SI, (AX)
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_4through7:
@@ -6618,7 +7829,7 @@ emit_literal_done_emit_remainder_encodeBetterBlockAsm4MB:
RET
// func encodeBetterBlockAsm12B(dst []byte, src []byte) int
-// Requires: SSE2
+// Requires: BMI, SSE2
TEXT ·encodeBetterBlockAsm12B(SB), $81944-56
MOVQ dst_base+0(FP), AX
MOVQ $0x00000280, CX
@@ -6673,12 +7884,22 @@ search_loop_encodeBetterBlockAsm12B:
MOVL 65560(SP)(R11*4), R8
MOVL CX, 24(SP)(R10*4)
MOVL CX, 65560(SP)(R11*4)
- CMPL (DX)(SI*1), DI
+ MOVQ (DX)(SI*1), R10
+ MOVQ (DX)(R8*1), R11
+ CMPQ R10, DI
JEQ candidate_match_encodeBetterBlockAsm12B
- CMPL (DX)(R8*1), DI
- JEQ candidateS_match_encodeBetterBlockAsm12B
- MOVL 20(SP), CX
- JMP search_loop_encodeBetterBlockAsm12B
+ CMPQ R11, DI
+ JNE no_short_found_encodeBetterBlockAsm12B
+ MOVL R8, SI
+ JMP candidate_match_encodeBetterBlockAsm12B
+
+no_short_found_encodeBetterBlockAsm12B:
+ CMPL R10, DI
+ JEQ candidate_match_encodeBetterBlockAsm12B
+ CMPL R11, DI
+ JEQ candidateS_match_encodeBetterBlockAsm12B
+ MOVL 20(SP), CX
+ JMP search_loop_encodeBetterBlockAsm12B
candidateS_match_encodeBetterBlockAsm12B:
SHRQ $0x08, DI
@@ -6732,35 +7953,57 @@ match_dst_size_check_encodeBetterBlockAsm12B:
// matchLen
XORL R12, R12
CMPL R8, $0x08
- JL matchlen_single_match_nolit_encodeBetterBlockAsm12B
+ JL matchlen_match4_match_nolit_encodeBetterBlockAsm12B
matchlen_loopback_match_nolit_encodeBetterBlockAsm12B:
MOVQ (R9)(R12*1), R11
XORQ (R10)(R12*1), R11
TESTQ R11, R11
JZ matchlen_loop_match_nolit_encodeBetterBlockAsm12B
- BSFQ R11, R11
- SARQ $0x03, R11
- LEAL (R12)(R11*1), R12
- JMP match_nolit_end_encodeBetterBlockAsm12B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R11, R11
+
+#else
+ BSFQ R11, R11
+
+#endif
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
+ JMP match_nolit_end_encodeBetterBlockAsm12B
matchlen_loop_match_nolit_encodeBetterBlockAsm12B:
LEAL -8(R8), R8
LEAL 8(R12), R12
CMPL R8, $0x08
JGE matchlen_loopback_match_nolit_encodeBetterBlockAsm12B
-
-matchlen_single_match_nolit_encodeBetterBlockAsm12B:
- TESTL R8, R8
- JZ match_nolit_end_encodeBetterBlockAsm12B
-
-matchlen_single_loopback_match_nolit_encodeBetterBlockAsm12B:
+ JZ match_nolit_end_encodeBetterBlockAsm12B
+
+matchlen_match4_match_nolit_encodeBetterBlockAsm12B:
+ CMPL R8, $0x04
+ JL matchlen_match2_match_nolit_encodeBetterBlockAsm12B
+ MOVL (R9)(R12*1), R11
+ CMPL (R10)(R12*1), R11
+ JNE matchlen_match2_match_nolit_encodeBetterBlockAsm12B
+ SUBL $0x04, R8
+ LEAL 4(R12), R12
+
+matchlen_match2_match_nolit_encodeBetterBlockAsm12B:
+ CMPL R8, $0x02
+ JL matchlen_match1_match_nolit_encodeBetterBlockAsm12B
+ MOVW (R9)(R12*1), R11
+ CMPW (R10)(R12*1), R11
+ JNE matchlen_match1_match_nolit_encodeBetterBlockAsm12B
+ SUBL $0x02, R8
+ LEAL 2(R12), R12
+
+matchlen_match1_match_nolit_encodeBetterBlockAsm12B:
+ CMPL R8, $0x01
+ JL match_nolit_end_encodeBetterBlockAsm12B
MOVB (R9)(R12*1), R11
CMPB (R10)(R12*1), R11
JNE match_nolit_end_encodeBetterBlockAsm12B
LEAL 1(R12), R12
- DECL R8
- JNZ matchlen_single_loopback_match_nolit_encodeBetterBlockAsm12B
match_nolit_end_encodeBetterBlockAsm12B:
MOVL CX, R8
@@ -6907,6 +8150,65 @@ emit_literal_done_match_emit_encodeBetterBlockAsm12B:
two_byte_offset_match_nolit_encodeBetterBlockAsm12B:
CMPL R12, $0x40
JLE two_byte_offset_short_match_nolit_encodeBetterBlockAsm12B
+ CMPL R8, $0x00000800
+ JAE long_offset_short_match_nolit_encodeBetterBlockAsm12B
+ MOVL $0x00000001, SI
+ LEAL 16(SI), SI
+ MOVB R8, 1(AX)
+ SHRL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, SI
+ MOVB SI, (AX)
+ ADDQ $0x02, AX
+ SUBL $0x08, R12
+
+ // emitRepeat
+ LEAL -4(R12), R12
+ JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b
+ MOVL R12, SI
+ LEAL -4(R12), R12
+ CMPL SI, $0x08
+ JLE repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b
+ CMPL SI, $0x0c
+ JGE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b
+ CMPL R8, $0x00000800
+ JLT repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b
+
+cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b:
+ CMPL R12, $0x00000104
+ JLT repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b
+ LEAL -256(R12), R12
+ MOVW $0x0019, (AX)
+ MOVW R12, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
+
+repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b:
+ LEAL -4(R12), R12
+ MOVW $0x0015, (AX)
+ MOVB R12, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
+
+repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b:
+ SHLL $0x02, R12
+ ORL $0x01, R12
+ MOVW R12, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
+
+repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b:
+ XORQ SI, SI
+ LEAL 1(SI)(R12*4), R12
+ MOVB R8, 1(AX)
+ SARL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, R12
+ MOVB R12, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B
+
+long_offset_short_match_nolit_encodeBetterBlockAsm12B:
MOVB $0xee, (AX)
MOVW R8, 1(AX)
LEAL -60(R12), R12
@@ -7168,52 +8470,49 @@ match_nolit_emitcopy_end_encodeBetterBlockAsm12B:
match_nolit_dst_ok_encodeBetterBlockAsm12B:
MOVQ $0x0000cf1bbcdcbf9b, SI
MOVQ $0x9e3779b1, R8
- INCL DI
- MOVQ (DX)(DI*1), R9
- MOVQ R9, R10
- MOVQ R9, R11
- MOVQ R9, R12
- SHRQ $0x08, R11
- MOVQ R11, R13
- SHRQ $0x10, R12
- LEAL 1(DI), R14
- LEAL 2(DI), R15
- MOVQ -2(DX)(CX*1), R9
+ LEAQ 1(DI), DI
+ LEAQ -2(CX), R9
+ MOVQ (DX)(DI*1), R10
+ MOVQ 1(DX)(DI*1), R11
+ MOVQ (DX)(R9*1), R12
+ MOVQ 1(DX)(R9*1), R13
SHLQ $0x10, R10
IMULQ SI, R10
SHRQ $0x32, R10
- SHLQ $0x10, R13
- IMULQ SI, R13
- SHRQ $0x32, R13
SHLQ $0x20, R11
IMULQ R8, R11
SHRQ $0x34, R11
- SHLQ $0x20, R12
- IMULQ R8, R12
- SHRQ $0x34, R12
+ SHLQ $0x10, R12
+ IMULQ SI, R12
+ SHRQ $0x32, R12
+ SHLQ $0x20, R13
+ IMULQ R8, R13
+ SHRQ $0x34, R13
+ LEAQ 1(DI), R8
+ LEAQ 1(R9), R14
MOVL DI, 24(SP)(R10*4)
- MOVL R14, 24(SP)(R13*4)
- MOVL R14, 65560(SP)(R11*4)
- MOVL R15, 65560(SP)(R12*4)
- MOVQ R9, R10
- MOVQ R9, R11
- SHRQ $0x08, R11
- MOVQ R11, R13
- LEAL -2(CX), R9
- LEAL -1(CX), DI
+ MOVL R9, 24(SP)(R12*4)
+ MOVL R8, 65560(SP)(R11*4)
+ MOVL R14, 65560(SP)(R13*4)
+ ADDQ $0x01, DI
+ SUBQ $0x01, R9
+
+index_loop_encodeBetterBlockAsm12B:
+ CMPQ DI, R9
+ JAE search_loop_encodeBetterBlockAsm12B
+ MOVQ (DX)(DI*1), R8
+ MOVQ (DX)(R9*1), R10
+ SHLQ $0x10, R8
+ IMULQ SI, R8
+ SHRQ $0x32, R8
SHLQ $0x10, R10
IMULQ SI, R10
SHRQ $0x32, R10
- SHLQ $0x20, R11
- IMULQ R8, R11
- SHRQ $0x34, R11
- SHLQ $0x10, R13
- IMULQ SI, R13
- SHRQ $0x32, R13
+ MOVL DI, 24(SP)(R8*4)
MOVL R9, 24(SP)(R10*4)
- MOVL DI, 65560(SP)(R11*4)
- MOVL DI, 24(SP)(R13*4)
- JMP search_loop_encodeBetterBlockAsm12B
+ ADDQ $0x02, DI
+ SUBQ $0x02, R9
+ JMP index_loop_encodeBetterBlockAsm12B
emit_remainder_encodeBetterBlockAsm12B:
MOVQ src_len+32(FP), CX
@@ -7261,8 +8560,9 @@ memmove_emit_remainder_encodeBetterBlockAsm12B:
MOVL SI, BX
// genMemMoveShort
- CMPQ BX, $0x04
- JLE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_4
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_3
CMPQ BX, $0x08
JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_4through7
CMPQ BX, $0x10
@@ -7271,9 +8571,18 @@ memmove_emit_remainder_encodeBetterBlockAsm12B:
JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_17through32
JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_33through64
-emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_4:
- MOVL (CX), SI
- MOVL SI, (AX)
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_4through7:
@@ -7363,7 +8672,7 @@ emit_literal_done_emit_remainder_encodeBetterBlockAsm12B:
RET
// func encodeBetterBlockAsm10B(dst []byte, src []byte) int
-// Requires: SSE2
+// Requires: BMI, SSE2
TEXT ·encodeBetterBlockAsm10B(SB), $20504-56
MOVQ dst_base+0(FP), AX
MOVQ $0x000000a0, CX
@@ -7418,12 +8727,22 @@ search_loop_encodeBetterBlockAsm10B:
MOVL 16408(SP)(R11*4), R8
MOVL CX, 24(SP)(R10*4)
MOVL CX, 16408(SP)(R11*4)
- CMPL (DX)(SI*1), DI
+ MOVQ (DX)(SI*1), R10
+ MOVQ (DX)(R8*1), R11
+ CMPQ R10, DI
JEQ candidate_match_encodeBetterBlockAsm10B
- CMPL (DX)(R8*1), DI
- JEQ candidateS_match_encodeBetterBlockAsm10B
- MOVL 20(SP), CX
- JMP search_loop_encodeBetterBlockAsm10B
+ CMPQ R11, DI
+ JNE no_short_found_encodeBetterBlockAsm10B
+ MOVL R8, SI
+ JMP candidate_match_encodeBetterBlockAsm10B
+
+no_short_found_encodeBetterBlockAsm10B:
+ CMPL R10, DI
+ JEQ candidate_match_encodeBetterBlockAsm10B
+ CMPL R11, DI
+ JEQ candidateS_match_encodeBetterBlockAsm10B
+ MOVL 20(SP), CX
+ JMP search_loop_encodeBetterBlockAsm10B
candidateS_match_encodeBetterBlockAsm10B:
SHRQ $0x08, DI
@@ -7477,35 +8796,57 @@ match_dst_size_check_encodeBetterBlockAsm10B:
// matchLen
XORL R12, R12
CMPL R8, $0x08
- JL matchlen_single_match_nolit_encodeBetterBlockAsm10B
+ JL matchlen_match4_match_nolit_encodeBetterBlockAsm10B
matchlen_loopback_match_nolit_encodeBetterBlockAsm10B:
MOVQ (R9)(R12*1), R11
XORQ (R10)(R12*1), R11
TESTQ R11, R11
JZ matchlen_loop_match_nolit_encodeBetterBlockAsm10B
- BSFQ R11, R11
- SARQ $0x03, R11
- LEAL (R12)(R11*1), R12
- JMP match_nolit_end_encodeBetterBlockAsm10B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R11, R11
+
+#else
+ BSFQ R11, R11
+
+#endif
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
+ JMP match_nolit_end_encodeBetterBlockAsm10B
matchlen_loop_match_nolit_encodeBetterBlockAsm10B:
LEAL -8(R8), R8
LEAL 8(R12), R12
CMPL R8, $0x08
JGE matchlen_loopback_match_nolit_encodeBetterBlockAsm10B
-
-matchlen_single_match_nolit_encodeBetterBlockAsm10B:
- TESTL R8, R8
- JZ match_nolit_end_encodeBetterBlockAsm10B
-
-matchlen_single_loopback_match_nolit_encodeBetterBlockAsm10B:
+ JZ match_nolit_end_encodeBetterBlockAsm10B
+
+matchlen_match4_match_nolit_encodeBetterBlockAsm10B:
+ CMPL R8, $0x04
+ JL matchlen_match2_match_nolit_encodeBetterBlockAsm10B
+ MOVL (R9)(R12*1), R11
+ CMPL (R10)(R12*1), R11
+ JNE matchlen_match2_match_nolit_encodeBetterBlockAsm10B
+ SUBL $0x04, R8
+ LEAL 4(R12), R12
+
+matchlen_match2_match_nolit_encodeBetterBlockAsm10B:
+ CMPL R8, $0x02
+ JL matchlen_match1_match_nolit_encodeBetterBlockAsm10B
+ MOVW (R9)(R12*1), R11
+ CMPW (R10)(R12*1), R11
+ JNE matchlen_match1_match_nolit_encodeBetterBlockAsm10B
+ SUBL $0x02, R8
+ LEAL 2(R12), R12
+
+matchlen_match1_match_nolit_encodeBetterBlockAsm10B:
+ CMPL R8, $0x01
+ JL match_nolit_end_encodeBetterBlockAsm10B
MOVB (R9)(R12*1), R11
CMPB (R10)(R12*1), R11
JNE match_nolit_end_encodeBetterBlockAsm10B
LEAL 1(R12), R12
- DECL R8
- JNZ matchlen_single_loopback_match_nolit_encodeBetterBlockAsm10B
match_nolit_end_encodeBetterBlockAsm10B:
MOVL CX, R8
@@ -7652,6 +8993,65 @@ emit_literal_done_match_emit_encodeBetterBlockAsm10B:
two_byte_offset_match_nolit_encodeBetterBlockAsm10B:
CMPL R12, $0x40
JLE two_byte_offset_short_match_nolit_encodeBetterBlockAsm10B
+ CMPL R8, $0x00000800
+ JAE long_offset_short_match_nolit_encodeBetterBlockAsm10B
+ MOVL $0x00000001, SI
+ LEAL 16(SI), SI
+ MOVB R8, 1(AX)
+ SHRL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, SI
+ MOVB SI, (AX)
+ ADDQ $0x02, AX
+ SUBL $0x08, R12
+
+ // emitRepeat
+ LEAL -4(R12), R12
+ JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b
+ MOVL R12, SI
+ LEAL -4(R12), R12
+ CMPL SI, $0x08
+ JLE repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b
+ CMPL SI, $0x0c
+ JGE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b
+ CMPL R8, $0x00000800
+ JLT repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b
+
+cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b:
+ CMPL R12, $0x00000104
+ JLT repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b
+ LEAL -256(R12), R12
+ MOVW $0x0019, (AX)
+ MOVW R12, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
+
+repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b:
+ LEAL -4(R12), R12
+ MOVW $0x0015, (AX)
+ MOVB R12, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
+
+repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b:
+ SHLL $0x02, R12
+ ORL $0x01, R12
+ MOVW R12, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
+
+repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b:
+ XORQ SI, SI
+ LEAL 1(SI)(R12*4), R12
+ MOVB R8, 1(AX)
+ SARL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, R12
+ MOVB R12, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B
+
+long_offset_short_match_nolit_encodeBetterBlockAsm10B:
MOVB $0xee, (AX)
MOVW R8, 1(AX)
LEAL -60(R12), R12
@@ -7913,52 +9313,49 @@ match_nolit_emitcopy_end_encodeBetterBlockAsm10B:
match_nolit_dst_ok_encodeBetterBlockAsm10B:
MOVQ $0x0000cf1bbcdcbf9b, SI
MOVQ $0x9e3779b1, R8
- INCL DI
- MOVQ (DX)(DI*1), R9
- MOVQ R9, R10
- MOVQ R9, R11
- MOVQ R9, R12
- SHRQ $0x08, R11
- MOVQ R11, R13
- SHRQ $0x10, R12
- LEAL 1(DI), R14
- LEAL 2(DI), R15
- MOVQ -2(DX)(CX*1), R9
+ LEAQ 1(DI), DI
+ LEAQ -2(CX), R9
+ MOVQ (DX)(DI*1), R10
+ MOVQ 1(DX)(DI*1), R11
+ MOVQ (DX)(R9*1), R12
+ MOVQ 1(DX)(R9*1), R13
SHLQ $0x10, R10
IMULQ SI, R10
SHRQ $0x34, R10
- SHLQ $0x10, R13
- IMULQ SI, R13
- SHRQ $0x34, R13
SHLQ $0x20, R11
IMULQ R8, R11
SHRQ $0x36, R11
- SHLQ $0x20, R12
- IMULQ R8, R12
- SHRQ $0x36, R12
+ SHLQ $0x10, R12
+ IMULQ SI, R12
+ SHRQ $0x34, R12
+ SHLQ $0x20, R13
+ IMULQ R8, R13
+ SHRQ $0x36, R13
+ LEAQ 1(DI), R8
+ LEAQ 1(R9), R14
MOVL DI, 24(SP)(R10*4)
- MOVL R14, 24(SP)(R13*4)
- MOVL R14, 16408(SP)(R11*4)
- MOVL R15, 16408(SP)(R12*4)
- MOVQ R9, R10
- MOVQ R9, R11
- SHRQ $0x08, R11
- MOVQ R11, R13
- LEAL -2(CX), R9
- LEAL -1(CX), DI
+ MOVL R9, 24(SP)(R12*4)
+ MOVL R8, 16408(SP)(R11*4)
+ MOVL R14, 16408(SP)(R13*4)
+ ADDQ $0x01, DI
+ SUBQ $0x01, R9
+
+index_loop_encodeBetterBlockAsm10B:
+ CMPQ DI, R9
+ JAE search_loop_encodeBetterBlockAsm10B
+ MOVQ (DX)(DI*1), R8
+ MOVQ (DX)(R9*1), R10
+ SHLQ $0x10, R8
+ IMULQ SI, R8
+ SHRQ $0x34, R8
SHLQ $0x10, R10
IMULQ SI, R10
SHRQ $0x34, R10
- SHLQ $0x20, R11
- IMULQ R8, R11
- SHRQ $0x36, R11
- SHLQ $0x10, R13
- IMULQ SI, R13
- SHRQ $0x34, R13
+ MOVL DI, 24(SP)(R8*4)
MOVL R9, 24(SP)(R10*4)
- MOVL DI, 16408(SP)(R11*4)
- MOVL DI, 24(SP)(R13*4)
- JMP search_loop_encodeBetterBlockAsm10B
+ ADDQ $0x02, DI
+ SUBQ $0x02, R9
+ JMP index_loop_encodeBetterBlockAsm10B
emit_remainder_encodeBetterBlockAsm10B:
MOVQ src_len+32(FP), CX
@@ -8006,8 +9403,9 @@ memmove_emit_remainder_encodeBetterBlockAsm10B:
MOVL SI, BX
// genMemMoveShort
- CMPQ BX, $0x04
- JLE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_4
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_3
CMPQ BX, $0x08
JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_4through7
CMPQ BX, $0x10
@@ -8016,9 +9414,18 @@ memmove_emit_remainder_encodeBetterBlockAsm10B:
JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_17through32
JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_33through64
-emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_4:
- MOVL (CX), SI
- MOVL SI, (AX)
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_4through7:
@@ -8108,7 +9515,7 @@ emit_literal_done_emit_remainder_encodeBetterBlockAsm10B:
RET
// func encodeBetterBlockAsm8B(dst []byte, src []byte) int
-// Requires: SSE2
+// Requires: BMI, SSE2
TEXT ·encodeBetterBlockAsm8B(SB), $5144-56
MOVQ dst_base+0(FP), AX
MOVQ $0x00000028, CX
@@ -8163,12 +9570,22 @@ search_loop_encodeBetterBlockAsm8B:
MOVL 4120(SP)(R11*4), R8
MOVL CX, 24(SP)(R10*4)
MOVL CX, 4120(SP)(R11*4)
- CMPL (DX)(SI*1), DI
+ MOVQ (DX)(SI*1), R10
+ MOVQ (DX)(R8*1), R11
+ CMPQ R10, DI
JEQ candidate_match_encodeBetterBlockAsm8B
- CMPL (DX)(R8*1), DI
- JEQ candidateS_match_encodeBetterBlockAsm8B
- MOVL 20(SP), CX
- JMP search_loop_encodeBetterBlockAsm8B
+ CMPQ R11, DI
+ JNE no_short_found_encodeBetterBlockAsm8B
+ MOVL R8, SI
+ JMP candidate_match_encodeBetterBlockAsm8B
+
+no_short_found_encodeBetterBlockAsm8B:
+ CMPL R10, DI
+ JEQ candidate_match_encodeBetterBlockAsm8B
+ CMPL R11, DI
+ JEQ candidateS_match_encodeBetterBlockAsm8B
+ MOVL 20(SP), CX
+ JMP search_loop_encodeBetterBlockAsm8B
candidateS_match_encodeBetterBlockAsm8B:
SHRQ $0x08, DI
@@ -8222,35 +9639,57 @@ match_dst_size_check_encodeBetterBlockAsm8B:
// matchLen
XORL R12, R12
CMPL R8, $0x08
- JL matchlen_single_match_nolit_encodeBetterBlockAsm8B
+ JL matchlen_match4_match_nolit_encodeBetterBlockAsm8B
matchlen_loopback_match_nolit_encodeBetterBlockAsm8B:
MOVQ (R9)(R12*1), R11
XORQ (R10)(R12*1), R11
TESTQ R11, R11
JZ matchlen_loop_match_nolit_encodeBetterBlockAsm8B
- BSFQ R11, R11
- SARQ $0x03, R11
- LEAL (R12)(R11*1), R12
- JMP match_nolit_end_encodeBetterBlockAsm8B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R11, R11
+
+#else
+ BSFQ R11, R11
+
+#endif
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
+ JMP match_nolit_end_encodeBetterBlockAsm8B
matchlen_loop_match_nolit_encodeBetterBlockAsm8B:
LEAL -8(R8), R8
LEAL 8(R12), R12
CMPL R8, $0x08
JGE matchlen_loopback_match_nolit_encodeBetterBlockAsm8B
-
-matchlen_single_match_nolit_encodeBetterBlockAsm8B:
- TESTL R8, R8
- JZ match_nolit_end_encodeBetterBlockAsm8B
-
-matchlen_single_loopback_match_nolit_encodeBetterBlockAsm8B:
+ JZ match_nolit_end_encodeBetterBlockAsm8B
+
+matchlen_match4_match_nolit_encodeBetterBlockAsm8B:
+ CMPL R8, $0x04
+ JL matchlen_match2_match_nolit_encodeBetterBlockAsm8B
+ MOVL (R9)(R12*1), R11
+ CMPL (R10)(R12*1), R11
+ JNE matchlen_match2_match_nolit_encodeBetterBlockAsm8B
+ SUBL $0x04, R8
+ LEAL 4(R12), R12
+
+matchlen_match2_match_nolit_encodeBetterBlockAsm8B:
+ CMPL R8, $0x02
+ JL matchlen_match1_match_nolit_encodeBetterBlockAsm8B
+ MOVW (R9)(R12*1), R11
+ CMPW (R10)(R12*1), R11
+ JNE matchlen_match1_match_nolit_encodeBetterBlockAsm8B
+ SUBL $0x02, R8
+ LEAL 2(R12), R12
+
+matchlen_match1_match_nolit_encodeBetterBlockAsm8B:
+ CMPL R8, $0x01
+ JL match_nolit_end_encodeBetterBlockAsm8B
MOVB (R9)(R12*1), R11
CMPB (R10)(R12*1), R11
JNE match_nolit_end_encodeBetterBlockAsm8B
LEAL 1(R12), R12
- DECL R8
- JNZ matchlen_single_loopback_match_nolit_encodeBetterBlockAsm8B
match_nolit_end_encodeBetterBlockAsm8B:
MOVL CX, R8
@@ -8397,6 +9836,61 @@ emit_literal_done_match_emit_encodeBetterBlockAsm8B:
two_byte_offset_match_nolit_encodeBetterBlockAsm8B:
CMPL R12, $0x40
JLE two_byte_offset_short_match_nolit_encodeBetterBlockAsm8B
+ CMPL R8, $0x00000800
+ JAE long_offset_short_match_nolit_encodeBetterBlockAsm8B
+ MOVL $0x00000001, SI
+ LEAL 16(SI), SI
+ MOVB R8, 1(AX)
+ SHRL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, SI
+ MOVB SI, (AX)
+ ADDQ $0x02, AX
+ SUBL $0x08, R12
+
+ // emitRepeat
+ LEAL -4(R12), R12
+ JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b
+ MOVL R12, SI
+ LEAL -4(R12), R12
+ CMPL SI, $0x08
+ JLE repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b
+ CMPL SI, $0x0c
+ JGE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b
+
+cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b:
+ CMPL R12, $0x00000104
+ JLT repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b
+ LEAL -256(R12), R12
+ MOVW $0x0019, (AX)
+ MOVW R12, 2(AX)
+ ADDQ $0x04, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
+
+repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b:
+ LEAL -4(R12), R12
+ MOVW $0x0015, (AX)
+ MOVB R12, 2(AX)
+ ADDQ $0x03, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
+
+repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b:
+ SHLL $0x02, R12
+ ORL $0x01, R12
+ MOVW R12, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
+ XORQ SI, SI
+ LEAL 1(SI)(R12*4), R12
+ MOVB R8, 1(AX)
+ SARL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, R12
+ MOVB R12, (AX)
+ ADDQ $0x02, AX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B
+
+long_offset_short_match_nolit_encodeBetterBlockAsm8B:
MOVB $0xee, (AX)
MOVW R8, 1(AX)
LEAL -60(R12), R12
@@ -8648,52 +10142,49 @@ match_nolit_emitcopy_end_encodeBetterBlockAsm8B:
match_nolit_dst_ok_encodeBetterBlockAsm8B:
MOVQ $0x0000cf1bbcdcbf9b, SI
MOVQ $0x9e3779b1, R8
- INCL DI
- MOVQ (DX)(DI*1), R9
- MOVQ R9, R10
- MOVQ R9, R11
- MOVQ R9, R12
- SHRQ $0x08, R11
- MOVQ R11, R13
- SHRQ $0x10, R12
- LEAL 1(DI), R14
- LEAL 2(DI), R15
- MOVQ -2(DX)(CX*1), R9
+ LEAQ 1(DI), DI
+ LEAQ -2(CX), R9
+ MOVQ (DX)(DI*1), R10
+ MOVQ 1(DX)(DI*1), R11
+ MOVQ (DX)(R9*1), R12
+ MOVQ 1(DX)(R9*1), R13
SHLQ $0x10, R10
IMULQ SI, R10
SHRQ $0x36, R10
- SHLQ $0x10, R13
- IMULQ SI, R13
- SHRQ $0x36, R13
SHLQ $0x20, R11
IMULQ R8, R11
SHRQ $0x38, R11
- SHLQ $0x20, R12
- IMULQ R8, R12
- SHRQ $0x38, R12
+ SHLQ $0x10, R12
+ IMULQ SI, R12
+ SHRQ $0x36, R12
+ SHLQ $0x20, R13
+ IMULQ R8, R13
+ SHRQ $0x38, R13
+ LEAQ 1(DI), R8
+ LEAQ 1(R9), R14
MOVL DI, 24(SP)(R10*4)
- MOVL R14, 24(SP)(R13*4)
- MOVL R14, 4120(SP)(R11*4)
- MOVL R15, 4120(SP)(R12*4)
- MOVQ R9, R10
- MOVQ R9, R11
- SHRQ $0x08, R11
- MOVQ R11, R13
- LEAL -2(CX), R9
- LEAL -1(CX), DI
+ MOVL R9, 24(SP)(R12*4)
+ MOVL R8, 4120(SP)(R11*4)
+ MOVL R14, 4120(SP)(R13*4)
+ ADDQ $0x01, DI
+ SUBQ $0x01, R9
+
+index_loop_encodeBetterBlockAsm8B:
+ CMPQ DI, R9
+ JAE search_loop_encodeBetterBlockAsm8B
+ MOVQ (DX)(DI*1), R8
+ MOVQ (DX)(R9*1), R10
+ SHLQ $0x10, R8
+ IMULQ SI, R8
+ SHRQ $0x36, R8
SHLQ $0x10, R10
IMULQ SI, R10
SHRQ $0x36, R10
- SHLQ $0x20, R11
- IMULQ R8, R11
- SHRQ $0x38, R11
- SHLQ $0x10, R13
- IMULQ SI, R13
- SHRQ $0x36, R13
+ MOVL DI, 24(SP)(R8*4)
MOVL R9, 24(SP)(R10*4)
- MOVL DI, 4120(SP)(R11*4)
- MOVL DI, 24(SP)(R13*4)
- JMP search_loop_encodeBetterBlockAsm8B
+ ADDQ $0x02, DI
+ SUBQ $0x02, R9
+ JMP index_loop_encodeBetterBlockAsm8B
emit_remainder_encodeBetterBlockAsm8B:
MOVQ src_len+32(FP), CX
@@ -8741,8 +10232,9 @@ memmove_emit_remainder_encodeBetterBlockAsm8B:
MOVL SI, BX
// genMemMoveShort
- CMPQ BX, $0x04
- JLE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_4
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_3
CMPQ BX, $0x08
JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_4through7
CMPQ BX, $0x10
@@ -8751,9 +10243,18 @@ memmove_emit_remainder_encodeBetterBlockAsm8B:
JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_17through32
JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_33through64
-emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_4:
- MOVL (CX), SI
- MOVL SI, (AX)
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_4through7:
@@ -8843,7 +10344,7 @@ emit_literal_done_emit_remainder_encodeBetterBlockAsm8B:
RET
// func encodeSnappyBlockAsm(dst []byte, src []byte) int
-// Requires: SSE2
+// Requires: BMI, SSE2
TEXT ·encodeSnappyBlockAsm(SB), $65560-56
MOVQ dst_base+0(FP), AX
MOVQ $0x00000200, CX
@@ -9079,35 +10580,57 @@ emit_literal_done_repeat_emit_encodeSnappyBlockAsm:
// matchLen
XORL R11, R11
CMPL R8, $0x08
- JL matchlen_single_repeat_extend_encodeSnappyBlockAsm
+ JL matchlen_match4_repeat_extend_encodeSnappyBlockAsm
matchlen_loopback_repeat_extend_encodeSnappyBlockAsm:
MOVQ (R9)(R11*1), R10
XORQ (SI)(R11*1), R10
TESTQ R10, R10
JZ matchlen_loop_repeat_extend_encodeSnappyBlockAsm
- BSFQ R10, R10
- SARQ $0x03, R10
- LEAL (R11)(R10*1), R11
- JMP repeat_extend_forward_end_encodeSnappyBlockAsm
+
+#ifdef GOAMD64_v3
+ TZCNTQ R10, R10
+
+#else
+ BSFQ R10, R10
+
+#endif
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
+ JMP repeat_extend_forward_end_encodeSnappyBlockAsm
matchlen_loop_repeat_extend_encodeSnappyBlockAsm:
LEAL -8(R8), R8
LEAL 8(R11), R11
CMPL R8, $0x08
JGE matchlen_loopback_repeat_extend_encodeSnappyBlockAsm
-
-matchlen_single_repeat_extend_encodeSnappyBlockAsm:
- TESTL R8, R8
- JZ repeat_extend_forward_end_encodeSnappyBlockAsm
-
-matchlen_single_loopback_repeat_extend_encodeSnappyBlockAsm:
+ JZ repeat_extend_forward_end_encodeSnappyBlockAsm
+
+matchlen_match4_repeat_extend_encodeSnappyBlockAsm:
+ CMPL R8, $0x04
+ JL matchlen_match2_repeat_extend_encodeSnappyBlockAsm
+ MOVL (R9)(R11*1), R10
+ CMPL (SI)(R11*1), R10
+ JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm
+ SUBL $0x04, R8
+ LEAL 4(R11), R11
+
+matchlen_match2_repeat_extend_encodeSnappyBlockAsm:
+ CMPL R8, $0x02
+ JL matchlen_match1_repeat_extend_encodeSnappyBlockAsm
+ MOVW (R9)(R11*1), R10
+ CMPW (SI)(R11*1), R10
+ JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm
+ SUBL $0x02, R8
+ LEAL 2(R11), R11
+
+matchlen_match1_repeat_extend_encodeSnappyBlockAsm:
+ CMPL R8, $0x01
+ JL repeat_extend_forward_end_encodeSnappyBlockAsm
MOVB (R9)(R11*1), R10
CMPB (SI)(R11*1), R10
JNE repeat_extend_forward_end_encodeSnappyBlockAsm
LEAL 1(R11), R11
- DECL R8
- JNZ matchlen_single_loopback_repeat_extend_encodeSnappyBlockAsm
repeat_extend_forward_end_encodeSnappyBlockAsm:
ADDL R11, CX
@@ -9380,35 +10903,57 @@ match_nolit_loop_encodeSnappyBlockAsm:
// matchLen
XORL R10, R10
CMPL DI, $0x08
- JL matchlen_single_match_nolit_encodeSnappyBlockAsm
+ JL matchlen_match4_match_nolit_encodeSnappyBlockAsm
matchlen_loopback_match_nolit_encodeSnappyBlockAsm:
MOVQ (R8)(R10*1), R9
XORQ (SI)(R10*1), R9
TESTQ R9, R9
JZ matchlen_loop_match_nolit_encodeSnappyBlockAsm
- BSFQ R9, R9
- SARQ $0x03, R9
- LEAL (R10)(R9*1), R10
- JMP match_nolit_end_encodeSnappyBlockAsm
+
+#ifdef GOAMD64_v3
+ TZCNTQ R9, R9
+
+#else
+ BSFQ R9, R9
+
+#endif
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
+ JMP match_nolit_end_encodeSnappyBlockAsm
matchlen_loop_match_nolit_encodeSnappyBlockAsm:
LEAL -8(DI), DI
LEAL 8(R10), R10
CMPL DI, $0x08
JGE matchlen_loopback_match_nolit_encodeSnappyBlockAsm
-
-matchlen_single_match_nolit_encodeSnappyBlockAsm:
- TESTL DI, DI
- JZ match_nolit_end_encodeSnappyBlockAsm
-
-matchlen_single_loopback_match_nolit_encodeSnappyBlockAsm:
+ JZ match_nolit_end_encodeSnappyBlockAsm
+
+matchlen_match4_match_nolit_encodeSnappyBlockAsm:
+ CMPL DI, $0x04
+ JL matchlen_match2_match_nolit_encodeSnappyBlockAsm
+ MOVL (R8)(R10*1), R9
+ CMPL (SI)(R10*1), R9
+ JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm
+ SUBL $0x04, DI
+ LEAL 4(R10), R10
+
+matchlen_match2_match_nolit_encodeSnappyBlockAsm:
+ CMPL DI, $0x02
+ JL matchlen_match1_match_nolit_encodeSnappyBlockAsm
+ MOVW (R8)(R10*1), R9
+ CMPW (SI)(R10*1), R9
+ JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm
+ SUBL $0x02, DI
+ LEAL 2(R10), R10
+
+matchlen_match1_match_nolit_encodeSnappyBlockAsm:
+ CMPL DI, $0x01
+ JL match_nolit_end_encodeSnappyBlockAsm
MOVB (R8)(R10*1), R9
CMPB (SI)(R10*1), R9
JNE match_nolit_end_encodeSnappyBlockAsm
LEAL 1(R10), R10
- DECL DI
- JNZ matchlen_single_loopback_match_nolit_encodeSnappyBlockAsm
match_nolit_end_encodeSnappyBlockAsm:
ADDL R10, CX
@@ -9567,17 +11112,36 @@ memmove_emit_remainder_encodeSnappyBlockAsm:
MOVL SI, BX
// genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_3
CMPQ BX, $0x08
- JLE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_8
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_4through7
CMPQ BX, $0x10
JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_8through16
CMPQ BX, $0x20
JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_17through32
JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_33through64
-emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_8:
- MOVQ (CX), SI
- MOVQ SI, (AX)
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_8through16:
@@ -9660,7 +11224,7 @@ emit_literal_done_emit_remainder_encodeSnappyBlockAsm:
RET
// func encodeSnappyBlockAsm64K(dst []byte, src []byte) int
-// Requires: SSE2
+// Requires: BMI, SSE2
TEXT ·encodeSnappyBlockAsm64K(SB), $65560-56
MOVQ dst_base+0(FP), AX
MOVQ $0x00000200, CX
@@ -9877,35 +11441,57 @@ emit_literal_done_repeat_emit_encodeSnappyBlockAsm64K:
// matchLen
XORL R11, R11
CMPL R8, $0x08
- JL matchlen_single_repeat_extend_encodeSnappyBlockAsm64K
+ JL matchlen_match4_repeat_extend_encodeSnappyBlockAsm64K
matchlen_loopback_repeat_extend_encodeSnappyBlockAsm64K:
MOVQ (R9)(R11*1), R10
XORQ (SI)(R11*1), R10
TESTQ R10, R10
JZ matchlen_loop_repeat_extend_encodeSnappyBlockAsm64K
- BSFQ R10, R10
- SARQ $0x03, R10
- LEAL (R11)(R10*1), R11
- JMP repeat_extend_forward_end_encodeSnappyBlockAsm64K
+
+#ifdef GOAMD64_v3
+ TZCNTQ R10, R10
+
+#else
+ BSFQ R10, R10
+
+#endif
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
+ JMP repeat_extend_forward_end_encodeSnappyBlockAsm64K
matchlen_loop_repeat_extend_encodeSnappyBlockAsm64K:
LEAL -8(R8), R8
LEAL 8(R11), R11
CMPL R8, $0x08
JGE matchlen_loopback_repeat_extend_encodeSnappyBlockAsm64K
-
-matchlen_single_repeat_extend_encodeSnappyBlockAsm64K:
- TESTL R8, R8
- JZ repeat_extend_forward_end_encodeSnappyBlockAsm64K
-
-matchlen_single_loopback_repeat_extend_encodeSnappyBlockAsm64K:
+ JZ repeat_extend_forward_end_encodeSnappyBlockAsm64K
+
+matchlen_match4_repeat_extend_encodeSnappyBlockAsm64K:
+ CMPL R8, $0x04
+ JL matchlen_match2_repeat_extend_encodeSnappyBlockAsm64K
+ MOVL (R9)(R11*1), R10
+ CMPL (SI)(R11*1), R10
+ JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm64K
+ SUBL $0x04, R8
+ LEAL 4(R11), R11
+
+matchlen_match2_repeat_extend_encodeSnappyBlockAsm64K:
+ CMPL R8, $0x02
+ JL matchlen_match1_repeat_extend_encodeSnappyBlockAsm64K
+ MOVW (R9)(R11*1), R10
+ CMPW (SI)(R11*1), R10
+ JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm64K
+ SUBL $0x02, R8
+ LEAL 2(R11), R11
+
+matchlen_match1_repeat_extend_encodeSnappyBlockAsm64K:
+ CMPL R8, $0x01
+ JL repeat_extend_forward_end_encodeSnappyBlockAsm64K
MOVB (R9)(R11*1), R10
CMPB (SI)(R11*1), R10
JNE repeat_extend_forward_end_encodeSnappyBlockAsm64K
LEAL 1(R11), R11
- DECL R8
- JNZ matchlen_single_loopback_repeat_extend_encodeSnappyBlockAsm64K
repeat_extend_forward_end_encodeSnappyBlockAsm64K:
ADDL R11, CX
@@ -10135,35 +11721,57 @@ match_nolit_loop_encodeSnappyBlockAsm64K:
// matchLen
XORL R10, R10
CMPL DI, $0x08
- JL matchlen_single_match_nolit_encodeSnappyBlockAsm64K
+ JL matchlen_match4_match_nolit_encodeSnappyBlockAsm64K
matchlen_loopback_match_nolit_encodeSnappyBlockAsm64K:
MOVQ (R8)(R10*1), R9
XORQ (SI)(R10*1), R9
TESTQ R9, R9
JZ matchlen_loop_match_nolit_encodeSnappyBlockAsm64K
- BSFQ R9, R9
- SARQ $0x03, R9
- LEAL (R10)(R9*1), R10
- JMP match_nolit_end_encodeSnappyBlockAsm64K
+
+#ifdef GOAMD64_v3
+ TZCNTQ R9, R9
+
+#else
+ BSFQ R9, R9
+
+#endif
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
+ JMP match_nolit_end_encodeSnappyBlockAsm64K
matchlen_loop_match_nolit_encodeSnappyBlockAsm64K:
LEAL -8(DI), DI
LEAL 8(R10), R10
CMPL DI, $0x08
JGE matchlen_loopback_match_nolit_encodeSnappyBlockAsm64K
-
-matchlen_single_match_nolit_encodeSnappyBlockAsm64K:
- TESTL DI, DI
- JZ match_nolit_end_encodeSnappyBlockAsm64K
-
-matchlen_single_loopback_match_nolit_encodeSnappyBlockAsm64K:
+ JZ match_nolit_end_encodeSnappyBlockAsm64K
+
+matchlen_match4_match_nolit_encodeSnappyBlockAsm64K:
+ CMPL DI, $0x04
+ JL matchlen_match2_match_nolit_encodeSnappyBlockAsm64K
+ MOVL (R8)(R10*1), R9
+ CMPL (SI)(R10*1), R9
+ JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm64K
+ SUBL $0x04, DI
+ LEAL 4(R10), R10
+
+matchlen_match2_match_nolit_encodeSnappyBlockAsm64K:
+ CMPL DI, $0x02
+ JL matchlen_match1_match_nolit_encodeSnappyBlockAsm64K
+ MOVW (R8)(R10*1), R9
+ CMPW (SI)(R10*1), R9
+ JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm64K
+ SUBL $0x02, DI
+ LEAL 2(R10), R10
+
+matchlen_match1_match_nolit_encodeSnappyBlockAsm64K:
+ CMPL DI, $0x01
+ JL match_nolit_end_encodeSnappyBlockAsm64K
MOVB (R8)(R10*1), R9
CMPB (SI)(R10*1), R9
JNE match_nolit_end_encodeSnappyBlockAsm64K
LEAL 1(R10), R10
- DECL DI
- JNZ matchlen_single_loopback_match_nolit_encodeSnappyBlockAsm64K
match_nolit_end_encodeSnappyBlockAsm64K:
ADDL R10, CX
@@ -10279,17 +11887,36 @@ memmove_emit_remainder_encodeSnappyBlockAsm64K:
MOVL SI, BX
// genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_3
CMPQ BX, $0x08
- JLE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_8
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_4through7
CMPQ BX, $0x10
JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_8through16
CMPQ BX, $0x20
JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_17through32
JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_33through64
-emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_8:
- MOVQ (CX), SI
- MOVQ SI, (AX)
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_8through16:
@@ -10372,7 +11999,7 @@ emit_literal_done_emit_remainder_encodeSnappyBlockAsm64K:
RET
// func encodeSnappyBlockAsm12B(dst []byte, src []byte) int
-// Requires: SSE2
+// Requires: BMI, SSE2
TEXT ·encodeSnappyBlockAsm12B(SB), $16408-56
MOVQ dst_base+0(FP), AX
MOVQ $0x00000080, CX
@@ -10589,35 +12216,57 @@ emit_literal_done_repeat_emit_encodeSnappyBlockAsm12B:
// matchLen
XORL R11, R11
CMPL R8, $0x08
- JL matchlen_single_repeat_extend_encodeSnappyBlockAsm12B
+ JL matchlen_match4_repeat_extend_encodeSnappyBlockAsm12B
matchlen_loopback_repeat_extend_encodeSnappyBlockAsm12B:
MOVQ (R9)(R11*1), R10
XORQ (SI)(R11*1), R10
TESTQ R10, R10
JZ matchlen_loop_repeat_extend_encodeSnappyBlockAsm12B
- BSFQ R10, R10
- SARQ $0x03, R10
- LEAL (R11)(R10*1), R11
- JMP repeat_extend_forward_end_encodeSnappyBlockAsm12B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R10, R10
+
+#else
+ BSFQ R10, R10
+
+#endif
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
+ JMP repeat_extend_forward_end_encodeSnappyBlockAsm12B
matchlen_loop_repeat_extend_encodeSnappyBlockAsm12B:
LEAL -8(R8), R8
LEAL 8(R11), R11
CMPL R8, $0x08
JGE matchlen_loopback_repeat_extend_encodeSnappyBlockAsm12B
-
-matchlen_single_repeat_extend_encodeSnappyBlockAsm12B:
- TESTL R8, R8
- JZ repeat_extend_forward_end_encodeSnappyBlockAsm12B
-
-matchlen_single_loopback_repeat_extend_encodeSnappyBlockAsm12B:
+ JZ repeat_extend_forward_end_encodeSnappyBlockAsm12B
+
+matchlen_match4_repeat_extend_encodeSnappyBlockAsm12B:
+ CMPL R8, $0x04
+ JL matchlen_match2_repeat_extend_encodeSnappyBlockAsm12B
+ MOVL (R9)(R11*1), R10
+ CMPL (SI)(R11*1), R10
+ JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm12B
+ SUBL $0x04, R8
+ LEAL 4(R11), R11
+
+matchlen_match2_repeat_extend_encodeSnappyBlockAsm12B:
+ CMPL R8, $0x02
+ JL matchlen_match1_repeat_extend_encodeSnappyBlockAsm12B
+ MOVW (R9)(R11*1), R10
+ CMPW (SI)(R11*1), R10
+ JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm12B
+ SUBL $0x02, R8
+ LEAL 2(R11), R11
+
+matchlen_match1_repeat_extend_encodeSnappyBlockAsm12B:
+ CMPL R8, $0x01
+ JL repeat_extend_forward_end_encodeSnappyBlockAsm12B
MOVB (R9)(R11*1), R10
CMPB (SI)(R11*1), R10
JNE repeat_extend_forward_end_encodeSnappyBlockAsm12B
LEAL 1(R11), R11
- DECL R8
- JNZ matchlen_single_loopback_repeat_extend_encodeSnappyBlockAsm12B
repeat_extend_forward_end_encodeSnappyBlockAsm12B:
ADDL R11, CX
@@ -10847,35 +12496,57 @@ match_nolit_loop_encodeSnappyBlockAsm12B:
// matchLen
XORL R10, R10
CMPL DI, $0x08
- JL matchlen_single_match_nolit_encodeSnappyBlockAsm12B
+ JL matchlen_match4_match_nolit_encodeSnappyBlockAsm12B
matchlen_loopback_match_nolit_encodeSnappyBlockAsm12B:
MOVQ (R8)(R10*1), R9
XORQ (SI)(R10*1), R9
TESTQ R9, R9
JZ matchlen_loop_match_nolit_encodeSnappyBlockAsm12B
- BSFQ R9, R9
- SARQ $0x03, R9
- LEAL (R10)(R9*1), R10
- JMP match_nolit_end_encodeSnappyBlockAsm12B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R9, R9
+
+#else
+ BSFQ R9, R9
+
+#endif
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
+ JMP match_nolit_end_encodeSnappyBlockAsm12B
matchlen_loop_match_nolit_encodeSnappyBlockAsm12B:
LEAL -8(DI), DI
LEAL 8(R10), R10
CMPL DI, $0x08
JGE matchlen_loopback_match_nolit_encodeSnappyBlockAsm12B
-
-matchlen_single_match_nolit_encodeSnappyBlockAsm12B:
- TESTL DI, DI
- JZ match_nolit_end_encodeSnappyBlockAsm12B
-
-matchlen_single_loopback_match_nolit_encodeSnappyBlockAsm12B:
+ JZ match_nolit_end_encodeSnappyBlockAsm12B
+
+matchlen_match4_match_nolit_encodeSnappyBlockAsm12B:
+ CMPL DI, $0x04
+ JL matchlen_match2_match_nolit_encodeSnappyBlockAsm12B
+ MOVL (R8)(R10*1), R9
+ CMPL (SI)(R10*1), R9
+ JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm12B
+ SUBL $0x04, DI
+ LEAL 4(R10), R10
+
+matchlen_match2_match_nolit_encodeSnappyBlockAsm12B:
+ CMPL DI, $0x02
+ JL matchlen_match1_match_nolit_encodeSnappyBlockAsm12B
+ MOVW (R8)(R10*1), R9
+ CMPW (SI)(R10*1), R9
+ JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm12B
+ SUBL $0x02, DI
+ LEAL 2(R10), R10
+
+matchlen_match1_match_nolit_encodeSnappyBlockAsm12B:
+ CMPL DI, $0x01
+ JL match_nolit_end_encodeSnappyBlockAsm12B
MOVB (R8)(R10*1), R9
CMPB (SI)(R10*1), R9
JNE match_nolit_end_encodeSnappyBlockAsm12B
LEAL 1(R10), R10
- DECL DI
- JNZ matchlen_single_loopback_match_nolit_encodeSnappyBlockAsm12B
match_nolit_end_encodeSnappyBlockAsm12B:
ADDL R10, CX
@@ -10991,17 +12662,36 @@ memmove_emit_remainder_encodeSnappyBlockAsm12B:
MOVL SI, BX
// genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_3
CMPQ BX, $0x08
- JLE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_8
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_4through7
CMPQ BX, $0x10
JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_8through16
CMPQ BX, $0x20
JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_17through32
JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_33through64
-emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_8:
- MOVQ (CX), SI
- MOVQ SI, (AX)
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_8through16:
@@ -11084,7 +12774,7 @@ emit_literal_done_emit_remainder_encodeSnappyBlockAsm12B:
RET
// func encodeSnappyBlockAsm10B(dst []byte, src []byte) int
-// Requires: SSE2
+// Requires: BMI, SSE2
TEXT ·encodeSnappyBlockAsm10B(SB), $4120-56
MOVQ dst_base+0(FP), AX
MOVQ $0x00000020, CX
@@ -11301,35 +12991,57 @@ emit_literal_done_repeat_emit_encodeSnappyBlockAsm10B:
// matchLen
XORL R11, R11
CMPL R8, $0x08
- JL matchlen_single_repeat_extend_encodeSnappyBlockAsm10B
+ JL matchlen_match4_repeat_extend_encodeSnappyBlockAsm10B
matchlen_loopback_repeat_extend_encodeSnappyBlockAsm10B:
MOVQ (R9)(R11*1), R10
XORQ (SI)(R11*1), R10
TESTQ R10, R10
JZ matchlen_loop_repeat_extend_encodeSnappyBlockAsm10B
- BSFQ R10, R10
- SARQ $0x03, R10
- LEAL (R11)(R10*1), R11
- JMP repeat_extend_forward_end_encodeSnappyBlockAsm10B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R10, R10
+
+#else
+ BSFQ R10, R10
+
+#endif
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
+ JMP repeat_extend_forward_end_encodeSnappyBlockAsm10B
matchlen_loop_repeat_extend_encodeSnappyBlockAsm10B:
LEAL -8(R8), R8
LEAL 8(R11), R11
CMPL R8, $0x08
JGE matchlen_loopback_repeat_extend_encodeSnappyBlockAsm10B
-
-matchlen_single_repeat_extend_encodeSnappyBlockAsm10B:
- TESTL R8, R8
- JZ repeat_extend_forward_end_encodeSnappyBlockAsm10B
-
-matchlen_single_loopback_repeat_extend_encodeSnappyBlockAsm10B:
+ JZ repeat_extend_forward_end_encodeSnappyBlockAsm10B
+
+matchlen_match4_repeat_extend_encodeSnappyBlockAsm10B:
+ CMPL R8, $0x04
+ JL matchlen_match2_repeat_extend_encodeSnappyBlockAsm10B
+ MOVL (R9)(R11*1), R10
+ CMPL (SI)(R11*1), R10
+ JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm10B
+ SUBL $0x04, R8
+ LEAL 4(R11), R11
+
+matchlen_match2_repeat_extend_encodeSnappyBlockAsm10B:
+ CMPL R8, $0x02
+ JL matchlen_match1_repeat_extend_encodeSnappyBlockAsm10B
+ MOVW (R9)(R11*1), R10
+ CMPW (SI)(R11*1), R10
+ JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm10B
+ SUBL $0x02, R8
+ LEAL 2(R11), R11
+
+matchlen_match1_repeat_extend_encodeSnappyBlockAsm10B:
+ CMPL R8, $0x01
+ JL repeat_extend_forward_end_encodeSnappyBlockAsm10B
MOVB (R9)(R11*1), R10
CMPB (SI)(R11*1), R10
JNE repeat_extend_forward_end_encodeSnappyBlockAsm10B
LEAL 1(R11), R11
- DECL R8
- JNZ matchlen_single_loopback_repeat_extend_encodeSnappyBlockAsm10B
repeat_extend_forward_end_encodeSnappyBlockAsm10B:
ADDL R11, CX
@@ -11559,35 +13271,57 @@ match_nolit_loop_encodeSnappyBlockAsm10B:
// matchLen
XORL R10, R10
CMPL DI, $0x08
- JL matchlen_single_match_nolit_encodeSnappyBlockAsm10B
+ JL matchlen_match4_match_nolit_encodeSnappyBlockAsm10B
matchlen_loopback_match_nolit_encodeSnappyBlockAsm10B:
MOVQ (R8)(R10*1), R9
XORQ (SI)(R10*1), R9
TESTQ R9, R9
JZ matchlen_loop_match_nolit_encodeSnappyBlockAsm10B
- BSFQ R9, R9
- SARQ $0x03, R9
- LEAL (R10)(R9*1), R10
- JMP match_nolit_end_encodeSnappyBlockAsm10B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R9, R9
+
+#else
+ BSFQ R9, R9
+
+#endif
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
+ JMP match_nolit_end_encodeSnappyBlockAsm10B
matchlen_loop_match_nolit_encodeSnappyBlockAsm10B:
LEAL -8(DI), DI
LEAL 8(R10), R10
CMPL DI, $0x08
JGE matchlen_loopback_match_nolit_encodeSnappyBlockAsm10B
-
-matchlen_single_match_nolit_encodeSnappyBlockAsm10B:
- TESTL DI, DI
- JZ match_nolit_end_encodeSnappyBlockAsm10B
-
-matchlen_single_loopback_match_nolit_encodeSnappyBlockAsm10B:
+ JZ match_nolit_end_encodeSnappyBlockAsm10B
+
+matchlen_match4_match_nolit_encodeSnappyBlockAsm10B:
+ CMPL DI, $0x04
+ JL matchlen_match2_match_nolit_encodeSnappyBlockAsm10B
+ MOVL (R8)(R10*1), R9
+ CMPL (SI)(R10*1), R9
+ JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm10B
+ SUBL $0x04, DI
+ LEAL 4(R10), R10
+
+matchlen_match2_match_nolit_encodeSnappyBlockAsm10B:
+ CMPL DI, $0x02
+ JL matchlen_match1_match_nolit_encodeSnappyBlockAsm10B
+ MOVW (R8)(R10*1), R9
+ CMPW (SI)(R10*1), R9
+ JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm10B
+ SUBL $0x02, DI
+ LEAL 2(R10), R10
+
+matchlen_match1_match_nolit_encodeSnappyBlockAsm10B:
+ CMPL DI, $0x01
+ JL match_nolit_end_encodeSnappyBlockAsm10B
MOVB (R8)(R10*1), R9
CMPB (SI)(R10*1), R9
JNE match_nolit_end_encodeSnappyBlockAsm10B
LEAL 1(R10), R10
- DECL DI
- JNZ matchlen_single_loopback_match_nolit_encodeSnappyBlockAsm10B
match_nolit_end_encodeSnappyBlockAsm10B:
ADDL R10, CX
@@ -11703,17 +13437,36 @@ memmove_emit_remainder_encodeSnappyBlockAsm10B:
MOVL SI, BX
// genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_3
CMPQ BX, $0x08
- JLE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_8
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_4through7
CMPQ BX, $0x10
JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_8through16
CMPQ BX, $0x20
JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_17through32
JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_33through64
-emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_8:
- MOVQ (CX), SI
- MOVQ SI, (AX)
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_8through16:
@@ -11796,7 +13549,7 @@ emit_literal_done_emit_remainder_encodeSnappyBlockAsm10B:
RET
// func encodeSnappyBlockAsm8B(dst []byte, src []byte) int
-// Requires: SSE2
+// Requires: BMI, SSE2
TEXT ·encodeSnappyBlockAsm8B(SB), $1048-56
MOVQ dst_base+0(FP), AX
MOVQ $0x00000008, CX
@@ -12013,35 +13766,57 @@ emit_literal_done_repeat_emit_encodeSnappyBlockAsm8B:
// matchLen
XORL R11, R11
CMPL R8, $0x08
- JL matchlen_single_repeat_extend_encodeSnappyBlockAsm8B
+ JL matchlen_match4_repeat_extend_encodeSnappyBlockAsm8B
matchlen_loopback_repeat_extend_encodeSnappyBlockAsm8B:
MOVQ (R9)(R11*1), R10
XORQ (SI)(R11*1), R10
TESTQ R10, R10
JZ matchlen_loop_repeat_extend_encodeSnappyBlockAsm8B
- BSFQ R10, R10
- SARQ $0x03, R10
- LEAL (R11)(R10*1), R11
- JMP repeat_extend_forward_end_encodeSnappyBlockAsm8B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R10, R10
+
+#else
+ BSFQ R10, R10
+
+#endif
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
+ JMP repeat_extend_forward_end_encodeSnappyBlockAsm8B
matchlen_loop_repeat_extend_encodeSnappyBlockAsm8B:
LEAL -8(R8), R8
LEAL 8(R11), R11
CMPL R8, $0x08
JGE matchlen_loopback_repeat_extend_encodeSnappyBlockAsm8B
-
-matchlen_single_repeat_extend_encodeSnappyBlockAsm8B:
- TESTL R8, R8
- JZ repeat_extend_forward_end_encodeSnappyBlockAsm8B
-
-matchlen_single_loopback_repeat_extend_encodeSnappyBlockAsm8B:
+ JZ repeat_extend_forward_end_encodeSnappyBlockAsm8B
+
+matchlen_match4_repeat_extend_encodeSnappyBlockAsm8B:
+ CMPL R8, $0x04
+ JL matchlen_match2_repeat_extend_encodeSnappyBlockAsm8B
+ MOVL (R9)(R11*1), R10
+ CMPL (SI)(R11*1), R10
+ JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm8B
+ SUBL $0x04, R8
+ LEAL 4(R11), R11
+
+matchlen_match2_repeat_extend_encodeSnappyBlockAsm8B:
+ CMPL R8, $0x02
+ JL matchlen_match1_repeat_extend_encodeSnappyBlockAsm8B
+ MOVW (R9)(R11*1), R10
+ CMPW (SI)(R11*1), R10
+ JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm8B
+ SUBL $0x02, R8
+ LEAL 2(R11), R11
+
+matchlen_match1_repeat_extend_encodeSnappyBlockAsm8B:
+ CMPL R8, $0x01
+ JL repeat_extend_forward_end_encodeSnappyBlockAsm8B
MOVB (R9)(R11*1), R10
CMPB (SI)(R11*1), R10
JNE repeat_extend_forward_end_encodeSnappyBlockAsm8B
LEAL 1(R11), R11
- DECL R8
- JNZ matchlen_single_loopback_repeat_extend_encodeSnappyBlockAsm8B
repeat_extend_forward_end_encodeSnappyBlockAsm8B:
ADDL R11, CX
@@ -12269,35 +14044,57 @@ match_nolit_loop_encodeSnappyBlockAsm8B:
// matchLen
XORL R10, R10
CMPL DI, $0x08
- JL matchlen_single_match_nolit_encodeSnappyBlockAsm8B
+ JL matchlen_match4_match_nolit_encodeSnappyBlockAsm8B
matchlen_loopback_match_nolit_encodeSnappyBlockAsm8B:
MOVQ (R8)(R10*1), R9
XORQ (SI)(R10*1), R9
TESTQ R9, R9
JZ matchlen_loop_match_nolit_encodeSnappyBlockAsm8B
- BSFQ R9, R9
- SARQ $0x03, R9
- LEAL (R10)(R9*1), R10
- JMP match_nolit_end_encodeSnappyBlockAsm8B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R9, R9
+
+#else
+ BSFQ R9, R9
+
+#endif
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
+ JMP match_nolit_end_encodeSnappyBlockAsm8B
matchlen_loop_match_nolit_encodeSnappyBlockAsm8B:
LEAL -8(DI), DI
LEAL 8(R10), R10
CMPL DI, $0x08
JGE matchlen_loopback_match_nolit_encodeSnappyBlockAsm8B
-
-matchlen_single_match_nolit_encodeSnappyBlockAsm8B:
- TESTL DI, DI
- JZ match_nolit_end_encodeSnappyBlockAsm8B
-
-matchlen_single_loopback_match_nolit_encodeSnappyBlockAsm8B:
+ JZ match_nolit_end_encodeSnappyBlockAsm8B
+
+matchlen_match4_match_nolit_encodeSnappyBlockAsm8B:
+ CMPL DI, $0x04
+ JL matchlen_match2_match_nolit_encodeSnappyBlockAsm8B
+ MOVL (R8)(R10*1), R9
+ CMPL (SI)(R10*1), R9
+ JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm8B
+ SUBL $0x04, DI
+ LEAL 4(R10), R10
+
+matchlen_match2_match_nolit_encodeSnappyBlockAsm8B:
+ CMPL DI, $0x02
+ JL matchlen_match1_match_nolit_encodeSnappyBlockAsm8B
+ MOVW (R8)(R10*1), R9
+ CMPW (SI)(R10*1), R9
+ JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm8B
+ SUBL $0x02, DI
+ LEAL 2(R10), R10
+
+matchlen_match1_match_nolit_encodeSnappyBlockAsm8B:
+ CMPL DI, $0x01
+ JL match_nolit_end_encodeSnappyBlockAsm8B
MOVB (R8)(R10*1), R9
CMPB (SI)(R10*1), R9
JNE match_nolit_end_encodeSnappyBlockAsm8B
LEAL 1(R10), R10
- DECL DI
- JNZ matchlen_single_loopback_match_nolit_encodeSnappyBlockAsm8B
match_nolit_end_encodeSnappyBlockAsm8B:
ADDL R10, CX
@@ -12411,17 +14208,36 @@ memmove_emit_remainder_encodeSnappyBlockAsm8B:
MOVL SI, BX
// genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_3
CMPQ BX, $0x08
- JLE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_8
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_4through7
CMPQ BX, $0x10
JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_8through16
CMPQ BX, $0x20
JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_17through32
JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_33through64
-emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_8:
- MOVQ (CX), SI
- MOVQ SI, (AX)
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_8through16:
@@ -12504,10 +14320,10 @@ emit_literal_done_emit_remainder_encodeSnappyBlockAsm8B:
RET
// func encodeSnappyBetterBlockAsm(dst []byte, src []byte) int
-// Requires: SSE2
-TEXT ·encodeSnappyBetterBlockAsm(SB), $327704-56
+// Requires: BMI, SSE2
+TEXT ·encodeSnappyBetterBlockAsm(SB), $589848-56
MOVQ dst_base+0(FP), AX
- MOVQ $0x00000a00, CX
+ MOVQ $0x00001200, CX
LEAQ 24(SP), DX
PXOR X0, X0
@@ -12559,27 +14375,37 @@ check_maxskip_cont_encodeSnappyBetterBlockAsm:
MOVQ DI, R11
SHLQ $0x08, R10
IMULQ R9, R10
- SHRQ $0x30, R10
+ SHRQ $0x2f, R10
SHLQ $0x20, R11
IMULQ SI, R11
SHRQ $0x32, R11
MOVL 24(SP)(R10*4), SI
- MOVL 262168(SP)(R11*4), R8
+ MOVL 524312(SP)(R11*4), R8
MOVL CX, 24(SP)(R10*4)
- MOVL CX, 262168(SP)(R11*4)
- CMPL (DX)(SI*1), DI
+ MOVL CX, 524312(SP)(R11*4)
+ MOVQ (DX)(SI*1), R10
+ MOVQ (DX)(R8*1), R11
+ CMPQ R10, DI
JEQ candidate_match_encodeSnappyBetterBlockAsm
- CMPL (DX)(R8*1), DI
- JEQ candidateS_match_encodeSnappyBetterBlockAsm
- MOVL 20(SP), CX
- JMP search_loop_encodeSnappyBetterBlockAsm
+ CMPQ R11, DI
+ JNE no_short_found_encodeSnappyBetterBlockAsm
+ MOVL R8, SI
+ JMP candidate_match_encodeSnappyBetterBlockAsm
+
+no_short_found_encodeSnappyBetterBlockAsm:
+ CMPL R10, DI
+ JEQ candidate_match_encodeSnappyBetterBlockAsm
+ CMPL R11, DI
+ JEQ candidateS_match_encodeSnappyBetterBlockAsm
+ MOVL 20(SP), CX
+ JMP search_loop_encodeSnappyBetterBlockAsm
candidateS_match_encodeSnappyBetterBlockAsm:
SHRQ $0x08, DI
MOVQ DI, R10
SHLQ $0x08, R10
IMULQ R9, R10
- SHRQ $0x30, R10
+ SHRQ $0x2f, R10
MOVL 24(SP)(R10*4), SI
INCL CX
MOVL CX, 24(SP)(R10*4)
@@ -12626,35 +14452,57 @@ match_dst_size_check_encodeSnappyBetterBlockAsm:
// matchLen
XORL R12, R12
CMPL R8, $0x08
- JL matchlen_single_match_nolit_encodeSnappyBetterBlockAsm
+ JL matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm
matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm:
MOVQ (R9)(R12*1), R11
XORQ (R10)(R12*1), R11
TESTQ R11, R11
JZ matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm
- BSFQ R11, R11
- SARQ $0x03, R11
- LEAL (R12)(R11*1), R12
- JMP match_nolit_end_encodeSnappyBetterBlockAsm
+
+#ifdef GOAMD64_v3
+ TZCNTQ R11, R11
+
+#else
+ BSFQ R11, R11
+
+#endif
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
+ JMP match_nolit_end_encodeSnappyBetterBlockAsm
matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm:
LEAL -8(R8), R8
LEAL 8(R12), R12
CMPL R8, $0x08
JGE matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm
-
-matchlen_single_match_nolit_encodeSnappyBetterBlockAsm:
- TESTL R8, R8
- JZ match_nolit_end_encodeSnappyBetterBlockAsm
-
-matchlen_single_loopback_match_nolit_encodeSnappyBetterBlockAsm:
+ JZ match_nolit_end_encodeSnappyBetterBlockAsm
+
+matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm:
+ CMPL R8, $0x04
+ JL matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm
+ MOVL (R9)(R12*1), R11
+ CMPL (R10)(R12*1), R11
+ JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm
+ SUBL $0x04, R8
+ LEAL 4(R12), R12
+
+matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm:
+ CMPL R8, $0x02
+ JL matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm
+ MOVW (R9)(R12*1), R11
+ CMPW (R10)(R12*1), R11
+ JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm
+ SUBL $0x02, R8
+ LEAL 2(R12), R12
+
+matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm:
+ CMPL R8, $0x01
+ JL match_nolit_end_encodeSnappyBetterBlockAsm
MOVB (R9)(R12*1), R11
CMPB (R10)(R12*1), R11
JNE match_nolit_end_encodeSnappyBetterBlockAsm
LEAL 1(R12), R12
- DECL R8
- JNZ matchlen_single_loopback_match_nolit_encodeSnappyBetterBlockAsm
match_nolit_end_encodeSnappyBetterBlockAsm:
MOVL CX, R8
@@ -12881,52 +14729,49 @@ match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm:
match_nolit_dst_ok_encodeSnappyBetterBlockAsm:
MOVQ $0x00cf1bbcdcbfa563, SI
MOVQ $0x9e3779b1, R8
- INCL DI
- MOVQ (DX)(DI*1), R9
- MOVQ R9, R10
- MOVQ R9, R11
- MOVQ R9, R12
- SHRQ $0x08, R11
- MOVQ R11, R13
- SHRQ $0x10, R12
- LEAL 1(DI), R14
- LEAL 2(DI), R15
- MOVQ -2(DX)(CX*1), R9
+ LEAQ 1(DI), DI
+ LEAQ -2(CX), R9
+ MOVQ (DX)(DI*1), R10
+ MOVQ 1(DX)(DI*1), R11
+ MOVQ (DX)(R9*1), R12
+ MOVQ 1(DX)(R9*1), R13
SHLQ $0x08, R10
IMULQ SI, R10
- SHRQ $0x30, R10
- SHLQ $0x08, R13
- IMULQ SI, R13
- SHRQ $0x30, R13
+ SHRQ $0x2f, R10
SHLQ $0x20, R11
IMULQ R8, R11
SHRQ $0x32, R11
- SHLQ $0x20, R12
- IMULQ R8, R12
- SHRQ $0x32, R12
+ SHLQ $0x08, R12
+ IMULQ SI, R12
+ SHRQ $0x2f, R12
+ SHLQ $0x20, R13
+ IMULQ R8, R13
+ SHRQ $0x32, R13
+ LEAQ 1(DI), R8
+ LEAQ 1(R9), R14
MOVL DI, 24(SP)(R10*4)
- MOVL R14, 24(SP)(R13*4)
- MOVL R14, 262168(SP)(R11*4)
- MOVL R15, 262168(SP)(R12*4)
- MOVQ R9, R10
- MOVQ R9, R11
- SHRQ $0x08, R11
- MOVQ R11, R13
- LEAL -2(CX), R9
- LEAL -1(CX), DI
+ MOVL R9, 24(SP)(R12*4)
+ MOVL R8, 524312(SP)(R11*4)
+ MOVL R14, 524312(SP)(R13*4)
+ ADDQ $0x01, DI
+ SUBQ $0x01, R9
+
+index_loop_encodeSnappyBetterBlockAsm:
+ CMPQ DI, R9
+ JAE search_loop_encodeSnappyBetterBlockAsm
+ MOVQ (DX)(DI*1), R8
+ MOVQ (DX)(R9*1), R10
+ SHLQ $0x08, R8
+ IMULQ SI, R8
+ SHRQ $0x2f, R8
SHLQ $0x08, R10
IMULQ SI, R10
- SHRQ $0x30, R10
- SHLQ $0x20, R11
- IMULQ R8, R11
- SHRQ $0x32, R11
- SHLQ $0x08, R13
- IMULQ SI, R13
- SHRQ $0x30, R13
+ SHRQ $0x2f, R10
+ MOVL DI, 24(SP)(R8*4)
MOVL R9, 24(SP)(R10*4)
- MOVL DI, 262168(SP)(R11*4)
- MOVL DI, 24(SP)(R13*4)
- JMP search_loop_encodeSnappyBetterBlockAsm
+ ADDQ $0x02, DI
+ SUBQ $0x02, R9
+ JMP index_loop_encodeSnappyBetterBlockAsm
emit_remainder_encodeSnappyBetterBlockAsm:
MOVQ src_len+32(FP), CX
@@ -12993,17 +14838,36 @@ memmove_emit_remainder_encodeSnappyBetterBlockAsm:
MOVL SI, BX
// genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_3
CMPQ BX, $0x08
- JLE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_8
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_4through7
CMPQ BX, $0x10
JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_8through16
CMPQ BX, $0x20
JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_17through32
JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_33through64
-emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_8:
- MOVQ (CX), SI
- MOVQ SI, (AX)
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_8through16:
@@ -13086,7 +14950,7 @@ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm:
RET
// func encodeSnappyBetterBlockAsm64K(dst []byte, src []byte) int
-// Requires: SSE2
+// Requires: BMI, SSE2
TEXT ·encodeSnappyBetterBlockAsm64K(SB), $327704-56
MOVQ dst_base+0(FP), AX
MOVQ $0x00000a00, CX
@@ -13141,12 +15005,22 @@ search_loop_encodeSnappyBetterBlockAsm64K:
MOVL 262168(SP)(R11*4), R8
MOVL CX, 24(SP)(R10*4)
MOVL CX, 262168(SP)(R11*4)
- CMPL (DX)(SI*1), DI
+ MOVQ (DX)(SI*1), R10
+ MOVQ (DX)(R8*1), R11
+ CMPQ R10, DI
JEQ candidate_match_encodeSnappyBetterBlockAsm64K
- CMPL (DX)(R8*1), DI
- JEQ candidateS_match_encodeSnappyBetterBlockAsm64K
- MOVL 20(SP), CX
- JMP search_loop_encodeSnappyBetterBlockAsm64K
+ CMPQ R11, DI
+ JNE no_short_found_encodeSnappyBetterBlockAsm64K
+ MOVL R8, SI
+ JMP candidate_match_encodeSnappyBetterBlockAsm64K
+
+no_short_found_encodeSnappyBetterBlockAsm64K:
+ CMPL R10, DI
+ JEQ candidate_match_encodeSnappyBetterBlockAsm64K
+ CMPL R11, DI
+ JEQ candidateS_match_encodeSnappyBetterBlockAsm64K
+ MOVL 20(SP), CX
+ JMP search_loop_encodeSnappyBetterBlockAsm64K
candidateS_match_encodeSnappyBetterBlockAsm64K:
SHRQ $0x08, DI
@@ -13200,35 +15074,57 @@ match_dst_size_check_encodeSnappyBetterBlockAsm64K:
// matchLen
XORL R12, R12
CMPL R8, $0x08
- JL matchlen_single_match_nolit_encodeSnappyBetterBlockAsm64K
+ JL matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm64K
matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm64K:
MOVQ (R9)(R12*1), R11
XORQ (R10)(R12*1), R11
TESTQ R11, R11
JZ matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm64K
- BSFQ R11, R11
- SARQ $0x03, R11
- LEAL (R12)(R11*1), R12
- JMP match_nolit_end_encodeSnappyBetterBlockAsm64K
+
+#ifdef GOAMD64_v3
+ TZCNTQ R11, R11
+
+#else
+ BSFQ R11, R11
+
+#endif
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
+ JMP match_nolit_end_encodeSnappyBetterBlockAsm64K
matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm64K:
LEAL -8(R8), R8
LEAL 8(R12), R12
CMPL R8, $0x08
JGE matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm64K
-
-matchlen_single_match_nolit_encodeSnappyBetterBlockAsm64K:
- TESTL R8, R8
- JZ match_nolit_end_encodeSnappyBetterBlockAsm64K
-
-matchlen_single_loopback_match_nolit_encodeSnappyBetterBlockAsm64K:
+ JZ match_nolit_end_encodeSnappyBetterBlockAsm64K
+
+matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm64K:
+ CMPL R8, $0x04
+ JL matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm64K
+ MOVL (R9)(R12*1), R11
+ CMPL (R10)(R12*1), R11
+ JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm64K
+ SUBL $0x04, R8
+ LEAL 4(R12), R12
+
+matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm64K:
+ CMPL R8, $0x02
+ JL matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm64K
+ MOVW (R9)(R12*1), R11
+ CMPW (R10)(R12*1), R11
+ JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm64K
+ SUBL $0x02, R8
+ LEAL 2(R12), R12
+
+matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm64K:
+ CMPL R8, $0x01
+ JL match_nolit_end_encodeSnappyBetterBlockAsm64K
MOVB (R9)(R12*1), R11
CMPB (R10)(R12*1), R11
JNE match_nolit_end_encodeSnappyBetterBlockAsm64K
LEAL 1(R12), R12
- DECL R8
- JNZ matchlen_single_loopback_match_nolit_encodeSnappyBetterBlockAsm64K
match_nolit_end_encodeSnappyBetterBlockAsm64K:
MOVL CX, R8
@@ -13403,52 +15299,49 @@ match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm64K:
match_nolit_dst_ok_encodeSnappyBetterBlockAsm64K:
MOVQ $0x00cf1bbcdcbfa563, SI
MOVQ $0x9e3779b1, R8
- INCL DI
- MOVQ (DX)(DI*1), R9
- MOVQ R9, R10
- MOVQ R9, R11
- MOVQ R9, R12
- SHRQ $0x08, R11
- MOVQ R11, R13
- SHRQ $0x10, R12
- LEAL 1(DI), R14
- LEAL 2(DI), R15
- MOVQ -2(DX)(CX*1), R9
+ LEAQ 1(DI), DI
+ LEAQ -2(CX), R9
+ MOVQ (DX)(DI*1), R10
+ MOVQ 1(DX)(DI*1), R11
+ MOVQ (DX)(R9*1), R12
+ MOVQ 1(DX)(R9*1), R13
SHLQ $0x08, R10
IMULQ SI, R10
SHRQ $0x30, R10
- SHLQ $0x08, R13
- IMULQ SI, R13
- SHRQ $0x30, R13
SHLQ $0x20, R11
IMULQ R8, R11
SHRQ $0x32, R11
- SHLQ $0x20, R12
- IMULQ R8, R12
- SHRQ $0x32, R12
+ SHLQ $0x08, R12
+ IMULQ SI, R12
+ SHRQ $0x30, R12
+ SHLQ $0x20, R13
+ IMULQ R8, R13
+ SHRQ $0x32, R13
+ LEAQ 1(DI), R8
+ LEAQ 1(R9), R14
MOVL DI, 24(SP)(R10*4)
- MOVL R14, 24(SP)(R13*4)
- MOVL R14, 262168(SP)(R11*4)
- MOVL R15, 262168(SP)(R12*4)
- MOVQ R9, R10
- MOVQ R9, R11
- SHRQ $0x08, R11
- MOVQ R11, R13
- LEAL -2(CX), R9
- LEAL -1(CX), DI
+ MOVL R9, 24(SP)(R12*4)
+ MOVL R8, 262168(SP)(R11*4)
+ MOVL R14, 262168(SP)(R13*4)
+ ADDQ $0x01, DI
+ SUBQ $0x01, R9
+
+index_loop_encodeSnappyBetterBlockAsm64K:
+ CMPQ DI, R9
+ JAE search_loop_encodeSnappyBetterBlockAsm64K
+ MOVQ (DX)(DI*1), R8
+ MOVQ (DX)(R9*1), R10
+ SHLQ $0x08, R8
+ IMULQ SI, R8
+ SHRQ $0x30, R8
SHLQ $0x08, R10
IMULQ SI, R10
SHRQ $0x30, R10
- SHLQ $0x20, R11
- IMULQ R8, R11
- SHRQ $0x32, R11
- SHLQ $0x08, R13
- IMULQ SI, R13
- SHRQ $0x30, R13
+ MOVL DI, 24(SP)(R8*4)
MOVL R9, 24(SP)(R10*4)
- MOVL DI, 262168(SP)(R11*4)
- MOVL DI, 24(SP)(R13*4)
- JMP search_loop_encodeSnappyBetterBlockAsm64K
+ ADDQ $0x02, DI
+ SUBQ $0x02, R9
+ JMP index_loop_encodeSnappyBetterBlockAsm64K
emit_remainder_encodeSnappyBetterBlockAsm64K:
MOVQ src_len+32(FP), CX
@@ -13496,17 +15389,36 @@ memmove_emit_remainder_encodeSnappyBetterBlockAsm64K:
MOVL SI, BX
// genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_3
CMPQ BX, $0x08
- JLE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_8
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_4through7
CMPQ BX, $0x10
JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_8through16
CMPQ BX, $0x20
JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_17through32
JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_33through64
-emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_8:
- MOVQ (CX), SI
- MOVQ SI, (AX)
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_8through16:
@@ -13589,7 +15501,7 @@ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm64K:
RET
// func encodeSnappyBetterBlockAsm12B(dst []byte, src []byte) int
-// Requires: SSE2
+// Requires: BMI, SSE2
TEXT ·encodeSnappyBetterBlockAsm12B(SB), $81944-56
MOVQ dst_base+0(FP), AX
MOVQ $0x00000280, CX
@@ -13644,12 +15556,22 @@ search_loop_encodeSnappyBetterBlockAsm12B:
MOVL 65560(SP)(R11*4), R8
MOVL CX, 24(SP)(R10*4)
MOVL CX, 65560(SP)(R11*4)
- CMPL (DX)(SI*1), DI
+ MOVQ (DX)(SI*1), R10
+ MOVQ (DX)(R8*1), R11
+ CMPQ R10, DI
JEQ candidate_match_encodeSnappyBetterBlockAsm12B
- CMPL (DX)(R8*1), DI
- JEQ candidateS_match_encodeSnappyBetterBlockAsm12B
- MOVL 20(SP), CX
- JMP search_loop_encodeSnappyBetterBlockAsm12B
+ CMPQ R11, DI
+ JNE no_short_found_encodeSnappyBetterBlockAsm12B
+ MOVL R8, SI
+ JMP candidate_match_encodeSnappyBetterBlockAsm12B
+
+no_short_found_encodeSnappyBetterBlockAsm12B:
+ CMPL R10, DI
+ JEQ candidate_match_encodeSnappyBetterBlockAsm12B
+ CMPL R11, DI
+ JEQ candidateS_match_encodeSnappyBetterBlockAsm12B
+ MOVL 20(SP), CX
+ JMP search_loop_encodeSnappyBetterBlockAsm12B
candidateS_match_encodeSnappyBetterBlockAsm12B:
SHRQ $0x08, DI
@@ -13703,35 +15625,57 @@ match_dst_size_check_encodeSnappyBetterBlockAsm12B:
// matchLen
XORL R12, R12
CMPL R8, $0x08
- JL matchlen_single_match_nolit_encodeSnappyBetterBlockAsm12B
+ JL matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm12B
matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm12B:
MOVQ (R9)(R12*1), R11
XORQ (R10)(R12*1), R11
TESTQ R11, R11
JZ matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm12B
- BSFQ R11, R11
- SARQ $0x03, R11
- LEAL (R12)(R11*1), R12
- JMP match_nolit_end_encodeSnappyBetterBlockAsm12B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R11, R11
+
+#else
+ BSFQ R11, R11
+
+#endif
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
+ JMP match_nolit_end_encodeSnappyBetterBlockAsm12B
matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm12B:
LEAL -8(R8), R8
LEAL 8(R12), R12
CMPL R8, $0x08
JGE matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm12B
-
-matchlen_single_match_nolit_encodeSnappyBetterBlockAsm12B:
- TESTL R8, R8
- JZ match_nolit_end_encodeSnappyBetterBlockAsm12B
-
-matchlen_single_loopback_match_nolit_encodeSnappyBetterBlockAsm12B:
+ JZ match_nolit_end_encodeSnappyBetterBlockAsm12B
+
+matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm12B:
+ CMPL R8, $0x04
+ JL matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm12B
+ MOVL (R9)(R12*1), R11
+ CMPL (R10)(R12*1), R11
+ JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm12B
+ SUBL $0x04, R8
+ LEAL 4(R12), R12
+
+matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm12B:
+ CMPL R8, $0x02
+ JL matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm12B
+ MOVW (R9)(R12*1), R11
+ CMPW (R10)(R12*1), R11
+ JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm12B
+ SUBL $0x02, R8
+ LEAL 2(R12), R12
+
+matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm12B:
+ CMPL R8, $0x01
+ JL match_nolit_end_encodeSnappyBetterBlockAsm12B
MOVB (R9)(R12*1), R11
CMPB (R10)(R12*1), R11
JNE match_nolit_end_encodeSnappyBetterBlockAsm12B
LEAL 1(R12), R12
- DECL R8
- JNZ matchlen_single_loopback_match_nolit_encodeSnappyBetterBlockAsm12B
match_nolit_end_encodeSnappyBetterBlockAsm12B:
MOVL CX, R8
@@ -13906,52 +15850,49 @@ match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm12B:
match_nolit_dst_ok_encodeSnappyBetterBlockAsm12B:
MOVQ $0x0000cf1bbcdcbf9b, SI
MOVQ $0x9e3779b1, R8
- INCL DI
- MOVQ (DX)(DI*1), R9
- MOVQ R9, R10
- MOVQ R9, R11
- MOVQ R9, R12
- SHRQ $0x08, R11
- MOVQ R11, R13
- SHRQ $0x10, R12
- LEAL 1(DI), R14
- LEAL 2(DI), R15
- MOVQ -2(DX)(CX*1), R9
+ LEAQ 1(DI), DI
+ LEAQ -2(CX), R9
+ MOVQ (DX)(DI*1), R10
+ MOVQ 1(DX)(DI*1), R11
+ MOVQ (DX)(R9*1), R12
+ MOVQ 1(DX)(R9*1), R13
SHLQ $0x10, R10
IMULQ SI, R10
SHRQ $0x32, R10
- SHLQ $0x10, R13
- IMULQ SI, R13
- SHRQ $0x32, R13
SHLQ $0x20, R11
IMULQ R8, R11
SHRQ $0x34, R11
- SHLQ $0x20, R12
- IMULQ R8, R12
- SHRQ $0x34, R12
+ SHLQ $0x10, R12
+ IMULQ SI, R12
+ SHRQ $0x32, R12
+ SHLQ $0x20, R13
+ IMULQ R8, R13
+ SHRQ $0x34, R13
+ LEAQ 1(DI), R8
+ LEAQ 1(R9), R14
MOVL DI, 24(SP)(R10*4)
- MOVL R14, 24(SP)(R13*4)
- MOVL R14, 65560(SP)(R11*4)
- MOVL R15, 65560(SP)(R12*4)
- MOVQ R9, R10
- MOVQ R9, R11
- SHRQ $0x08, R11
- MOVQ R11, R13
- LEAL -2(CX), R9
- LEAL -1(CX), DI
+ MOVL R9, 24(SP)(R12*4)
+ MOVL R8, 65560(SP)(R11*4)
+ MOVL R14, 65560(SP)(R13*4)
+ ADDQ $0x01, DI
+ SUBQ $0x01, R9
+
+index_loop_encodeSnappyBetterBlockAsm12B:
+ CMPQ DI, R9
+ JAE search_loop_encodeSnappyBetterBlockAsm12B
+ MOVQ (DX)(DI*1), R8
+ MOVQ (DX)(R9*1), R10
+ SHLQ $0x10, R8
+ IMULQ SI, R8
+ SHRQ $0x32, R8
SHLQ $0x10, R10
IMULQ SI, R10
SHRQ $0x32, R10
- SHLQ $0x20, R11
- IMULQ R8, R11
- SHRQ $0x34, R11
- SHLQ $0x10, R13
- IMULQ SI, R13
- SHRQ $0x32, R13
+ MOVL DI, 24(SP)(R8*4)
MOVL R9, 24(SP)(R10*4)
- MOVL DI, 65560(SP)(R11*4)
- MOVL DI, 24(SP)(R13*4)
- JMP search_loop_encodeSnappyBetterBlockAsm12B
+ ADDQ $0x02, DI
+ SUBQ $0x02, R9
+ JMP index_loop_encodeSnappyBetterBlockAsm12B
emit_remainder_encodeSnappyBetterBlockAsm12B:
MOVQ src_len+32(FP), CX
@@ -13999,17 +15940,36 @@ memmove_emit_remainder_encodeSnappyBetterBlockAsm12B:
MOVL SI, BX
// genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_3
CMPQ BX, $0x08
- JLE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_8
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_4through7
CMPQ BX, $0x10
JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_8through16
CMPQ BX, $0x20
JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_17through32
JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_33through64
-emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_8:
- MOVQ (CX), SI
- MOVQ SI, (AX)
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_8through16:
@@ -14092,7 +16052,7 @@ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm12B:
RET
// func encodeSnappyBetterBlockAsm10B(dst []byte, src []byte) int
-// Requires: SSE2
+// Requires: BMI, SSE2
TEXT ·encodeSnappyBetterBlockAsm10B(SB), $20504-56
MOVQ dst_base+0(FP), AX
MOVQ $0x000000a0, CX
@@ -14147,12 +16107,22 @@ search_loop_encodeSnappyBetterBlockAsm10B:
MOVL 16408(SP)(R11*4), R8
MOVL CX, 24(SP)(R10*4)
MOVL CX, 16408(SP)(R11*4)
- CMPL (DX)(SI*1), DI
+ MOVQ (DX)(SI*1), R10
+ MOVQ (DX)(R8*1), R11
+ CMPQ R10, DI
JEQ candidate_match_encodeSnappyBetterBlockAsm10B
- CMPL (DX)(R8*1), DI
- JEQ candidateS_match_encodeSnappyBetterBlockAsm10B
- MOVL 20(SP), CX
- JMP search_loop_encodeSnappyBetterBlockAsm10B
+ CMPQ R11, DI
+ JNE no_short_found_encodeSnappyBetterBlockAsm10B
+ MOVL R8, SI
+ JMP candidate_match_encodeSnappyBetterBlockAsm10B
+
+no_short_found_encodeSnappyBetterBlockAsm10B:
+ CMPL R10, DI
+ JEQ candidate_match_encodeSnappyBetterBlockAsm10B
+ CMPL R11, DI
+ JEQ candidateS_match_encodeSnappyBetterBlockAsm10B
+ MOVL 20(SP), CX
+ JMP search_loop_encodeSnappyBetterBlockAsm10B
candidateS_match_encodeSnappyBetterBlockAsm10B:
SHRQ $0x08, DI
@@ -14206,35 +16176,57 @@ match_dst_size_check_encodeSnappyBetterBlockAsm10B:
// matchLen
XORL R12, R12
CMPL R8, $0x08
- JL matchlen_single_match_nolit_encodeSnappyBetterBlockAsm10B
+ JL matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm10B
matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm10B:
MOVQ (R9)(R12*1), R11
XORQ (R10)(R12*1), R11
TESTQ R11, R11
JZ matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm10B
- BSFQ R11, R11
- SARQ $0x03, R11
- LEAL (R12)(R11*1), R12
- JMP match_nolit_end_encodeSnappyBetterBlockAsm10B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R11, R11
+
+#else
+ BSFQ R11, R11
+
+#endif
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
+ JMP match_nolit_end_encodeSnappyBetterBlockAsm10B
matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm10B:
LEAL -8(R8), R8
LEAL 8(R12), R12
CMPL R8, $0x08
JGE matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm10B
-
-matchlen_single_match_nolit_encodeSnappyBetterBlockAsm10B:
- TESTL R8, R8
- JZ match_nolit_end_encodeSnappyBetterBlockAsm10B
-
-matchlen_single_loopback_match_nolit_encodeSnappyBetterBlockAsm10B:
+ JZ match_nolit_end_encodeSnappyBetterBlockAsm10B
+
+matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm10B:
+ CMPL R8, $0x04
+ JL matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm10B
+ MOVL (R9)(R12*1), R11
+ CMPL (R10)(R12*1), R11
+ JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm10B
+ SUBL $0x04, R8
+ LEAL 4(R12), R12
+
+matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm10B:
+ CMPL R8, $0x02
+ JL matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm10B
+ MOVW (R9)(R12*1), R11
+ CMPW (R10)(R12*1), R11
+ JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm10B
+ SUBL $0x02, R8
+ LEAL 2(R12), R12
+
+matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm10B:
+ CMPL R8, $0x01
+ JL match_nolit_end_encodeSnappyBetterBlockAsm10B
MOVB (R9)(R12*1), R11
CMPB (R10)(R12*1), R11
JNE match_nolit_end_encodeSnappyBetterBlockAsm10B
LEAL 1(R12), R12
- DECL R8
- JNZ matchlen_single_loopback_match_nolit_encodeSnappyBetterBlockAsm10B
match_nolit_end_encodeSnappyBetterBlockAsm10B:
MOVL CX, R8
@@ -14409,52 +16401,49 @@ match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm10B:
match_nolit_dst_ok_encodeSnappyBetterBlockAsm10B:
MOVQ $0x0000cf1bbcdcbf9b, SI
MOVQ $0x9e3779b1, R8
- INCL DI
- MOVQ (DX)(DI*1), R9
- MOVQ R9, R10
- MOVQ R9, R11
- MOVQ R9, R12
- SHRQ $0x08, R11
- MOVQ R11, R13
- SHRQ $0x10, R12
- LEAL 1(DI), R14
- LEAL 2(DI), R15
- MOVQ -2(DX)(CX*1), R9
+ LEAQ 1(DI), DI
+ LEAQ -2(CX), R9
+ MOVQ (DX)(DI*1), R10
+ MOVQ 1(DX)(DI*1), R11
+ MOVQ (DX)(R9*1), R12
+ MOVQ 1(DX)(R9*1), R13
SHLQ $0x10, R10
IMULQ SI, R10
SHRQ $0x34, R10
- SHLQ $0x10, R13
- IMULQ SI, R13
- SHRQ $0x34, R13
SHLQ $0x20, R11
IMULQ R8, R11
SHRQ $0x36, R11
- SHLQ $0x20, R12
- IMULQ R8, R12
- SHRQ $0x36, R12
+ SHLQ $0x10, R12
+ IMULQ SI, R12
+ SHRQ $0x34, R12
+ SHLQ $0x20, R13
+ IMULQ R8, R13
+ SHRQ $0x36, R13
+ LEAQ 1(DI), R8
+ LEAQ 1(R9), R14
MOVL DI, 24(SP)(R10*4)
- MOVL R14, 24(SP)(R13*4)
- MOVL R14, 16408(SP)(R11*4)
- MOVL R15, 16408(SP)(R12*4)
- MOVQ R9, R10
- MOVQ R9, R11
- SHRQ $0x08, R11
- MOVQ R11, R13
- LEAL -2(CX), R9
- LEAL -1(CX), DI
+ MOVL R9, 24(SP)(R12*4)
+ MOVL R8, 16408(SP)(R11*4)
+ MOVL R14, 16408(SP)(R13*4)
+ ADDQ $0x01, DI
+ SUBQ $0x01, R9
+
+index_loop_encodeSnappyBetterBlockAsm10B:
+ CMPQ DI, R9
+ JAE search_loop_encodeSnappyBetterBlockAsm10B
+ MOVQ (DX)(DI*1), R8
+ MOVQ (DX)(R9*1), R10
+ SHLQ $0x10, R8
+ IMULQ SI, R8
+ SHRQ $0x34, R8
SHLQ $0x10, R10
IMULQ SI, R10
SHRQ $0x34, R10
- SHLQ $0x20, R11
- IMULQ R8, R11
- SHRQ $0x36, R11
- SHLQ $0x10, R13
- IMULQ SI, R13
- SHRQ $0x34, R13
+ MOVL DI, 24(SP)(R8*4)
MOVL R9, 24(SP)(R10*4)
- MOVL DI, 16408(SP)(R11*4)
- MOVL DI, 24(SP)(R13*4)
- JMP search_loop_encodeSnappyBetterBlockAsm10B
+ ADDQ $0x02, DI
+ SUBQ $0x02, R9
+ JMP index_loop_encodeSnappyBetterBlockAsm10B
emit_remainder_encodeSnappyBetterBlockAsm10B:
MOVQ src_len+32(FP), CX
@@ -14502,17 +16491,36 @@ memmove_emit_remainder_encodeSnappyBetterBlockAsm10B:
MOVL SI, BX
// genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_3
CMPQ BX, $0x08
- JLE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_8
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_4through7
CMPQ BX, $0x10
JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_8through16
CMPQ BX, $0x20
JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_17through32
JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_33through64
-emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_8:
- MOVQ (CX), SI
- MOVQ SI, (AX)
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_8through16:
@@ -14595,7 +16603,7 @@ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm10B:
RET
// func encodeSnappyBetterBlockAsm8B(dst []byte, src []byte) int
-// Requires: SSE2
+// Requires: BMI, SSE2
TEXT ·encodeSnappyBetterBlockAsm8B(SB), $5144-56
MOVQ dst_base+0(FP), AX
MOVQ $0x00000028, CX
@@ -14650,12 +16658,22 @@ search_loop_encodeSnappyBetterBlockAsm8B:
MOVL 4120(SP)(R11*4), R8
MOVL CX, 24(SP)(R10*4)
MOVL CX, 4120(SP)(R11*4)
- CMPL (DX)(SI*1), DI
+ MOVQ (DX)(SI*1), R10
+ MOVQ (DX)(R8*1), R11
+ CMPQ R10, DI
JEQ candidate_match_encodeSnappyBetterBlockAsm8B
- CMPL (DX)(R8*1), DI
- JEQ candidateS_match_encodeSnappyBetterBlockAsm8B
- MOVL 20(SP), CX
- JMP search_loop_encodeSnappyBetterBlockAsm8B
+ CMPQ R11, DI
+ JNE no_short_found_encodeSnappyBetterBlockAsm8B
+ MOVL R8, SI
+ JMP candidate_match_encodeSnappyBetterBlockAsm8B
+
+no_short_found_encodeSnappyBetterBlockAsm8B:
+ CMPL R10, DI
+ JEQ candidate_match_encodeSnappyBetterBlockAsm8B
+ CMPL R11, DI
+ JEQ candidateS_match_encodeSnappyBetterBlockAsm8B
+ MOVL 20(SP), CX
+ JMP search_loop_encodeSnappyBetterBlockAsm8B
candidateS_match_encodeSnappyBetterBlockAsm8B:
SHRQ $0x08, DI
@@ -14709,35 +16727,57 @@ match_dst_size_check_encodeSnappyBetterBlockAsm8B:
// matchLen
XORL R12, R12
CMPL R8, $0x08
- JL matchlen_single_match_nolit_encodeSnappyBetterBlockAsm8B
+ JL matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm8B
matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm8B:
MOVQ (R9)(R12*1), R11
XORQ (R10)(R12*1), R11
TESTQ R11, R11
JZ matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm8B
- BSFQ R11, R11
- SARQ $0x03, R11
- LEAL (R12)(R11*1), R12
- JMP match_nolit_end_encodeSnappyBetterBlockAsm8B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R11, R11
+
+#else
+ BSFQ R11, R11
+
+#endif
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
+ JMP match_nolit_end_encodeSnappyBetterBlockAsm8B
matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm8B:
LEAL -8(R8), R8
LEAL 8(R12), R12
CMPL R8, $0x08
JGE matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm8B
-
-matchlen_single_match_nolit_encodeSnappyBetterBlockAsm8B:
- TESTL R8, R8
- JZ match_nolit_end_encodeSnappyBetterBlockAsm8B
-
-matchlen_single_loopback_match_nolit_encodeSnappyBetterBlockAsm8B:
+ JZ match_nolit_end_encodeSnappyBetterBlockAsm8B
+
+matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm8B:
+ CMPL R8, $0x04
+ JL matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm8B
+ MOVL (R9)(R12*1), R11
+ CMPL (R10)(R12*1), R11
+ JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm8B
+ SUBL $0x04, R8
+ LEAL 4(R12), R12
+
+matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm8B:
+ CMPL R8, $0x02
+ JL matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm8B
+ MOVW (R9)(R12*1), R11
+ CMPW (R10)(R12*1), R11
+ JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm8B
+ SUBL $0x02, R8
+ LEAL 2(R12), R12
+
+matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm8B:
+ CMPL R8, $0x01
+ JL match_nolit_end_encodeSnappyBetterBlockAsm8B
MOVB (R9)(R12*1), R11
CMPB (R10)(R12*1), R11
JNE match_nolit_end_encodeSnappyBetterBlockAsm8B
LEAL 1(R12), R12
- DECL R8
- JNZ matchlen_single_loopback_match_nolit_encodeSnappyBetterBlockAsm8B
match_nolit_end_encodeSnappyBetterBlockAsm8B:
MOVL CX, R8
@@ -14910,52 +16950,49 @@ match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm8B:
match_nolit_dst_ok_encodeSnappyBetterBlockAsm8B:
MOVQ $0x0000cf1bbcdcbf9b, SI
MOVQ $0x9e3779b1, R8
- INCL DI
- MOVQ (DX)(DI*1), R9
- MOVQ R9, R10
- MOVQ R9, R11
- MOVQ R9, R12
- SHRQ $0x08, R11
- MOVQ R11, R13
- SHRQ $0x10, R12
- LEAL 1(DI), R14
- LEAL 2(DI), R15
- MOVQ -2(DX)(CX*1), R9
+ LEAQ 1(DI), DI
+ LEAQ -2(CX), R9
+ MOVQ (DX)(DI*1), R10
+ MOVQ 1(DX)(DI*1), R11
+ MOVQ (DX)(R9*1), R12
+ MOVQ 1(DX)(R9*1), R13
SHLQ $0x10, R10
IMULQ SI, R10
SHRQ $0x36, R10
- SHLQ $0x10, R13
- IMULQ SI, R13
- SHRQ $0x36, R13
SHLQ $0x20, R11
IMULQ R8, R11
SHRQ $0x38, R11
- SHLQ $0x20, R12
- IMULQ R8, R12
- SHRQ $0x38, R12
+ SHLQ $0x10, R12
+ IMULQ SI, R12
+ SHRQ $0x36, R12
+ SHLQ $0x20, R13
+ IMULQ R8, R13
+ SHRQ $0x38, R13
+ LEAQ 1(DI), R8
+ LEAQ 1(R9), R14
MOVL DI, 24(SP)(R10*4)
- MOVL R14, 24(SP)(R13*4)
- MOVL R14, 4120(SP)(R11*4)
- MOVL R15, 4120(SP)(R12*4)
- MOVQ R9, R10
- MOVQ R9, R11
- SHRQ $0x08, R11
- MOVQ R11, R13
- LEAL -2(CX), R9
- LEAL -1(CX), DI
+ MOVL R9, 24(SP)(R12*4)
+ MOVL R8, 4120(SP)(R11*4)
+ MOVL R14, 4120(SP)(R13*4)
+ ADDQ $0x01, DI
+ SUBQ $0x01, R9
+
+index_loop_encodeSnappyBetterBlockAsm8B:
+ CMPQ DI, R9
+ JAE search_loop_encodeSnappyBetterBlockAsm8B
+ MOVQ (DX)(DI*1), R8
+ MOVQ (DX)(R9*1), R10
+ SHLQ $0x10, R8
+ IMULQ SI, R8
+ SHRQ $0x36, R8
SHLQ $0x10, R10
IMULQ SI, R10
SHRQ $0x36, R10
- SHLQ $0x20, R11
- IMULQ R8, R11
- SHRQ $0x38, R11
- SHLQ $0x10, R13
- IMULQ SI, R13
- SHRQ $0x36, R13
+ MOVL DI, 24(SP)(R8*4)
MOVL R9, 24(SP)(R10*4)
- MOVL DI, 4120(SP)(R11*4)
- MOVL DI, 24(SP)(R13*4)
- JMP search_loop_encodeSnappyBetterBlockAsm8B
+ ADDQ $0x02, DI
+ SUBQ $0x02, R9
+ JMP index_loop_encodeSnappyBetterBlockAsm8B
emit_remainder_encodeSnappyBetterBlockAsm8B:
MOVQ src_len+32(FP), CX
@@ -15003,17 +17040,36 @@ memmove_emit_remainder_encodeSnappyBetterBlockAsm8B:
MOVL SI, BX
// genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_3
CMPQ BX, $0x08
- JLE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_8
+ JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_4through7
CMPQ BX, $0x10
JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_8through16
CMPQ BX, $0x20
JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_17through32
JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_33through64
-emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_8:
- MOVQ (CX), SI
- MOVQ SI, (AX)
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_1or2:
+ MOVB (CX), SI
+ MOVB -1(CX)(BX*1), CL
+ MOVB SI, (AX)
+ MOVB CL, -1(AX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_3:
+ MOVW (CX), SI
+ MOVB 2(CX), CL
+ MOVW SI, (AX)
+ MOVB CL, 2(AX)
+ JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_4through7:
+ MOVL (CX), SI
+ MOVL -4(CX)(BX*1), CX
+ MOVL SI, (AX)
+ MOVL CX, -4(AX)(BX*1)
JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_8through16:
@@ -15454,6 +17510,97 @@ four_bytes_remain_standalone:
two_byte_offset_standalone:
CMPL DX, $0x40
JLE two_byte_offset_short_standalone
+ CMPL CX, $0x00000800
+ JAE long_offset_short_standalone
+ MOVL $0x00000001, SI
+ LEAL 16(SI), SI
+ MOVB CL, 1(AX)
+ MOVL CX, DI
+ SHRL $0x08, DI
+ SHLL $0x05, DI
+ ORL DI, SI
+ MOVB SI, (AX)
+ ADDQ $0x02, BX
+ ADDQ $0x02, AX
+ SUBL $0x08, DX
+
+ // emitRepeat
+ LEAL -4(DX), DX
+ JMP cant_repeat_two_offset_standalone_emit_copy_short_2b
+
+emit_repeat_again_standalone_emit_copy_short_2b:
+ MOVL DX, SI
+ LEAL -4(DX), DX
+ CMPL SI, $0x08
+ JLE repeat_two_standalone_emit_copy_short_2b
+ CMPL SI, $0x0c
+ JGE cant_repeat_two_offset_standalone_emit_copy_short_2b
+ CMPL CX, $0x00000800
+ JLT repeat_two_offset_standalone_emit_copy_short_2b
+
+cant_repeat_two_offset_standalone_emit_copy_short_2b:
+ CMPL DX, $0x00000104
+ JLT repeat_three_standalone_emit_copy_short_2b
+ CMPL DX, $0x00010100
+ JLT repeat_four_standalone_emit_copy_short_2b
+ CMPL DX, $0x0100ffff
+ JLT repeat_five_standalone_emit_copy_short_2b
+ LEAL -16842747(DX), DX
+ MOVW $0x001d, (AX)
+ MOVW $0xfffb, 2(AX)
+ MOVB $0xff, 4(AX)
+ ADDQ $0x05, AX
+ ADDQ $0x05, BX
+ JMP emit_repeat_again_standalone_emit_copy_short_2b
+
+repeat_five_standalone_emit_copy_short_2b:
+ LEAL -65536(DX), DX
+ MOVL DX, CX
+ MOVW $0x001d, (AX)
+ MOVW DX, 2(AX)
+ SARL $0x10, CX
+ MOVB CL, 4(AX)
+ ADDQ $0x05, BX
+ ADDQ $0x05, AX
+ JMP gen_emit_copy_end
+
+repeat_four_standalone_emit_copy_short_2b:
+ LEAL -256(DX), DX
+ MOVW $0x0019, (AX)
+ MOVW DX, 2(AX)
+ ADDQ $0x04, BX
+ ADDQ $0x04, AX
+ JMP gen_emit_copy_end
+
+repeat_three_standalone_emit_copy_short_2b:
+ LEAL -4(DX), DX
+ MOVW $0x0015, (AX)
+ MOVB DL, 2(AX)
+ ADDQ $0x03, BX
+ ADDQ $0x03, AX
+ JMP gen_emit_copy_end
+
+repeat_two_standalone_emit_copy_short_2b:
+ SHLL $0x02, DX
+ ORL $0x01, DX
+ MOVW DX, (AX)
+ ADDQ $0x02, BX
+ ADDQ $0x02, AX
+ JMP gen_emit_copy_end
+
+repeat_two_offset_standalone_emit_copy_short_2b:
+ XORQ SI, SI
+ LEAL 1(SI)(DX*4), DX
+ MOVB CL, 1(AX)
+ SARL $0x08, CX
+ SHLL $0x05, CX
+ ORL CX, DX
+ MOVB DL, (AX)
+ ADDQ $0x02, BX
+ ADDQ $0x02, AX
+ JMP gen_emit_copy_end
+
+long_offset_short_standalone:
MOVB $0xee, (AX)
MOVW CX, 1(AX)
LEAL -60(DX), DX
@@ -15635,6 +17782,7 @@ gen_emit_copy_end_snappy:
RET
// func matchLen(a []byte, b []byte) int
+// Requires: BMI
TEXT ·matchLen(SB), NOSPLIT, $0-56
MOVQ a_base+0(FP), AX
MOVQ b_base+24(FP), CX
@@ -15643,35 +17791,57 @@ TEXT ·matchLen(SB), NOSPLIT, $0-56
// matchLen
XORL SI, SI
CMPL DX, $0x08
- JL matchlen_single_standalone
+ JL matchlen_match4_standalone
matchlen_loopback_standalone:
MOVQ (AX)(SI*1), BX
XORQ (CX)(SI*1), BX
TESTQ BX, BX
JZ matchlen_loop_standalone
- BSFQ BX, BX
- SARQ $0x03, BX
- LEAL (SI)(BX*1), SI
- JMP gen_match_len_end
+
+#ifdef GOAMD64_v3
+ TZCNTQ BX, BX
+
+#else
+ BSFQ BX, BX
+
+#endif
+ SARQ $0x03, BX
+ LEAL (SI)(BX*1), SI
+ JMP gen_match_len_end
matchlen_loop_standalone:
LEAL -8(DX), DX
LEAL 8(SI), SI
CMPL DX, $0x08
JGE matchlen_loopback_standalone
+ JZ gen_match_len_end
-matchlen_single_standalone:
- TESTL DX, DX
- JZ gen_match_len_end
-
-matchlen_single_loopback_standalone:
+matchlen_match4_standalone:
+ CMPL DX, $0x04
+ JL matchlen_match2_standalone
+ MOVL (AX)(SI*1), BX
+ CMPL (CX)(SI*1), BX
+ JNE matchlen_match2_standalone
+ SUBL $0x04, DX
+ LEAL 4(SI), SI
+
+matchlen_match2_standalone:
+ CMPL DX, $0x02
+ JL matchlen_match1_standalone
+ MOVW (AX)(SI*1), BX
+ CMPW (CX)(SI*1), BX
+ JNE matchlen_match1_standalone
+ SUBL $0x02, DX
+ LEAL 2(SI), SI
+
+matchlen_match1_standalone:
+ CMPL DX, $0x01
+ JL gen_match_len_end
MOVB (AX)(SI*1), BL
CMPB (CX)(SI*1), BL
JNE gen_match_len_end
LEAL 1(SI), SI
- DECL DX
- JNZ matchlen_single_loopback_standalone
gen_match_len_end:
MOVQ SI, ret+48(FP)
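
The TZCNT/BSF change repeated throughout this assembly file gates the trailing-zero count on the `GOAMD64_v3` build tag; both instructions count how many of the XORed bytes are zero, i.e. how many leading bytes of two 8-byte words match. A minimal Go sketch of the same computation (purely illustrative, not part of the package) using `math/bits`:

```go
package main

import (
	"fmt"
	"math/bits"
)

// matchLen8 mirrors the assembly fast path: XOR two 8-byte words and count
// trailing zero bits; every 8 zero bits is one matching leading byte
// (little-endian byte order).
func matchLen8(a, b uint64) int {
	x := a ^ b
	if x == 0 {
		return 8 // all 8 bytes match
	}
	return bits.TrailingZeros64(x) >> 3 // TZCNT/BSF, then SAR $3 in the assembly
}

func main() {
	// Bytes 0 and 1 match, byte 2 differs (0x33 vs 0xff), so the result is 2.
	fmt.Println(matchLen8(0x44332211, 0x44ff2211))
}
```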
diff --git a/vendor/github.com/klauspost/compress/s2/index.go b/vendor/github.com/klauspost/compress/s2/index.go
index fd857682e46d6..dd9ecfe718546 100644
--- a/vendor/github.com/klauspost/compress/s2/index.go
+++ b/vendor/github.com/klauspost/compress/s2/index.go
@@ -10,6 +10,7 @@ import (
"encoding/json"
"fmt"
"io"
+ "sort"
)
const (
@@ -100,6 +101,15 @@ func (i *Index) Find(offset int64) (compressedOff, uncompressedOff int64, err er
if offset > i.TotalUncompressed {
return 0, 0, io.ErrUnexpectedEOF
}
+ if len(i.info) > 200 {
+ n := sort.Search(len(i.info), func(n int) bool {
+ return i.info[n].uncompressedOffset > offset
+ })
+ if n == 0 {
+ n = 1
+ }
+ return i.info[n-1].compressedOffset, i.info[n-1].uncompressedOffset, nil
+ }
for _, info := range i.info {
if info.uncompressedOffset > offset {
break
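
The added fast path switches `Index.Find` from a linear scan to `sort.Search` once the offset table holds more than 200 entries. A self-contained sketch of the same lookup pattern (illustrative names, not the s2 API; it assumes a non-empty table sorted by uncompressed offset):

```go
package main

import (
	"fmt"
	"sort"
)

type offsetPair struct {
	compressedOffset   int64
	uncompressedOffset int64
}

// find returns the last entry whose uncompressed offset is <= offset,
// mirroring the binary-search-with-linear-fallback above.
func find(info []offsetPair, offset int64) offsetPair {
	if len(info) > 200 {
		n := sort.Search(len(info), func(n int) bool {
			return info[n].uncompressedOffset > offset
		})
		if n == 0 {
			n = 1
		}
		return info[n-1]
	}
	best := info[0]
	for _, e := range info {
		if e.uncompressedOffset > offset {
			break
		}
		best = e
	}
	return best
}

func main() {
	table := []offsetPair{{0, 0}, {100, 4096}, {220, 8192}}
	fmt.Println(find(table, 5000)) // {100 4096}
}
```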
@@ -523,3 +533,66 @@ func (i *Index) JSON() []byte {
b, _ := json.MarshalIndent(x, "", " ")
return b
}
+
+// RemoveIndexHeaders will trim all headers and trailers from a given index.
+// This is expected to save 20 bytes.
+// These can be restored using RestoreIndexHeaders.
+// This removes a layer of security, but is the most compact representation.
+// Returns nil if the headers contain errors.
+// The returned slice references the provided slice.
+func RemoveIndexHeaders(b []byte) []byte {
+ const save = 4 + len(S2IndexHeader) + len(S2IndexTrailer) + 4
+ if len(b) <= save {
+ return nil
+ }
+ if b[0] != ChunkTypeIndex {
+ return nil
+ }
+ chunkLen := int(b[1]) | int(b[2])<<8 | int(b[3])<<16
+ b = b[4:]
+
+ // Validate we have enough...
+ if len(b) < chunkLen {
+ return nil
+ }
+ b = b[:chunkLen]
+
+ if !bytes.Equal(b[:len(S2IndexHeader)], []byte(S2IndexHeader)) {
+ return nil
+ }
+ b = b[len(S2IndexHeader):]
+ if !bytes.HasSuffix(b, []byte(S2IndexTrailer)) {
+ return nil
+ }
+ b = bytes.TrimSuffix(b, []byte(S2IndexTrailer))
+
+ if len(b) < 4 {
+ return nil
+ }
+ return b[:len(b)-4]
+}
+
+// RestoreIndexHeaders will restore index headers removed by RemoveIndexHeaders.
+// No error checking is performed on the input.
+// If a 0 length slice is sent, it is returned without modification.
+func RestoreIndexHeaders(in []byte) []byte {
+ if len(in) == 0 {
+ return in
+ }
+ b := make([]byte, 0, 4+len(S2IndexHeader)+len(in)+len(S2IndexTrailer)+4)
+ b = append(b, ChunkTypeIndex, 0, 0, 0)
+ b = append(b, []byte(S2IndexHeader)...)
+ b = append(b, in...)
+
+ var tmp [4]byte
+ binary.LittleEndian.PutUint32(tmp[:], uint32(len(b)+4+len(S2IndexTrailer)))
+ b = append(b, tmp[:4]...)
+ // Trailer
+ b = append(b, []byte(S2IndexTrailer)...)
+
+ chunkLen := len(b) - skippableFrameHeader
+ b[1] = uint8(chunkLen >> 0)
+ b[2] = uint8(chunkLen >> 8)
+ b[3] = uint8(chunkLen >> 16)
+ return b
+}
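
A hedged round-trip sketch for the two helpers added above; it assumes `raw` already holds a complete serialized index chunk (header, body and trailer) produced elsewhere, which is outside the scope of this sketch:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/s2"
)

func main() {
	var raw []byte // assumption: a full serialized index chunk obtained elsewhere

	trimmed := s2.RemoveIndexHeaders(raw)
	if trimmed == nil {
		fmt.Println("not a valid index chunk")
		return
	}
	// Store or transmit the compact form, then restore it when needed.
	restored := s2.RestoreIndexHeaders(trimmed)
	fmt.Println("round-trip intact:", bytes.Equal(raw, restored))
}
```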
diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md
index c8f0f16fc1ecd..65b38abed805e 100644
--- a/vendor/github.com/klauspost/compress/zstd/README.md
+++ b/vendor/github.com/klauspost/compress/zstd/README.md
@@ -12,6 +12,8 @@ The `zstd` package is provided as open source software using a Go standard licen
Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors.
+For seekable zstd streams, see [this excellent package](https://github.com/SaveTheRbtz/zstd-seekable-format-go).
+
## Installation
Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`.
@@ -78,6 +80,9 @@ of a stream. This is independent of the `WithEncoderConcurrency(n)`, but that is
in the future. So if you want to limit concurrency for future updates, specify the concurrency
you would like.
+If you would like stream encoding to be done without spawning async goroutines, use `WithEncoderConcurrency(1)`
+which will compress input as each block is completed, blocking on writes until each has completed.
+
You can specify your desired compression level using `WithEncoderLevel()` option. Currently only pre-defined
compression settings can be specified.
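
A minimal sketch of the synchronous stream-encoding setup mentioned in the `WithEncoderConcurrency(1)` note above; the input string and the `os.Stdout` target are only placeholders:

```go
package main

import (
	"io"
	"os"
	"strings"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Concurrency 1: input is compressed as each block completes, blocking on writes.
	enc, err := zstd.NewWriter(os.Stdout, zstd.WithEncoderConcurrency(1))
	if err != nil {
		panic(err)
	}
	if _, err := io.Copy(enc, strings.NewReader("hello zstd")); err != nil {
		panic(err)
	}
	if err := enc.Close(); err != nil {
		panic(err)
	}
}
```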
@@ -104,7 +109,8 @@ and seems to ignore concatenated streams, even though [it is part of the spec](h
For compressing small blocks, the returned encoder has a function called `EncodeAll(src, dst []byte) []byte`.
`EncodeAll` will encode all input in src and append it to dst.
-This function can be called concurrently, but each call will only run on a single goroutine.
+This function can be called concurrently.
+Each call will only run on the same goroutine as the caller.
Encoded blocks can be concatenated and the result will be the combined input stream.
Data compressed with EncodeAll can be decoded with the Decoder, using either a stream or `DecodeAll`.
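
A small sketch of the `EncodeAll`/`DecodeAll` round trip described above, reusing one encoder and one decoder with nil reader/writer for buffer-only use (same pattern as the nil-Reader example later in this README); constructor errors are ignored for brevity:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, _ := zstd.NewWriter(nil) // nil writer: EncodeAll-only usage
	dec, _ := zstd.NewReader(nil) // nil reader: DecodeAll-only usage
	defer dec.Close()
	defer enc.Close()

	compressed := enc.EncodeAll([]byte("small block"), nil)
	plain, err := dec.DecodeAll(compressed, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(plain)) // "small block"
}
```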
@@ -149,10 +155,10 @@ http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip
This package:
file out level insize outsize millis mb/s
-silesia.tar zskp 1 211947520 73101992 643 313.87
-silesia.tar zskp 2 211947520 67504318 969 208.38
-silesia.tar zskp 3 211947520 64595893 2007 100.68
-silesia.tar zskp 4 211947520 60995370 8825 22.90
+silesia.tar zskp 1 211947520 73821326 634 318.47
+silesia.tar zskp 2 211947520 67655404 1508 133.96
+silesia.tar zskp 3 211947520 64746933 3000 67.37
+silesia.tar zskp 4 211947520 60073508 16926 11.94
cgo zstd:
silesia.tar zstd 1 211947520 73605392 543 371.56
@@ -161,94 +167,94 @@ silesia.tar zstd 6 211947520 62916450 1913 105.66
silesia.tar zstd 9 211947520 60212393 5063 39.92
gzip, stdlib/this package:
-silesia.tar gzstd 1 211947520 80007735 1654 122.21
-silesia.tar gzkp 1 211947520 80136201 1152 175.45
+silesia.tar gzstd 1 211947520 80007735 1498 134.87
+silesia.tar gzkp 1 211947520 80088272 1009 200.31
GOB stream of binary data. Highly compressible.
https://files.klauspost.com/compress/gob-stream.7z
file out level insize outsize millis mb/s
-gob-stream zskp 1 1911399616 235022249 3088 590.30
-gob-stream zskp 2 1911399616 205669791 3786 481.34
-gob-stream zskp 3 1911399616 175034659 9636 189.17
-gob-stream zskp 4 1911399616 165609838 50369 36.19
+gob-stream zskp 1 1911399616 233948096 3230 564.34
+gob-stream zskp 2 1911399616 203997694 4997 364.73
+gob-stream zskp 3 1911399616 173526523 13435 135.68
+gob-stream zskp 4 1911399616 162195235 47559 38.33
gob-stream zstd 1 1911399616 249810424 2637 691.26
gob-stream zstd 3 1911399616 208192146 3490 522.31
gob-stream zstd 6 1911399616 193632038 6687 272.56
gob-stream zstd 9 1911399616 177620386 16175 112.70
-gob-stream gzstd 1 1911399616 357382641 10251 177.82
-gob-stream gzkp 1 1911399616 359753026 5438 335.20
+gob-stream gzstd 1 1911399616 357382013 9046 201.49
+gob-stream gzkp 1 1911399616 359136669 4885 373.08
The test data for the Large Text Compression Benchmark is the first
10^9 bytes of the English Wikipedia dump on Mar. 3, 2006.
http://mattmahoney.net/dc/textdata.html
file out level insize outsize millis mb/s
-enwik9 zskp 1 1000000000 343848582 3609 264.18
-enwik9 zskp 2 1000000000 317276632 5746 165.97
-enwik9 zskp 3 1000000000 292243069 12162 78.41
-enwik9 zskp 4 1000000000 262183768 82837 11.51
+enwik9 zskp 1 1000000000 343833605 3687 258.64
+enwik9 zskp 2 1000000000 317001237 7672 124.29
+enwik9 zskp 3 1000000000 291915823 15923 59.89
+enwik9 zskp 4 1000000000 261710291 77697 12.27
enwik9 zstd 1 1000000000 358072021 3110 306.65
enwik9 zstd 3 1000000000 313734672 4784 199.35
enwik9 zstd 6 1000000000 295138875 10290 92.68
enwik9 zstd 9 1000000000 278348700 28549 33.40
-enwik9 gzstd 1 1000000000 382578136 9604 99.30
-enwik9 gzkp 1 1000000000 383825945 6544 145.73
+enwik9 gzstd 1 1000000000 382578136 8608 110.78
+enwik9 gzkp 1 1000000000 382781160 5628 169.45
Highly compressible JSON file.
https://files.klauspost.com/compress/github-june-2days-2019.json.zst
file out level insize outsize millis mb/s
-github-june-2days-2019.json zskp 1 6273951764 699045015 10620 563.40
-github-june-2days-2019.json zskp 2 6273951764 617881763 11687 511.96
-github-june-2days-2019.json zskp 3 6273951764 524340691 34043 175.75
-github-june-2days-2019.json zskp 4 6273951764 470320075 170190 35.16
+github-june-2days-2019.json zskp 1 6273951764 697439532 9789 611.17
+github-june-2days-2019.json zskp 2 6273951764 610876538 18553 322.49
+github-june-2days-2019.json zskp 3 6273951764 517662858 44186 135.41
+github-june-2days-2019.json zskp 4 6273951764 464617114 165373 36.18
github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00
github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57
github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18
github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16
-github-june-2days-2019.json gzstd 1 6273951764 1164400847 29948 199.79
-github-june-2days-2019.json gzkp 1 6273951764 1125417694 21788 274.61
+github-june-2days-2019.json gzstd 1 6273951764 1164397768 26793 223.32
+github-june-2days-2019.json gzkp 1 6273951764 1120631856 17693 338.16
VM Image, Linux mint with a few installed applications:
https://files.klauspost.com/compress/rawstudio-mint14.7z
file out level insize outsize millis mb/s
-rawstudio-mint14.tar zskp 1 8558382592 3667489370 20210 403.84
-rawstudio-mint14.tar zskp 2 8558382592 3364592300 31873 256.07
-rawstudio-mint14.tar zskp 3 8558382592 3158085214 77675 105.08
-rawstudio-mint14.tar zskp 4 8558382592 2965110639 857750 9.52
+rawstudio-mint14.tar zskp 1 8558382592 3718400221 18206 448.29
+rawstudio-mint14.tar zskp 2 8558382592 3326118337 37074 220.15
+rawstudio-mint14.tar zskp 3 8558382592 3163842361 87306 93.49
+rawstudio-mint14.tar zskp 4 8558382592 2970480650 783862 10.41
rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27
rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92
rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77
rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91
-rawstudio-mint14.tar gzstd 1 8558382592 3926257486 57722 141.40
-rawstudio-mint14.tar gzkp 1 8558382592 3962605659 45113 180.92
+rawstudio-mint14.tar gzstd 1 8558382592 3926234992 51345 158.96
+rawstudio-mint14.tar gzkp 1 8558382592 3960117298 36722 222.26
CSV data:
https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst
file out level insize outsize millis mb/s
-nyc-taxi-data-10M.csv zskp 1 3325605752 641339945 8925 355.35
-nyc-taxi-data-10M.csv zskp 2 3325605752 591748091 11268 281.44
-nyc-taxi-data-10M.csv zskp 3 3325605752 530289687 25239 125.66
-nyc-taxi-data-10M.csv zskp 4 3325605752 476268884 135958 23.33
+nyc-taxi-data-10M.csv zskp 1 3325605752 641319332 9462 335.17
+nyc-taxi-data-10M.csv zskp 2 3325605752 588976126 17570 180.50
+nyc-taxi-data-10M.csv zskp 3 3325605752 529329260 32432 97.79
+nyc-taxi-data-10M.csv zskp 4 3325605752 474949772 138025 22.98
nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18
nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07
nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27
nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12
-nyc-taxi-data-10M.csv gzstd 1 3325605752 928656485 23876 132.83
-nyc-taxi-data-10M.csv gzkp 1 3325605752 922257165 16780 189.00
+nyc-taxi-data-10M.csv gzstd 1 3325605752 928654908 21270 149.11
+nyc-taxi-data-10M.csv gzkp 1 3325605752 922273214 13929 227.68
```
## Decompressor
@@ -283,8 +289,13 @@ func Decompress(in io.Reader, out io.Writer) error {
}
```
-It is important to use the "Close" function when you no longer need the Reader to stop running goroutines.
-See "Allocation-less operation" below.
+It is important to use the "Close" function when you no longer need the Reader to stop running goroutines,
+when running with default settings.
+Goroutines will exit once an error has been returned, including `io.EOF` at the end of a stream.
+
+Streams are decoded concurrently in 4 asynchronous stages to give the best possible throughput.
+However, if you prefer synchronous decompression, use `WithDecoderConcurrency(1)`, which will decompress data
+only as it is requested.
For decoding buffers, it could look something like this:
@@ -293,7 +304,7 @@ import "github.com/klauspost/compress/zstd"
// Create a reader that caches decompressors.
// For this operation type we supply a nil Reader.
-var decoder, _ = zstd.NewReader(nil)
+var decoder, _ = zstd.NewReader(nil, WithDecoderConcurrency(0))
// Decompress a buffer. We don't supply a destination buffer,
// so it will be allocated by the decoder.
@@ -303,9 +314,12 @@ func Decompress(src []byte) ([]byte, error) {
```
Both of these cases should provide the functionality needed.
-The decoder can be used for *concurrent* decompression of multiple buffers.
+The decoder can be used for *concurrent* decompression of multiple buffers.
+By default 4 decompressors will be created.
+
It will only allow a certain number of concurrent operations to run.
-To tweak that yourself use the `WithDecoderConcurrency(n)` option when creating the decoder.
+To tweak that yourself use the `WithDecoderConcurrency(n)` option when creating the decoder.
+It is possible to use `WithDecoderConcurrency(0)` to create GOMAXPROCS decoders.
### Dictionaries
@@ -357,62 +371,48 @@ In this case no unneeded allocations should be made.
The buffer decoder does everything on the same goroutine and does nothing concurrently.
It can however decode several buffers concurrently. Use `WithDecoderConcurrency(n)` to limit that.
-The stream decoder operates on
+The stream decoder will create goroutines that:
-* One goroutine reads input and splits the input to several block decoders.
-* A number of decoders will decode blocks.
-* A goroutine coordinates these blocks and sends history from one to the next.
+1) Reads input and splits the input into blocks.
+2) Decompression of literals.
+3) Decompression of sequences.
+4) Reconstruction of output stream.
So effectively this also means the decoder will "read ahead" and prepare data to always be available for output.
+The concurrency level will, for streams, determine how many blocks ahead the decompression will start.
+
Since "blocks" are quite dependent on the output of the previous block stream decoding will only have limited concurrency.
-In practice this means that concurrency is often limited to utilizing about 2 cores effectively.
-
-
+In practice this means that concurrency is often limited to utilizing about 3 cores effectively.
+
### Benchmarks
-These are some examples of performance compared to [datadog cgo library](https://github.com/DataDog/zstd).
-
The first two are streaming decodes and the last are smaller inputs.
-
+
+Running on AMD Ryzen 9 3950X 16-Core Processor. AMD64 assembly used.
+
```
-BenchmarkDecoderSilesia-8 3 385000067 ns/op 550.51 MB/s 5498 B/op 8 allocs/op
-BenchmarkDecoderSilesiaCgo-8 6 197666567 ns/op 1072.25 MB/s 270672 B/op 8 allocs/op
-
-BenchmarkDecoderEnwik9-8 1 2027001600 ns/op 493.34 MB/s 10496 B/op 18 allocs/op
-BenchmarkDecoderEnwik9Cgo-8 2 979499200 ns/op 1020.93 MB/s 270672 B/op 8 allocs/op
-
-Concurrent performance:
-
-BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-16 28915 42469 ns/op 4340.07 MB/s 114 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-16 116505 9965 ns/op 11900.16 MB/s 16 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-16 8952 134272 ns/op 3588.70 MB/s 915 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-16 11820 102538 ns/op 4161.90 MB/s 594 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-16 34782 34184 ns/op 3661.88 MB/s 60 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-16 27712 43447 ns/op 3500.58 MB/s 99 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-16 62826 18750 ns/op 21845.10 MB/s 104 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-16 631545 1794 ns/op 57078.74 MB/s 2 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-16 1690140 712 ns/op 172938.13 MB/s 1 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-16 10432 113593 ns/op 6180.73 MB/s 1143 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/html.zst-16 113206 10671 ns/op 9596.27 MB/s 15 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-16 1530615 779 ns/op 5229.49 MB/s 0 B/op 0 allocs/op
-
-BenchmarkDecoder_DecodeAllParallelCgo/kppkn.gtb.zst-16 65217 16192 ns/op 11383.34 MB/s 46 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/geo.protodata.zst-16 292671 4039 ns/op 29363.19 MB/s 6 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/plrabn12.txt.zst-16 26314 46021 ns/op 10470.43 MB/s 293 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/lcet10.txt.zst-16 33897 34900 ns/op 12227.96 MB/s 205 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/asyoulik.txt.zst-16 104348 11433 ns/op 10949.01 MB/s 20 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/alice29.txt.zst-16 75949 15510 ns/op 9805.60 MB/s 32 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/html_x_4.zst-16 173910 6756 ns/op 60624.29 MB/s 37 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/paper-100k.pdf.zst-16 923076 1339 ns/op 76474.87 MB/s 1 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/fireworks.jpeg.zst-16 922920 1351 ns/op 91102.57 MB/s 2 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/urls.10K.zst-16 27649 43618 ns/op 16096.19 MB/s 407 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/html.zst-16 279073 4160 ns/op 24614.18 MB/s 6 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallelCgo/comp-data.bin.zst-16 749938 1579 ns/op 2581.71 MB/s 0 B/op 0 allocs/op
+BenchmarkDecoderSilesia-32 5 206878840 ns/op 1024.50 MB/s 49808 B/op 43 allocs/op
+BenchmarkDecoderEnwik9-32 1 1271809000 ns/op 786.28 MB/s 72048 B/op 52 allocs/op
+
+Concurrent blocks, performance:
+
+BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-32 67356 17857 ns/op 10321.96 MB/s 22.48 pct 102 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-32 266656 4421 ns/op 26823.21 MB/s 11.89 pct 19 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-32 20992 56842 ns/op 8477.17 MB/s 39.90 pct 754 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-32 27456 43932 ns/op 9714.01 MB/s 33.27 pct 524 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-32 78432 15047 ns/op 8319.15 MB/s 40.34 pct 66 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-32 65800 18436 ns/op 8249.63 MB/s 37.75 pct 88 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-32 102993 11523 ns/op 35546.09 MB/s 3.637 pct 143 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-32 1000000 1070 ns/op 95720.98 MB/s 80.53 pct 3 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-32 749802 1752 ns/op 70272.35 MB/s 100.0 pct 5 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-32 22640 52934 ns/op 13263.37 MB/s 26.25 pct 1014 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/html.zst-32 226412 5232 ns/op 19572.27 MB/s 14.49 pct 20 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-32 923041 1276 ns/op 3194.71 MB/s 31.26 pct 0 B/op 0 allocs/op
```
-This reflects the performance around May 2020, but this may be out of date.
+This reflects the performance around May 2022, but this may be out of date.
## Zstd inside ZIP files
diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go
index 753d17df634cd..97299d499cf0a 100644
--- a/vendor/github.com/klauspost/compress/zstd/bitreader.go
+++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go
@@ -7,6 +7,7 @@ package zstd
import (
"encoding/binary"
"errors"
+ "fmt"
"io"
"math/bits"
)
@@ -62,13 +63,6 @@ func (b *bitReader) get32BitsFast(n uint8) uint32 {
return v
}
-func (b *bitReader) get16BitsFast(n uint8) uint16 {
- const regMask = 64 - 1
- v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
- b.bitsRead += n
- return v
-}
-
// fillFast() will make sure at least 32 bits are available.
// There must be at least 4 bytes available.
func (b *bitReader) fillFast() {
@@ -132,6 +126,9 @@ func (b *bitReader) remain() uint {
func (b *bitReader) close() error {
// Release reference.
b.in = nil
+ if !b.finished() {
+ return fmt.Errorf("%d extra bits on block, should be 0", b.remain())
+ }
if b.bitsRead > 64 {
return io.ErrUnexpectedEOF
}
diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go
index b36618285095f..78b3c61be3ecd 100644
--- a/vendor/github.com/klauspost/compress/zstd/bitwriter.go
+++ b/vendor/github.com/klauspost/compress/zstd/bitwriter.go
@@ -5,8 +5,6 @@
package zstd
-import "fmt"
-
// bitWriter will write bits.
// First bit will be LSB of the first byte of output.
type bitWriter struct {
@@ -73,80 +71,6 @@ func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
b.nBits += bits
}
-// flush will flush all pending full bytes.
-// There will be at least 56 bits available for writing when this has been called.
-// Using flush32 is faster, but leaves less space for writing.
-func (b *bitWriter) flush() {
- v := b.nBits >> 3
- switch v {
- case 0:
- case 1:
- b.out = append(b.out,
- byte(b.bitContainer),
- )
- case 2:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- )
- case 3:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- byte(b.bitContainer>>16),
- )
- case 4:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- byte(b.bitContainer>>16),
- byte(b.bitContainer>>24),
- )
- case 5:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- byte(b.bitContainer>>16),
- byte(b.bitContainer>>24),
- byte(b.bitContainer>>32),
- )
- case 6:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- byte(b.bitContainer>>16),
- byte(b.bitContainer>>24),
- byte(b.bitContainer>>32),
- byte(b.bitContainer>>40),
- )
- case 7:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- byte(b.bitContainer>>16),
- byte(b.bitContainer>>24),
- byte(b.bitContainer>>32),
- byte(b.bitContainer>>40),
- byte(b.bitContainer>>48),
- )
- case 8:
- b.out = append(b.out,
- byte(b.bitContainer),
- byte(b.bitContainer>>8),
- byte(b.bitContainer>>16),
- byte(b.bitContainer>>24),
- byte(b.bitContainer>>32),
- byte(b.bitContainer>>40),
- byte(b.bitContainer>>48),
- byte(b.bitContainer>>56),
- )
- default:
- panic(fmt.Errorf("bits (%d) > 64", b.nBits))
- }
- b.bitContainer >>= v << 3
- b.nBits &= 7
-}
-
// flush32 will flush out, so there are at least 32 bits available for writing.
func (b *bitWriter) flush32() {
if b.nBits < 32 {
diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go
index 8a98c4562e017..f52d1aed6fe6a 100644
--- a/vendor/github.com/klauspost/compress/zstd/blockdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go
@@ -5,9 +5,13 @@
package zstd
import (
+ "bytes"
+ "encoding/binary"
"errors"
"fmt"
"io"
+ "os"
+ "path/filepath"
"sync"
"github.com/klauspost/compress/huff0"
@@ -38,14 +42,14 @@ const (
// maxCompressedBlockSize is the biggest allowed compressed block size (128KB)
maxCompressedBlockSize = 128 << 10
+ compressedBlockOverAlloc = 16
+ maxCompressedBlockSizeAlloc = 128<<10 + compressedBlockOverAlloc
+
// Maximum possible block size (all Raw+Uncompressed).
maxBlockSize = (1 << 21) - 1
- // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#literals_section_header
- maxCompressedLiteralSize = 1 << 18
- maxRLELiteralSize = 1 << 20
- maxMatchLen = 131074
- maxSequences = 0x7f00 + 0xffff
+ maxMatchLen = 131074
+ maxSequences = 0x7f00 + 0xffff
// We support slightly less than the reference decoder to be able to
// use ints on 32 bit archs.
@@ -76,20 +80,27 @@ type blockDec struct {
// Window size of the block.
WindowSize uint64
- history chan *history
- input chan struct{}
- result chan decodeOutput
- sequenceBuf []seq
- err error
- decWG sync.WaitGroup
+ err error
+
+ // Check against this crc
+ checkCRC []byte
// Frame to use for singlethreaded decoding.
// Should not be used by the decoder itself since parent may be another frame.
localFrame *frameDec
+ sequence []seqVals
+
+ async struct {
+ newHist *history
+ literals []byte
+ seqData []byte
+ seqSize int // Size of uncompressed sequences
+ fcs uint64
+ }
+
// Block is RLE, this is the size.
RLESize uint32
- tmp [4]byte
Type blockType
@@ -109,13 +120,8 @@ func (b *blockDec) String() string {
func newBlockDec(lowMem bool) *blockDec {
b := blockDec{
- lowMem: lowMem,
- result: make(chan decodeOutput, 1),
- input: make(chan struct{}, 1),
- history: make(chan *history, 1),
+ lowMem: lowMem,
}
- b.decWG.Add(1)
- go b.startDecoder()
return &b
}
@@ -133,11 +139,17 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
b.Type = blockType((bh >> 1) & 3)
// find size.
cSize := int(bh >> 3)
- maxSize := maxBlockSize
+ maxSize := maxCompressedBlockSizeAlloc
switch b.Type {
case blockTypeReserved:
return ErrReservedBlockType
case blockTypeRLE:
+ if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) {
+ if debugDecoder {
+ printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b)
+ }
+ return ErrWindowSizeExceeded
+ }
b.RLESize = uint32(cSize)
if b.lowMem {
maxSize = cSize
@@ -148,9 +160,9 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
println("Data size on stream:", cSize)
}
b.RLESize = 0
- maxSize = maxCompressedBlockSize
+ maxSize = maxCompressedBlockSizeAlloc
if windowSize < maxCompressedBlockSize && b.lowMem {
- maxSize = int(windowSize)
+ maxSize = int(windowSize) + compressedBlockOverAlloc
}
if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize {
if debugDecoder {
@@ -158,7 +170,19 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
}
return ErrCompressedSizeTooBig
}
+ // Empty compressed blocks must at least be 2 bytes
+ // for Literals_Block_Type and one for Sequences_Section_Header.
+ if cSize < 2 {
+ return ErrBlockTooSmall
+ }
case blockTypeRaw:
+ if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) {
+ if debugDecoder {
+ printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b)
+ }
+ return ErrWindowSizeExceeded
+ }
+
b.RLESize = 0
// We do not need a destination for raw blocks.
maxSize = -1
@@ -169,9 +193,9 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
// Read block data.
if cap(b.dataStorage) < cSize {
if b.lowMem || cSize > maxCompressedBlockSize {
- b.dataStorage = make([]byte, 0, cSize)
+ b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc)
} else {
- b.dataStorage = make([]byte, 0, maxCompressedBlockSize)
+ b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc)
}
}
if cap(b.dst) <= maxSize {
@@ -193,85 +217,14 @@ func (b *blockDec) sendErr(err error) {
b.Last = true
b.Type = blockTypeReserved
b.err = err
- b.input <- struct{}{}
}
// Close will release resources.
// Closed blockDec cannot be reset.
func (b *blockDec) Close() {
- close(b.input)
- close(b.history)
- close(b.result)
- b.decWG.Wait()
}
-// decodeAsync will prepare decoding the block when it receives input.
-// This will separate output and history.
-func (b *blockDec) startDecoder() {
- defer b.decWG.Done()
- for range b.input {
- //println("blockDec: Got block input")
- switch b.Type {
- case blockTypeRLE:
- if cap(b.dst) < int(b.RLESize) {
- if b.lowMem {
- b.dst = make([]byte, b.RLESize)
- } else {
- b.dst = make([]byte, maxBlockSize)
- }
- }
- o := decodeOutput{
- d: b,
- b: b.dst[:b.RLESize],
- err: nil,
- }
- v := b.data[0]
- for i := range o.b {
- o.b[i] = v
- }
- hist := <-b.history
- hist.append(o.b)
- b.result <- o
- case blockTypeRaw:
- o := decodeOutput{
- d: b,
- b: b.data,
- err: nil,
- }
- hist := <-b.history
- hist.append(o.b)
- b.result <- o
- case blockTypeCompressed:
- b.dst = b.dst[:0]
- err := b.decodeCompressed(nil)
- o := decodeOutput{
- d: b,
- b: b.dst,
- err: err,
- }
- if debugDecoder {
- println("Decompressed to", len(b.dst), "bytes, error:", err)
- }
- b.result <- o
- case blockTypeReserved:
- // Used for returning errors.
- <-b.history
- b.result <- decodeOutput{
- d: b,
- b: nil,
- err: b.err,
- }
- default:
- panic("Invalid block type")
- }
- if debugDecoder {
- println("blockDec: Finished block")
- }
- }
-}
-
-// decodeAsync will prepare decoding the block when it receives the history.
-// If history is provided, it will not fetch it from the channel.
+// decodeBuf
func (b *blockDec) decodeBuf(hist *history) error {
switch b.Type {
case blockTypeRLE:
@@ -294,14 +247,23 @@ func (b *blockDec) decodeBuf(hist *history) error {
return nil
case blockTypeCompressed:
saved := b.dst
- b.dst = hist.b
- hist.b = nil
+ // Append directly to history
+ if hist.ignoreBuffer == 0 {
+ b.dst = hist.b
+ hist.b = nil
+ } else {
+ b.dst = b.dst[:0]
+ }
err := b.decodeCompressed(hist)
if debugDecoder {
println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err)
}
- hist.b = b.dst
- b.dst = saved
+ if hist.ignoreBuffer == 0 {
+ hist.b = b.dst
+ b.dst = saved
+ } else {
+ hist.appendKeep(b.dst)
+ }
return err
case blockTypeReserved:
// Used for returning errors.
@@ -311,30 +273,18 @@ func (b *blockDec) decodeBuf(hist *history) error {
}
}
-// decodeCompressed will start decompressing a block.
-// If no history is supplied the decoder will decodeAsync as much as possible
-// before fetching from blockDec.history
-func (b *blockDec) decodeCompressed(hist *history) error {
- in := b.data
- delayedHistory := hist == nil
-
- if delayedHistory {
- // We must always grab history.
- defer func() {
- if hist == nil {
- <-b.history
- }
- }()
- }
+func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err error) {
// There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header
if len(in) < 2 {
- return ErrBlockTooSmall
+ return in, ErrBlockTooSmall
}
+
litType := literalsBlockType(in[0] & 3)
var litRegenSize int
var litCompSize int
sizeFormat := (in[0] >> 2) & 3
var fourStreams bool
+ var literals []byte
switch litType {
case literalsBlockRaw, literalsBlockRLE:
switch sizeFormat {
@@ -350,7 +300,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
// Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes.
if len(in) < 3 {
println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
- return ErrBlockTooSmall
+ return in, ErrBlockTooSmall
}
litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12)
in = in[3:]
@@ -361,7 +311,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
// Both Regenerated_Size and Compressed_Size use 10 bits (0-1023).
if len(in) < 3 {
println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
- return ErrBlockTooSmall
+ return in, ErrBlockTooSmall
}
n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12)
litRegenSize = int(n & 1023)
@@ -372,7 +322,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
fourStreams = true
if len(in) < 4 {
println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
- return ErrBlockTooSmall
+ return in, ErrBlockTooSmall
}
n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20)
litRegenSize = int(n & 16383)
@@ -382,7 +332,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
fourStreams = true
if len(in) < 5 {
println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
- return ErrBlockTooSmall
+ return in, ErrBlockTooSmall
}
n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28)
litRegenSize = int(n & 262143)
@@ -393,13 +343,15 @@ func (b *blockDec) decodeCompressed(hist *history) error {
if debugDecoder {
println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams)
}
- var literals []byte
- var huff *huff0.Scratch
+ if litRegenSize > int(b.WindowSize) || litRegenSize > maxCompressedBlockSize {
+ return in, ErrWindowSizeExceeded
+ }
+
switch litType {
case literalsBlockRaw:
if len(in) < litRegenSize {
println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize)
- return ErrBlockTooSmall
+ return in, ErrBlockTooSmall
}
literals = in[:litRegenSize]
in = in[litRegenSize:]
@@ -407,19 +359,13 @@ func (b *blockDec) decodeCompressed(hist *history) error {
case literalsBlockRLE:
if len(in) < 1 {
println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1)
- return ErrBlockTooSmall
+ return in, ErrBlockTooSmall
}
if cap(b.literalBuf) < litRegenSize {
if b.lowMem {
- b.literalBuf = make([]byte, litRegenSize)
+ b.literalBuf = make([]byte, litRegenSize, litRegenSize+compressedBlockOverAlloc)
} else {
- if litRegenSize > maxCompressedLiteralSize {
- // Exceptional
- b.literalBuf = make([]byte, litRegenSize)
- } else {
- b.literalBuf = make([]byte, litRegenSize, maxCompressedLiteralSize)
-
- }
+ b.literalBuf = make([]byte, litRegenSize, maxCompressedBlockSize+compressedBlockOverAlloc)
}
}
literals = b.literalBuf[:litRegenSize]
@@ -434,7 +380,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
case literalsBlockTreeless:
if len(in) < litCompSize {
println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize)
- return ErrBlockTooSmall
+ return in, ErrBlockTooSmall
}
// Store compressed literals, so we defer decoding until we get history.
literals = in[:litCompSize]
@@ -442,31 +388,65 @@ func (b *blockDec) decodeCompressed(hist *history) error {
if debugDecoder {
printf("Found %d compressed literals\n", litCompSize)
}
+ huff := hist.huffTree
+ if huff == nil {
+ return in, errors.New("literal block was treeless, but no history was defined")
+ }
+ // Ensure we have space to store it.
+ if cap(b.literalBuf) < litRegenSize {
+ if b.lowMem {
+ b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc)
+ } else {
+ b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc)
+ }
+ }
+ var err error
+ // Use our out buffer.
+ huff.MaxDecodedSize = litRegenSize
+ if fourStreams {
+ literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
+ } else {
+ literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals)
+ }
+ // Make sure we don't leak our literals buffer
+ if err != nil {
+ println("decompressing literals:", err)
+ return in, err
+ }
+ if len(literals) != litRegenSize {
+ return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
+ }
+
case literalsBlockCompressed:
if len(in) < litCompSize {
println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize)
- return ErrBlockTooSmall
+ return in, ErrBlockTooSmall
}
literals = in[:litCompSize]
in = in[litCompSize:]
- huff = huffDecoderPool.Get().(*huff0.Scratch)
- var err error
// Ensure we have space to store it.
if cap(b.literalBuf) < litRegenSize {
if b.lowMem {
- b.literalBuf = make([]byte, 0, litRegenSize)
+ b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc)
} else {
- b.literalBuf = make([]byte, 0, maxCompressedLiteralSize)
+ b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc)
}
}
- if huff == nil {
- huff = &huff0.Scratch{}
+ huff := hist.huffTree
+ if huff == nil || (hist.dict != nil && huff == hist.dict.litEnc) {
+ huff = huffDecoderPool.Get().(*huff0.Scratch)
+ if huff == nil {
+ huff = &huff0.Scratch{}
+ }
}
+ var err error
huff, literals, err = huff0.ReadTable(literals, huff)
if err != nil {
println("reading huffman table:", err)
- return err
+ return in, err
}
+ hist.huffTree = huff
+ huff.MaxDecodedSize = litRegenSize
// Use our out buffer.
if fourStreams {
literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
@@ -475,27 +455,63 @@ func (b *blockDec) decodeCompressed(hist *history) error {
}
if err != nil {
println("decoding compressed literals:", err)
- return err
+ return in, err
}
// Make sure we don't leak our literals buffer
if len(literals) != litRegenSize {
- return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
+ return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
}
+ // Re-cap to get extra size.
+ literals = b.literalBuf[:len(literals)]
if debugDecoder {
printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize)
}
}
+ hist.decoders.literals = literals
+ return in, nil
+}
+
+// decodeCompressed will start decompressing a block.
+func (b *blockDec) decodeCompressed(hist *history) error {
+ in := b.data
+ in, err := b.decodeLiterals(in, hist)
+ if err != nil {
+ return err
+ }
+ err = b.prepareSequences(in, hist)
+ if err != nil {
+ return err
+ }
+ if hist.decoders.nSeqs == 0 {
+ b.dst = append(b.dst, hist.decoders.literals...)
+ return nil
+ }
+ before := len(hist.decoders.out)
+ err = hist.decoders.decodeSync(hist.b[hist.ignoreBuffer:])
+ if err != nil {
+ return err
+ }
+ if hist.decoders.maxSyncLen > 0 {
+ hist.decoders.maxSyncLen += uint64(before)
+ hist.decoders.maxSyncLen -= uint64(len(hist.decoders.out))
+ }
+ b.dst = hist.decoders.out
+ hist.recentOffsets = hist.decoders.prevOffset
+ return nil
+}
+func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
+ if debugDecoder {
+ printf("prepareSequences: %d byte(s) input\n", len(in))
+ }
// Decode Sequences
// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section
if len(in) < 1 {
return ErrBlockTooSmall
}
+ var nSeqs int
seqHeader := in[0]
- nSeqs := 0
switch {
- case seqHeader == 0:
- in = in[1:]
case seqHeader < 128:
nSeqs = int(seqHeader)
in = in[1:]
@@ -512,19 +528,16 @@ func (b *blockDec) decodeCompressed(hist *history) error {
nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8)
in = in[3:]
}
- // Allocate sequences
- if cap(b.sequenceBuf) < nSeqs {
- if b.lowMem {
- b.sequenceBuf = make([]seq, nSeqs)
- } else {
- // Allocate max
- b.sequenceBuf = make([]seq, nSeqs, maxSequences)
+ if nSeqs == 0 && len(in) != 0 {
+ // When no sequences, there should not be any more data...
+ if debugDecoder {
+ printf("prepareSequences: 0 sequences, but %d byte(s) left on stream\n", len(in))
}
- } else {
- // Reuse buffer
- b.sequenceBuf = b.sequenceBuf[:nSeqs]
+ return ErrUnexpectedBlockSize
}
- var seqs = &sequenceDecs{}
+
+ var seqs = &hist.decoders
+ seqs.nSeqs = nSeqs
if nSeqs > 0 {
if len(in) < 1 {
return ErrBlockTooSmall
@@ -553,6 +566,9 @@ func (b *blockDec) decodeCompressed(hist *history) error {
}
switch mode {
case compModePredefined:
+ if seq.fse != nil && !seq.fse.preDefined {
+ fseDecoderPool.Put(seq.fse)
+ }
seq.fse = &fsePredef[i]
case compModeRLE:
if br.remain() < 1 {
@@ -560,34 +576,36 @@ func (b *blockDec) decodeCompressed(hist *history) error {
}
v := br.Uint8()
br.advance(1)
- dec := fseDecoderPool.Get().(*fseDecoder)
+ if seq.fse == nil || seq.fse.preDefined {
+ seq.fse = fseDecoderPool.Get().(*fseDecoder)
+ }
symb, err := decSymbolValue(v, symbolTableX[i])
if err != nil {
printf("RLE Transform table (%v) error: %v", tableIndex(i), err)
return err
}
- dec.setRLE(symb)
- seq.fse = dec
+ seq.fse.setRLE(symb)
if debugDecoder {
printf("RLE set to %+v, code: %v", symb, v)
}
case compModeFSE:
println("Reading table for", tableIndex(i))
- dec := fseDecoderPool.Get().(*fseDecoder)
- err := dec.readNCount(&br, uint16(maxTableSymbol[i]))
+ if seq.fse == nil || seq.fse.preDefined {
+ seq.fse = fseDecoderPool.Get().(*fseDecoder)
+ }
+ err := seq.fse.readNCount(&br, uint16(maxTableSymbol[i]))
if err != nil {
println("Read table error:", err)
return err
}
- err = dec.transform(symbolTableX[i])
+ err = seq.fse.transform(symbolTableX[i])
if err != nil {
println("Transform table error:", err)
return err
}
if debugDecoder {
- println("Read table ok", "symbolLen:", dec.symbolLen)
+ println("Read table ok", "symbolLen:", seq.fse.symbolLen)
}
- seq.fse = dec
case compModeRepeat:
seq.repeat = true
}
@@ -597,140 +615,106 @@ func (b *blockDec) decodeCompressed(hist *history) error {
}
in = br.unread()
}
-
- // Wait for history.
- // All time spent after this is critical since it is strictly sequential.
- if hist == nil {
- hist = <-b.history
- if hist.error {
- return ErrDecoderClosed
- }
- }
-
- // Decode treeless literal block.
- if litType == literalsBlockTreeless {
- // TODO: We could send the history early WITHOUT the stream history.
- // This would allow decoding treeless literals before the byte history is available.
- // Silencia stats: Treeless 4393, with: 32775, total: 37168, 11% treeless.
- // So not much obvious gain here.
-
- if hist.huffTree == nil {
- return errors.New("literal block was treeless, but no history was defined")
- }
- // Ensure we have space to store it.
- if cap(b.literalBuf) < litRegenSize {
- if b.lowMem {
- b.literalBuf = make([]byte, 0, litRegenSize)
- } else {
- b.literalBuf = make([]byte, 0, maxCompressedLiteralSize)
- }
- }
- var err error
- // Use our out buffer.
- huff = hist.huffTree
- if fourStreams {
- literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
- } else {
- literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals)
- }
- // Make sure we don't leak our literals buffer
- if err != nil {
- println("decompressing literals:", err)
- return err
- }
- if len(literals) != litRegenSize {
- return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
- }
- } else {
- if hist.huffTree != nil && huff != nil {
- if hist.dict == nil || hist.dict.litEnc != hist.huffTree {
- huffDecoderPool.Put(hist.huffTree)
- }
- hist.huffTree = nil
- }
- }
- if huff != nil {
- hist.huffTree = huff
- }
if debugDecoder {
- println("Final literals:", len(literals), "hash:", xxhash.Sum64(literals), "and", nSeqs, "sequences.")
+ println("Literals:", len(seqs.literals), "hash:", xxhash.Sum64(seqs.literals), "and", seqs.nSeqs, "sequences.")
}
if nSeqs == 0 {
- // Decompressed content is defined entirely as Literals Section content.
- b.dst = append(b.dst, literals...)
- if delayedHistory {
- hist.append(literals)
+ if len(b.sequence) > 0 {
+ b.sequence = b.sequence[:0]
}
return nil
}
+ br := seqs.br
+ if br == nil {
+ br = &bitReader{}
+ }
+ if err := br.init(in); err != nil {
+ return err
+ }
- seqs, err := seqs.mergeHistory(&hist.decoders)
- if err != nil {
+ if err := seqs.initialize(br, hist, b.dst); err != nil {
+ println("initializing sequences:", err)
return err
}
- if debugDecoder {
- println("History merged ok")
+ // Extract blocks...
+ if false && hist.dict == nil {
+ fatalErr := func(err error) {
+ if err != nil {
+ panic(err)
+ }
+ }
+ fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize)
+ var buf bytes.Buffer
+ fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse))
+ fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse))
+ fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse))
+ buf.Write(in)
+ os.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm)
}
- br := &bitReader{}
- if err := br.init(in); err != nil {
- return err
+
+ return nil
+}
+
+func (b *blockDec) decodeSequences(hist *history) error {
+ if cap(b.sequence) < hist.decoders.nSeqs {
+ if b.lowMem {
+ b.sequence = make([]seqVals, 0, hist.decoders.nSeqs)
+ } else {
+ b.sequence = make([]seqVals, 0, 0x7F00+0xffff)
+ }
}
+ b.sequence = b.sequence[:hist.decoders.nSeqs]
+ if hist.decoders.nSeqs == 0 {
+ hist.decoders.seqSize = len(hist.decoders.literals)
+ return nil
+ }
+ hist.decoders.windowSize = hist.windowSize
+ hist.decoders.prevOffset = hist.recentOffsets
- // TODO: Investigate if sending history without decoders are faster.
- // This would allow the sequences to be decoded async and only have to construct stream history.
- // If only recent offsets were not transferred, this would be an obvious win.
- // Also, if first 3 sequences don't reference recent offsets, all sequences can be decoded.
+ err := hist.decoders.decode(b.sequence)
+ hist.recentOffsets = hist.decoders.prevOffset
+ return err
+}
+func (b *blockDec) executeSequences(hist *history) error {
hbytes := hist.b
if len(hbytes) > hist.windowSize {
hbytes = hbytes[len(hbytes)-hist.windowSize:]
- // We do not need history any more.
+ // We do not need history anymore.
if hist.dict != nil {
hist.dict.content = nil
}
}
-
- if err := seqs.initialize(br, hist, literals, b.dst); err != nil {
- println("initializing sequences:", err)
- return err
- }
-
- err = seqs.decode(nSeqs, br, hbytes)
+ hist.decoders.windowSize = hist.windowSize
+ hist.decoders.out = b.dst[:0]
+ err := hist.decoders.execute(b.sequence, hbytes)
if err != nil {
return err
}
- if !br.finished() {
- return fmt.Errorf("%d extra bits on block, should be 0", br.remain())
- }
+ return b.updateHistory(hist)
+}
- err = br.close()
- if err != nil {
- printf("Closing sequences: %v, %+v\n", err, *br)
- }
+func (b *blockDec) updateHistory(hist *history) error {
if len(b.data) > maxCompressedBlockSize {
return fmt.Errorf("compressed block size too large (%d)", len(b.data))
}
// Set output and release references.
- b.dst = seqs.out
- seqs.out, seqs.literals, seqs.hist = nil, nil, nil
+ b.dst = hist.decoders.out
+ hist.recentOffsets = hist.decoders.prevOffset
- if !delayedHistory {
- // If we don't have delayed history, no need to update.
- hist.recentOffsets = seqs.prevOffset
- return nil
- }
if b.Last {
// if last block we don't care about history.
println("Last block, no history returned")
hist.b = hist.b[:0]
return nil
+ } else {
+ hist.append(b.dst)
+ if debugDecoder {
+ println("Finished block with ", len(b.sequence), "sequences. Added", len(b.dst), "to history, now length", len(hist.b))
+ }
}
- hist.append(b.dst)
- hist.recentOffsets = seqs.prevOffset
- if debugDecoder {
- println("Finished block with literals:", len(literals), "and", nSeqs, "sequences.")
- }
+ hist.decoders.out, hist.decoders.literals = nil, nil
return nil
}
diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go
index aab71c6cf851b..176788f259762 100644
--- a/vendor/github.com/klauspost/compress/zstd/bytebuf.go
+++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go
@@ -7,7 +7,6 @@ package zstd
import (
"fmt"
"io"
- "io/ioutil"
)
type byteBuffer interface {
@@ -23,7 +22,7 @@ type byteBuffer interface {
readByte() (byte, error)
// Skip n bytes.
- skipN(n int) error
+ skipN(n int64) error
}
// in-memory buffer
@@ -52,10 +51,6 @@ func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) {
return r, nil
}
-func (b *byteBuf) remain() []byte {
- return *b
-}
-
func (b *byteBuf) readByte() (byte, error) {
bb := *b
if len(bb) < 1 {
@@ -66,9 +61,12 @@ func (b *byteBuf) readByte() (byte, error) {
return r, nil
}
-func (b *byteBuf) skipN(n int) error {
+func (b *byteBuf) skipN(n int64) error {
bb := *b
- if len(bb) < n {
+ if n < 0 {
+ return fmt.Errorf("negative skip (%d) requested", n)
+ }
+ if int64(len(bb)) < n {
return io.ErrUnexpectedEOF
}
*b = bb[n:]
@@ -113,6 +111,9 @@ func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) {
func (r *readerWrapper) readByte() (byte, error) {
n2, err := r.r.Read(r.tmp[:1])
if err != nil {
+ if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
return 0, err
}
if n2 != 1 {
@@ -121,9 +122,9 @@ func (r *readerWrapper) readByte() (byte, error) {
return r.tmp[0], nil
}
-func (r *readerWrapper) skipN(n int) error {
- n2, err := io.CopyN(ioutil.Discard, r.r, int64(n))
- if n2 != int64(n) {
+func (r *readerWrapper) skipN(n int64) error {
+ n2, err := io.CopyN(io.Discard, r.r, n)
+ if n2 != n {
err = io.ErrUnexpectedEOF
}
return err
diff --git a/vendor/github.com/klauspost/compress/zstd/bytereader.go b/vendor/github.com/klauspost/compress/zstd/bytereader.go
index 2c4fca17fa1d7..0e59a242d8dc0 100644
--- a/vendor/github.com/klauspost/compress/zstd/bytereader.go
+++ b/vendor/github.com/klauspost/compress/zstd/bytereader.go
@@ -13,12 +13,6 @@ type byteReader struct {
off int
}
-// init will initialize the reader and set the input.
-func (b *byteReader) init(in []byte) {
- b.b = in
- b.off = 0
-}
-
// advance the stream b n bytes.
func (b *byteReader) advance(n uint) {
b.off += int(n)
diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go
index 69736e8d4bb8c..5022e71c83630 100644
--- a/vendor/github.com/klauspost/compress/zstd/decodeheader.go
+++ b/vendor/github.com/klauspost/compress/zstd/decodeheader.go
@@ -5,6 +5,7 @@ package zstd
import (
"bytes"
+ "encoding/binary"
"errors"
"io"
)
@@ -15,18 +16,50 @@ const HeaderMaxSize = 14 + 3
// Header contains information about the first frame and block within that.
type Header struct {
- // Window Size the window of data to keep while decoding.
- // Will only be set if HasFCS is false.
- WindowSize uint64
+ // SingleSegment specifies whether the data is to be decompressed into a
+ // single contiguous memory segment.
+ // It implies that WindowSize is invalid and that FrameContentSize is valid.
+ SingleSegment bool
- // Frame content size.
- // Expected size of the entire frame.
- FrameContentSize uint64
+ // WindowSize is the window of data to keep while decoding.
+ // Will only be set if SingleSegment is false.
+ WindowSize uint64
// Dictionary ID.
// If 0, no dictionary.
DictionaryID uint32
+ // HasFCS specifies whether FrameContentSize has a valid value.
+ HasFCS bool
+
+ // FrameContentSize is the expected uncompressed size of the entire frame.
+ FrameContentSize uint64
+
+ // Skippable will be true if the frame is meant to be skipped.
+ // This implies that FirstBlock.OK is false.
+ Skippable bool
+
+ // SkippableID is the user-specific ID for the skippable frame.
+ // Valid values are between 0 to 15, inclusive.
+ SkippableID int
+
+ // SkippableSize is the length of the user data to skip following
+ // the header.
+ SkippableSize uint32
+
+ // HeaderSize is the raw size of the frame header.
+ //
+ // For normal frames, it includes the size of the magic number and
+ // the size of the header (per section 3.1.1.1).
+ // It does not include the size for any data blocks (section 3.1.1.2) nor
+ // the size for the trailing content checksum.
+ //
+ // For skippable frames, this counts the size of the magic number
+ // along with the size of the size field of the payload.
+ // It does not include the size of the skippable payload itself.
+ // The total frame size is the HeaderSize plus the SkippableSize.
+ HeaderSize int
+
// First block information.
FirstBlock struct {
// OK will be set if first block could be decoded.
@@ -51,17 +84,9 @@ type Header struct {
CompressedSize int
}
- // Skippable will be true if the frame is meant to be skipped.
- // No other information will be populated.
- Skippable bool
-
// If set there is a checksum present for the block content.
+ // The checksum field at the end is always 4 bytes long.
HasCheckSum bool
-
- // If this is true FrameContentSize will have a valid value
- HasFCS bool
-
- SingleSegment bool
}
// Decode the header from the beginning of the stream.
@@ -71,39 +96,46 @@ type Header struct {
// If there isn't enough input, io.ErrUnexpectedEOF is returned.
// The FirstBlock.OK will indicate if enough information was available to decode the first block header.
func (h *Header) Decode(in []byte) error {
+ *h = Header{}
if len(in) < 4 {
return io.ErrUnexpectedEOF
}
+ h.HeaderSize += 4
b, in := in[:4], in[4:]
if !bytes.Equal(b, frameMagic) {
if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 {
return ErrMagicMismatch
}
- *h = Header{Skippable: true}
+ if len(in) < 4 {
+ return io.ErrUnexpectedEOF
+ }
+ h.HeaderSize += 4
+ h.Skippable = true
+ h.SkippableID = int(b[0] & 0xf)
+ h.SkippableSize = binary.LittleEndian.Uint32(in)
return nil
}
+
+ // Read Window_Descriptor
+ // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor
if len(in) < 1 {
return io.ErrUnexpectedEOF
}
-
- // Clear output
- *h = Header{}
fhd, in := in[0], in[1:]
+ h.HeaderSize++
h.SingleSegment = fhd&(1<<5) != 0
h.HasCheckSum = fhd&(1<<2) != 0
-
if fhd&(1<<3) != 0 {
return errors.New("reserved bit set on frame header")
}
- // Read Window_Descriptor
- // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor
if !h.SingleSegment {
if len(in) < 1 {
return io.ErrUnexpectedEOF
}
var wd byte
wd, in = in[0], in[1:]
+ h.HeaderSize++
windowLog := 10 + (wd >> 3)
windowBase := uint64(1) << windowLog
windowAdd := (windowBase / 8) * uint64(wd&0x7)
@@ -120,9 +152,7 @@ func (h *Header) Decode(in []byte) error {
return io.ErrUnexpectedEOF
}
b, in = in[:size], in[size:]
- if b == nil {
- return io.ErrUnexpectedEOF
- }
+ h.HeaderSize += int(size)
switch size {
case 1:
h.DictionaryID = uint32(b[0])
@@ -152,9 +182,7 @@ func (h *Header) Decode(in []byte) error {
return io.ErrUnexpectedEOF
}
b, in = in[:fcsSize], in[fcsSize:]
- if b == nil {
- return io.ErrUnexpectedEOF
- }
+ h.HeaderSize += int(fcsSize)
switch fcsSize {
case 1:
h.FrameContentSize = uint64(b[0])
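
For context on the expanded header metadata above, here is a minimal sketch (not part of the vendored change) of reading it through the exported `Header` type. The round-trip through `EncodeAll` exists only to produce a frame to inspect; the printed fields simply mirror the ones added in this diff.

```go
package main

import (
	"fmt"
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Produce a small frame to inspect (EncodeAll works with a nil writer).
	enc, err := zstd.NewWriter(nil)
	if err != nil {
		log.Fatal(err)
	}
	frame := enc.EncodeAll([]byte("hello header"), nil)

	// Header.Decode only needs the first few bytes of the stream.
	prefix := frame
	if len(prefix) > zstd.HeaderMaxSize {
		prefix = prefix[:zstd.HeaderMaxSize]
	}
	var h zstd.Header
	if err := h.Decode(prefix); err != nil {
		log.Fatal(err)
	}
	if h.Skippable {
		fmt.Printf("skippable frame: id=%d payload=%d header=%d bytes\n",
			h.SkippableID, h.SkippableSize, h.HeaderSize)
		return
	}
	fmt.Printf("header=%dB window=%d hasFCS=%v fcs=%d checksum=%v\n",
		h.HeaderSize, h.WindowSize, h.HasFCS, h.FrameContentSize, h.HasCheckSum)
}
```
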
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go
index f430f58b5726c..78c10755f88a2 100644
--- a/vendor/github.com/klauspost/compress/zstd/decoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/decoder.go
@@ -5,9 +5,13 @@
package zstd
import (
- "errors"
+ "bytes"
+ "context"
+ "encoding/binary"
"io"
"sync"
+
+ "github.com/klauspost/compress/zstd/internal/xxhash"
)
// Decoder provides decoding of zstandard streams.
@@ -22,12 +26,20 @@ type Decoder struct {
// Unreferenced decoders, ready for use.
decoders chan *blockDec
- // Streams ready to be decoded.
- stream chan decodeStream
-
// Current read position used for Reader functionality.
current decoderState
+ // sync stream decoding
+ syncStream struct {
+ decodedFrame uint64
+ br readerWrapper
+ enabled bool
+ inFrame bool
+ dstBuf []byte
+ }
+
+ frame *frameDec
+
// Custom dictionaries.
// Always uses copies.
dicts map[uint32]dict
@@ -46,7 +58,10 @@ type decoderState struct {
output chan decodeOutput
// cancel remaining output.
- cancel chan struct{}
+ cancel context.CancelFunc
+
+ // crc of current frame
+ crc *xxhash.Digest
flushed bool
}
@@ -81,7 +96,7 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) {
return nil, err
}
}
- d.current.output = make(chan decodeOutput, d.o.concurrent)
+ d.current.crc = xxhash.New()
d.current.flushed = true
if r == nil {
@@ -130,7 +145,7 @@ func (d *Decoder) Read(p []byte) (int, error) {
break
}
if !d.nextBlock(n == 0) {
- return n, nil
+ return n, d.current.err
}
}
}
@@ -162,6 +177,7 @@ func (d *Decoder) Reset(r io.Reader) error {
d.drainOutput()
+ d.syncStream.br.r = nil
if r == nil {
d.current.err = ErrDecoderNilInput
if len(d.current.b) > 0 {
@@ -172,21 +188,23 @@ func (d *Decoder) Reset(r io.Reader) error {
}
// If bytes buffer and < 5MB, do sync decoding anyway.
- if bb, ok := r.(byter); ok && bb.Len() < 5<<20 {
+ if bb, ok := r.(byter); ok && bb.Len() < d.o.decodeBufsBelow && !d.o.limitToCap {
bb2 := bb
if debugDecoder {
println("*bytes.Buffer detected, doing sync decode, len:", bb.Len())
}
b := bb2.Bytes()
var dst []byte
- if cap(d.current.b) > 0 {
- dst = d.current.b
+ if cap(d.syncStream.dstBuf) > 0 {
+ dst = d.syncStream.dstBuf[:0]
}
- dst, err := d.DecodeAll(b, dst[:0])
+ dst, err := d.DecodeAll(b, dst)
if err == nil {
err = io.EOF
}
+ // Save output buffer
+ d.syncStream.dstBuf = dst
d.current.b = dst
d.current.err = err
d.current.flushed = true
@@ -195,33 +213,40 @@ func (d *Decoder) Reset(r io.Reader) error {
}
return nil
}
-
- if d.stream == nil {
- d.stream = make(chan decodeStream, 1)
- d.streamWg.Add(1)
- go d.startStreamDecoder(d.stream)
- }
-
// Remove current block.
+ d.stashDecoder()
d.current.decodeOutput = decodeOutput{}
d.current.err = nil
- d.current.cancel = make(chan struct{})
d.current.flushed = false
d.current.d = nil
+ d.syncStream.dstBuf = nil
- d.stream <- decodeStream{
- r: r,
- output: d.current.output,
- cancel: d.current.cancel,
+ // Ensure no-one else is still running...
+ d.streamWg.Wait()
+ if d.frame == nil {
+ d.frame = newFrameDec(d.o)
}
+
+ if d.o.concurrent == 1 {
+ return d.startSyncDecoder(r)
+ }
+
+ d.current.output = make(chan decodeOutput, d.o.concurrent)
+ ctx, cancel := context.WithCancel(context.Background())
+ d.current.cancel = cancel
+ d.streamWg.Add(1)
+ go d.startStreamDecoder(ctx, r, d.current.output)
+
return nil
}
// drainOutput will drain the output until errEndOfStream is sent.
func (d *Decoder) drainOutput() {
if d.current.cancel != nil {
- println("cancelling current")
- close(d.current.cancel)
+ if debugDecoder {
+ println("cancelling current")
+ }
+ d.current.cancel()
d.current.cancel = nil
}
if d.current.d != nil {
@@ -243,12 +268,9 @@ func (d *Decoder) drainOutput() {
}
d.decoders <- v.d
}
- if v.err == errEndOfStream {
- println("current flushed")
- d.current.flushed = true
- return
- }
}
+ d.current.output = nil
+ d.current.flushed = true
}
// WriteTo writes data to w until there's no more data to write or when an error occurs.
@@ -287,19 +309,23 @@ func (d *Decoder) WriteTo(w io.Writer) (int64, error) {
// DecodeAll can be used concurrently.
// The Decoder concurrency limits will be respected.
func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
- if d.current.err == ErrDecoderClosed {
+ if d.decoders == nil {
return dst, ErrDecoderClosed
}
// Grab a block decoder and frame decoder.
block := <-d.decoders
frame := block.localFrame
+ initialSize := len(dst)
defer func() {
if debugDecoder {
printf("re-adding decoder: %p", block)
}
frame.rawInput = nil
frame.bBuf = nil
+ if frame.history.decoders.br != nil {
+ frame.history.decoders.br.in = nil
+ }
d.decoders <- block
}()
frame.bBuf = input
@@ -307,34 +333,52 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
for {
frame.history.reset()
err := frame.reset(&frame.bBuf)
- if err == io.EOF {
- if debugDecoder {
- println("frame reset return EOF")
+ if err != nil {
+ if err == io.EOF {
+ if debugDecoder {
+ println("frame reset return EOF")
+ }
+ return dst, nil
}
- return dst, nil
+ return dst, err
}
if frame.DictionaryID != nil {
dict, ok := d.dicts[*frame.DictionaryID]
if !ok {
return nil, ErrUnknownDictionary
}
+ if debugDecoder {
+ println("setting dict", frame.DictionaryID)
+ }
frame.history.setDict(&dict)
}
- if err != nil {
- return dst, err
- }
- if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
- return dst, ErrDecoderSizeExceeded
+ if frame.WindowSize > d.o.maxWindowSize {
+ if debugDecoder {
+ println("window size exceeded:", frame.WindowSize, ">", d.o.maxWindowSize)
+ }
+ return dst, ErrWindowSizeExceeded
}
- if frame.FrameContentSize > 0 && frame.FrameContentSize < 1<<30 {
- // Never preallocate moe than 1 GB up front.
+ if frame.FrameContentSize != fcsUnknown {
+ if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)-initialSize) {
+ if debugDecoder {
+ println("decoder size exceeded; fcs:", frame.FrameContentSize, "> mcs:", d.o.maxDecodedSize-uint64(len(dst)-initialSize), "len:", len(dst))
+ }
+ return dst, ErrDecoderSizeExceeded
+ }
+ if d.o.limitToCap && frame.FrameContentSize > uint64(cap(dst)-len(dst)) {
+ if debugDecoder {
+ println("decoder size exceeded; fcs:", frame.FrameContentSize, "> (cap-len)", cap(dst)-len(dst))
+ }
+ return dst, ErrDecoderSizeExceeded
+ }
if cap(dst)-len(dst) < int(frame.FrameContentSize) {
- dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize))
+ dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)+compressedBlockOverAlloc)
copy(dst2, dst)
dst = dst2
}
}
- if cap(dst) == 0 {
+
+ if cap(dst) == 0 && !d.o.limitToCap {
// Allocate len(input) * 2 by default if nothing is provided
// and we didn't get frame content size.
size := len(input) * 2
@@ -352,6 +396,9 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
if err != nil {
return dst, err
}
+ if uint64(len(dst)-initialSize) > d.o.maxDecodedSize {
+ return dst, ErrDecoderSizeExceeded
+ }
if len(frame.bBuf) == 0 {
if debugDecoder {
println("frame dbuf empty")
@@ -368,33 +415,176 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
// If non-blocking mode is used the returned boolean will be false
// if no data was available without blocking.
func (d *Decoder) nextBlock(blocking bool) (ok bool) {
- if d.current.d != nil {
- if debugDecoder {
- printf("re-adding current decoder %p", d.current.d)
- }
- d.decoders <- d.current.d
- d.current.d = nil
- }
if d.current.err != nil {
// Keep error state.
- return blocking
+ return false
+ }
+ d.current.b = d.current.b[:0]
+
+ // SYNC:
+ if d.syncStream.enabled {
+ if !blocking {
+ return false
+ }
+ ok = d.nextBlockSync()
+ if !ok {
+ d.stashDecoder()
+ }
+ return ok
}
+ //ASYNC:
+ d.stashDecoder()
if blocking {
- d.current.decodeOutput = <-d.current.output
+ d.current.decodeOutput, ok = <-d.current.output
} else {
select {
- case d.current.decodeOutput = <-d.current.output:
+ case d.current.decodeOutput, ok = <-d.current.output:
default:
return false
}
}
+ if !ok {
+ // This should not happen, so signal error state...
+ d.current.err = io.ErrUnexpectedEOF
+ return false
+ }
+ next := d.current.decodeOutput
+ if next.d != nil && next.d.async.newHist != nil {
+ d.current.crc.Reset()
+ }
if debugDecoder {
- println("got", len(d.current.b), "bytes, error:", d.current.err)
+ var tmp [4]byte
+ binary.LittleEndian.PutUint32(tmp[:], uint32(xxhash.Sum64(next.b)))
+ println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp)
}
+
+ if !d.o.ignoreChecksum && len(next.b) > 0 {
+ n, err := d.current.crc.Write(next.b)
+ if err == nil {
+ if n != len(next.b) {
+ d.current.err = io.ErrShortWrite
+ }
+ }
+ }
+ if next.err == nil && next.d != nil && len(next.d.checkCRC) != 0 {
+ got := d.current.crc.Sum64()
+ var tmp [4]byte
+ binary.LittleEndian.PutUint32(tmp[:], uint32(got))
+ if !d.o.ignoreChecksum && !bytes.Equal(tmp[:], next.d.checkCRC) {
+ if debugDecoder {
+ println("CRC Check Failed:", tmp[:], " (got) !=", next.d.checkCRC, "(on stream)")
+ }
+ d.current.err = ErrCRCMismatch
+ } else {
+ if debugDecoder {
+ println("CRC ok", tmp[:])
+ }
+ }
+ }
+
return true
}
+func (d *Decoder) nextBlockSync() (ok bool) {
+ if d.current.d == nil {
+ d.current.d = <-d.decoders
+ }
+ for len(d.current.b) == 0 {
+ if !d.syncStream.inFrame {
+ d.frame.history.reset()
+ d.current.err = d.frame.reset(&d.syncStream.br)
+ if d.current.err != nil {
+ return false
+ }
+ if d.frame.DictionaryID != nil {
+ dict, ok := d.dicts[*d.frame.DictionaryID]
+ if !ok {
+ d.current.err = ErrUnknownDictionary
+ return false
+ } else {
+ d.frame.history.setDict(&dict)
+ }
+ }
+ if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize {
+ d.current.err = ErrDecoderSizeExceeded
+ return false
+ }
+
+ d.syncStream.decodedFrame = 0
+ d.syncStream.inFrame = true
+ }
+ d.current.err = d.frame.next(d.current.d)
+ if d.current.err != nil {
+ return false
+ }
+ d.frame.history.ensureBlock()
+ if debugDecoder {
+ println("History trimmed:", len(d.frame.history.b), "decoded already:", d.syncStream.decodedFrame)
+ }
+ histBefore := len(d.frame.history.b)
+ d.current.err = d.current.d.decodeBuf(&d.frame.history)
+
+ if d.current.err != nil {
+ println("error after:", d.current.err)
+ return false
+ }
+ d.current.b = d.frame.history.b[histBefore:]
+ if debugDecoder {
+ println("history after:", len(d.frame.history.b))
+ }
+
+ // Check frame size (before CRC)
+ d.syncStream.decodedFrame += uint64(len(d.current.b))
+ if d.syncStream.decodedFrame > d.frame.FrameContentSize {
+ if debugDecoder {
+ printf("DecodedFrame (%d) > FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize)
+ }
+ d.current.err = ErrFrameSizeExceeded
+ return false
+ }
+
+ // Check FCS
+ if d.current.d.Last && d.frame.FrameContentSize != fcsUnknown && d.syncStream.decodedFrame != d.frame.FrameContentSize {
+ if debugDecoder {
+ printf("DecodedFrame (%d) != FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize)
+ }
+ d.current.err = ErrFrameSizeMismatch
+ return false
+ }
+
+ // Update/Check CRC
+ if d.frame.HasCheckSum {
+ if !d.o.ignoreChecksum {
+ d.frame.crc.Write(d.current.b)
+ }
+ if d.current.d.Last {
+ if !d.o.ignoreChecksum {
+ d.current.err = d.frame.checkCRC()
+ } else {
+ d.current.err = d.frame.consumeCRC()
+ }
+ if d.current.err != nil {
+ println("CRC error:", d.current.err)
+ return false
+ }
+ }
+ }
+ d.syncStream.inFrame = !d.current.d.Last
+ }
+ return true
+}
+
+func (d *Decoder) stashDecoder() {
+ if d.current.d != nil {
+ if debugDecoder {
+ printf("re-adding current decoder %p", d.current.d)
+ }
+ d.decoders <- d.current.d
+ d.current.d = nil
+ }
+}
+
// Close will release all resources.
// It is NOT possible to reuse the decoder after this.
func (d *Decoder) Close() {
@@ -402,10 +592,10 @@ func (d *Decoder) Close() {
return
}
d.drainOutput()
- if d.stream != nil {
- close(d.stream)
+ if d.current.cancel != nil {
+ d.current.cancel()
d.streamWg.Wait()
- d.stream = nil
+ d.current.cancel = nil
}
if d.decoders != nil {
close(d.decoders)
@@ -456,100 +646,305 @@ type decodeOutput struct {
err error
}
-type decodeStream struct {
- r io.Reader
-
- // Blocks ready to be written to output.
- output chan decodeOutput
-
- // cancel reading from the input
- cancel chan struct{}
+func (d *Decoder) startSyncDecoder(r io.Reader) error {
+ d.frame.history.reset()
+ d.syncStream.br = readerWrapper{r: r}
+ d.syncStream.inFrame = false
+ d.syncStream.enabled = true
+ d.syncStream.decodedFrame = 0
+ return nil
}
-// errEndOfStream indicates that everything from the stream was read.
-var errEndOfStream = errors.New("end-of-stream")
-
// Create Decoder:
-// Spawn n block decoders. These accept tasks to decode a block.
-// Create goroutine that handles stream processing, this will send history to decoders as they are available.
-// Decoders update the history as they decode.
-// When a block is returned:
-// a) history is sent to the next decoder,
-// b) content written to CRC.
-// c) return data to WRITER.
-// d) wait for next block to return data.
-// Once WRITTEN, the decoders reused by the writer frame decoder for re-use.
-func (d *Decoder) startStreamDecoder(inStream chan decodeStream) {
+// ASYNC:
+// Spawn 3 go routines.
+// 0: Read frames and decode block literals.
+// 1: Decode sequences.
+// 2: Execute sequences, send to output.
+func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) {
defer d.streamWg.Done()
- frame := newFrameDec(d.o)
- for stream := range inStream {
- if debugDecoder {
- println("got new stream")
+ br := readerWrapper{r: r}
+
+ var seqDecode = make(chan *blockDec, d.o.concurrent)
+ var seqExecute = make(chan *blockDec, d.o.concurrent)
+
+ // Async 1: Decode sequences...
+ go func() {
+ var hist history
+ var hasErr bool
+
+ for block := range seqDecode {
+ if hasErr {
+ if block != nil {
+ seqExecute <- block
+ }
+ continue
+ }
+ if block.async.newHist != nil {
+ if debugDecoder {
+ println("Async 1: new history, recent:", block.async.newHist.recentOffsets)
+ }
+ hist.reset()
+ hist.decoders = block.async.newHist.decoders
+ hist.recentOffsets = block.async.newHist.recentOffsets
+ hist.windowSize = block.async.newHist.windowSize
+ if block.async.newHist.dict != nil {
+ hist.setDict(block.async.newHist.dict)
+ }
+ }
+ if block.err != nil || block.Type != blockTypeCompressed {
+ hasErr = block.err != nil
+ seqExecute <- block
+ continue
+ }
+
+ hist.decoders.literals = block.async.literals
+ block.err = block.prepareSequences(block.async.seqData, &hist)
+ if debugDecoder && block.err != nil {
+ println("prepareSequences returned:", block.err)
+ }
+ hasErr = block.err != nil
+ if block.err == nil {
+ block.err = block.decodeSequences(&hist)
+ if debugDecoder && block.err != nil {
+ println("decodeSequences returned:", block.err)
+ }
+ hasErr = block.err != nil
+ // block.async.sequence = hist.decoders.seq[:hist.decoders.nSeqs]
+ block.async.seqSize = hist.decoders.seqSize
+ }
+ seqExecute <- block
}
- br := readerWrapper{r: stream.r}
- decodeStream:
- for {
- frame.history.reset()
- err := frame.reset(&br)
- if debugDecoder && err != nil {
- println("Frame decoder returned", err)
+ close(seqExecute)
+ hist.reset()
+ }()
+
+ var wg sync.WaitGroup
+ wg.Add(1)
+
+ // Async 3: Execute sequences...
+ frameHistCache := d.frame.history.b
+ go func() {
+ var hist history
+ var decodedFrame uint64
+ var fcs uint64
+ var hasErr bool
+ for block := range seqExecute {
+ out := decodeOutput{err: block.err, d: block}
+ if block.err != nil || hasErr {
+ hasErr = true
+ output <- out
+ continue
}
- if err == nil && frame.DictionaryID != nil {
- dict, ok := d.dicts[*frame.DictionaryID]
- if !ok {
- err = ErrUnknownDictionary
+ if block.async.newHist != nil {
+ if debugDecoder {
+ println("Async 2: new history")
+ }
+ hist.reset()
+ hist.windowSize = block.async.newHist.windowSize
+ hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer
+ if block.async.newHist.dict != nil {
+ hist.setDict(block.async.newHist.dict)
+ }
+
+ if cap(hist.b) < hist.allocFrameBuffer {
+ if cap(frameHistCache) >= hist.allocFrameBuffer {
+ hist.b = frameHistCache
+ } else {
+ hist.b = make([]byte, 0, hist.allocFrameBuffer)
+ println("Alloc history sized", hist.allocFrameBuffer)
+ }
+ }
+ hist.b = hist.b[:0]
+ fcs = block.async.fcs
+ decodedFrame = 0
+ }
+ do := decodeOutput{err: block.err, d: block}
+ switch block.Type {
+ case blockTypeRLE:
+ if debugDecoder {
+ println("add rle block length:", block.RLESize)
+ }
+
+ if cap(block.dst) < int(block.RLESize) {
+ if block.lowMem {
+ block.dst = make([]byte, block.RLESize)
+ } else {
+ block.dst = make([]byte, maxBlockSize)
+ }
+ }
+ block.dst = block.dst[:block.RLESize]
+ v := block.data[0]
+ for i := range block.dst {
+ block.dst[i] = v
+ }
+ hist.append(block.dst)
+ do.b = block.dst
+ case blockTypeRaw:
+ if debugDecoder {
+ println("add raw block length:", len(block.data))
+ }
+ hist.append(block.data)
+ do.b = block.data
+ case blockTypeCompressed:
+ if debugDecoder {
+ println("execute with history length:", len(hist.b), "window:", hist.windowSize)
+ }
+ hist.decoders.seqSize = block.async.seqSize
+ hist.decoders.literals = block.async.literals
+ do.err = block.executeSequences(&hist)
+ hasErr = do.err != nil
+ if debugDecoder && hasErr {
+ println("executeSequences returned:", do.err)
+ }
+ do.b = block.dst
+ }
+ if !hasErr {
+ decodedFrame += uint64(len(do.b))
+ if decodedFrame > fcs {
+ println("fcs exceeded", block.Last, fcs, decodedFrame)
+ do.err = ErrFrameSizeExceeded
+ hasErr = true
+ } else if block.Last && fcs != fcsUnknown && decodedFrame != fcs {
+ do.err = ErrFrameSizeMismatch
+ hasErr = true
} else {
- frame.history.setDict(&dict)
+ if debugDecoder {
+ println("fcs ok", block.Last, fcs, decodedFrame)
+ }
}
}
- if err != nil {
- stream.output <- decodeOutput{
- err: err,
+ output <- do
+ }
+ close(output)
+ frameHistCache = hist.b
+ wg.Done()
+ if debugDecoder {
+ println("decoder goroutines finished")
+ }
+ hist.reset()
+ }()
+
+ var hist history
+decodeStream:
+ for {
+ var hasErr bool
+ hist.reset()
+ decodeBlock := func(block *blockDec) {
+ if hasErr {
+ if block != nil {
+ seqDecode <- block
}
- break
+ return
+ }
+ if block.err != nil || block.Type != blockTypeCompressed {
+ hasErr = block.err != nil
+ seqDecode <- block
+ return
}
+
+ remain, err := block.decodeLiterals(block.data, &hist)
+ block.err = err
+ hasErr = block.err != nil
+ if err == nil {
+ block.async.literals = hist.decoders.literals
+ block.async.seqData = remain
+ } else if debugDecoder {
+ println("decodeLiterals error:", err)
+ }
+ seqDecode <- block
+ }
+ frame := d.frame
+ if debugDecoder {
+ println("New frame...")
+ }
+ var historySent bool
+ frame.history.reset()
+ err := frame.reset(&br)
+ if debugDecoder && err != nil {
+ println("Frame decoder returned", err)
+ }
+ if err == nil && frame.DictionaryID != nil {
+ dict, ok := d.dicts[*frame.DictionaryID]
+ if !ok {
+ err = ErrUnknownDictionary
+ } else {
+ frame.history.setDict(&dict)
+ }
+ }
+ if err == nil && d.frame.WindowSize > d.o.maxWindowSize {
if debugDecoder {
- println("starting frame decoder")
- }
-
- // This goroutine will forward history between frames.
- frame.frameDone.Add(1)
- frame.initAsync()
-
- go frame.startDecoder(stream.output)
- decodeFrame:
- // Go through all blocks of the frame.
- for {
- dec := <-d.decoders
- select {
- case <-stream.cancel:
- if !frame.sendErr(dec, io.EOF) {
- // To not let the decoder dangle, send it back.
- stream.output <- decodeOutput{d: dec}
- }
- break decodeStream
- default:
+ println("decoder size exceeded, fws:", d.frame.WindowSize, "> mws:", d.o.maxWindowSize)
+ }
+
+ err = ErrDecoderSizeExceeded
+ }
+ if err != nil {
+ select {
+ case <-ctx.Done():
+ case dec := <-d.decoders:
+ dec.sendErr(err)
+ decodeBlock(dec)
+ }
+ break decodeStream
+ }
+
+ // Go through all blocks of the frame.
+ for {
+ var dec *blockDec
+ select {
+ case <-ctx.Done():
+ break decodeStream
+ case dec = <-d.decoders:
+ // Once we have a decoder, we MUST return it.
+ }
+ err := frame.next(dec)
+ if !historySent {
+ h := frame.history
+ if debugDecoder {
+ println("Alloc History:", h.allocFrameBuffer)
+ }
+ hist.reset()
+ if h.dict != nil {
+ hist.setDict(h.dict)
+ }
+ dec.async.newHist = &h
+ dec.async.fcs = frame.FrameContentSize
+ historySent = true
+ } else {
+ dec.async.newHist = nil
+ }
+ if debugDecoder && err != nil {
+ println("next block returned error:", err)
+ }
+ dec.err = err
+ dec.checkCRC = nil
+ if dec.Last && frame.HasCheckSum && err == nil {
+ crc, err := frame.rawInput.readSmall(4)
+ if err != nil {
+ println("CRC missing?", err)
+ dec.err = err
}
- err := frame.next(dec)
- switch err {
- case io.EOF:
- // End of current frame, no error
- println("EOF on next block")
- break decodeFrame
- case nil:
- continue
- default:
- println("block decoder returned", err)
- break decodeStream
+ var tmp [4]byte
+ copy(tmp[:], crc)
+ dec.checkCRC = tmp[:]
+ if debugDecoder {
+ println("found crc to check:", dec.checkCRC)
}
}
- // All blocks have started decoding, check if there are more frames.
- println("waiting for done")
- frame.frameDone.Wait()
- println("done waiting...")
+ err = dec.err
+ last := dec.Last
+ decodeBlock(dec)
+ if err != nil {
+ break decodeStream
+ }
+ if last {
+ break
+ }
}
- frame.frameDone.Wait()
- println("Sending EOS")
- stream.output <- decodeOutput{err: errEndOfStream}
}
+ close(seqDecode)
+ wg.Wait()
+ hist.reset()
+ d.frame.history.b = frameHistCache
}
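
To make the sync/async split above concrete, a small usage sketch of the public streaming API (assumed from `NewReader` and `WithDecoderConcurrency` as shown in this diff; the payload is made up for illustration). A `bytes.Reader` lacks `Bytes()`, so it does not satisfy the internal `byter` shortcut and the decode goes through the stream path; concurrency 1 selects the synchronous decoder, larger values (or 0 for GOMAXPROCS) use the goroutine pipeline described in the comments above.

```go
package main

import (
	"bytes"
	"io"
	"log"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Build a small stream to decode.
	var buf bytes.Buffer
	enc, err := zstd.NewWriter(&buf)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := enc.Write([]byte("streamed payload\n")); err != nil {
		log.Fatal(err)
	}
	if err := enc.Close(); err != nil {
		log.Fatal(err)
	}

	// Concurrency 1: synchronous stream decoding, no extra goroutines.
	dec, err := zstd.NewReader(bytes.NewReader(buf.Bytes()), zstd.WithDecoderConcurrency(1))
	if err != nil {
		log.Fatal(err)
	}
	defer dec.Close()

	if _, err := io.Copy(os.Stdout, dec); err != nil {
		log.Fatal(err)
	}
}
```
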
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go
index 95cc9b8b81f21..f42448e69c951 100644
--- a/vendor/github.com/klauspost/compress/zstd/decoder_options.go
+++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go
@@ -14,21 +14,28 @@ type DOption func(*decoderOptions) error
// options retains accumulated state of multiple options.
type decoderOptions struct {
- lowMem bool
- concurrent int
- maxDecodedSize uint64
- maxWindowSize uint64
- dicts []dict
+ lowMem bool
+ concurrent int
+ maxDecodedSize uint64
+ maxWindowSize uint64
+ dicts []dict
+ ignoreChecksum bool
+ limitToCap bool
+ decodeBufsBelow int
}
func (o *decoderOptions) setDefault() {
*o = decoderOptions{
// use less ram: true for now, but may change.
- lowMem: true,
- concurrent: runtime.GOMAXPROCS(0),
- maxWindowSize: MaxWindowSize,
+ lowMem: true,
+ concurrent: runtime.GOMAXPROCS(0),
+ maxWindowSize: MaxWindowSize,
+ decodeBufsBelow: 128 << 10,
}
- o.maxDecodedSize = 1 << 63
+ if o.concurrent > 4 {
+ o.concurrent = 4
+ }
+ o.maxDecodedSize = 64 << 30
}
// WithDecoderLowmem will set whether to use a lower amount of memory,
@@ -37,16 +44,25 @@ func WithDecoderLowmem(b bool) DOption {
return func(o *decoderOptions) error { o.lowMem = b; return nil }
}
-// WithDecoderConcurrency will set the concurrency,
-// meaning the maximum number of decoders to run concurrently.
-// The value supplied must be at least 1.
-// By default this will be set to GOMAXPROCS.
+// WithDecoderConcurrency sets the number of created decoders.
+// When decoding block with DecodeAll, this will limit the number
+// of possible concurrently running decodes.
+// When decoding streams, this will limit the number of
+// inflight blocks.
+// When decoding streams and setting maximum to 1,
+// no async decoding will be done.
+// When a value of 0 is provided GOMAXPROCS will be used.
+// By default this will be set to 4 or GOMAXPROCS, whatever is lower.
func WithDecoderConcurrency(n int) DOption {
return func(o *decoderOptions) error {
- if n <= 0 {
+ if n < 0 {
return errors.New("concurrency must be at least 1")
}
- o.concurrent = n
+ if n == 0 {
+ o.concurrent = runtime.GOMAXPROCS(0)
+ } else {
+ o.concurrent = n
+ }
return nil
}
}
@@ -54,7 +70,7 @@ func WithDecoderConcurrency(n int) DOption {
// WithDecoderMaxMemory allows to set a maximum decoded size for in-memory
// non-streaming operations or maximum window size for streaming operations.
// This can be used to control memory usage of potentially hostile content.
-// Maximum and default is 1 << 63 bytes.
+// Maximum is 1 << 63 bytes. Default is 64GiB.
func WithDecoderMaxMemory(n uint64) DOption {
return func(o *decoderOptions) error {
if n == 0 {
@@ -100,3 +116,34 @@ func WithDecoderMaxWindow(size uint64) DOption {
return nil
}
}
+
+// WithDecodeAllCapLimit will limit DecodeAll to decoding cap(dst)-len(dst) bytes,
+// or any size set in WithDecoderMaxMemory.
+// This can be used to limit decoding to a specific maximum output size.
+// Disabled by default.
+func WithDecodeAllCapLimit(b bool) DOption {
+ return func(o *decoderOptions) error {
+ o.limitToCap = b
+ return nil
+ }
+}
+
+// WithDecodeBuffersBelow will fully decode readers that have a
+// `Bytes() []byte` and `Len() int` interface similar to bytes.Buffer.
+// This typically uses less allocations but will have the full decompressed object in memory.
+// Note that DecodeAllCapLimit will disable this, as well as giving a size of 0 or less.
+// Default is 128KiB.
+func WithDecodeBuffersBelow(size int) DOption {
+ return func(o *decoderOptions) error {
+ o.decodeBufsBelow = size
+ return nil
+ }
+}
+
+// IgnoreChecksum allows to forcibly ignore checksum checking.
+func IgnoreChecksum(b bool) DOption {
+ return func(o *decoderOptions) error {
+ o.ignoreChecksum = b
+ return nil
+ }
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go
index 96028ecd8366c..dbbb88d92b391 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_best.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go
@@ -32,6 +32,7 @@ type match struct {
length int32
rep int32
est int32
+ _ [12]byte // Aligned size to cache line: 4+4+4+4+4 bytes + 12 bytes padding = 32 bytes
}
const highScore = 25000
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go
index 602c05ee0c4ce..d70e3fd3d3e99 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_better.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go
@@ -156,8 +156,8 @@ encodeLoop:
panic("offset0 was 0")
}
- nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
+ nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS]
@@ -416,15 +416,23 @@ encodeLoop:
// Try to find a better match by searching for a long match at the end of the current best match
if s+matched < sLimit {
+ // Allow some bytes at the beginning to mismatch.
+ // Sweet spot is around 3 bytes, but depends on input.
+ // The skipped bytes are tested in Extend backwards,
+ // and still picked up as part of the match if they do.
+ const skipBeginning = 3
+
nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen)
- cv := load3232(src, s)
+ s2 := s + skipBeginning
+ cv := load3232(src, s2)
candidateL := e.longTable[nextHashL]
- coffsetL := candidateL.offset - e.cur - matched
- if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
+ coffsetL := candidateL.offset - e.cur - matched + skipBeginning
+ if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
// Found a long match, at least 4 bytes.
- matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4
+ matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4
if matchedNext > matched {
t = coffsetL
+ s = s2
matched = matchedNext
if debugMatches {
println("long match at end-of-match")
@@ -434,12 +442,13 @@ encodeLoop:
// Check prev long...
if true {
- coffsetL = candidateL.prev - e.cur - matched
- if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
+ coffsetL = candidateL.prev - e.cur - matched + skipBeginning
+ if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
// Found a long match, at least 4 bytes.
- matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4
+ matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4
if matchedNext > matched {
t = coffsetL
+ s = s2
matched = matchedNext
if debugMatches {
println("prev long match at end-of-match")
@@ -518,8 +527,8 @@ encodeLoop:
}
// Store this, since we have it.
- nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
+ nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
// We have at least 4 byte match.
// No need to check backwards. We come straight from a match
@@ -674,8 +683,8 @@ encodeLoop:
panic("offset0 was 0")
}
- nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
+ nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS]
@@ -1047,8 +1056,8 @@ encodeLoop:
}
// Store this, since we have it.
- nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
nextHashL := hashLen(cv, betterLongTableBits, betterLongLen)
+ nextHashS := hashLen(cv, betterShortTableBits, betterShortLen)
// We have at least 4 byte match.
// No need to check backwards. We come straight from a match
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
index d6b3104240b0a..1f4a9a245563b 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
@@ -127,8 +127,8 @@ encodeLoop:
panic("offset0 was 0")
}
- nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
+ nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS]
@@ -439,8 +439,8 @@ encodeLoop:
var t int32
for {
- nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
+ nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS]
@@ -785,8 +785,8 @@ encodeLoop:
panic("offset0 was 0")
}
- nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
+ nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS]
@@ -969,7 +969,7 @@ encodeLoop:
te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)}
te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)}
longHash1 := hashLen(cv0, dFastLongTableBits, dFastLongLen)
- longHash2 := hashLen(cv0, dFastLongTableBits, dFastLongLen)
+ longHash2 := hashLen(cv1, dFastLongTableBits, dFastLongLen)
e.longTable[longHash1] = te0
e.longTable[longHash2] = te1
e.markLongShardDirty(longHash1)
@@ -1002,8 +1002,8 @@ encodeLoop:
}
// Store this, since we have it.
- nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen)
+ nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen)
// We have at least 4 byte match.
// No need to check backwards. We come straight from a match
@@ -1103,7 +1103,8 @@ func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) {
}
if allDirty || dirtyShardCnt > dLongTableShardCnt/2 {
- copy(e.longTable[:], e.dictLongTable)
+ //copy(e.longTable[:], e.dictLongTable)
+ e.longTable = *(*[dFastLongTableSize]tableEntry)(e.dictLongTable)
for i := range e.longTableShardDirty {
e.longTableShardDirty[i] = false
}
@@ -1114,7 +1115,9 @@ func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) {
continue
}
- copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize])
+ // copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize])
+ *(*[dLongTableShardSize]tableEntry)(e.longTable[i*dLongTableShardSize:]) = *(*[dLongTableShardSize]tableEntry)(e.dictLongTable[i*dLongTableShardSize:])
+
e.longTableShardDirty[i] = false
}
}
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
index 5f08a2830233f..181edc02b6c5f 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
@@ -85,7 +85,7 @@ func (e *fastEncoder) Encode(blk *blockEnc, src []byte) {
// TEMPLATE
const hashLog = tableBits
// seems global, but would be nice to tweak.
- const kSearchStrength = 7
+ const kSearchStrength = 6
// nextEmit is where in src the next emitLiteral should start from.
nextEmit := s
@@ -334,7 +334,7 @@ func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
// TEMPLATE
const hashLog = tableBits
// seems global, but would be nice to tweak.
- const kSearchStrength = 8
+ const kSearchStrength = 6
// nextEmit is where in src the next emitLiteral should start from.
nextEmit := s
@@ -871,7 +871,8 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) {
const shardCnt = tableShardCnt
const shardSize = tableShardSize
if e.allDirty || dirtyShardCnt > shardCnt*4/6 {
- copy(e.table[:], e.dictTable)
+ //copy(e.table[:], e.dictTable)
+ e.table = *(*[tableSize]tableEntry)(e.dictTable)
for i := range e.tableShardDirty {
e.tableShardDirty[i] = false
}
@@ -883,7 +884,8 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) {
continue
}
- copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize])
+ //copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize])
+ *(*[shardSize]tableEntry)(e.table[i*shardSize:]) = *(*[shardSize]tableEntry)(e.dictTable[i*shardSize:])
e.tableShardDirty[i] = false
}
e.allDirty = false
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go
index e6e315969b00b..7aaaedb23e58c 100644
--- a/vendor/github.com/klauspost/compress/zstd/encoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/encoder.go
@@ -98,23 +98,25 @@ func (e *Encoder) Reset(w io.Writer) {
if cap(s.filling) == 0 {
s.filling = make([]byte, 0, e.o.blockSize)
}
- if cap(s.current) == 0 {
- s.current = make([]byte, 0, e.o.blockSize)
- }
- if cap(s.previous) == 0 {
- s.previous = make([]byte, 0, e.o.blockSize)
+ if e.o.concurrent > 1 {
+ if cap(s.current) == 0 {
+ s.current = make([]byte, 0, e.o.blockSize)
+ }
+ if cap(s.previous) == 0 {
+ s.previous = make([]byte, 0, e.o.blockSize)
+ }
+ s.current = s.current[:0]
+ s.previous = s.previous[:0]
+ if s.writing == nil {
+ s.writing = &blockEnc{lowMem: e.o.lowMem}
+ s.writing.init()
+ }
+ s.writing.initNewEncode()
}
if s.encoder == nil {
s.encoder = e.o.encoder()
}
- if s.writing == nil {
- s.writing = &blockEnc{lowMem: e.o.lowMem}
- s.writing.init()
- }
- s.writing.initNewEncode()
s.filling = s.filling[:0]
- s.current = s.current[:0]
- s.previous = s.previous[:0]
s.encoder.Reset(e.o.dict, false)
s.headerWritten = false
s.eofWritten = false
@@ -258,6 +260,46 @@ func (e *Encoder) nextBlock(final bool) error {
return s.err
}
+ // SYNC:
+ if e.o.concurrent == 1 {
+ src := s.filling
+ s.nInput += int64(len(s.filling))
+ if debugEncoder {
+ println("Adding sync block,", len(src), "bytes, final:", final)
+ }
+ enc := s.encoder
+ blk := enc.Block()
+ blk.reset(nil)
+ enc.Encode(blk, src)
+ blk.last = final
+ if final {
+ s.eofWritten = true
+ }
+
+ err := errIncompressible
+ // If we got the exact same number of literals as input,
+ // assume the literals cannot be compressed.
+ if len(src) != len(blk.literals) || len(src) != e.o.blockSize {
+ err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
+ }
+ switch err {
+ case errIncompressible:
+ if debugEncoder {
+ println("Storing incompressible block as raw")
+ }
+ blk.encodeRaw(src)
+ // In fast mode, we do not transfer offsets, so we don't have to deal with changing the.
+ case nil:
+ default:
+ s.err = err
+ return err
+ }
+ _, s.err = s.w.Write(blk.output)
+ s.nWritten += int64(len(blk.output))
+ s.filling = s.filling[:0]
+ return s.err
+ }
+
// Move blocks forward.
s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current
s.nInput += int64(len(s.current))
@@ -486,8 +528,8 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
// If a non-single block is needed the encoder will reset again.
e.encoders <- enc
}()
- // Use single segments when above minimum window and below 1MB.
- single := len(src) < 1<<20 && len(src) > MinWindowSize
+ // Use single segments when above minimum window and below window size.
+ single := len(src) <= e.o.windowSize && len(src) > MinWindowSize
if e.o.single != nil {
single = *e.o.single
}
@@ -509,7 +551,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
}
// If we can do everything in one block, prefer that.
- if len(src) <= maxCompressedBlockSize {
+ if len(src) <= e.o.blockSize {
enc.Reset(e.o.dict, true)
// Slightly faster with no history and everything in one block.
if e.o.crc {
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
index 7d29e1d689eef..a7c5e1aac4323 100644
--- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go
+++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
@@ -24,6 +24,7 @@ type encoderOptions struct {
allLitEntropy bool
customWindow bool
customALEntropy bool
+ customBlockSize bool
lowMem bool
dict *dict
}
@@ -33,7 +34,7 @@ func (o *encoderOptions) setDefault() {
concurrent: runtime.GOMAXPROCS(0),
crc: true,
single: nil,
- blockSize: 1 << 16,
+ blockSize: maxCompressedBlockSize,
windowSize: 8 << 20,
level: SpeedDefault,
allLitEntropy: true,
@@ -75,6 +76,7 @@ func WithEncoderCRC(b bool) EOption {
// WithEncoderConcurrency will set the concurrency,
// meaning the maximum number of encoders to run concurrently.
// The value supplied must be at least 1.
+// For streams, setting a value of 1 will disable async compression.
// By default this will be set to GOMAXPROCS.
func WithEncoderConcurrency(n int) EOption {
return func(o *encoderOptions) error {
@@ -106,6 +108,7 @@ func WithWindowSize(n int) EOption {
o.customWindow = true
if o.blockSize > o.windowSize {
o.blockSize = o.windowSize
+ o.customBlockSize = true
}
return nil
}
@@ -188,10 +191,9 @@ func EncoderLevelFromZstd(level int) EncoderLevel {
return SpeedDefault
case level >= 6 && level < 10:
return SpeedBetterCompression
- case level >= 10:
+ default:
return SpeedBestCompression
}
- return SpeedDefault
}
// String provides a string representation of the compression level.
@@ -222,6 +224,9 @@ func WithEncoderLevel(l EncoderLevel) EOption {
switch o.level {
case SpeedFastest:
o.windowSize = 4 << 20
+ if !o.customBlockSize {
+ o.blockSize = 1 << 16
+ }
case SpeedDefault:
o.windowSize = 8 << 20
case SpeedBetterCompression:
@@ -278,7 +283,7 @@ func WithNoEntropyCompression(b bool) EOption {
// a decoder is allowed to reject a compressed frame which requests a memory size beyond decoder's authorized range.
// For broader compatibility, decoders are recommended to support memory sizes of at least 8 MB.
// This is only a recommendation, each decoder is free to support higher or lower limits, depending on local limitations.
-// If this is not specified, block encodes will automatically choose this based on the input size.
+// If this is not specified, block encodes will automatically choose this based on the input size and the window size.
// This setting has no effect on streamed encodes.
func WithSingleSegment(b bool) EOption {
return func(o *encoderOptions) error {
diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go
index 989c79f8c3150..b6c5054176a68 100644
--- a/vendor/github.com/klauspost/compress/zstd/framedec.go
+++ b/vendor/github.com/klauspost/compress/zstd/framedec.go
@@ -8,23 +8,17 @@ import (
"bytes"
"encoding/hex"
"errors"
- "hash"
"io"
- "sync"
"github.com/klauspost/compress/zstd/internal/xxhash"
)
type frameDec struct {
- o decoderOptions
- crc hash.Hash64
- offset int64
+ o decoderOptions
+ crc *xxhash.Digest
WindowSize uint64
- // In order queue of blocks being decoded.
- decoding chan *blockDec
-
// Frame history passed between blocks
history history
@@ -34,15 +28,10 @@ type frameDec struct {
bBuf byteBuf
FrameContentSize uint64
- frameDone sync.WaitGroup
DictionaryID *uint32
HasCheckSum bool
SingleSegment bool
-
- // asyncRunning indicates whether the async routine processes input on 'decoding'.
- asyncRunningMu sync.Mutex
- asyncRunning bool
}
const (
@@ -117,7 +106,7 @@ func (d *frameDec) reset(br byteBuffer) error {
}
n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
println("Skipping frame with", n, "bytes.")
- err = br.skipN(int(n))
+ err = br.skipN(int64(n))
if err != nil {
if debugDecoder {
println("Reading discarded frame", err)
@@ -208,7 +197,7 @@ func (d *frameDec) reset(br byteBuffer) error {
default:
fcsSize = 1 << v
}
- d.FrameContentSize = 0
+ d.FrameContentSize = fcsUnknown
if fcsSize > 0 {
b, err := br.readSmall(fcsSize)
if err != nil {
@@ -229,9 +218,10 @@ func (d *frameDec) reset(br byteBuffer) error {
d.FrameContentSize = uint64(d1) | (uint64(d2) << 32)
}
if debugDecoder {
- println("field size bits:", v, "fcsSize:", fcsSize, "FrameContentSize:", d.FrameContentSize, hex.EncodeToString(b[:fcsSize]), "singleseg:", d.SingleSegment, "window:", d.WindowSize)
+ println("Read FCS:", d.FrameContentSize)
}
}
+
// Move this to shared.
d.HasCheckSum = fhd&(1<<2) != 0
if d.HasCheckSum {
@@ -241,20 +231,27 @@ func (d *frameDec) reset(br byteBuffer) error {
d.crc.Reset()
}
+ if d.WindowSize > d.o.maxWindowSize {
+ if debugDecoder {
+ printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize)
+ }
+ return ErrWindowSizeExceeded
+ }
+
if d.WindowSize == 0 && d.SingleSegment {
// We may not need window in this case.
d.WindowSize = d.FrameContentSize
if d.WindowSize < MinWindowSize {
d.WindowSize = MinWindowSize
}
- }
-
- if d.WindowSize > uint64(d.o.maxWindowSize) {
- if debugDecoder {
- printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize)
+ if d.WindowSize > d.o.maxDecodedSize {
+ if debugDecoder {
+ printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize)
+ }
+ return ErrDecoderSizeExceeded
}
- return ErrWindowSizeExceeded
}
+
// The minimum Window_Size is 1 KB.
if d.WindowSize < MinWindowSize {
if debugDecoder {
@@ -263,11 +260,18 @@ func (d *frameDec) reset(br byteBuffer) error {
return ErrWindowSizeTooSmall
}
d.history.windowSize = int(d.WindowSize)
- if d.o.lowMem && d.history.windowSize < maxBlockSize {
- d.history.maxSize = d.history.windowSize * 2
+ if !d.o.lowMem || d.history.windowSize < maxBlockSize {
+ // Alloc 2x window size if not low-mem, or very small window size.
+ d.history.allocFrameBuffer = d.history.windowSize * 2
} else {
- d.history.maxSize = d.history.windowSize + maxBlockSize
+ // Alloc with one additional block
+ d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize
}
+
+ if debugDecoder {
+ println("Frame: Dict:", d.DictionaryID, "FrameContentSize:", d.FrameContentSize, "singleseg:", d.SingleSegment, "window:", d.WindowSize, "crc:", d.HasCheckSum)
+ }
+
// history contains input - maybe we do something
d.rawInput = br
return nil
@@ -276,62 +280,24 @@ func (d *frameDec) reset(br byteBuffer) error {
// next will start decoding the next block from stream.
func (d *frameDec) next(block *blockDec) error {
if debugDecoder {
- printf("decoding new block %p:%p", block, block.data)
+ println("decoding new block")
}
err := block.reset(d.rawInput, d.WindowSize)
if err != nil {
println("block error:", err)
// Signal the frame decoder we have a problem.
- d.sendErr(block, err)
+ block.sendErr(err)
return err
}
- block.input <- struct{}{}
- if debugDecoder {
- println("next block:", block)
- }
- d.asyncRunningMu.Lock()
- defer d.asyncRunningMu.Unlock()
- if !d.asyncRunning {
- return nil
- }
- if block.Last {
- // We indicate the frame is done by sending io.EOF
- d.decoding <- block
- return io.EOF
- }
- d.decoding <- block
return nil
}
-// sendEOF will queue an error block on the frame.
-// This will cause the frame decoder to return when it encounters the block.
-// Returns true if the decoder was added.
-func (d *frameDec) sendErr(block *blockDec, err error) bool {
- d.asyncRunningMu.Lock()
- defer d.asyncRunningMu.Unlock()
- if !d.asyncRunning {
- return false
- }
-
- println("sending error", err.Error())
- block.sendErr(err)
- d.decoding <- block
- return true
-}
-
// checkCRC will check the checksum if the frame has one.
// Will return ErrCRCMismatch if crc check failed, otherwise nil.
func (d *frameDec) checkCRC() error {
if !d.HasCheckSum {
return nil
}
- var tmp [4]byte
- got := d.crc.Sum64()
- // Flip to match file order.
- tmp[0] = byte(got >> 0)
- tmp[1] = byte(got >> 8)
- tmp[2] = byte(got >> 16)
- tmp[3] = byte(got >> 24)
// We can overwrite upper tmp now
want, err := d.rawInput.readSmall(4)
@@ -340,6 +306,18 @@ func (d *frameDec) checkCRC() error {
return err
}
+ if d.o.ignoreChecksum {
+ return nil
+ }
+
+ var tmp [4]byte
+ got := d.crc.Sum64()
+ // Flip to match file order.
+ tmp[0] = byte(got >> 0)
+ tmp[1] = byte(got >> 8)
+ tmp[2] = byte(got >> 16)
+ tmp[3] = byte(got >> 24)
+
if !bytes.Equal(tmp[:], want) {
if debugDecoder {
println("CRC Check Failed:", tmp[:], "!=", want)
@@ -352,133 +330,52 @@ func (d *frameDec) checkCRC() error {
return nil
}
-func (d *frameDec) initAsync() {
- if !d.o.lowMem && !d.SingleSegment {
- // set max extra size history to 2MB.
- d.history.maxSize = d.history.windowSize + maxBlockSize
- }
- // re-alloc if more than one extra block size.
- if d.o.lowMem && cap(d.history.b) > d.history.maxSize+maxBlockSize {
- d.history.b = make([]byte, 0, d.history.maxSize)
- }
- if cap(d.history.b) < d.history.maxSize {
- d.history.b = make([]byte, 0, d.history.maxSize)
- }
- if cap(d.decoding) < d.o.concurrent {
- d.decoding = make(chan *blockDec, d.o.concurrent)
- }
- if debugDecoder {
- h := d.history
- printf("history init. len: %d, cap: %d", len(h.b), cap(h.b))
- }
- d.asyncRunningMu.Lock()
- d.asyncRunning = true
- d.asyncRunningMu.Unlock()
-}
-
-// startDecoder will start decoding blocks and write them to the writer.
-// The decoder will stop as soon as an error occurs or at end of frame.
-// When the frame has finished decoding the *bufio.Reader
-// containing the remaining input will be sent on frameDec.frameDone.
-func (d *frameDec) startDecoder(output chan decodeOutput) {
- written := int64(0)
-
- defer func() {
- d.asyncRunningMu.Lock()
- d.asyncRunning = false
- d.asyncRunningMu.Unlock()
-
- // Drain the currently decoding.
- d.history.error = true
- flushdone:
- for {
- select {
- case b := <-d.decoding:
- b.history <- &d.history
- output <- <-b.result
- default:
- break flushdone
- }
- }
- println("frame decoder done, signalling done")
- d.frameDone.Done()
- }()
- // Get decoder for first block.
- block := <-d.decoding
- block.history <- &d.history
- for {
- var next *blockDec
- // Get result
- r := <-block.result
- if r.err != nil {
- println("Result contained error", r.err)
- output <- r
- return
- }
- if debugDecoder {
- println("got result, from ", d.offset, "to", d.offset+int64(len(r.b)))
- d.offset += int64(len(r.b))
- }
- if !block.Last {
- // Send history to next block
- select {
- case next = <-d.decoding:
- if debugDecoder {
- println("Sending ", len(d.history.b), "bytes as history")
- }
- next.history <- &d.history
- default:
- // Wait until we have sent the block, so
- // other decoders can potentially get the decoder.
- next = nil
- }
- }
-
- // Add checksum, async to decoding.
- if d.HasCheckSum {
- n, err := d.crc.Write(r.b)
- if err != nil {
- r.err = err
- if n != len(r.b) {
- r.err = io.ErrShortWrite
- }
- output <- r
- return
- }
- }
- written += int64(len(r.b))
- if d.SingleSegment && uint64(written) > d.FrameContentSize {
- println("runDecoder: single segment and", uint64(written), ">", d.FrameContentSize)
- r.err = ErrFrameSizeExceeded
- output <- r
- return
- }
- if block.Last {
- r.err = d.checkCRC()
- output <- r
- return
- }
- output <- r
- if next == nil {
- // There was no decoder available, we wait for one now that we have sent to the writer.
- if debugDecoder {
- println("Sending ", len(d.history.b), " bytes as history")
- }
- next = <-d.decoding
- next.history <- &d.history
+// consumeCRC reads the checksum data if the frame has one.
+func (d *frameDec) consumeCRC() error {
+ if d.HasCheckSum {
+ _, err := d.rawInput.readSmall(4)
+ if err != nil {
+ println("CRC missing?", err)
+ return err
}
- block = next
}
+
+ return nil
}
-// runDecoder will create a sync decoder that will decode a block of data.
+// runDecoder will run the decoder for the remainder of the frame.
func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
saved := d.history.b
// We use the history for output to avoid copying it.
d.history.b = dst
+ d.history.ignoreBuffer = len(dst)
// Store input length, so we only check new data.
crcStart := len(dst)
+ d.history.decoders.maxSyncLen = 0
+ if d.o.limitToCap {
+ d.history.decoders.maxSyncLen = uint64(cap(dst) - len(dst))
+ }
+ if d.FrameContentSize != fcsUnknown {
+ if !d.o.limitToCap || d.FrameContentSize+uint64(len(dst)) < d.history.decoders.maxSyncLen {
+ d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst))
+ }
+ if d.history.decoders.maxSyncLen > d.o.maxDecodedSize {
+ if debugDecoder {
+ println("maxSyncLen:", d.history.decoders.maxSyncLen, "> maxDecodedSize:", d.o.maxDecodedSize)
+ }
+ return dst, ErrDecoderSizeExceeded
+ }
+ if debugDecoder {
+ println("maxSyncLen:", d.history.decoders.maxSyncLen)
+ }
+ if !d.o.limitToCap && uint64(cap(dst)) < d.history.decoders.maxSyncLen {
+ // Alloc for output
+ dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc)
+ copy(dst2, dst)
+ dst = dst2
+ }
+ }
var err error
for {
err = dec.reset(d.rawInput, d.WindowSize)
@@ -489,29 +386,47 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
println("next block:", dec)
}
err = dec.decodeBuf(&d.history)
- if err != nil || dec.Last {
+ if err != nil {
break
}
- if uint64(len(d.history.b)) > d.o.maxDecodedSize {
+ if uint64(len(d.history.b)-crcStart) > d.o.maxDecodedSize {
+ println("runDecoder: maxDecodedSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.o.maxDecodedSize)
err = ErrDecoderSizeExceeded
break
}
- if d.SingleSegment && uint64(len(d.history.b)) > d.o.maxDecodedSize {
- println("runDecoder: single segment and", uint64(len(d.history.b)), ">", d.o.maxDecodedSize)
+ if d.o.limitToCap && len(d.history.b) > cap(dst) {
+ println("runDecoder: cap exceeded", uint64(len(d.history.b)), ">", cap(dst))
+ err = ErrDecoderSizeExceeded
+ break
+ }
+ if uint64(len(d.history.b)-crcStart) > d.FrameContentSize {
+ println("runDecoder: FrameContentSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.FrameContentSize)
err = ErrFrameSizeExceeded
break
}
+ if dec.Last {
+ break
+ }
+ if debugDecoder {
+ println("runDecoder: FrameContentSize", uint64(len(d.history.b)-crcStart), "<=", d.FrameContentSize)
+ }
}
dst = d.history.b
if err == nil {
- if d.HasCheckSum {
- var n int
- n, err = d.crc.Write(dst[crcStart:])
- if err == nil {
- if n != len(dst)-crcStart {
- err = io.ErrShortWrite
- } else {
- err = d.checkCRC()
+ if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize {
+ err = ErrFrameSizeMismatch
+ } else if d.HasCheckSum {
+ if d.o.ignoreChecksum {
+ err = d.consumeCRC()
+ } else {
+ var n int
+ n, err = d.crc.Write(dst[crcStart:])
+ if err == nil {
+ if n != len(dst)-crcStart {
+ err = io.ErrShortWrite
+ } else {
+ err = d.checkCRC()
+ }
}
}
}
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
index bb3d4fd6c3129..2f8860a722b8f 100644
--- a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
@@ -5,8 +5,10 @@
package zstd
import (
+ "encoding/binary"
"errors"
"fmt"
+ "io"
)
const (
@@ -178,10 +180,32 @@ func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error {
return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<<s.actualTableLog)
}
b.advance((bitCount + 7) >> 3)
- // println(s.norm[:s.symbolLen], s.symbolLen)
return s.buildDtable()
}
+func (s *fseDecoder) mustReadFrom(r io.Reader) {
+ fatalErr := func(err error) {
+ if err != nil {
+ panic(err)
+ }
+ }
+ // dt [maxTablesize]decSymbol // Decompression table.
+ // symbolLen uint16 // Length of active part of the symbol table.
+ // actualTableLog uint8 // Selected tablelog.
+ // maxBits uint8 // Maximum number of additional bits
+ // // used for table creation to avoid allocations.
+ // stateTable [256]uint16
+ // norm [maxSymbolValue + 1]int16
+ // preDefined bool
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.dt))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.symbolLen))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.actualTableLog))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.maxBits))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.stateTable))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.norm))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.preDefined))
+}
+
// decSymbol contains information about a state entry,
// Including the state offset base, the output symbol and
// the number of bits to read for the low part of the destination state.
@@ -204,18 +228,10 @@ func (d decSymbol) newState() uint16 {
return uint16(d >> 16)
}
-func (d decSymbol) baseline() uint32 {
- return uint32(d >> 32)
-}
-
func (d decSymbol) baselineInt() int {
return int(d >> 32)
}
-func (d *decSymbol) set(nbits, addBits uint8, newState uint16, baseline uint32) {
- *d = decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32)
-}
-
func (d *decSymbol) setNBits(nBits uint8) {
const mask = 0xffffffffffffff00
*d = (*d & mask) | decSymbol(nBits)
@@ -231,11 +247,6 @@ func (d *decSymbol) setNewState(state uint16) {
*d = (*d & mask) | decSymbol(state)<<16
}
-func (d *decSymbol) setBaseline(baseline uint32) {
- const mask = 0xffffffff
- *d = (*d & mask) | decSymbol(baseline)<<32
-}
-
func (d *decSymbol) setExt(addBits uint8, baseline uint32) {
const mask = 0xffff00ff
*d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32)
@@ -257,68 +268,6 @@ func (s *fseDecoder) setRLE(symbol decSymbol) {
s.dt[0] = symbol
}
-// buildDtable will build the decoding table.
-func (s *fseDecoder) buildDtable() error {
- tableSize := uint32(1 << s.actualTableLog)
- highThreshold := tableSize - 1
- symbolNext := s.stateTable[:256]
-
- // Init, lay down lowprob symbols
- {
- for i, v := range s.norm[:s.symbolLen] {
- if v == -1 {
- s.dt[highThreshold].setAddBits(uint8(i))
- highThreshold--
- symbolNext[i] = 1
- } else {
- symbolNext[i] = uint16(v)
- }
- }
- }
- // Spread symbols
- {
- tableMask := tableSize - 1
- step := tableStep(tableSize)
- position := uint32(0)
- for ss, v := range s.norm[:s.symbolLen] {
- for i := 0; i < int(v); i++ {
- s.dt[position].setAddBits(uint8(ss))
- position = (position + step) & tableMask
- for position > highThreshold {
- // lowprob area
- position = (position + step) & tableMask
- }
- }
- }
- if position != 0 {
- // position must reach all cells once, otherwise normalizedCounter is incorrect
- return errors.New("corrupted input (position != 0)")
- }
- }
-
- // Build Decoding table
- {
- tableSize := uint16(1 << s.actualTableLog)
- for u, v := range s.dt[:tableSize] {
- symbol := v.addBits()
- nextState := symbolNext[symbol]
- symbolNext[symbol] = nextState + 1
- nBits := s.actualTableLog - byte(highBits(uint32(nextState)))
- s.dt[u&maxTableMask].setNBits(nBits)
- newState := (nextState << nBits) - tableSize
- if newState > tableSize {
- return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize)
- }
- if newState == uint16(u) && nBits == 0 {
- // Seems weird that this is possible with nbits > 0.
- return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u)
- }
- s.dt[u&maxTableMask].setNewState(newState)
- }
- }
- return nil
-}
-
// transform will transform the decoder table into a table usable for
// decoding without having to apply the transformation while decoding.
// The state will contain the base value and the number of bits to read.
@@ -352,34 +301,7 @@ func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) {
s.state = dt[br.getBits(tableLog)]
}
-// next returns the current symbol and sets the next state.
-// At least tablelog bits must be available in the bit reader.
-func (s *fseState) next(br *bitReader) {
- lowBits := uint16(br.getBits(s.state.nbBits()))
- s.state = s.dt[s.state.newState()+lowBits]
-}
-
-// finished returns true if all bits have been read from the bitstream
-// and the next state would require reading bits from the input.
-func (s *fseState) finished(br *bitReader) bool {
- return br.finished() && s.state.nbBits() > 0
-}
-
-// final returns the current state symbol without decoding the next.
-func (s *fseState) final() (int, uint8) {
- return s.state.baselineInt(), s.state.addBits()
-}
-
// final returns the current state symbol without decoding the next.
func (s decSymbol) final() (int, uint8) {
return s.baselineInt(), s.addBits()
}
-
-// nextFast returns the next symbol and sets the next state.
-// This can only be used if no symbols are 0 bits.
-// At least tablelog bits must be available in the bit reader.
-func (s *fseState) nextFast(br *bitReader) (uint32, uint8) {
- lowBits := br.get16BitsFast(s.state.nbBits())
- s.state = s.dt[s.state.newState()+lowBits]
- return s.state.baseline(), s.state.addBits()
-}
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go
new file mode 100644
index 0000000000000..d04a829b0a0e7
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go
@@ -0,0 +1,65 @@
+//go:build amd64 && !appengine && !noasm && gc
+// +build amd64,!appengine,!noasm,gc
+
+package zstd
+
+import (
+ "fmt"
+)
+
+type buildDtableAsmContext struct {
+ // inputs
+ stateTable *uint16
+ norm *int16
+ dt *uint64
+
+ // outputs --- set by the procedure in the case of error;
+ // for interpretation please see the error handling part below
+ errParam1 uint64
+ errParam2 uint64
+}
+
+// buildDtable_asm is an x86 assembly implementation of fseDecoder.buildDtable.
+// Function returns non-zero exit code on error.
+//
+//go:noescape
+func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int
+
+// please keep in sync with _generate/gen_fse.go
+const (
+ errorCorruptedNormalizedCounter = 1
+ errorNewStateTooBig = 2
+ errorNewStateNoBits = 3
+)
+
+// buildDtable will build the decoding table.
+func (s *fseDecoder) buildDtable() error {
+ ctx := buildDtableAsmContext{
+ stateTable: &s.stateTable[0],
+ norm: &s.norm[0],
+ dt: (*uint64)(&s.dt[0]),
+ }
+ code := buildDtable_asm(s, &ctx)
+
+ if code != 0 {
+ switch code {
+ case errorCorruptedNormalizedCounter:
+ position := ctx.errParam1
+ return fmt.Errorf("corrupted input (position=%d, expected 0)", position)
+
+ case errorNewStateTooBig:
+ newState := decSymbol(ctx.errParam1)
+ size := ctx.errParam2
+ return fmt.Errorf("newState (%d) outside table size (%d)", newState, size)
+
+ case errorNewStateNoBits:
+ newState := decSymbol(ctx.errParam1)
+ oldState := decSymbol(ctx.errParam2)
+ return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, oldState)
+
+ default:
+ return fmt.Errorf("buildDtable_asm returned unhandled nonzero code = %d", code)
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s
new file mode 100644
index 0000000000000..bcde398695351
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s
@@ -0,0 +1,126 @@
+// Code generated by command: go run gen_fse.go -out ../fse_decoder_amd64.s -pkg=zstd. DO NOT EDIT.
+
+//go:build !appengine && !noasm && gc && !noasm
+
+// func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int
+TEXT ·buildDtable_asm(SB), $0-24
+ MOVQ ctx+8(FP), CX
+ MOVQ s+0(FP), DI
+
+ // Load values
+ MOVBQZX 4098(DI), DX
+ XORQ AX, AX
+ BTSQ DX, AX
+ MOVQ (CX), BX
+ MOVQ 16(CX), SI
+ LEAQ -1(AX), R8
+ MOVQ 8(CX), CX
+ MOVWQZX 4096(DI), DI
+
+ // End load values
+ // Init, lay down lowprob symbols
+ XORQ R9, R9
+ JMP init_main_loop_condition
+
+init_main_loop:
+ MOVWQSX (CX)(R9*2), R10
+ CMPW R10, $-1
+ JNE do_not_update_high_threshold
+ MOVB R9, 1(SI)(R8*8)
+ DECQ R8
+ MOVQ $0x0000000000000001, R10
+
+do_not_update_high_threshold:
+ MOVW R10, (BX)(R9*2)
+ INCQ R9
+
+init_main_loop_condition:
+ CMPQ R9, DI
+ JL init_main_loop
+
+ // Spread symbols
+ // Calculate table step
+ MOVQ AX, R9
+ SHRQ $0x01, R9
+ MOVQ AX, R10
+ SHRQ $0x03, R10
+ LEAQ 3(R9)(R10*1), R9
+
+ // Fill add bits values
+ LEAQ -1(AX), R10
+ XORQ R11, R11
+ XORQ R12, R12
+ JMP spread_main_loop_condition
+
+spread_main_loop:
+ XORQ R13, R13
+ MOVWQSX (CX)(R12*2), R14
+ JMP spread_inner_loop_condition
+
+spread_inner_loop:
+ MOVB R12, 1(SI)(R11*8)
+
+adjust_position:
+ ADDQ R9, R11
+ ANDQ R10, R11
+ CMPQ R11, R8
+ JG adjust_position
+ INCQ R13
+
+spread_inner_loop_condition:
+ CMPQ R13, R14
+ JL spread_inner_loop
+ INCQ R12
+
+spread_main_loop_condition:
+ CMPQ R12, DI
+ JL spread_main_loop
+ TESTQ R11, R11
+ JZ spread_check_ok
+ MOVQ ctx+8(FP), AX
+ MOVQ R11, 24(AX)
+ MOVQ $+1, ret+16(FP)
+ RET
+
+spread_check_ok:
+ // Build Decoding table
+ XORQ DI, DI
+
+build_table_main_table:
+ MOVBQZX 1(SI)(DI*8), CX
+ MOVWQZX (BX)(CX*2), R8
+ LEAQ 1(R8), R9
+ MOVW R9, (BX)(CX*2)
+ MOVQ R8, R9
+ BSRQ R9, R9
+ MOVQ DX, CX
+ SUBQ R9, CX
+ SHLQ CL, R8
+ SUBQ AX, R8
+ MOVB CL, (SI)(DI*8)
+ MOVW R8, 2(SI)(DI*8)
+ CMPQ R8, AX
+ JLE build_table_check1_ok
+ MOVQ ctx+8(FP), CX
+ MOVQ R8, 24(CX)
+ MOVQ AX, 32(CX)
+ MOVQ $+2, ret+16(FP)
+ RET
+
+build_table_check1_ok:
+ TESTB CL, CL
+ JNZ build_table_check2_ok
+ CMPW R8, DI
+ JNE build_table_check2_ok
+ MOVQ ctx+8(FP), AX
+ MOVQ R8, 24(AX)
+ MOVQ DI, 32(AX)
+ MOVQ $+3, ret+16(FP)
+ RET
+
+build_table_check2_ok:
+ INCQ DI
+ CMPQ DI, AX
+ JL build_table_main_table
+ MOVQ $+0, ret+16(FP)
+ RET
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go
new file mode 100644
index 0000000000000..332e51fe44fae
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go
@@ -0,0 +1,72 @@
+//go:build !amd64 || appengine || !gc || noasm
+// +build !amd64 appengine !gc noasm
+
+package zstd
+
+import (
+ "errors"
+ "fmt"
+)
+
+// buildDtable will build the decoding table.
+func (s *fseDecoder) buildDtable() error {
+ tableSize := uint32(1 << s.actualTableLog)
+ highThreshold := tableSize - 1
+ symbolNext := s.stateTable[:256]
+
+ // Init, lay down lowprob symbols
+ {
+ for i, v := range s.norm[:s.symbolLen] {
+ if v == -1 {
+ s.dt[highThreshold].setAddBits(uint8(i))
+ highThreshold--
+ symbolNext[i] = 1
+ } else {
+ symbolNext[i] = uint16(v)
+ }
+ }
+ }
+
+ // Spread symbols
+ {
+ tableMask := tableSize - 1
+ step := tableStep(tableSize)
+ position := uint32(0)
+ for ss, v := range s.norm[:s.symbolLen] {
+ for i := 0; i < int(v); i++ {
+ s.dt[position].setAddBits(uint8(ss))
+ position = (position + step) & tableMask
+ for position > highThreshold {
+ // lowprob area
+ position = (position + step) & tableMask
+ }
+ }
+ }
+ if position != 0 {
+ // position must reach all cells once, otherwise normalizedCounter is incorrect
+ return errors.New("corrupted input (position != 0)")
+ }
+ }
+
+ // Build Decoding table
+ {
+ tableSize := uint16(1 << s.actualTableLog)
+ for u, v := range s.dt[:tableSize] {
+ symbol := v.addBits()
+ nextState := symbolNext[symbol]
+ symbolNext[symbol] = nextState + 1
+ nBits := s.actualTableLog - byte(highBits(uint32(nextState)))
+ s.dt[u&maxTableMask].setNBits(nBits)
+ newState := (nextState << nBits) - tableSize
+ if newState > tableSize {
+ return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize)
+ }
+ if newState == uint16(u) && nBits == 0 {
+ // Seems weird that this is possible with nbits > 0.
+ return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u)
+ }
+ s.dt[u&maxTableMask].setNewState(newState)
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
index 5442061b18df5..ab26326a8ff80 100644
--- a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
@@ -76,21 +76,6 @@ func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) {
s.clearCount = maxCount != 0
}
-// prepare will prepare and allocate scratch tables used for both compression and decompression.
-func (s *fseEncoder) prepare() (*fseEncoder, error) {
- if s == nil {
- s = &fseEncoder{}
- }
- s.useRLE = false
- if s.clearCount && s.maxCount == 0 {
- for i := range s.count {
- s.count[i] = 0
- }
- s.clearCount = false
- }
- return s, nil
-}
-
// allocCtable will allocate tables needed for compression.
// If existing tables a re big enough, they are simply re-used.
func (s *fseEncoder) allocCtable() {
@@ -709,14 +694,6 @@ func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) {
c.state = c.stateTable[lu]
}
-// encode the output symbol provided and write it to the bitstream.
-func (c *cState) encode(symbolTT symbolTransform) {
- nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16
- dstState := int32(c.state>>(nbBitsOut&15)) + int32(symbolTT.deltaFindState)
- c.bw.addBits16NC(c.state, uint8(nbBitsOut))
- c.state = c.stateTable[dstState]
-}
-
// flush will write the tablelog to the output and flush the remaining full bytes.
func (c *cState) flush(tableLog uint8) {
c.bw.flush32()
diff --git a/vendor/github.com/klauspost/compress/zstd/hash.go b/vendor/github.com/klauspost/compress/zstd/hash.go
index cf33f29a1b488..5d73c21ebdd4b 100644
--- a/vendor/github.com/klauspost/compress/zstd/hash.go
+++ b/vendor/github.com/klauspost/compress/zstd/hash.go
@@ -33,9 +33,3 @@ func hashLen(u uint64, length, mls uint8) uint32 {
return (uint32(u) * prime4bytes) >> (32 - length)
}
}
-
-// hash3 returns the hash of the lower 3 bytes of u to fit in a hash table with h bits.
-// Preferably h should be a constant and should always be <32.
-func hash3(u uint32, h uint8) uint32 {
- return ((u << (32 - 24)) * prime3bytes) >> ((32 - h) & 31)
-}
diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go
index f783e32d251b4..09164856d2224 100644
--- a/vendor/github.com/klauspost/compress/zstd/history.go
+++ b/vendor/github.com/klauspost/compress/zstd/history.go
@@ -10,40 +10,48 @@ import (
// history contains the information transferred between blocks.
type history struct {
- b []byte
- huffTree *huff0.Scratch
- recentOffsets [3]int
+ // Literal decompression
+ huffTree *huff0.Scratch
+
+ // Sequence decompression
decoders sequenceDecs
- windowSize int
- maxSize int
- error bool
- dict *dict
+ recentOffsets [3]int
+
+ // History buffer...
+ b []byte
+
+ // ignoreBuffer is meant to ignore a number of bytes
+ // when checking for matches in history
+ ignoreBuffer int
+
+ windowSize int
+ allocFrameBuffer int // needed?
+ error bool
+ dict *dict
}
// reset will reset the history to initial state of a frame.
// The history must already have been initialized to the desired size.
func (h *history) reset() {
h.b = h.b[:0]
+ h.ignoreBuffer = 0
h.error = false
h.recentOffsets = [3]int{1, 4, 8}
- if f := h.decoders.litLengths.fse; f != nil && !f.preDefined {
- fseDecoderPool.Put(f)
- }
- if f := h.decoders.offsets.fse; f != nil && !f.preDefined {
- fseDecoderPool.Put(f)
- }
- if f := h.decoders.matchLengths.fse; f != nil && !f.preDefined {
- fseDecoderPool.Put(f)
- }
- h.decoders = sequenceDecs{}
+ h.decoders.freeDecoders()
+ h.decoders = sequenceDecs{br: h.decoders.br}
+ h.freeHuffDecoder()
+ h.huffTree = nil
+ h.dict = nil
+ //printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b))
+}
+
+func (h *history) freeHuffDecoder() {
if h.huffTree != nil {
if h.dict == nil || h.dict.litEnc != h.huffTree {
huffDecoderPool.Put(h.huffTree)
+ h.huffTree = nil
}
}
- h.huffTree = nil
- h.dict = nil
- //printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b))
}
func (h *history) setDict(dict *dict) {
@@ -54,6 +62,7 @@ func (h *history) setDict(dict *dict) {
h.decoders.litLengths = dict.llDec
h.decoders.offsets = dict.ofDec
h.decoders.matchLengths = dict.mlDec
+ h.decoders.dict = dict.content
h.recentOffsets = dict.offsets
h.huffTree = dict.litEnc
}
@@ -83,6 +92,24 @@ func (h *history) append(b []byte) {
copy(h.b[h.windowSize-len(b):], b)
}
+// ensureBlock will ensure there is space for at least one block...
+func (h *history) ensureBlock() {
+ if cap(h.b) < h.allocFrameBuffer {
+ h.b = make([]byte, 0, h.allocFrameBuffer)
+ return
+ }
+
+ avail := cap(h.b) - len(h.b)
+ if avail >= h.windowSize || avail > maxCompressedBlockSize {
+ return
+ }
+ // Move data down so we only have window size left.
+ // We know we have less than window size in b at this point.
+ discard := len(h.b) - h.windowSize
+ copy(h.b, h.b[discard:])
+ h.b = h.b[:h.windowSize]
+}
+
// append bytes to history without ever discarding anything.
func (h *history) appendKeep(b []byte) {
h.b = append(h.b, b...)
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
index be8db5bf79601..cea1785619709 100644
--- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
@@ -1,6 +1,7 @@
// +build !appengine
// +build gc
// +build !purego
+// +build !noasm
#include "textflag.h"
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s
index 6626095890795..4d64a17d69c12 100644
--- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s
@@ -1,13 +1,13 @@
-// +build gc,!purego
+// +build gc,!purego,!noasm
#include "textflag.h"
// Register allocation.
#define digest R1
-#define h R2 // Return value.
-#define p R3 // Input pointer.
+#define h R2 // Return value.
+#define p R3 // Input pointer.
#define len R4
-#define nblocks R5 // len / 32.
+#define nblocks R5 // len / 32.
#define prime1 R7
#define prime2 R8
#define prime3 R9
@@ -22,50 +22,48 @@
#define x3 R22
#define x4 R23
-#define round(acc, x) \
- MADD prime2, acc, x, acc \
- ROR $64-31, acc \
- MUL prime1, acc \
+#define round(acc, x) \
+ MADD prime2, acc, x, acc \
+ ROR $64-31, acc \
+ MUL prime1, acc \
// x = round(0, x).
-#define round0(x) \
- MUL prime2, x \
- ROR $64-31, x \
- MUL prime1, x \
+#define round0(x) \
+ MUL prime2, x \
+ ROR $64-31, x \
+ MUL prime1, x \
-#define mergeRound(x) \
- round0(x) \
- EOR x, h \
- MADD h, prime4, prime1, h \
+#define mergeRound(x) \
+ round0(x) \
+ EOR x, h \
+ MADD h, prime4, prime1, h \
// Update v[1-4] with 32-byte blocks. Assumes len >= 32.
-#define blocksLoop() \
- LSR $5, len, nblocks \
- PCALIGN $16 \
-loop: \
- LDP.P 32(p), (x1, x2) \
- round(v1, x1) \
- LDP -16(p), (x3, x4) \
- round(v2, x2) \
- SUB $1, nblocks \
- round(v3, x3) \
- round(v4, x4) \
- CBNZ nblocks, loop \
-
+#define blocksLoop() \
+ LSR $5, len, nblocks \
+ PCALIGN $16 \
+ loop: \
+ LDP.P 32(p), (x1, x2) \
+ round(v1, x1) \
+ LDP -16(p), (x3, x4) \
+ round(v2, x2) \
+ SUB $1, nblocks \
+ round(v3, x3) \
+ round(v4, x4) \
+ CBNZ nblocks, loop \
// The primes are repeated here to ensure that they're stored
// in a contiguous array, so we can load them with LDP.
-DATA primes<> +0(SB)/8, $11400714785074694791
-DATA primes<> +8(SB)/8, $14029467366897019727
-DATA primes<>+16(SB)/8, $1609587929392839161
-DATA primes<>+24(SB)/8, $9650029242287828579
-DATA primes<>+32(SB)/8, $2870177450012600261
+DATA primes<> +0(SB)/8, $11400714785074694791
+DATA primes<> +8(SB)/8, $14029467366897019727
+DATA primes<>+16(SB)/8, $1609587929392839161
+DATA primes<>+24(SB)/8, $9650029242287828579
+DATA primes<>+32(SB)/8, $2870177450012600261
GLOBL primes<>(SB), NOPTR+RODATA, $40
-
// func Sum64(b []byte) uint64
TEXT ·Sum64(SB), NOFRAME+NOSPLIT, $0-32
- LDP b_base+0(FP), (p, len)
+ LDP b_base+0(FP), (p, len)
LDP primes<> +0(SB), (prime1, prime2)
LDP primes<>+16(SB), (prime3, prime4)
@@ -156,24 +154,23 @@ try1:
end:
EOR h >> 33, h
- MUL prime2, h
+ MUL prime2, h
EOR h >> 29, h
- MUL prime3, h
+ MUL prime3, h
EOR h >> 32, h
MOVD h, ret+24(FP)
RET
-
// func writeBlocks(d *Digest, b []byte) int
//
// Assumes len(b) >= 32.
TEXT ·writeBlocks(SB), NOFRAME+NOSPLIT, $0-40
- LDP primes<>(SB), (prime1, prime2)
+ LDP primes<>(SB), (prime1, prime2)
// Load state. Assume v[1-4] are stored contiguously.
MOVD d+0(FP), digest
- LDP 0(digest), (v1, v2)
+ LDP 0(digest), (v1, v2)
LDP 16(digest), (v3, v4)
LDP b_base+8(FP), (p, len)
@@ -181,7 +178,7 @@ TEXT ·writeBlocks(SB), NOFRAME+NOSPLIT, $0-40
blocksLoop()
// Store updated state.
- STP (v1, v2), 0(digest)
+ STP (v1, v2), 0(digest)
STP (v3, v4), 16(digest)
BIC $31, len
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go
index 9216e0a40c1a4..1a1fac9c26130 100644
--- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go
@@ -1,8 +1,9 @@
-//go:build (amd64 || arm64) && !appengine && gc && !purego
+//go:build (amd64 || arm64) && !appengine && gc && !purego && !noasm
// +build amd64 arm64
// +build !appengine
// +build gc
// +build !purego
+// +build !noasm
package xxhash
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go
index 2deb1ca755328..209cb4a999c3a 100644
--- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go
@@ -1,5 +1,5 @@
-//go:build (!amd64 && !arm64) || appengine || !gc || purego
-// +build !amd64,!arm64 appengine !gc purego
+//go:build (!amd64 && !arm64) || appengine || !gc || purego || noasm
+// +build !amd64,!arm64 appengine !gc purego noasm
package xxhash
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go
index bc731e4cb69ae..f833d1541f982 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go
@@ -20,6 +20,10 @@ type seq struct {
llCode, mlCode, ofCode uint8
}
+type seqVals struct {
+ ll, ml, mo int
+}
+
func (s seq) String() string {
if s.offset <= 3 {
if s.offset == 0 {
@@ -61,16 +65,19 @@ type sequenceDecs struct {
offsets sequenceDec
matchLengths sequenceDec
prevOffset [3]int
- hist []byte
dict []byte
literals []byte
out []byte
+ nSeqs int
+ br *bitReader
+ seqSize int
windowSize int
maxBits uint8
+ maxSyncLen uint64
}
// initialize all 3 decoders from the stream input.
-func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out []byte) error {
+func (s *sequenceDecs) initialize(br *bitReader, hist *history, out []byte) error {
if err := s.litLengths.init(br); err != nil {
return errors.New("litLengths:" + err.Error())
}
@@ -80,8 +87,7 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out []
if err := s.matchLengths.init(br); err != nil {
return errors.New("matchLengths:" + err.Error())
}
- s.literals = literals
- s.hist = hist.b
+ s.br = br
s.prevOffset = hist.recentOffsets
s.maxBits = s.litLengths.fse.maxBits + s.offsets.fse.maxBits + s.matchLengths.fse.maxBits
s.windowSize = hist.windowSize
@@ -93,12 +99,142 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out []
return nil
}
+func (s *sequenceDecs) freeDecoders() {
+ if f := s.litLengths.fse; f != nil && !f.preDefined {
+ fseDecoderPool.Put(f)
+ s.litLengths.fse = nil
+ }
+ if f := s.offsets.fse; f != nil && !f.preDefined {
+ fseDecoderPool.Put(f)
+ s.offsets.fse = nil
+ }
+ if f := s.matchLengths.fse; f != nil && !f.preDefined {
+ fseDecoderPool.Put(f)
+ s.matchLengths.fse = nil
+ }
+}
+
+// execute will execute the decoded sequence with the provided history.
+// The sequence must be evaluated before being sent.
+func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
+ if len(s.dict) == 0 {
+ return s.executeSimple(seqs, hist)
+ }
+
+ // Ensure we have enough output size...
+ if len(s.out)+s.seqSize > cap(s.out) {
+ addBytes := s.seqSize + len(s.out)
+ s.out = append(s.out, make([]byte, addBytes)...)
+ s.out = s.out[:len(s.out)-addBytes]
+ }
+
+ if debugDecoder {
+ printf("Execute %d seqs with hist %d, dict %d, literals: %d into %d bytes\n", len(seqs), len(hist), len(s.dict), len(s.literals), s.seqSize)
+ }
+
+ var t = len(s.out)
+ out := s.out[:t+s.seqSize]
+
+ for _, seq := range seqs {
+ // Add literals
+ copy(out[t:], s.literals[:seq.ll])
+ t += seq.ll
+ s.literals = s.literals[seq.ll:]
+
+ // Copy from dictionary...
+ if seq.mo > t+len(hist) || seq.mo > s.windowSize {
+ if len(s.dict) == 0 {
+ return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist))
+ }
+
+ // we may be in dictionary.
+ dictO := len(s.dict) - (seq.mo - (t + len(hist)))
+ if dictO < 0 || dictO >= len(s.dict) {
+ return fmt.Errorf("match offset (%d) bigger than current history+dict (%d)", seq.mo, t+len(hist)+len(s.dict))
+ }
+ end := dictO + seq.ml
+ if end > len(s.dict) {
+ n := len(s.dict) - dictO
+ copy(out[t:], s.dict[dictO:])
+ t += n
+ seq.ml -= n
+ } else {
+ copy(out[t:], s.dict[dictO:end])
+ t += end - dictO
+ continue
+ }
+ }
+
+ // Copy from history.
+ if v := seq.mo - t; v > 0 {
+ // v is the start position in history from end.
+ start := len(hist) - v
+ if seq.ml > v {
+ // Some goes into current block.
+ // Copy remainder of history
+ copy(out[t:], hist[start:])
+ t += v
+ seq.ml -= v
+ } else {
+ copy(out[t:], hist[start:start+seq.ml])
+ t += seq.ml
+ continue
+ }
+ }
+ // We must be in current buffer now
+ if seq.ml > 0 {
+ start := t - seq.mo
+ if seq.ml <= t-start {
+ // No overlap
+ copy(out[t:], out[start:start+seq.ml])
+ t += seq.ml
+ continue
+ } else {
+ // Overlapping copy
+ // Extend destination slice and copy one byte at the time.
+ src := out[start : start+seq.ml]
+ dst := out[t:]
+ dst = dst[:len(src)]
+ t += len(src)
+ // Destination is the space we just added.
+ for i := range src {
+ dst[i] = src[i]
+ }
+ }
+ }
+ }
+
+ // Add final literals
+ copy(out[t:], s.literals)
+ if debugDecoder {
+ t += len(s.literals)
+ if t != len(out) {
+ panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize))
+ }
+ }
+ s.out = out
+
+ return nil
+}
+
// decode sequences from the stream with the provided history.
-func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
+func (s *sequenceDecs) decodeSync(hist []byte) error {
+ supported, err := s.decodeSyncSimple(hist)
+ if supported {
+ return err
+ }
+
+ br := s.br
+ seqs := s.nSeqs
startSize := len(s.out)
// Grab full sizes tables, to avoid bounds checks.
llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
+ out := s.out
+ maxBlockSize := maxCompressedBlockSize
+ if s.windowSize < maxBlockSize {
+ maxBlockSize = s.windowSize
+ }
for i := seqs - 1; i >= 0; i-- {
if br.overread() {
@@ -151,7 +287,7 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
if temp == 0 {
// 0 is not valid; input is corrupted; force offset to 1
- println("temp was 0")
+ println("WARNING: temp was 0")
temp = 1
}
@@ -176,51 +312,52 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
if ll > len(s.literals) {
return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals))
}
- size := ll + ml + len(s.out)
+ size := ll + ml + len(out)
if size-startSize > maxBlockSize {
- return fmt.Errorf("output (%d) bigger than max block size", size)
+ if size-startSize == 424242 {
+ panic("here")
+ }
+ return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
}
- if size > cap(s.out) {
+ if size > cap(out) {
// Not enough size, which can happen under high volume block streaming conditions
// but could be if destination slice is too small for sync operations.
// over-allocating here can create a large amount of GC pressure so we try to keep
// it as contained as possible
- used := len(s.out) - startSize
+ used := len(out) - startSize
addBytes := 256 + ll + ml + used>>2
// Clamp to max block size.
if used+addBytes > maxBlockSize {
addBytes = maxBlockSize - used
}
- s.out = append(s.out, make([]byte, addBytes)...)
- s.out = s.out[:len(s.out)-addBytes]
+ out = append(out, make([]byte, addBytes)...)
+ out = out[:len(out)-addBytes]
}
if ml > maxMatchLen {
return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
}
// Add literals
- s.out = append(s.out, s.literals[:ll]...)
+ out = append(out, s.literals[:ll]...)
s.literals = s.literals[ll:]
- out := s.out
if mo == 0 && ml > 0 {
return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
}
- if mo > len(s.out)+len(hist) || mo > s.windowSize {
+ if mo > len(out)+len(hist) || mo > s.windowSize {
if len(s.dict) == 0 {
- return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist))
+ return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize)
}
// we may be in dictionary.
- dictO := len(s.dict) - (mo - (len(s.out) + len(hist)))
+ dictO := len(s.dict) - (mo - (len(out) + len(hist)))
if dictO < 0 || dictO >= len(s.dict) {
- return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist))
+ return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize)
}
end := dictO + ml
if end > len(s.dict) {
out = append(out, s.dict[dictO:]...)
- mo -= len(s.dict) - dictO
ml -= len(s.dict) - dictO
} else {
out = append(out, s.dict[dictO:end]...)
@@ -231,26 +368,25 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
// Copy from history.
// TODO: Blocks without history could be made to ignore this completely.
- if v := mo - len(s.out); v > 0 {
+ if v := mo - len(out); v > 0 {
// v is the start position in history from end.
- start := len(s.hist) - v
+ start := len(hist) - v
if ml > v {
// Some goes into current block.
// Copy remainder of history
- out = append(out, s.hist[start:]...)
- mo -= v
+ out = append(out, hist[start:]...)
ml -= v
} else {
- out = append(out, s.hist[start:start+ml]...)
+ out = append(out, hist[start:start+ml]...)
ml = 0
}
}
// We must be in current buffer now
if ml > 0 {
- start := len(s.out) - mo
- if ml <= len(s.out)-start {
+ start := len(out) - mo
+ if ml <= len(out)-start {
// No overlap
- out = append(out, s.out[start:start+ml]...)
+ out = append(out, out[start:start+ml]...)
} else {
// Overlapping copy
// Extend destination slice and copy one byte at the time.
@@ -264,7 +400,6 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
}
}
}
- s.out = out
if i == 0 {
// This is the last sequence, so we shouldn't update state.
break
@@ -279,6 +414,7 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
ofState = ofTable[ofState.newState()&maxTableMask]
} else {
bits := br.get32BitsFast(nBits)
+
lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31))
llState = llTable[(llState.newState()+lowBits)&maxTableMask]
@@ -291,19 +427,14 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
}
}
- // Add final literals
- s.out = append(s.out, s.literals...)
- return nil
-}
+ // Check if space for literals
+ if size := len(s.literals) + len(s.out) - startSize; size > maxBlockSize {
+ return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
+ }
-// update states, at least 27 bits must be available.
-func (s *sequenceDecs) update(br *bitReader) {
- // Max 8 bits
- s.litLengths.state.next(br)
- // Max 9 bits
- s.matchLengths.state.next(br)
- // Max 8 bits
- s.offsets.state.next(br)
+ // Add final literals
+ s.out = append(out, s.literals...)
+ return br.close()
}
var bitMask [16]uint16
@@ -314,87 +445,6 @@ func init() {
}
}
-// update states, at least 27 bits must be available.
-func (s *sequenceDecs) updateAlt(br *bitReader) {
- // Update all 3 states at once. Approx 20% faster.
- a, b, c := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
-
- nBits := a.nbBits() + b.nbBits() + c.nbBits()
- if nBits == 0 {
- s.litLengths.state.state = s.litLengths.state.dt[a.newState()]
- s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()]
- s.offsets.state.state = s.offsets.state.dt[c.newState()]
- return
- }
- bits := br.get32BitsFast(nBits)
- lowBits := uint16(bits >> ((c.nbBits() + b.nbBits()) & 31))
- s.litLengths.state.state = s.litLengths.state.dt[a.newState()+lowBits]
-
- lowBits = uint16(bits >> (c.nbBits() & 31))
- lowBits &= bitMask[b.nbBits()&15]
- s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()+lowBits]
-
- lowBits = uint16(bits) & bitMask[c.nbBits()&15]
- s.offsets.state.state = s.offsets.state.dt[c.newState()+lowBits]
-}
-
-// nextFast will return new states when there are at least 4 unused bytes left on the stream when done.
-func (s *sequenceDecs) nextFast(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) {
- // Final will not read from stream.
- ll, llB := llState.final()
- ml, mlB := mlState.final()
- mo, moB := ofState.final()
-
- // extra bits are stored in reverse order.
- br.fillFast()
- mo += br.getBits(moB)
- if s.maxBits > 32 {
- br.fillFast()
- }
- ml += br.getBits(mlB)
- ll += br.getBits(llB)
-
- if moB > 1 {
- s.prevOffset[2] = s.prevOffset[1]
- s.prevOffset[1] = s.prevOffset[0]
- s.prevOffset[0] = mo
- return
- }
- // mo = s.adjustOffset(mo, ll, moB)
- // Inlined for rather big speedup
- if ll == 0 {
- // There is an exception though, when current sequence's literals_length = 0.
- // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
- // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
- mo++
- }
-
- if mo == 0 {
- mo = s.prevOffset[0]
- return
- }
- var temp int
- if mo == 3 {
- temp = s.prevOffset[0] - 1
- } else {
- temp = s.prevOffset[mo]
- }
-
- if temp == 0 {
- // 0 is not valid; input is corrupted; force offset to 1
- println("temp was 0")
- temp = 1
- }
-
- if mo != 1 {
- s.prevOffset[2] = s.prevOffset[1]
- }
- s.prevOffset[1] = s.prevOffset[0]
- s.prevOffset[0] = temp
- mo = temp
- return
-}
-
func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) {
// Final will not read from stream.
ll, llB := llState.final()
@@ -457,36 +507,3 @@ func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int {
s.prevOffset[0] = temp
return temp
}
-
-// mergeHistory will merge history.
-func (s *sequenceDecs) mergeHistory(hist *sequenceDecs) (*sequenceDecs, error) {
- for i := uint(0); i < 3; i++ {
- var sNew, sHist *sequenceDec
- switch i {
- default:
- // same as "case 0":
- sNew = &s.litLengths
- sHist = &hist.litLengths
- case 1:
- sNew = &s.offsets
- sHist = &hist.offsets
- case 2:
- sNew = &s.matchLengths
- sHist = &hist.matchLengths
- }
- if sNew.repeat {
- if sHist.fse == nil {
- return nil, fmt.Errorf("sequence stream %d, repeat requested, but no history", i)
- }
- continue
- }
- if sNew.fse == nil {
- return nil, fmt.Errorf("sequence stream %d, no fse found", i)
- }
- if sHist.fse != nil && !sHist.fse.preDefined {
- fseDecoderPool.Put(sHist.fse)
- }
- sHist.fse = sNew.fse
- }
- return hist, nil
-}
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
new file mode 100644
index 0000000000000..191384adfd068
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
@@ -0,0 +1,379 @@
+//go:build amd64 && !appengine && !noasm && gc
+// +build amd64,!appengine,!noasm,gc
+
+package zstd
+
+import (
+ "fmt"
+
+ "github.com/klauspost/compress/internal/cpuinfo"
+)
+
+type decodeSyncAsmContext struct {
+ llTable []decSymbol
+ mlTable []decSymbol
+ ofTable []decSymbol
+ llState uint64
+ mlState uint64
+ ofState uint64
+ iteration int
+ litRemain int
+ out []byte
+ outPosition int
+ literals []byte
+ litPosition int
+ history []byte
+ windowSize int
+ ll int // set on error (not for all errors, please refer to _generate/gen.go)
+ ml int // set on error (not for all errors, please refer to _generate/gen.go)
+ mo int // set on error (not for all errors, please refer to _generate/gen.go)
+}
+
+// sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm.
+//
+// Please refer to seqdec_generic.go for the reference implementation.
+//
+//go:noescape
+func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+
+// sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions.
+//
+//go:noescape
+func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+
+// sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write beyond the output buffer.
+//
+//go:noescape
+func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+
+// sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write beyond the output buffer.
+//
+//go:noescape
+func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+
+// decode sequences from the stream with the provided history but without a dictionary.
+func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
+ if len(s.dict) > 0 {
+ return false, nil
+ }
+ if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSize {
+ return false, nil
+ }
+
+ // FIXME: Using unsafe memory copies leads to rare, random crashes
+ // with fuzz testing. It is therefore disabled for now.
+ const useSafe = true
+ /*
+ useSafe := false
+ if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSizeAlloc {
+ useSafe = true
+ }
+ if s.maxSyncLen > 0 && cap(s.out)-len(s.out)-compressedBlockOverAlloc < int(s.maxSyncLen) {
+ useSafe = true
+ }
+ if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc {
+ useSafe = true
+ }
+ */
+
+ br := s.br
+
+ maxBlockSize := maxCompressedBlockSize
+ if s.windowSize < maxBlockSize {
+ maxBlockSize = s.windowSize
+ }
+
+ ctx := decodeSyncAsmContext{
+ llTable: s.litLengths.fse.dt[:maxTablesize],
+ mlTable: s.matchLengths.fse.dt[:maxTablesize],
+ ofTable: s.offsets.fse.dt[:maxTablesize],
+ llState: uint64(s.litLengths.state.state),
+ mlState: uint64(s.matchLengths.state.state),
+ ofState: uint64(s.offsets.state.state),
+ iteration: s.nSeqs - 1,
+ litRemain: len(s.literals),
+ out: s.out,
+ outPosition: len(s.out),
+ literals: s.literals,
+ windowSize: s.windowSize,
+ history: hist,
+ }
+
+ s.seqSize = 0
+ startSize := len(s.out)
+
+ var errCode int
+ if cpuinfo.HasBMI2() {
+ if useSafe {
+ errCode = sequenceDecs_decodeSync_safe_bmi2(s, br, &ctx)
+ } else {
+ errCode = sequenceDecs_decodeSync_bmi2(s, br, &ctx)
+ }
+ } else {
+ if useSafe {
+ errCode = sequenceDecs_decodeSync_safe_amd64(s, br, &ctx)
+ } else {
+ errCode = sequenceDecs_decodeSync_amd64(s, br, &ctx)
+ }
+ }
+ switch errCode {
+ case noError:
+ break
+
+ case errorMatchLenOfsMismatch:
+ return true, fmt.Errorf("zero matchoff and matchlen (%d) > 0", ctx.ml)
+
+ case errorMatchLenTooBig:
+ return true, fmt.Errorf("match len (%d) bigger than max allowed length", ctx.ml)
+
+ case errorMatchOffTooBig:
+ return true, fmt.Errorf("match offset (%d) bigger than current history (%d)",
+ ctx.mo, ctx.outPosition+len(hist)-startSize)
+
+ case errorNotEnoughLiterals:
+ return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available",
+ ctx.ll, ctx.litRemain+ctx.ll)
+
+ case errorNotEnoughSpace:
+ size := ctx.outPosition + ctx.ll + ctx.ml
+ if debugDecoder {
+ println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize)
+ }
+ return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
+
+ default:
+		return true, fmt.Errorf("sequenceDecs_decode returned erroneous code %d", errCode)
+ }
+
+ s.seqSize += ctx.litRemain
+ if s.seqSize > maxBlockSize {
+ return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
+
+ }
+ err := br.close()
+ if err != nil {
+ printf("Closing sequences: %v, %+v\n", err, *br)
+ return true, err
+ }
+
+ s.literals = s.literals[ctx.litPosition:]
+ t := ctx.outPosition
+ s.out = s.out[:t]
+
+ // Add final literals
+ s.out = append(s.out, s.literals...)
+ if debugDecoder {
+ t += len(s.literals)
+ if t != len(s.out) {
+ panic(fmt.Errorf("length mismatch, want %d, got %d", len(s.out), t))
+ }
+ }
+
+ return true, nil
+}
+
+// --------------------------------------------------------------------------------
+
+type decodeAsmContext struct {
+ llTable []decSymbol
+ mlTable []decSymbol
+ ofTable []decSymbol
+ llState uint64
+ mlState uint64
+ ofState uint64
+ iteration int
+ seqs []seqVals
+ litRemain int
+}
+
+const noError = 0
+
+// error reported when mo == 0 && ml > 0
+const errorMatchLenOfsMismatch = 1
+
+// error reported when ml > maxMatchLen
+const errorMatchLenTooBig = 2
+
+// error reported when mo > available history or mo > s.windowSize
+const errorMatchOffTooBig = 3
+
+// error reported when the sum of literal lengths exceeds the literal buffer size
+const errorNotEnoughLiterals = 4
+
+// error reported when capacity of `out` is too small
+const errorNotEnoughSpace = 5
+
+// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
+//
+// Please refer to seqdec_generic.go for the reference implementation.
+//
+//go:noescape
+func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
+//
+// Please refer to seqdec_generic.go for the reference implementation.
+//
+//go:noescape
+func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
+//
+//go:noescape
+func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
+//
+//go:noescape
+func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// decode sequences from the stream without the provided history.
+func (s *sequenceDecs) decode(seqs []seqVals) error {
+ br := s.br
+
+ maxBlockSize := maxCompressedBlockSize
+ if s.windowSize < maxBlockSize {
+ maxBlockSize = s.windowSize
+ }
+
+ ctx := decodeAsmContext{
+ llTable: s.litLengths.fse.dt[:maxTablesize],
+ mlTable: s.matchLengths.fse.dt[:maxTablesize],
+ ofTable: s.offsets.fse.dt[:maxTablesize],
+ llState: uint64(s.litLengths.state.state),
+ mlState: uint64(s.matchLengths.state.state),
+ ofState: uint64(s.offsets.state.state),
+ seqs: seqs,
+ iteration: len(seqs) - 1,
+ litRemain: len(s.literals),
+ }
+
+ s.seqSize = 0
+ lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56
+ var errCode int
+ if cpuinfo.HasBMI2() {
+ if lte56bits {
+ errCode = sequenceDecs_decode_56_bmi2(s, br, &ctx)
+ } else {
+ errCode = sequenceDecs_decode_bmi2(s, br, &ctx)
+ }
+ } else {
+ if lte56bits {
+ errCode = sequenceDecs_decode_56_amd64(s, br, &ctx)
+ } else {
+ errCode = sequenceDecs_decode_amd64(s, br, &ctx)
+ }
+ }
+ if errCode != 0 {
+ i := len(seqs) - ctx.iteration - 1
+ switch errCode {
+ case errorMatchLenOfsMismatch:
+ ml := ctx.seqs[i].ml
+ return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
+
+ case errorMatchLenTooBig:
+ ml := ctx.seqs[i].ml
+ return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
+
+ case errorNotEnoughLiterals:
+ ll := ctx.seqs[i].ll
+ return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll)
+ }
+
+	return fmt.Errorf("sequenceDecs_decode_amd64 returned erroneous code %d", errCode)
+ }
+
+ if ctx.litRemain < 0 {
+ return fmt.Errorf("literal count is too big: total available %d, total requested %d",
+ len(s.literals), len(s.literals)-ctx.litRemain)
+ }
+
+ s.seqSize += ctx.litRemain
+ if s.seqSize > maxBlockSize {
+ return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
+ }
+ err := br.close()
+ if err != nil {
+ printf("Closing sequences: %v, %+v\n", err, *br)
+ }
+ return err
+}
+
+// --------------------------------------------------------------------------------
+
+type executeAsmContext struct {
+ seqs []seqVals
+ seqIndex int
+ out []byte
+ history []byte
+ literals []byte
+ outPosition int
+ litPosition int
+ windowSize int
+}
+
+// sequenceDecs_executeSimple_amd64 implements the main loop of sequenceDecs.executeSimple in x86 asm.
+//
+// Returns false if a match offset is too big.
+//
+// Please refer to seqdec_generic.go for the reference implementation.
+//
+//go:noescape
+func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool
+
+// Same as above, but with safe memcopies
+//
+//go:noescape
+func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool
+
+// executeSimple handles cases when dictionary is not used.
+func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error {
+ // Ensure we have enough output size...
+ if len(s.out)+s.seqSize+compressedBlockOverAlloc > cap(s.out) {
+ addBytes := s.seqSize + len(s.out) + compressedBlockOverAlloc
+ s.out = append(s.out, make([]byte, addBytes)...)
+ s.out = s.out[:len(s.out)-addBytes]
+ }
+
+ if debugDecoder {
+ printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize)
+ }
+
+ var t = len(s.out)
+ out := s.out[:t+s.seqSize]
+
+ ctx := executeAsmContext{
+ seqs: seqs,
+ seqIndex: 0,
+ out: out,
+ history: hist,
+ outPosition: t,
+ litPosition: 0,
+ literals: s.literals,
+ windowSize: s.windowSize,
+ }
+ var ok bool
+ if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc {
+ ok = sequenceDecs_executeSimple_safe_amd64(&ctx)
+ } else {
+ ok = sequenceDecs_executeSimple_amd64(&ctx)
+ }
+ if !ok {
+ return fmt.Errorf("match offset (%d) bigger than current history (%d)",
+ seqs[ctx.seqIndex].mo, ctx.outPosition+len(hist))
+ }
+ s.literals = s.literals[ctx.litPosition:]
+ t = ctx.outPosition
+
+ // Add final literals
+ copy(out[t:], s.literals)
+ if debugDecoder {
+ t += len(s.literals)
+ if t != len(out) {
+ panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize))
+ }
+ }
+ s.out = out
+
+ return nil
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
new file mode 100644
index 0000000000000..52e5703c26c46
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
@@ -0,0 +1,4099 @@
+// Code generated by command: go run gen.go -out ../seqdec_amd64.s -pkg=zstd. DO NOT EDIT.
+
+//go:build !appengine && !noasm && gc && !noasm
+
+// func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+// Requires: CMOV
+TEXT ·sequenceDecs_decode_amd64(SB), $8-32
+ MOVQ br+8(FP), AX
+ MOVQ 32(AX), DX
+ MOVBQZX 40(AX), BX
+ MOVQ 24(AX), SI
+ MOVQ (AX), AX
+ ADDQ SI, AX
+ MOVQ AX, (SP)
+ MOVQ ctx+16(FP), AX
+ MOVQ 72(AX), DI
+ MOVQ 80(AX), R8
+ MOVQ 88(AX), R9
+ MOVQ 104(AX), R10
+ MOVQ s+0(FP), AX
+ MOVQ 144(AX), R11
+ MOVQ 152(AX), R12
+ MOVQ 160(AX), R13
+
+sequenceDecs_decode_amd64_main_loop:
+ MOVQ (SP), R14
+
+ // Fill bitreader to have enough for the offset and match length.
+ CMPQ SI, $0x08
+ JL sequenceDecs_decode_amd64_fill_byte_by_byte
+ MOVQ BX, AX
+ SHRQ $0x03, AX
+ SUBQ AX, R14
+ MOVQ (R14), DX
+ SUBQ AX, SI
+ ANDQ $0x07, BX
+ JMP sequenceDecs_decode_amd64_fill_end
+
+sequenceDecs_decode_amd64_fill_byte_by_byte:
+ CMPQ SI, $0x00
+ JLE sequenceDecs_decode_amd64_fill_end
+ CMPQ BX, $0x07
+ JLE sequenceDecs_decode_amd64_fill_end
+ SHLQ $0x08, DX
+ SUBQ $0x01, R14
+ SUBQ $0x01, SI
+ SUBQ $0x08, BX
+ MOVBQZX (R14), AX
+ ORQ AX, DX
+ JMP sequenceDecs_decode_amd64_fill_byte_by_byte
+
+sequenceDecs_decode_amd64_fill_end:
+ // Update offset
+ MOVQ R9, AX
+ MOVQ BX, CX
+ MOVQ DX, R15
+ SHLQ CL, R15
+ MOVB AH, CL
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ JZ sequenceDecs_decode_amd64_of_update_zero
+ ADDQ CX, BX
+ CMPQ BX, $0x40
+ JA sequenceDecs_decode_amd64_of_update_zero
+ CMPQ CX, $0x40
+ JAE sequenceDecs_decode_amd64_of_update_zero
+ NEGQ CX
+ SHRQ CL, R15
+ ADDQ R15, AX
+
+sequenceDecs_decode_amd64_of_update_zero:
+ MOVQ AX, 16(R10)
+
+ // Update match length
+ MOVQ R8, AX
+ MOVQ BX, CX
+ MOVQ DX, R15
+ SHLQ CL, R15
+ MOVB AH, CL
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ JZ sequenceDecs_decode_amd64_ml_update_zero
+ ADDQ CX, BX
+ CMPQ BX, $0x40
+ JA sequenceDecs_decode_amd64_ml_update_zero
+ CMPQ CX, $0x40
+ JAE sequenceDecs_decode_amd64_ml_update_zero
+ NEGQ CX
+ SHRQ CL, R15
+ ADDQ R15, AX
+
+sequenceDecs_decode_amd64_ml_update_zero:
+ MOVQ AX, 8(R10)
+
+ // Fill bitreader to have enough for the remaining
+ CMPQ SI, $0x08
+ JL sequenceDecs_decode_amd64_fill_2_byte_by_byte
+ MOVQ BX, AX
+ SHRQ $0x03, AX
+ SUBQ AX, R14
+ MOVQ (R14), DX
+ SUBQ AX, SI
+ ANDQ $0x07, BX
+ JMP sequenceDecs_decode_amd64_fill_2_end
+
+sequenceDecs_decode_amd64_fill_2_byte_by_byte:
+ CMPQ SI, $0x00
+ JLE sequenceDecs_decode_amd64_fill_2_end
+ CMPQ BX, $0x07
+ JLE sequenceDecs_decode_amd64_fill_2_end
+ SHLQ $0x08, DX
+ SUBQ $0x01, R14
+ SUBQ $0x01, SI
+ SUBQ $0x08, BX
+ MOVBQZX (R14), AX
+ ORQ AX, DX
+ JMP sequenceDecs_decode_amd64_fill_2_byte_by_byte
+
+sequenceDecs_decode_amd64_fill_2_end:
+ // Update literal length
+ MOVQ DI, AX
+ MOVQ BX, CX
+ MOVQ DX, R15
+ SHLQ CL, R15
+ MOVB AH, CL
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ JZ sequenceDecs_decode_amd64_ll_update_zero
+ ADDQ CX, BX
+ CMPQ BX, $0x40
+ JA sequenceDecs_decode_amd64_ll_update_zero
+ CMPQ CX, $0x40
+ JAE sequenceDecs_decode_amd64_ll_update_zero
+ NEGQ CX
+ SHRQ CL, R15
+ ADDQ R15, AX
+
+sequenceDecs_decode_amd64_ll_update_zero:
+ MOVQ AX, (R10)
+
+ // Fill bitreader for state updates
+ MOVQ R14, (SP)
+ MOVQ R9, AX
+ SHRQ $0x08, AX
+ MOVBQZX AL, AX
+ MOVQ ctx+16(FP), CX
+ CMPQ 96(CX), $0x00
+ JZ sequenceDecs_decode_amd64_skip_update
+
+ // Update Literal Length State
+ MOVBQZX DI, R14
+ SHRQ $0x10, DI
+ MOVWQZX DI, DI
+ LEAQ (BX)(R14*1), CX
+ MOVQ DX, R15
+ MOVQ CX, BX
+ ROLQ CL, R15
+ MOVL $0x00000001, BP
+ MOVB R14, CL
+ SHLL CL, BP
+ DECL BP
+ ANDQ BP, R15
+ ADDQ R15, DI
+
+ // Load ctx.llTable
+ MOVQ ctx+16(FP), CX
+ MOVQ (CX), CX
+ MOVQ (CX)(DI*8), DI
+
+ // Update Match Length State
+ MOVBQZX R8, R14
+ SHRQ $0x10, R8
+ MOVWQZX R8, R8
+ LEAQ (BX)(R14*1), CX
+ MOVQ DX, R15
+ MOVQ CX, BX
+ ROLQ CL, R15
+ MOVL $0x00000001, BP
+ MOVB R14, CL
+ SHLL CL, BP
+ DECL BP
+ ANDQ BP, R15
+ ADDQ R15, R8
+
+ // Load ctx.mlTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 24(CX), CX
+ MOVQ (CX)(R8*8), R8
+
+ // Update Offset State
+ MOVBQZX R9, R14
+ SHRQ $0x10, R9
+ MOVWQZX R9, R9
+ LEAQ (BX)(R14*1), CX
+ MOVQ DX, R15
+ MOVQ CX, BX
+ ROLQ CL, R15
+ MOVL $0x00000001, BP
+ MOVB R14, CL
+ SHLL CL, BP
+ DECL BP
+ ANDQ BP, R15
+ ADDQ R15, R9
+
+ // Load ctx.ofTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 48(CX), CX
+ MOVQ (CX)(R9*8), R9
+
+sequenceDecs_decode_amd64_skip_update:
+ // Adjust offset
+ MOVQ 16(R10), CX
+ CMPQ AX, $0x01
+ JBE sequenceDecs_decode_amd64_adjust_offsetB_1_or_0
+ MOVQ R12, R13
+ MOVQ R11, R12
+ MOVQ CX, R11
+ JMP sequenceDecs_decode_amd64_after_adjust
+
+sequenceDecs_decode_amd64_adjust_offsetB_1_or_0:
+ CMPQ (R10), $0x00000000
+ JNE sequenceDecs_decode_amd64_adjust_offset_maybezero
+ INCQ CX
+ JMP sequenceDecs_decode_amd64_adjust_offset_nonzero
+
+sequenceDecs_decode_amd64_adjust_offset_maybezero:
+ TESTQ CX, CX
+ JNZ sequenceDecs_decode_amd64_adjust_offset_nonzero
+ MOVQ R11, CX
+ JMP sequenceDecs_decode_amd64_after_adjust
+
+sequenceDecs_decode_amd64_adjust_offset_nonzero:
+ CMPQ CX, $0x01
+ JB sequenceDecs_decode_amd64_adjust_zero
+ JEQ sequenceDecs_decode_amd64_adjust_one
+ CMPQ CX, $0x02
+ JA sequenceDecs_decode_amd64_adjust_three
+ JMP sequenceDecs_decode_amd64_adjust_two
+
+sequenceDecs_decode_amd64_adjust_zero:
+ MOVQ R11, AX
+ JMP sequenceDecs_decode_amd64_adjust_test_temp_valid
+
+sequenceDecs_decode_amd64_adjust_one:
+ MOVQ R12, AX
+ JMP sequenceDecs_decode_amd64_adjust_test_temp_valid
+
+sequenceDecs_decode_amd64_adjust_two:
+ MOVQ R13, AX
+ JMP sequenceDecs_decode_amd64_adjust_test_temp_valid
+
+sequenceDecs_decode_amd64_adjust_three:
+ LEAQ -1(R11), AX
+
+sequenceDecs_decode_amd64_adjust_test_temp_valid:
+ TESTQ AX, AX
+ JNZ sequenceDecs_decode_amd64_adjust_temp_valid
+ MOVQ $0x00000001, AX
+
+sequenceDecs_decode_amd64_adjust_temp_valid:
+ CMPQ CX, $0x01
+ CMOVQNE R12, R13
+ MOVQ R11, R12
+ MOVQ AX, R11
+ MOVQ AX, CX
+
+sequenceDecs_decode_amd64_after_adjust:
+ MOVQ CX, 16(R10)
+
+ // Check values
+ MOVQ 8(R10), AX
+ MOVQ (R10), R14
+ LEAQ (AX)(R14*1), R15
+ MOVQ s+0(FP), BP
+ ADDQ R15, 256(BP)
+ MOVQ ctx+16(FP), R15
+ SUBQ R14, 128(R15)
+ JS error_not_enough_literals
+ CMPQ AX, $0x00020002
+ JA sequenceDecs_decode_amd64_error_match_len_too_big
+ TESTQ CX, CX
+ JNZ sequenceDecs_decode_amd64_match_len_ofs_ok
+ TESTQ AX, AX
+ JNZ sequenceDecs_decode_amd64_error_match_len_ofs_mismatch
+
+sequenceDecs_decode_amd64_match_len_ofs_ok:
+ ADDQ $0x18, R10
+ MOVQ ctx+16(FP), AX
+ DECQ 96(AX)
+ JNS sequenceDecs_decode_amd64_main_loop
+ MOVQ s+0(FP), AX
+ MOVQ R11, 144(AX)
+ MOVQ R12, 152(AX)
+ MOVQ R13, 160(AX)
+ MOVQ br+8(FP), AX
+ MOVQ DX, 32(AX)
+ MOVB BL, 40(AX)
+ MOVQ SI, 24(AX)
+
+ // Return success
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+ // Return with match length error
+sequenceDecs_decode_amd64_error_match_len_ofs_mismatch:
+ MOVQ $0x00000001, ret+24(FP)
+ RET
+
+ // Return with match too long error
+sequenceDecs_decode_amd64_error_match_len_too_big:
+ MOVQ $0x00000002, ret+24(FP)
+ RET
+
+ // Return with match offset too long error
+ MOVQ $0x00000003, ret+24(FP)
+ RET
+
+ // Return with not enough literals error
+error_not_enough_literals:
+ MOVQ $0x00000004, ret+24(FP)
+ RET
+
+ // Return with not enough output space error
+ MOVQ $0x00000005, ret+24(FP)
+ RET
+
+// func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+// Requires: CMOV
+TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32
+ MOVQ br+8(FP), AX
+ MOVQ 32(AX), DX
+ MOVBQZX 40(AX), BX
+ MOVQ 24(AX), SI
+ MOVQ (AX), AX
+ ADDQ SI, AX
+ MOVQ AX, (SP)
+ MOVQ ctx+16(FP), AX
+ MOVQ 72(AX), DI
+ MOVQ 80(AX), R8
+ MOVQ 88(AX), R9
+ MOVQ 104(AX), R10
+ MOVQ s+0(FP), AX
+ MOVQ 144(AX), R11
+ MOVQ 152(AX), R12
+ MOVQ 160(AX), R13
+
+sequenceDecs_decode_56_amd64_main_loop:
+ MOVQ (SP), R14
+
+ // Fill bitreader to have enough for the offset and match length.
+ CMPQ SI, $0x08
+ JL sequenceDecs_decode_56_amd64_fill_byte_by_byte
+ MOVQ BX, AX
+ SHRQ $0x03, AX
+ SUBQ AX, R14
+ MOVQ (R14), DX
+ SUBQ AX, SI
+ ANDQ $0x07, BX
+ JMP sequenceDecs_decode_56_amd64_fill_end
+
+sequenceDecs_decode_56_amd64_fill_byte_by_byte:
+ CMPQ SI, $0x00
+ JLE sequenceDecs_decode_56_amd64_fill_end
+ CMPQ BX, $0x07
+ JLE sequenceDecs_decode_56_amd64_fill_end
+ SHLQ $0x08, DX
+ SUBQ $0x01, R14
+ SUBQ $0x01, SI
+ SUBQ $0x08, BX
+ MOVBQZX (R14), AX
+ ORQ AX, DX
+ JMP sequenceDecs_decode_56_amd64_fill_byte_by_byte
+
+sequenceDecs_decode_56_amd64_fill_end:
+ // Update offset
+ MOVQ R9, AX
+ MOVQ BX, CX
+ MOVQ DX, R15
+ SHLQ CL, R15
+ MOVB AH, CL
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ JZ sequenceDecs_decode_56_amd64_of_update_zero
+ ADDQ CX, BX
+ CMPQ BX, $0x40
+ JA sequenceDecs_decode_56_amd64_of_update_zero
+ CMPQ CX, $0x40
+ JAE sequenceDecs_decode_56_amd64_of_update_zero
+ NEGQ CX
+ SHRQ CL, R15
+ ADDQ R15, AX
+
+sequenceDecs_decode_56_amd64_of_update_zero:
+ MOVQ AX, 16(R10)
+
+ // Update match length
+ MOVQ R8, AX
+ MOVQ BX, CX
+ MOVQ DX, R15
+ SHLQ CL, R15
+ MOVB AH, CL
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ JZ sequenceDecs_decode_56_amd64_ml_update_zero
+ ADDQ CX, BX
+ CMPQ BX, $0x40
+ JA sequenceDecs_decode_56_amd64_ml_update_zero
+ CMPQ CX, $0x40
+ JAE sequenceDecs_decode_56_amd64_ml_update_zero
+ NEGQ CX
+ SHRQ CL, R15
+ ADDQ R15, AX
+
+sequenceDecs_decode_56_amd64_ml_update_zero:
+ MOVQ AX, 8(R10)
+
+ // Update literal length
+ MOVQ DI, AX
+ MOVQ BX, CX
+ MOVQ DX, R15
+ SHLQ CL, R15
+ MOVB AH, CL
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ JZ sequenceDecs_decode_56_amd64_ll_update_zero
+ ADDQ CX, BX
+ CMPQ BX, $0x40
+ JA sequenceDecs_decode_56_amd64_ll_update_zero
+ CMPQ CX, $0x40
+ JAE sequenceDecs_decode_56_amd64_ll_update_zero
+ NEGQ CX
+ SHRQ CL, R15
+ ADDQ R15, AX
+
+sequenceDecs_decode_56_amd64_ll_update_zero:
+ MOVQ AX, (R10)
+
+ // Fill bitreader for state updates
+ MOVQ R14, (SP)
+ MOVQ R9, AX
+ SHRQ $0x08, AX
+ MOVBQZX AL, AX
+ MOVQ ctx+16(FP), CX
+ CMPQ 96(CX), $0x00
+ JZ sequenceDecs_decode_56_amd64_skip_update
+
+ // Update Literal Length State
+ MOVBQZX DI, R14
+ SHRQ $0x10, DI
+ MOVWQZX DI, DI
+ LEAQ (BX)(R14*1), CX
+ MOVQ DX, R15
+ MOVQ CX, BX
+ ROLQ CL, R15
+ MOVL $0x00000001, BP
+ MOVB R14, CL
+ SHLL CL, BP
+ DECL BP
+ ANDQ BP, R15
+ ADDQ R15, DI
+
+ // Load ctx.llTable
+ MOVQ ctx+16(FP), CX
+ MOVQ (CX), CX
+ MOVQ (CX)(DI*8), DI
+
+ // Update Match Length State
+ MOVBQZX R8, R14
+ SHRQ $0x10, R8
+ MOVWQZX R8, R8
+ LEAQ (BX)(R14*1), CX
+ MOVQ DX, R15
+ MOVQ CX, BX
+ ROLQ CL, R15
+ MOVL $0x00000001, BP
+ MOVB R14, CL
+ SHLL CL, BP
+ DECL BP
+ ANDQ BP, R15
+ ADDQ R15, R8
+
+ // Load ctx.mlTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 24(CX), CX
+ MOVQ (CX)(R8*8), R8
+
+ // Update Offset State
+ MOVBQZX R9, R14
+ SHRQ $0x10, R9
+ MOVWQZX R9, R9
+ LEAQ (BX)(R14*1), CX
+ MOVQ DX, R15
+ MOVQ CX, BX
+ ROLQ CL, R15
+ MOVL $0x00000001, BP
+ MOVB R14, CL
+ SHLL CL, BP
+ DECL BP
+ ANDQ BP, R15
+ ADDQ R15, R9
+
+ // Load ctx.ofTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 48(CX), CX
+ MOVQ (CX)(R9*8), R9
+
+sequenceDecs_decode_56_amd64_skip_update:
+ // Adjust offset
+ MOVQ 16(R10), CX
+ CMPQ AX, $0x01
+ JBE sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0
+ MOVQ R12, R13
+ MOVQ R11, R12
+ MOVQ CX, R11
+ JMP sequenceDecs_decode_56_amd64_after_adjust
+
+sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0:
+ CMPQ (R10), $0x00000000
+ JNE sequenceDecs_decode_56_amd64_adjust_offset_maybezero
+ INCQ CX
+ JMP sequenceDecs_decode_56_amd64_adjust_offset_nonzero
+
+sequenceDecs_decode_56_amd64_adjust_offset_maybezero:
+ TESTQ CX, CX
+ JNZ sequenceDecs_decode_56_amd64_adjust_offset_nonzero
+ MOVQ R11, CX
+ JMP sequenceDecs_decode_56_amd64_after_adjust
+
+sequenceDecs_decode_56_amd64_adjust_offset_nonzero:
+ CMPQ CX, $0x01
+ JB sequenceDecs_decode_56_amd64_adjust_zero
+ JEQ sequenceDecs_decode_56_amd64_adjust_one
+ CMPQ CX, $0x02
+ JA sequenceDecs_decode_56_amd64_adjust_three
+ JMP sequenceDecs_decode_56_amd64_adjust_two
+
+sequenceDecs_decode_56_amd64_adjust_zero:
+ MOVQ R11, AX
+ JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid
+
+sequenceDecs_decode_56_amd64_adjust_one:
+ MOVQ R12, AX
+ JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid
+
+sequenceDecs_decode_56_amd64_adjust_two:
+ MOVQ R13, AX
+ JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid
+
+sequenceDecs_decode_56_amd64_adjust_three:
+ LEAQ -1(R11), AX
+
+sequenceDecs_decode_56_amd64_adjust_test_temp_valid:
+ TESTQ AX, AX
+ JNZ sequenceDecs_decode_56_amd64_adjust_temp_valid
+ MOVQ $0x00000001, AX
+
+sequenceDecs_decode_56_amd64_adjust_temp_valid:
+ CMPQ CX, $0x01
+ CMOVQNE R12, R13
+ MOVQ R11, R12
+ MOVQ AX, R11
+ MOVQ AX, CX
+
+sequenceDecs_decode_56_amd64_after_adjust:
+ MOVQ CX, 16(R10)
+
+ // Check values
+ MOVQ 8(R10), AX
+ MOVQ (R10), R14
+ LEAQ (AX)(R14*1), R15
+ MOVQ s+0(FP), BP
+ ADDQ R15, 256(BP)
+ MOVQ ctx+16(FP), R15
+ SUBQ R14, 128(R15)
+ JS error_not_enough_literals
+ CMPQ AX, $0x00020002
+ JA sequenceDecs_decode_56_amd64_error_match_len_too_big
+ TESTQ CX, CX
+ JNZ sequenceDecs_decode_56_amd64_match_len_ofs_ok
+ TESTQ AX, AX
+ JNZ sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch
+
+sequenceDecs_decode_56_amd64_match_len_ofs_ok:
+ ADDQ $0x18, R10
+ MOVQ ctx+16(FP), AX
+ DECQ 96(AX)
+ JNS sequenceDecs_decode_56_amd64_main_loop
+ MOVQ s+0(FP), AX
+ MOVQ R11, 144(AX)
+ MOVQ R12, 152(AX)
+ MOVQ R13, 160(AX)
+ MOVQ br+8(FP), AX
+ MOVQ DX, 32(AX)
+ MOVB BL, 40(AX)
+ MOVQ SI, 24(AX)
+
+ // Return success
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+ // Return with match length error
+sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch:
+ MOVQ $0x00000001, ret+24(FP)
+ RET
+
+ // Return with match too long error
+sequenceDecs_decode_56_amd64_error_match_len_too_big:
+ MOVQ $0x00000002, ret+24(FP)
+ RET
+
+ // Return with match offset too long error
+ MOVQ $0x00000003, ret+24(FP)
+ RET
+
+ // Return with not enough literals error
+error_not_enough_literals:
+ MOVQ $0x00000004, ret+24(FP)
+ RET
+
+ // Return with not enough output space error
+ MOVQ $0x00000005, ret+24(FP)
+ RET
+
+// func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+// Requires: BMI, BMI2, CMOV
+TEXT ·sequenceDecs_decode_bmi2(SB), $8-32
+ MOVQ br+8(FP), CX
+ MOVQ 32(CX), AX
+ MOVBQZX 40(CX), DX
+ MOVQ 24(CX), BX
+ MOVQ (CX), CX
+ ADDQ BX, CX
+ MOVQ CX, (SP)
+ MOVQ ctx+16(FP), CX
+ MOVQ 72(CX), SI
+ MOVQ 80(CX), DI
+ MOVQ 88(CX), R8
+ MOVQ 104(CX), R9
+ MOVQ s+0(FP), CX
+ MOVQ 144(CX), R10
+ MOVQ 152(CX), R11
+ MOVQ 160(CX), R12
+
+sequenceDecs_decode_bmi2_main_loop:
+ MOVQ (SP), R13
+
+ // Fill bitreader to have enough for the offset and match length.
+ CMPQ BX, $0x08
+ JL sequenceDecs_decode_bmi2_fill_byte_by_byte
+ MOVQ DX, CX
+ SHRQ $0x03, CX
+ SUBQ CX, R13
+ MOVQ (R13), AX
+ SUBQ CX, BX
+ ANDQ $0x07, DX
+ JMP sequenceDecs_decode_bmi2_fill_end
+
+sequenceDecs_decode_bmi2_fill_byte_by_byte:
+ CMPQ BX, $0x00
+ JLE sequenceDecs_decode_bmi2_fill_end
+ CMPQ DX, $0x07
+ JLE sequenceDecs_decode_bmi2_fill_end
+ SHLQ $0x08, AX
+ SUBQ $0x01, R13
+ SUBQ $0x01, BX
+ SUBQ $0x08, DX
+ MOVBQZX (R13), CX
+ ORQ CX, AX
+ JMP sequenceDecs_decode_bmi2_fill_byte_by_byte
+
+sequenceDecs_decode_bmi2_fill_end:
+ // Update offset
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, R8, R14
+ MOVQ AX, R15
+ LEAQ (DX)(R14*1), CX
+ ROLQ CL, R15
+ BZHIQ R14, R15, R15
+ MOVQ CX, DX
+ MOVQ R8, CX
+ SHRQ $0x20, CX
+ ADDQ R15, CX
+ MOVQ CX, 16(R9)
+
+ // Update match length
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, DI, R14
+ MOVQ AX, R15
+ LEAQ (DX)(R14*1), CX
+ ROLQ CL, R15
+ BZHIQ R14, R15, R15
+ MOVQ CX, DX
+ MOVQ DI, CX
+ SHRQ $0x20, CX
+ ADDQ R15, CX
+ MOVQ CX, 8(R9)
+
+ // Fill bitreader to have enough for the remaining
+ CMPQ BX, $0x08
+ JL sequenceDecs_decode_bmi2_fill_2_byte_by_byte
+ MOVQ DX, CX
+ SHRQ $0x03, CX
+ SUBQ CX, R13
+ MOVQ (R13), AX
+ SUBQ CX, BX
+ ANDQ $0x07, DX
+ JMP sequenceDecs_decode_bmi2_fill_2_end
+
+sequenceDecs_decode_bmi2_fill_2_byte_by_byte:
+ CMPQ BX, $0x00
+ JLE sequenceDecs_decode_bmi2_fill_2_end
+ CMPQ DX, $0x07
+ JLE sequenceDecs_decode_bmi2_fill_2_end
+ SHLQ $0x08, AX
+ SUBQ $0x01, R13
+ SUBQ $0x01, BX
+ SUBQ $0x08, DX
+ MOVBQZX (R13), CX
+ ORQ CX, AX
+ JMP sequenceDecs_decode_bmi2_fill_2_byte_by_byte
+
+sequenceDecs_decode_bmi2_fill_2_end:
+ // Update literal length
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, SI, R14
+ MOVQ AX, R15
+ LEAQ (DX)(R14*1), CX
+ ROLQ CL, R15
+ BZHIQ R14, R15, R15
+ MOVQ CX, DX
+ MOVQ SI, CX
+ SHRQ $0x20, CX
+ ADDQ R15, CX
+ MOVQ CX, (R9)
+
+ // Fill bitreader for state updates
+ MOVQ R13, (SP)
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, R8, R13
+ MOVQ ctx+16(FP), CX
+ CMPQ 96(CX), $0x00
+ JZ sequenceDecs_decode_bmi2_skip_update
+ LEAQ (SI)(DI*1), R14
+ ADDQ R8, R14
+ MOVBQZX R14, R14
+ LEAQ (DX)(R14*1), CX
+ MOVQ AX, R15
+ MOVQ CX, DX
+ ROLQ CL, R15
+ BZHIQ R14, R15, R15
+
+ // Update Offset State
+ BZHIQ R8, R15, CX
+ SHRXQ R8, R15, R15
+ MOVQ $0x00001010, R14
+ BEXTRQ R14, R8, R8
+ ADDQ CX, R8
+
+ // Load ctx.ofTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 48(CX), CX
+ MOVQ (CX)(R8*8), R8
+
+ // Update Match Length State
+ BZHIQ DI, R15, CX
+ SHRXQ DI, R15, R15
+ MOVQ $0x00001010, R14
+ BEXTRQ R14, DI, DI
+ ADDQ CX, DI
+
+ // Load ctx.mlTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 24(CX), CX
+ MOVQ (CX)(DI*8), DI
+
+ // Update Literal Length State
+ BZHIQ SI, R15, CX
+ MOVQ $0x00001010, R14
+ BEXTRQ R14, SI, SI
+ ADDQ CX, SI
+
+ // Load ctx.llTable
+ MOVQ ctx+16(FP), CX
+ MOVQ (CX), CX
+ MOVQ (CX)(SI*8), SI
+
+sequenceDecs_decode_bmi2_skip_update:
+ // Adjust offset
+ MOVQ 16(R9), CX
+ CMPQ R13, $0x01
+ JBE sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0
+ MOVQ R11, R12
+ MOVQ R10, R11
+ MOVQ CX, R10
+ JMP sequenceDecs_decode_bmi2_after_adjust
+
+sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0:
+ CMPQ (R9), $0x00000000
+ JNE sequenceDecs_decode_bmi2_adjust_offset_maybezero
+ INCQ CX
+ JMP sequenceDecs_decode_bmi2_adjust_offset_nonzero
+
+sequenceDecs_decode_bmi2_adjust_offset_maybezero:
+ TESTQ CX, CX
+ JNZ sequenceDecs_decode_bmi2_adjust_offset_nonzero
+ MOVQ R10, CX
+ JMP sequenceDecs_decode_bmi2_after_adjust
+
+sequenceDecs_decode_bmi2_adjust_offset_nonzero:
+ CMPQ CX, $0x01
+ JB sequenceDecs_decode_bmi2_adjust_zero
+ JEQ sequenceDecs_decode_bmi2_adjust_one
+ CMPQ CX, $0x02
+ JA sequenceDecs_decode_bmi2_adjust_three
+ JMP sequenceDecs_decode_bmi2_adjust_two
+
+sequenceDecs_decode_bmi2_adjust_zero:
+ MOVQ R10, R13
+ JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid
+
+sequenceDecs_decode_bmi2_adjust_one:
+ MOVQ R11, R13
+ JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid
+
+sequenceDecs_decode_bmi2_adjust_two:
+ MOVQ R12, R13
+ JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid
+
+sequenceDecs_decode_bmi2_adjust_three:
+ LEAQ -1(R10), R13
+
+sequenceDecs_decode_bmi2_adjust_test_temp_valid:
+ TESTQ R13, R13
+ JNZ sequenceDecs_decode_bmi2_adjust_temp_valid
+ MOVQ $0x00000001, R13
+
+sequenceDecs_decode_bmi2_adjust_temp_valid:
+ CMPQ CX, $0x01
+ CMOVQNE R11, R12
+ MOVQ R10, R11
+ MOVQ R13, R10
+ MOVQ R13, CX
+
+sequenceDecs_decode_bmi2_after_adjust:
+ MOVQ CX, 16(R9)
+
+ // Check values
+ MOVQ 8(R9), R13
+ MOVQ (R9), R14
+ LEAQ (R13)(R14*1), R15
+ MOVQ s+0(FP), BP
+ ADDQ R15, 256(BP)
+ MOVQ ctx+16(FP), R15
+ SUBQ R14, 128(R15)
+ JS error_not_enough_literals
+ CMPQ R13, $0x00020002
+ JA sequenceDecs_decode_bmi2_error_match_len_too_big
+ TESTQ CX, CX
+ JNZ sequenceDecs_decode_bmi2_match_len_ofs_ok
+ TESTQ R13, R13
+ JNZ sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch
+
+sequenceDecs_decode_bmi2_match_len_ofs_ok:
+ ADDQ $0x18, R9
+ MOVQ ctx+16(FP), CX
+ DECQ 96(CX)
+ JNS sequenceDecs_decode_bmi2_main_loop
+ MOVQ s+0(FP), CX
+ MOVQ R10, 144(CX)
+ MOVQ R11, 152(CX)
+ MOVQ R12, 160(CX)
+ MOVQ br+8(FP), CX
+ MOVQ AX, 32(CX)
+ MOVB DL, 40(CX)
+ MOVQ BX, 24(CX)
+
+ // Return success
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+ // Return with match length error
+sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch:
+ MOVQ $0x00000001, ret+24(FP)
+ RET
+
+ // Return with match too long error
+sequenceDecs_decode_bmi2_error_match_len_too_big:
+ MOVQ $0x00000002, ret+24(FP)
+ RET
+
+ // Return with match offset too long error
+ MOVQ $0x00000003, ret+24(FP)
+ RET
+
+ // Return with not enough literals error
+error_not_enough_literals:
+ MOVQ $0x00000004, ret+24(FP)
+ RET
+
+ // Return with not enough output space error
+ MOVQ $0x00000005, ret+24(FP)
+ RET
+
+// func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+// Requires: BMI, BMI2, CMOV
+TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32
+ MOVQ br+8(FP), CX
+ MOVQ 32(CX), AX
+ MOVBQZX 40(CX), DX
+ MOVQ 24(CX), BX
+ MOVQ (CX), CX
+ ADDQ BX, CX
+ MOVQ CX, (SP)
+ MOVQ ctx+16(FP), CX
+ MOVQ 72(CX), SI
+ MOVQ 80(CX), DI
+ MOVQ 88(CX), R8
+ MOVQ 104(CX), R9
+ MOVQ s+0(FP), CX
+ MOVQ 144(CX), R10
+ MOVQ 152(CX), R11
+ MOVQ 160(CX), R12
+
+sequenceDecs_decode_56_bmi2_main_loop:
+ MOVQ (SP), R13
+
+ // Fill bitreader to have enough for the offset and match length.
+ CMPQ BX, $0x08
+ JL sequenceDecs_decode_56_bmi2_fill_byte_by_byte
+ MOVQ DX, CX
+ SHRQ $0x03, CX
+ SUBQ CX, R13
+ MOVQ (R13), AX
+ SUBQ CX, BX
+ ANDQ $0x07, DX
+ JMP sequenceDecs_decode_56_bmi2_fill_end
+
+sequenceDecs_decode_56_bmi2_fill_byte_by_byte:
+ CMPQ BX, $0x00
+ JLE sequenceDecs_decode_56_bmi2_fill_end
+ CMPQ DX, $0x07
+ JLE sequenceDecs_decode_56_bmi2_fill_end
+ SHLQ $0x08, AX
+ SUBQ $0x01, R13
+ SUBQ $0x01, BX
+ SUBQ $0x08, DX
+ MOVBQZX (R13), CX
+ ORQ CX, AX
+ JMP sequenceDecs_decode_56_bmi2_fill_byte_by_byte
+
+sequenceDecs_decode_56_bmi2_fill_end:
+ // Update offset
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, R8, R14
+ MOVQ AX, R15
+ LEAQ (DX)(R14*1), CX
+ ROLQ CL, R15
+ BZHIQ R14, R15, R15
+ MOVQ CX, DX
+ MOVQ R8, CX
+ SHRQ $0x20, CX
+ ADDQ R15, CX
+ MOVQ CX, 16(R9)
+
+ // Update match length
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, DI, R14
+ MOVQ AX, R15
+ LEAQ (DX)(R14*1), CX
+ ROLQ CL, R15
+ BZHIQ R14, R15, R15
+ MOVQ CX, DX
+ MOVQ DI, CX
+ SHRQ $0x20, CX
+ ADDQ R15, CX
+ MOVQ CX, 8(R9)
+
+ // Update literal length
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, SI, R14
+ MOVQ AX, R15
+ LEAQ (DX)(R14*1), CX
+ ROLQ CL, R15
+ BZHIQ R14, R15, R15
+ MOVQ CX, DX
+ MOVQ SI, CX
+ SHRQ $0x20, CX
+ ADDQ R15, CX
+ MOVQ CX, (R9)
+
+ // Fill bitreader for state updates
+ MOVQ R13, (SP)
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, R8, R13
+ MOVQ ctx+16(FP), CX
+ CMPQ 96(CX), $0x00
+ JZ sequenceDecs_decode_56_bmi2_skip_update
+ LEAQ (SI)(DI*1), R14
+ ADDQ R8, R14
+ MOVBQZX R14, R14
+ LEAQ (DX)(R14*1), CX
+ MOVQ AX, R15
+ MOVQ CX, DX
+ ROLQ CL, R15
+ BZHIQ R14, R15, R15
+
+ // Update Offset State
+ BZHIQ R8, R15, CX
+ SHRXQ R8, R15, R15
+ MOVQ $0x00001010, R14
+ BEXTRQ R14, R8, R8
+ ADDQ CX, R8
+
+ // Load ctx.ofTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 48(CX), CX
+ MOVQ (CX)(R8*8), R8
+
+ // Update Match Length State
+ BZHIQ DI, R15, CX
+ SHRXQ DI, R15, R15
+ MOVQ $0x00001010, R14
+ BEXTRQ R14, DI, DI
+ ADDQ CX, DI
+
+ // Load ctx.mlTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 24(CX), CX
+ MOVQ (CX)(DI*8), DI
+
+ // Update Literal Length State
+ BZHIQ SI, R15, CX
+ MOVQ $0x00001010, R14
+ BEXTRQ R14, SI, SI
+ ADDQ CX, SI
+
+ // Load ctx.llTable
+ MOVQ ctx+16(FP), CX
+ MOVQ (CX), CX
+ MOVQ (CX)(SI*8), SI
+
+sequenceDecs_decode_56_bmi2_skip_update:
+ // Adjust offset
+ MOVQ 16(R9), CX
+ CMPQ R13, $0x01
+ JBE sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0
+ MOVQ R11, R12
+ MOVQ R10, R11
+ MOVQ CX, R10
+ JMP sequenceDecs_decode_56_bmi2_after_adjust
+
+sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0:
+ CMPQ (R9), $0x00000000
+ JNE sequenceDecs_decode_56_bmi2_adjust_offset_maybezero
+ INCQ CX
+ JMP sequenceDecs_decode_56_bmi2_adjust_offset_nonzero
+
+sequenceDecs_decode_56_bmi2_adjust_offset_maybezero:
+ TESTQ CX, CX
+ JNZ sequenceDecs_decode_56_bmi2_adjust_offset_nonzero
+ MOVQ R10, CX
+ JMP sequenceDecs_decode_56_bmi2_after_adjust
+
+sequenceDecs_decode_56_bmi2_adjust_offset_nonzero:
+ CMPQ CX, $0x01
+ JB sequenceDecs_decode_56_bmi2_adjust_zero
+ JEQ sequenceDecs_decode_56_bmi2_adjust_one
+ CMPQ CX, $0x02
+ JA sequenceDecs_decode_56_bmi2_adjust_three
+ JMP sequenceDecs_decode_56_bmi2_adjust_two
+
+sequenceDecs_decode_56_bmi2_adjust_zero:
+ MOVQ R10, R13
+ JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid
+
+sequenceDecs_decode_56_bmi2_adjust_one:
+ MOVQ R11, R13
+ JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid
+
+sequenceDecs_decode_56_bmi2_adjust_two:
+ MOVQ R12, R13
+ JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid
+
+sequenceDecs_decode_56_bmi2_adjust_three:
+ LEAQ -1(R10), R13
+
+sequenceDecs_decode_56_bmi2_adjust_test_temp_valid:
+ TESTQ R13, R13
+ JNZ sequenceDecs_decode_56_bmi2_adjust_temp_valid
+ MOVQ $0x00000001, R13
+
+sequenceDecs_decode_56_bmi2_adjust_temp_valid:
+ CMPQ CX, $0x01
+ CMOVQNE R11, R12
+ MOVQ R10, R11
+ MOVQ R13, R10
+ MOVQ R13, CX
+
+sequenceDecs_decode_56_bmi2_after_adjust:
+ MOVQ CX, 16(R9)
+
+ // Check values
+ MOVQ 8(R9), R13
+ MOVQ (R9), R14
+ LEAQ (R13)(R14*1), R15
+ MOVQ s+0(FP), BP
+ ADDQ R15, 256(BP)
+ MOVQ ctx+16(FP), R15
+ SUBQ R14, 128(R15)
+ JS error_not_enough_literals
+ CMPQ R13, $0x00020002
+ JA sequenceDecs_decode_56_bmi2_error_match_len_too_big
+ TESTQ CX, CX
+ JNZ sequenceDecs_decode_56_bmi2_match_len_ofs_ok
+ TESTQ R13, R13
+ JNZ sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch
+
+sequenceDecs_decode_56_bmi2_match_len_ofs_ok:
+ ADDQ $0x18, R9
+ MOVQ ctx+16(FP), CX
+ DECQ 96(CX)
+ JNS sequenceDecs_decode_56_bmi2_main_loop
+ MOVQ s+0(FP), CX
+ MOVQ R10, 144(CX)
+ MOVQ R11, 152(CX)
+ MOVQ R12, 160(CX)
+ MOVQ br+8(FP), CX
+ MOVQ AX, 32(CX)
+ MOVB DL, 40(CX)
+ MOVQ BX, 24(CX)
+
+ // Return success
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+ // Return with match length error
+sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch:
+ MOVQ $0x00000001, ret+24(FP)
+ RET
+
+ // Return with match too long error
+sequenceDecs_decode_56_bmi2_error_match_len_too_big:
+ MOVQ $0x00000002, ret+24(FP)
+ RET
+
+ // Return with match offset too long error
+ MOVQ $0x00000003, ret+24(FP)
+ RET
+
+ // Return with not enough literals error
+error_not_enough_literals:
+ MOVQ $0x00000004, ret+24(FP)
+ RET
+
+ // Return with not enough output space error
+ MOVQ $0x00000005, ret+24(FP)
+ RET
+
+// func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool
+// Requires: SSE
+TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9
+ MOVQ ctx+0(FP), R10
+ MOVQ 8(R10), CX
+ TESTQ CX, CX
+ JZ empty_seqs
+ MOVQ (R10), AX
+ MOVQ 24(R10), DX
+ MOVQ 32(R10), BX
+ MOVQ 80(R10), SI
+ MOVQ 104(R10), DI
+ MOVQ 120(R10), R8
+ MOVQ 56(R10), R9
+ MOVQ 64(R10), R10
+ ADDQ R10, R9
+
+ // seqsBase += 24 * seqIndex
+ LEAQ (DX)(DX*2), R11
+ SHLQ $0x03, R11
+ ADDQ R11, AX
+
+ // outBase += outPosition
+ ADDQ DI, BX
+
+main_loop:
+ MOVQ (AX), R11
+ MOVQ 16(AX), R12
+ MOVQ 8(AX), R13
+
+ // Copy literals
+ TESTQ R11, R11
+ JZ check_offset
+ XORQ R14, R14
+
+copy_1:
+ MOVUPS (SI)(R14*1), X0
+ MOVUPS X0, (BX)(R14*1)
+ ADDQ $0x10, R14
+ CMPQ R14, R11
+ JB copy_1
+ ADDQ R11, SI
+ ADDQ R11, BX
+ ADDQ R11, DI
+
+ // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize)
+check_offset:
+ LEAQ (DI)(R10*1), R11
+ CMPQ R12, R11
+ JG error_match_off_too_big
+ CMPQ R12, R8
+ JG error_match_off_too_big
+
+ // Copy match from history
+ MOVQ R12, R11
+ SUBQ DI, R11
+ JLS copy_match
+ MOVQ R9, R14
+ SUBQ R11, R14
+ CMPQ R13, R11
+ JG copy_all_from_history
+ MOVQ R13, R11
+ SUBQ $0x10, R11
+ JB copy_4_small
+
+copy_4_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (BX)
+ ADDQ $0x10, R14
+ ADDQ $0x10, BX
+ SUBQ $0x10, R11
+ JAE copy_4_loop
+ LEAQ 16(R14)(R11*1), R14
+ LEAQ 16(BX)(R11*1), BX
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(BX)
+ JMP copy_4_end
+
+copy_4_small:
+ CMPQ R13, $0x03
+ JE copy_4_move_3
+ CMPQ R13, $0x08
+ JB copy_4_move_4through7
+ JMP copy_4_move_8through16
+
+copy_4_move_3:
+ MOVW (R14), R11
+ MOVB 2(R14), R12
+ MOVW R11, (BX)
+ MOVB R12, 2(BX)
+ ADDQ R13, R14
+ ADDQ R13, BX
+ JMP copy_4_end
+
+copy_4_move_4through7:
+ MOVL (R14), R11
+ MOVL -4(R14)(R13*1), R12
+ MOVL R11, (BX)
+ MOVL R12, -4(BX)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, BX
+ JMP copy_4_end
+
+copy_4_move_8through16:
+ MOVQ (R14), R11
+ MOVQ -8(R14)(R13*1), R12
+ MOVQ R11, (BX)
+ MOVQ R12, -8(BX)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, BX
+
+copy_4_end:
+ ADDQ R13, DI
+ ADDQ $0x18, AX
+ INCQ DX
+ CMPQ DX, CX
+ JB main_loop
+ JMP loop_finished
+
+copy_all_from_history:
+ MOVQ R11, R15
+ SUBQ $0x10, R15
+ JB copy_5_small
+
+copy_5_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (BX)
+ ADDQ $0x10, R14
+ ADDQ $0x10, BX
+ SUBQ $0x10, R15
+ JAE copy_5_loop
+ LEAQ 16(R14)(R15*1), R14
+ LEAQ 16(BX)(R15*1), BX
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(BX)
+ JMP copy_5_end
+
+copy_5_small:
+ CMPQ R11, $0x03
+ JE copy_5_move_3
+ JB copy_5_move_1or2
+ CMPQ R11, $0x08
+ JB copy_5_move_4through7
+ JMP copy_5_move_8through16
+
+copy_5_move_1or2:
+ MOVB (R14), R15
+ MOVB -1(R14)(R11*1), BP
+ MOVB R15, (BX)
+ MOVB BP, -1(BX)(R11*1)
+ ADDQ R11, R14
+ ADDQ R11, BX
+ JMP copy_5_end
+
+copy_5_move_3:
+ MOVW (R14), R15
+ MOVB 2(R14), BP
+ MOVW R15, (BX)
+ MOVB BP, 2(BX)
+ ADDQ R11, R14
+ ADDQ R11, BX
+ JMP copy_5_end
+
+copy_5_move_4through7:
+ MOVL (R14), R15
+ MOVL -4(R14)(R11*1), BP
+ MOVL R15, (BX)
+ MOVL BP, -4(BX)(R11*1)
+ ADDQ R11, R14
+ ADDQ R11, BX
+ JMP copy_5_end
+
+copy_5_move_8through16:
+ MOVQ (R14), R15
+ MOVQ -8(R14)(R11*1), BP
+ MOVQ R15, (BX)
+ MOVQ BP, -8(BX)(R11*1)
+ ADDQ R11, R14
+ ADDQ R11, BX
+
+copy_5_end:
+ ADDQ R11, DI
+ SUBQ R11, R13
+
+ // Copy match from the current buffer
+copy_match:
+ MOVQ BX, R11
+ SUBQ R12, R11
+
+ // ml <= mo
+ CMPQ R13, R12
+ JA copy_overlapping_match
+
+ // Copy non-overlapping match
+ ADDQ R13, DI
+ MOVQ BX, R12
+ ADDQ R13, BX
+
+copy_2:
+ MOVUPS (R11), X0
+ MOVUPS X0, (R12)
+ ADDQ $0x10, R11
+ ADDQ $0x10, R12
+ SUBQ $0x10, R13
+ JHI copy_2
+ JMP handle_loop
+
+ // Copy overlapping match
+copy_overlapping_match:
+ ADDQ R13, DI
+
+copy_slow_3:
+ MOVB (R11), R12
+ MOVB R12, (BX)
+ INCQ R11
+ INCQ BX
+ DECQ R13
+ JNZ copy_slow_3
+
+handle_loop:
+ ADDQ $0x18, AX
+ INCQ DX
+ CMPQ DX, CX
+ JB main_loop
+
+loop_finished:
+ // Return value
+ MOVB $0x01, ret+8(FP)
+
+ // Update the context
+ MOVQ ctx+0(FP), AX
+ MOVQ DX, 24(AX)
+ MOVQ DI, 104(AX)
+ MOVQ 80(AX), CX
+ SUBQ CX, SI
+ MOVQ SI, 112(AX)
+ RET
+
+error_match_off_too_big:
+ // Return value
+ MOVB $0x00, ret+8(FP)
+
+ // Update the context
+ MOVQ ctx+0(FP), AX
+ MOVQ DX, 24(AX)
+ MOVQ DI, 104(AX)
+ MOVQ 80(AX), CX
+ SUBQ CX, SI
+ MOVQ SI, 112(AX)
+ RET
+
+empty_seqs:
+ // Return value
+ MOVB $0x01, ret+8(FP)
+ RET
+
+// func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool
+// Requires: SSE
+TEXT ·sequenceDecs_executeSimple_safe_amd64(SB), $8-9
+ MOVQ ctx+0(FP), R10
+ MOVQ 8(R10), CX
+ TESTQ CX, CX
+ JZ empty_seqs
+ MOVQ (R10), AX
+ MOVQ 24(R10), DX
+ MOVQ 32(R10), BX
+ MOVQ 80(R10), SI
+ MOVQ 104(R10), DI
+ MOVQ 120(R10), R8
+ MOVQ 56(R10), R9
+ MOVQ 64(R10), R10
+ ADDQ R10, R9
+
+ // seqsBase += 24 * seqIndex
+ LEAQ (DX)(DX*2), R11
+ SHLQ $0x03, R11
+ ADDQ R11, AX
+
+ // outBase += outPosition
+ ADDQ DI, BX
+
+main_loop:
+ MOVQ (AX), R11
+ MOVQ 16(AX), R12
+ MOVQ 8(AX), R13
+
+ // Copy literals
+ TESTQ R11, R11
+ JZ check_offset
+ MOVQ R11, R14
+ SUBQ $0x10, R14
+ JB copy_1_small
+
+copy_1_loop:
+ MOVUPS (SI), X0
+ MOVUPS X0, (BX)
+ ADDQ $0x10, SI
+ ADDQ $0x10, BX
+ SUBQ $0x10, R14
+ JAE copy_1_loop
+ LEAQ 16(SI)(R14*1), SI
+ LEAQ 16(BX)(R14*1), BX
+ MOVUPS -16(SI), X0
+ MOVUPS X0, -16(BX)
+ JMP copy_1_end
+
+copy_1_small:
+ CMPQ R11, $0x03
+ JE copy_1_move_3
+ JB copy_1_move_1or2
+ CMPQ R11, $0x08
+ JB copy_1_move_4through7
+ JMP copy_1_move_8through16
+
+copy_1_move_1or2:
+ MOVB (SI), R14
+ MOVB -1(SI)(R11*1), R15
+ MOVB R14, (BX)
+ MOVB R15, -1(BX)(R11*1)
+ ADDQ R11, SI
+ ADDQ R11, BX
+ JMP copy_1_end
+
+copy_1_move_3:
+ MOVW (SI), R14
+ MOVB 2(SI), R15
+ MOVW R14, (BX)
+ MOVB R15, 2(BX)
+ ADDQ R11, SI
+ ADDQ R11, BX
+ JMP copy_1_end
+
+copy_1_move_4through7:
+ MOVL (SI), R14
+ MOVL -4(SI)(R11*1), R15
+ MOVL R14, (BX)
+ MOVL R15, -4(BX)(R11*1)
+ ADDQ R11, SI
+ ADDQ R11, BX
+ JMP copy_1_end
+
+copy_1_move_8through16:
+ MOVQ (SI), R14
+ MOVQ -8(SI)(R11*1), R15
+ MOVQ R14, (BX)
+ MOVQ R15, -8(BX)(R11*1)
+ ADDQ R11, SI
+ ADDQ R11, BX
+
+copy_1_end:
+ ADDQ R11, DI
+
+ // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize)
+check_offset:
+ LEAQ (DI)(R10*1), R11
+ CMPQ R12, R11
+ JG error_match_off_too_big
+ CMPQ R12, R8
+ JG error_match_off_too_big
+
+ // Copy match from history
+ MOVQ R12, R11
+ SUBQ DI, R11
+ JLS copy_match
+ MOVQ R9, R14
+ SUBQ R11, R14
+ CMPQ R13, R11
+ JG copy_all_from_history
+ MOVQ R13, R11
+ SUBQ $0x10, R11
+ JB copy_4_small
+
+copy_4_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (BX)
+ ADDQ $0x10, R14
+ ADDQ $0x10, BX
+ SUBQ $0x10, R11
+ JAE copy_4_loop
+ LEAQ 16(R14)(R11*1), R14
+ LEAQ 16(BX)(R11*1), BX
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(BX)
+ JMP copy_4_end
+
+copy_4_small:
+ CMPQ R13, $0x03
+ JE copy_4_move_3
+ CMPQ R13, $0x08
+ JB copy_4_move_4through7
+ JMP copy_4_move_8through16
+
+copy_4_move_3:
+ MOVW (R14), R11
+ MOVB 2(R14), R12
+ MOVW R11, (BX)
+ MOVB R12, 2(BX)
+ ADDQ R13, R14
+ ADDQ R13, BX
+ JMP copy_4_end
+
+copy_4_move_4through7:
+ MOVL (R14), R11
+ MOVL -4(R14)(R13*1), R12
+ MOVL R11, (BX)
+ MOVL R12, -4(BX)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, BX
+ JMP copy_4_end
+
+copy_4_move_8through16:
+ MOVQ (R14), R11
+ MOVQ -8(R14)(R13*1), R12
+ MOVQ R11, (BX)
+ MOVQ R12, -8(BX)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, BX
+
+copy_4_end:
+ ADDQ R13, DI
+ ADDQ $0x18, AX
+ INCQ DX
+ CMPQ DX, CX
+ JB main_loop
+ JMP loop_finished
+
+copy_all_from_history:
+ MOVQ R11, R15
+ SUBQ $0x10, R15
+ JB copy_5_small
+
+copy_5_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (BX)
+ ADDQ $0x10, R14
+ ADDQ $0x10, BX
+ SUBQ $0x10, R15
+ JAE copy_5_loop
+ LEAQ 16(R14)(R15*1), R14
+ LEAQ 16(BX)(R15*1), BX
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(BX)
+ JMP copy_5_end
+
+copy_5_small:
+ CMPQ R11, $0x03
+ JE copy_5_move_3
+ JB copy_5_move_1or2
+ CMPQ R11, $0x08
+ JB copy_5_move_4through7
+ JMP copy_5_move_8through16
+
+copy_5_move_1or2:
+ MOVB (R14), R15
+ MOVB -1(R14)(R11*1), BP
+ MOVB R15, (BX)
+ MOVB BP, -1(BX)(R11*1)
+ ADDQ R11, R14
+ ADDQ R11, BX
+ JMP copy_5_end
+
+copy_5_move_3:
+ MOVW (R14), R15
+ MOVB 2(R14), BP
+ MOVW R15, (BX)
+ MOVB BP, 2(BX)
+ ADDQ R11, R14
+ ADDQ R11, BX
+ JMP copy_5_end
+
+copy_5_move_4through7:
+ MOVL (R14), R15
+ MOVL -4(R14)(R11*1), BP
+ MOVL R15, (BX)
+ MOVL BP, -4(BX)(R11*1)
+ ADDQ R11, R14
+ ADDQ R11, BX
+ JMP copy_5_end
+
+copy_5_move_8through16:
+ MOVQ (R14), R15
+ MOVQ -8(R14)(R11*1), BP
+ MOVQ R15, (BX)
+ MOVQ BP, -8(BX)(R11*1)
+ ADDQ R11, R14
+ ADDQ R11, BX
+
+copy_5_end:
+ ADDQ R11, DI
+ SUBQ R11, R13
+
+ // Copy match from the current buffer
+copy_match:
+ MOVQ BX, R11
+ SUBQ R12, R11
+
+ // ml <= mo
+ CMPQ R13, R12
+ JA copy_overlapping_match
+
+ // Copy non-overlapping match
+ ADDQ R13, DI
+ MOVQ R13, R12
+ SUBQ $0x10, R12
+ JB copy_2_small
+
+copy_2_loop:
+ MOVUPS (R11), X0
+ MOVUPS X0, (BX)
+ ADDQ $0x10, R11
+ ADDQ $0x10, BX
+ SUBQ $0x10, R12
+ JAE copy_2_loop
+ LEAQ 16(R11)(R12*1), R11
+ LEAQ 16(BX)(R12*1), BX
+ MOVUPS -16(R11), X0
+ MOVUPS X0, -16(BX)
+ JMP copy_2_end
+
+copy_2_small:
+ CMPQ R13, $0x03
+ JE copy_2_move_3
+ JB copy_2_move_1or2
+ CMPQ R13, $0x08
+ JB copy_2_move_4through7
+ JMP copy_2_move_8through16
+
+copy_2_move_1or2:
+ MOVB (R11), R12
+ MOVB -1(R11)(R13*1), R14
+ MOVB R12, (BX)
+ MOVB R14, -1(BX)(R13*1)
+ ADDQ R13, R11
+ ADDQ R13, BX
+ JMP copy_2_end
+
+copy_2_move_3:
+ MOVW (R11), R12
+ MOVB 2(R11), R14
+ MOVW R12, (BX)
+ MOVB R14, 2(BX)
+ ADDQ R13, R11
+ ADDQ R13, BX
+ JMP copy_2_end
+
+copy_2_move_4through7:
+ MOVL (R11), R12
+ MOVL -4(R11)(R13*1), R14
+ MOVL R12, (BX)
+ MOVL R14, -4(BX)(R13*1)
+ ADDQ R13, R11
+ ADDQ R13, BX
+ JMP copy_2_end
+
+copy_2_move_8through16:
+ MOVQ (R11), R12
+ MOVQ -8(R11)(R13*1), R14
+ MOVQ R12, (BX)
+ MOVQ R14, -8(BX)(R13*1)
+ ADDQ R13, R11
+ ADDQ R13, BX
+
+copy_2_end:
+ JMP handle_loop
+
+ // Copy overlapping match
+copy_overlapping_match:
+ ADDQ R13, DI
+
+copy_slow_3:
+ MOVB (R11), R12
+ MOVB R12, (BX)
+ INCQ R11
+ INCQ BX
+ DECQ R13
+ JNZ copy_slow_3
+
+handle_loop:
+ ADDQ $0x18, AX
+ INCQ DX
+ CMPQ DX, CX
+ JB main_loop
+
+loop_finished:
+ // Return value
+ MOVB $0x01, ret+8(FP)
+
+ // Update the context
+ MOVQ ctx+0(FP), AX
+ MOVQ DX, 24(AX)
+ MOVQ DI, 104(AX)
+ MOVQ 80(AX), CX
+ SUBQ CX, SI
+ MOVQ SI, 112(AX)
+ RET
+
+error_match_off_too_big:
+ // Return value
+ MOVB $0x00, ret+8(FP)
+
+ // Update the context
+ MOVQ ctx+0(FP), AX
+ MOVQ DX, 24(AX)
+ MOVQ DI, 104(AX)
+ MOVQ 80(AX), CX
+ SUBQ CX, SI
+ MOVQ SI, 112(AX)
+ RET
+
+empty_seqs:
+ // Return value
+ MOVB $0x01, ret+8(FP)
+ RET
+
+// func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+// Requires: CMOV, SSE
+TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32
+ MOVQ br+8(FP), AX
+ MOVQ 32(AX), DX
+ MOVBQZX 40(AX), BX
+ MOVQ 24(AX), SI
+ MOVQ (AX), AX
+ ADDQ SI, AX
+ MOVQ AX, (SP)
+ MOVQ ctx+16(FP), AX
+ MOVQ 72(AX), DI
+ MOVQ 80(AX), R8
+ MOVQ 88(AX), R9
+ XORQ CX, CX
+ MOVQ CX, 8(SP)
+ MOVQ CX, 16(SP)
+ MOVQ CX, 24(SP)
+ MOVQ 112(AX), R10
+ MOVQ 128(AX), CX
+ MOVQ CX, 32(SP)
+ MOVQ 144(AX), R11
+ MOVQ 136(AX), R12
+ MOVQ 200(AX), CX
+ MOVQ CX, 56(SP)
+ MOVQ 176(AX), CX
+ MOVQ CX, 48(SP)
+ MOVQ 184(AX), AX
+ MOVQ AX, 40(SP)
+ MOVQ 40(SP), AX
+ ADDQ AX, 48(SP)
+
+	// Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
+ ADDQ R10, 32(SP)
+
+ // outBase += outPosition
+ ADDQ R12, R10
+
+sequenceDecs_decodeSync_amd64_main_loop:
+ MOVQ (SP), R13
+
+ // Fill bitreader to have enough for the offset and match length.
+ CMPQ SI, $0x08
+ JL sequenceDecs_decodeSync_amd64_fill_byte_by_byte
+ MOVQ BX, AX
+ SHRQ $0x03, AX
+ SUBQ AX, R13
+ MOVQ (R13), DX
+ SUBQ AX, SI
+ ANDQ $0x07, BX
+ JMP sequenceDecs_decodeSync_amd64_fill_end
+
+sequenceDecs_decodeSync_amd64_fill_byte_by_byte:
+ CMPQ SI, $0x00
+ JLE sequenceDecs_decodeSync_amd64_fill_end
+ CMPQ BX, $0x07
+ JLE sequenceDecs_decodeSync_amd64_fill_end
+ SHLQ $0x08, DX
+ SUBQ $0x01, R13
+ SUBQ $0x01, SI
+ SUBQ $0x08, BX
+ MOVBQZX (R13), AX
+ ORQ AX, DX
+ JMP sequenceDecs_decodeSync_amd64_fill_byte_by_byte
+
+sequenceDecs_decodeSync_amd64_fill_end:
+ // Update offset
+ MOVQ R9, AX
+ MOVQ BX, CX
+ MOVQ DX, R14
+ SHLQ CL, R14
+ MOVB AH, CL
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ JZ sequenceDecs_decodeSync_amd64_of_update_zero
+ ADDQ CX, BX
+ CMPQ BX, $0x40
+ JA sequenceDecs_decodeSync_amd64_of_update_zero
+ CMPQ CX, $0x40
+ JAE sequenceDecs_decodeSync_amd64_of_update_zero
+ NEGQ CX
+ SHRQ CL, R14
+ ADDQ R14, AX
+
+sequenceDecs_decodeSync_amd64_of_update_zero:
+ MOVQ AX, 8(SP)
+
+ // Update match length
+ MOVQ R8, AX
+ MOVQ BX, CX
+ MOVQ DX, R14
+ SHLQ CL, R14
+ MOVB AH, CL
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ JZ sequenceDecs_decodeSync_amd64_ml_update_zero
+ ADDQ CX, BX
+ CMPQ BX, $0x40
+ JA sequenceDecs_decodeSync_amd64_ml_update_zero
+ CMPQ CX, $0x40
+ JAE sequenceDecs_decodeSync_amd64_ml_update_zero
+ NEGQ CX
+ SHRQ CL, R14
+ ADDQ R14, AX
+
+sequenceDecs_decodeSync_amd64_ml_update_zero:
+ MOVQ AX, 16(SP)
+
+ // Fill bitreader to have enough for the remaining
+ CMPQ SI, $0x08
+ JL sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte
+ MOVQ BX, AX
+ SHRQ $0x03, AX
+ SUBQ AX, R13
+ MOVQ (R13), DX
+ SUBQ AX, SI
+ ANDQ $0x07, BX
+ JMP sequenceDecs_decodeSync_amd64_fill_2_end
+
+sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte:
+ CMPQ SI, $0x00
+ JLE sequenceDecs_decodeSync_amd64_fill_2_end
+ CMPQ BX, $0x07
+ JLE sequenceDecs_decodeSync_amd64_fill_2_end
+ SHLQ $0x08, DX
+ SUBQ $0x01, R13
+ SUBQ $0x01, SI
+ SUBQ $0x08, BX
+ MOVBQZX (R13), AX
+ ORQ AX, DX
+ JMP sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte
+
+sequenceDecs_decodeSync_amd64_fill_2_end:
+ // Update literal length
+ MOVQ DI, AX
+ MOVQ BX, CX
+ MOVQ DX, R14
+ SHLQ CL, R14
+ MOVB AH, CL
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ JZ sequenceDecs_decodeSync_amd64_ll_update_zero
+ ADDQ CX, BX
+ CMPQ BX, $0x40
+ JA sequenceDecs_decodeSync_amd64_ll_update_zero
+ CMPQ CX, $0x40
+ JAE sequenceDecs_decodeSync_amd64_ll_update_zero
+ NEGQ CX
+ SHRQ CL, R14
+ ADDQ R14, AX
+
+sequenceDecs_decodeSync_amd64_ll_update_zero:
+ MOVQ AX, 24(SP)
+
+ // Fill bitreader for state updates
+ MOVQ R13, (SP)
+ MOVQ R9, AX
+ SHRQ $0x08, AX
+ MOVBQZX AL, AX
+ MOVQ ctx+16(FP), CX
+ CMPQ 96(CX), $0x00
+ JZ sequenceDecs_decodeSync_amd64_skip_update
+
+ // Update Literal Length State
+ MOVBQZX DI, R13
+ SHRQ $0x10, DI
+ MOVWQZX DI, DI
+ LEAQ (BX)(R13*1), CX
+ MOVQ DX, R14
+ MOVQ CX, BX
+ ROLQ CL, R14
+ MOVL $0x00000001, R15
+ MOVB R13, CL
+ SHLL CL, R15
+ DECL R15
+ ANDQ R15, R14
+ ADDQ R14, DI
+
+ // Load ctx.llTable
+ MOVQ ctx+16(FP), CX
+ MOVQ (CX), CX
+ MOVQ (CX)(DI*8), DI
+
+ // Update Match Length State
+ MOVBQZX R8, R13
+ SHRQ $0x10, R8
+ MOVWQZX R8, R8
+ LEAQ (BX)(R13*1), CX
+ MOVQ DX, R14
+ MOVQ CX, BX
+ ROLQ CL, R14
+ MOVL $0x00000001, R15
+ MOVB R13, CL
+ SHLL CL, R15
+ DECL R15
+ ANDQ R15, R14
+ ADDQ R14, R8
+
+ // Load ctx.mlTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 24(CX), CX
+ MOVQ (CX)(R8*8), R8
+
+ // Update Offset State
+ MOVBQZX R9, R13
+ SHRQ $0x10, R9
+ MOVWQZX R9, R9
+ LEAQ (BX)(R13*1), CX
+ MOVQ DX, R14
+ MOVQ CX, BX
+ ROLQ CL, R14
+ MOVL $0x00000001, R15
+ MOVB R13, CL
+ SHLL CL, R15
+ DECL R15
+ ANDQ R15, R14
+ ADDQ R14, R9
+
+ // Load ctx.ofTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 48(CX), CX
+ MOVQ (CX)(R9*8), R9
+
+sequenceDecs_decodeSync_amd64_skip_update:
+ // Adjust offset
+ MOVQ s+0(FP), CX
+ MOVQ 8(SP), R13
+ CMPQ AX, $0x01
+ JBE sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0
+ MOVUPS 144(CX), X0
+ MOVQ R13, 144(CX)
+ MOVUPS X0, 152(CX)
+ JMP sequenceDecs_decodeSync_amd64_after_adjust
+
+sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0:
+ CMPQ 24(SP), $0x00000000
+ JNE sequenceDecs_decodeSync_amd64_adjust_offset_maybezero
+ INCQ R13
+ JMP sequenceDecs_decodeSync_amd64_adjust_offset_nonzero
+
+sequenceDecs_decodeSync_amd64_adjust_offset_maybezero:
+ TESTQ R13, R13
+ JNZ sequenceDecs_decodeSync_amd64_adjust_offset_nonzero
+ MOVQ 144(CX), R13
+ JMP sequenceDecs_decodeSync_amd64_after_adjust
+
+sequenceDecs_decodeSync_amd64_adjust_offset_nonzero:
+ MOVQ R13, AX
+ XORQ R14, R14
+ MOVQ $-1, R15
+ CMPQ R13, $0x03
+ CMOVQEQ R14, AX
+ CMOVQEQ R15, R14
+ ADDQ 144(CX)(AX*8), R14
+ JNZ sequenceDecs_decodeSync_amd64_adjust_temp_valid
+ MOVQ $0x00000001, R14
+
+sequenceDecs_decodeSync_amd64_adjust_temp_valid:
+ CMPQ R13, $0x01
+ JZ sequenceDecs_decodeSync_amd64_adjust_skip
+ MOVQ 152(CX), AX
+ MOVQ AX, 160(CX)
+
+sequenceDecs_decodeSync_amd64_adjust_skip:
+ MOVQ 144(CX), AX
+ MOVQ AX, 152(CX)
+ MOVQ R14, 144(CX)
+ MOVQ R14, R13
+
+sequenceDecs_decodeSync_amd64_after_adjust:
+ MOVQ R13, 8(SP)
+
+ // Check values
+ MOVQ 16(SP), AX
+ MOVQ 24(SP), CX
+ LEAQ (AX)(CX*1), R14
+ MOVQ s+0(FP), R15
+ ADDQ R14, 256(R15)
+ MOVQ ctx+16(FP), R14
+ SUBQ CX, 104(R14)
+ JS error_not_enough_literals
+ CMPQ AX, $0x00020002
+ JA sequenceDecs_decodeSync_amd64_error_match_len_too_big
+ TESTQ R13, R13
+ JNZ sequenceDecs_decodeSync_amd64_match_len_ofs_ok
+ TESTQ AX, AX
+ JNZ sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch
+
+sequenceDecs_decodeSync_amd64_match_len_ofs_ok:
+ MOVQ 24(SP), AX
+ MOVQ 8(SP), CX
+ MOVQ 16(SP), R13
+
+ // Check if we have enough space in s.out
+ LEAQ (AX)(R13*1), R14
+ ADDQ R10, R14
+ CMPQ R14, 32(SP)
+ JA error_not_enough_space
+
+ // Copy literals
+ TESTQ AX, AX
+ JZ check_offset
+ XORQ R14, R14
+
+copy_1:
+ MOVUPS (R11)(R14*1), X0
+ MOVUPS X0, (R10)(R14*1)
+ ADDQ $0x10, R14
+ CMPQ R14, AX
+ JB copy_1
+ ADDQ AX, R11
+ ADDQ AX, R10
+ ADDQ AX, R12
+
+	// Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize
+check_offset:
+ MOVQ R12, AX
+ ADDQ 40(SP), AX
+ CMPQ CX, AX
+ JG error_match_off_too_big
+ CMPQ CX, 56(SP)
+ JG error_match_off_too_big
+
+ // Copy match from history
+ MOVQ CX, AX
+ SUBQ R12, AX
+ JLS copy_match
+ MOVQ 48(SP), R14
+ SUBQ AX, R14
+ CMPQ R13, AX
+ JG copy_all_from_history
+ MOVQ R13, AX
+ SUBQ $0x10, AX
+ JB copy_4_small
+
+copy_4_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (R10)
+ ADDQ $0x10, R14
+ ADDQ $0x10, R10
+ SUBQ $0x10, AX
+ JAE copy_4_loop
+ LEAQ 16(R14)(AX*1), R14
+ LEAQ 16(R10)(AX*1), R10
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(R10)
+ JMP copy_4_end
+
+copy_4_small:
+ CMPQ R13, $0x03
+ JE copy_4_move_3
+ CMPQ R13, $0x08
+ JB copy_4_move_4through7
+ JMP copy_4_move_8through16
+
+copy_4_move_3:
+ MOVW (R14), AX
+ MOVB 2(R14), CL
+ MOVW AX, (R10)
+ MOVB CL, 2(R10)
+ ADDQ R13, R14
+ ADDQ R13, R10
+ JMP copy_4_end
+
+copy_4_move_4through7:
+ MOVL (R14), AX
+ MOVL -4(R14)(R13*1), CX
+ MOVL AX, (R10)
+ MOVL CX, -4(R10)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, R10
+ JMP copy_4_end
+
+copy_4_move_8through16:
+ MOVQ (R14), AX
+ MOVQ -8(R14)(R13*1), CX
+ MOVQ AX, (R10)
+ MOVQ CX, -8(R10)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, R10
+
+copy_4_end:
+ ADDQ R13, R12
+ JMP handle_loop
+ JMP loop_finished
+
+copy_all_from_history:
+ MOVQ AX, R15
+ SUBQ $0x10, R15
+ JB copy_5_small
+
+copy_5_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (R10)
+ ADDQ $0x10, R14
+ ADDQ $0x10, R10
+ SUBQ $0x10, R15
+ JAE copy_5_loop
+ LEAQ 16(R14)(R15*1), R14
+ LEAQ 16(R10)(R15*1), R10
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(R10)
+ JMP copy_5_end
+
+copy_5_small:
+ CMPQ AX, $0x03
+ JE copy_5_move_3
+ JB copy_5_move_1or2
+ CMPQ AX, $0x08
+ JB copy_5_move_4through7
+ JMP copy_5_move_8through16
+
+copy_5_move_1or2:
+ MOVB (R14), R15
+ MOVB -1(R14)(AX*1), BP
+ MOVB R15, (R10)
+ MOVB BP, -1(R10)(AX*1)
+ ADDQ AX, R14
+ ADDQ AX, R10
+ JMP copy_5_end
+
+copy_5_move_3:
+ MOVW (R14), R15
+ MOVB 2(R14), BP
+ MOVW R15, (R10)
+ MOVB BP, 2(R10)
+ ADDQ AX, R14
+ ADDQ AX, R10
+ JMP copy_5_end
+
+copy_5_move_4through7:
+ MOVL (R14), R15
+ MOVL -4(R14)(AX*1), BP
+ MOVL R15, (R10)
+ MOVL BP, -4(R10)(AX*1)
+ ADDQ AX, R14
+ ADDQ AX, R10
+ JMP copy_5_end
+
+copy_5_move_8through16:
+ MOVQ (R14), R15
+ MOVQ -8(R14)(AX*1), BP
+ MOVQ R15, (R10)
+ MOVQ BP, -8(R10)(AX*1)
+ ADDQ AX, R14
+ ADDQ AX, R10
+
+copy_5_end:
+ ADDQ AX, R12
+ SUBQ AX, R13
+
+ // Copy match from the current buffer
+copy_match:
+ MOVQ R10, AX
+ SUBQ CX, AX
+
+ // ml <= mo
+ CMPQ R13, CX
+ JA copy_overlapping_match
+
+ // Copy non-overlapping match
+ ADDQ R13, R12
+ MOVQ R10, CX
+ ADDQ R13, R10
+
+copy_2:
+ MOVUPS (AX), X0
+ MOVUPS X0, (CX)
+ ADDQ $0x10, AX
+ ADDQ $0x10, CX
+ SUBQ $0x10, R13
+ JHI copy_2
+ JMP handle_loop
+
+ // Copy overlapping match
+copy_overlapping_match:
+ ADDQ R13, R12
+
+copy_slow_3:
+ MOVB (AX), CL
+ MOVB CL, (R10)
+ INCQ AX
+ INCQ R10
+ DECQ R13
+ JNZ copy_slow_3
+
+handle_loop:
+ MOVQ ctx+16(FP), AX
+ DECQ 96(AX)
+ JNS sequenceDecs_decodeSync_amd64_main_loop
+
+loop_finished:
+ MOVQ br+8(FP), AX
+ MOVQ DX, 32(AX)
+ MOVB BL, 40(AX)
+ MOVQ SI, 24(AX)
+
+ // Update the context
+ MOVQ ctx+16(FP), AX
+ MOVQ R12, 136(AX)
+ MOVQ 144(AX), CX
+ SUBQ CX, R11
+ MOVQ R11, 168(AX)
+
+ // Return success
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+ // Return with match length error
+sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch:
+ MOVQ 16(SP), AX
+ MOVQ ctx+16(FP), CX
+ MOVQ AX, 216(CX)
+ MOVQ $0x00000001, ret+24(FP)
+ RET
+
+ // Return with match too long error
+sequenceDecs_decodeSync_amd64_error_match_len_too_big:
+ MOVQ ctx+16(FP), AX
+ MOVQ 16(SP), CX
+ MOVQ CX, 216(AX)
+ MOVQ $0x00000002, ret+24(FP)
+ RET
+
+ // Return with match offset too long error
+error_match_off_too_big:
+ MOVQ ctx+16(FP), AX
+ MOVQ 8(SP), CX
+ MOVQ CX, 224(AX)
+ MOVQ R12, 136(AX)
+ MOVQ $0x00000003, ret+24(FP)
+ RET
+
+ // Return with not enough literals error
+error_not_enough_literals:
+ MOVQ ctx+16(FP), AX
+ MOVQ 24(SP), CX
+ MOVQ CX, 208(AX)
+ MOVQ $0x00000004, ret+24(FP)
+ RET
+
+ // Return with not enough output space error
+error_not_enough_space:
+ MOVQ ctx+16(FP), AX
+ MOVQ 24(SP), CX
+ MOVQ CX, 208(AX)
+ MOVQ 16(SP), CX
+ MOVQ CX, 216(AX)
+ MOVQ R12, 136(AX)
+ MOVQ $0x00000005, ret+24(FP)
+ RET
+
+// func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+// Requires: BMI, BMI2, CMOV, SSE
+TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32
+ MOVQ br+8(FP), CX
+ MOVQ 32(CX), AX
+ MOVBQZX 40(CX), DX
+ MOVQ 24(CX), BX
+ MOVQ (CX), CX
+ ADDQ BX, CX
+ MOVQ CX, (SP)
+ MOVQ ctx+16(FP), CX
+ MOVQ 72(CX), SI
+ MOVQ 80(CX), DI
+ MOVQ 88(CX), R8
+ XORQ R9, R9
+ MOVQ R9, 8(SP)
+ MOVQ R9, 16(SP)
+ MOVQ R9, 24(SP)
+ MOVQ 112(CX), R9
+ MOVQ 128(CX), R10
+ MOVQ R10, 32(SP)
+ MOVQ 144(CX), R10
+ MOVQ 136(CX), R11
+ MOVQ 200(CX), R12
+ MOVQ R12, 56(SP)
+ MOVQ 176(CX), R12
+ MOVQ R12, 48(SP)
+ MOVQ 184(CX), CX
+ MOVQ CX, 40(SP)
+ MOVQ 40(SP), CX
+ ADDQ CX, 48(SP)
+
+	// Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
+ ADDQ R9, 32(SP)
+
+ // outBase += outPosition
+ ADDQ R11, R9
+
+sequenceDecs_decodeSync_bmi2_main_loop:
+ MOVQ (SP), R12
+
+ // Fill bitreader to have enough for the offset and match length.
+ CMPQ BX, $0x08
+ JL sequenceDecs_decodeSync_bmi2_fill_byte_by_byte
+ MOVQ DX, CX
+ SHRQ $0x03, CX
+ SUBQ CX, R12
+ MOVQ (R12), AX
+ SUBQ CX, BX
+ ANDQ $0x07, DX
+ JMP sequenceDecs_decodeSync_bmi2_fill_end
+
+sequenceDecs_decodeSync_bmi2_fill_byte_by_byte:
+ CMPQ BX, $0x00
+ JLE sequenceDecs_decodeSync_bmi2_fill_end
+ CMPQ DX, $0x07
+ JLE sequenceDecs_decodeSync_bmi2_fill_end
+ SHLQ $0x08, AX
+ SUBQ $0x01, R12
+ SUBQ $0x01, BX
+ SUBQ $0x08, DX
+ MOVBQZX (R12), CX
+ ORQ CX, AX
+ JMP sequenceDecs_decodeSync_bmi2_fill_byte_by_byte
+
+sequenceDecs_decodeSync_bmi2_fill_end:
+ // Update offset
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, R8, R13
+ MOVQ AX, R14
+ LEAQ (DX)(R13*1), CX
+ ROLQ CL, R14
+ BZHIQ R13, R14, R14
+ MOVQ CX, DX
+ MOVQ R8, CX
+ SHRQ $0x20, CX
+ ADDQ R14, CX
+ MOVQ CX, 8(SP)
+
+ // Update match length
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, DI, R13
+ MOVQ AX, R14
+ LEAQ (DX)(R13*1), CX
+ ROLQ CL, R14
+ BZHIQ R13, R14, R14
+ MOVQ CX, DX
+ MOVQ DI, CX
+ SHRQ $0x20, CX
+ ADDQ R14, CX
+ MOVQ CX, 16(SP)
+
+ // Fill bitreader to have enough for the remaining
+ CMPQ BX, $0x08
+ JL sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte
+ MOVQ DX, CX
+ SHRQ $0x03, CX
+ SUBQ CX, R12
+ MOVQ (R12), AX
+ SUBQ CX, BX
+ ANDQ $0x07, DX
+ JMP sequenceDecs_decodeSync_bmi2_fill_2_end
+
+sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte:
+ CMPQ BX, $0x00
+ JLE sequenceDecs_decodeSync_bmi2_fill_2_end
+ CMPQ DX, $0x07
+ JLE sequenceDecs_decodeSync_bmi2_fill_2_end
+ SHLQ $0x08, AX
+ SUBQ $0x01, R12
+ SUBQ $0x01, BX
+ SUBQ $0x08, DX
+ MOVBQZX (R12), CX
+ ORQ CX, AX
+ JMP sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte
+
+sequenceDecs_decodeSync_bmi2_fill_2_end:
+ // Update literal length
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, SI, R13
+ MOVQ AX, R14
+ LEAQ (DX)(R13*1), CX
+ ROLQ CL, R14
+ BZHIQ R13, R14, R14
+ MOVQ CX, DX
+ MOVQ SI, CX
+ SHRQ $0x20, CX
+ ADDQ R14, CX
+ MOVQ CX, 24(SP)
+
+ // Fill bitreader for state updates
+ MOVQ R12, (SP)
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, R8, R12
+ MOVQ ctx+16(FP), CX
+ CMPQ 96(CX), $0x00
+ JZ sequenceDecs_decodeSync_bmi2_skip_update
+ LEAQ (SI)(DI*1), R13
+ ADDQ R8, R13
+ MOVBQZX R13, R13
+ LEAQ (DX)(R13*1), CX
+ MOVQ AX, R14
+ MOVQ CX, DX
+ ROLQ CL, R14
+ BZHIQ R13, R14, R14
+
+ // Update Offset State
+ BZHIQ R8, R14, CX
+ SHRXQ R8, R14, R14
+ MOVQ $0x00001010, R13
+ BEXTRQ R13, R8, R8
+ ADDQ CX, R8
+
+ // Load ctx.ofTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 48(CX), CX
+ MOVQ (CX)(R8*8), R8
+
+ // Update Match Length State
+ BZHIQ DI, R14, CX
+ SHRXQ DI, R14, R14
+ MOVQ $0x00001010, R13
+ BEXTRQ R13, DI, DI
+ ADDQ CX, DI
+
+ // Load ctx.mlTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 24(CX), CX
+ MOVQ (CX)(DI*8), DI
+
+ // Update Literal Length State
+ BZHIQ SI, R14, CX
+ MOVQ $0x00001010, R13
+ BEXTRQ R13, SI, SI
+ ADDQ CX, SI
+
+ // Load ctx.llTable
+ MOVQ ctx+16(FP), CX
+ MOVQ (CX), CX
+ MOVQ (CX)(SI*8), SI
+
+sequenceDecs_decodeSync_bmi2_skip_update:
+ // Adjust offset
+ MOVQ s+0(FP), CX
+ MOVQ 8(SP), R13
+ CMPQ R12, $0x01
+ JBE sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0
+ MOVUPS 144(CX), X0
+ MOVQ R13, 144(CX)
+ MOVUPS X0, 152(CX)
+ JMP sequenceDecs_decodeSync_bmi2_after_adjust
+
+sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0:
+ CMPQ 24(SP), $0x00000000
+ JNE sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero
+ INCQ R13
+ JMP sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero
+
+sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero:
+ TESTQ R13, R13
+ JNZ sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero
+ MOVQ 144(CX), R13
+ JMP sequenceDecs_decodeSync_bmi2_after_adjust
+
+sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero:
+ MOVQ R13, R12
+ XORQ R14, R14
+ MOVQ $-1, R15
+ CMPQ R13, $0x03
+ CMOVQEQ R14, R12
+ CMOVQEQ R15, R14
+ ADDQ 144(CX)(R12*8), R14
+ JNZ sequenceDecs_decodeSync_bmi2_adjust_temp_valid
+ MOVQ $0x00000001, R14
+
+sequenceDecs_decodeSync_bmi2_adjust_temp_valid:
+ CMPQ R13, $0x01
+ JZ sequenceDecs_decodeSync_bmi2_adjust_skip
+ MOVQ 152(CX), R12
+ MOVQ R12, 160(CX)
+
+sequenceDecs_decodeSync_bmi2_adjust_skip:
+ MOVQ 144(CX), R12
+ MOVQ R12, 152(CX)
+ MOVQ R14, 144(CX)
+ MOVQ R14, R13
+
+sequenceDecs_decodeSync_bmi2_after_adjust:
+ MOVQ R13, 8(SP)
+
+ // Check values
+ MOVQ 16(SP), CX
+ MOVQ 24(SP), R12
+ LEAQ (CX)(R12*1), R14
+ MOVQ s+0(FP), R15
+ ADDQ R14, 256(R15)
+ MOVQ ctx+16(FP), R14
+ SUBQ R12, 104(R14)
+ JS error_not_enough_literals
+ CMPQ CX, $0x00020002
+ JA sequenceDecs_decodeSync_bmi2_error_match_len_too_big
+ TESTQ R13, R13
+ JNZ sequenceDecs_decodeSync_bmi2_match_len_ofs_ok
+ TESTQ CX, CX
+ JNZ sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch
+
+sequenceDecs_decodeSync_bmi2_match_len_ofs_ok:
+ MOVQ 24(SP), CX
+ MOVQ 8(SP), R12
+ MOVQ 16(SP), R13
+
+ // Check if we have enough space in s.out
+ LEAQ (CX)(R13*1), R14
+ ADDQ R9, R14
+ CMPQ R14, 32(SP)
+ JA error_not_enough_space
+
+ // Copy literals
+ TESTQ CX, CX
+ JZ check_offset
+ XORQ R14, R14
+
+copy_1:
+ MOVUPS (R10)(R14*1), X0
+ MOVUPS X0, (R9)(R14*1)
+ ADDQ $0x10, R14
+ CMPQ R14, CX
+ JB copy_1
+ ADDQ CX, R10
+ ADDQ CX, R9
+ ADDQ CX, R11
+
+	// Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize
+check_offset:
+ MOVQ R11, CX
+ ADDQ 40(SP), CX
+ CMPQ R12, CX
+ JG error_match_off_too_big
+ CMPQ R12, 56(SP)
+ JG error_match_off_too_big
+
+ // Copy match from history
+ MOVQ R12, CX
+ SUBQ R11, CX
+ JLS copy_match
+ MOVQ 48(SP), R14
+ SUBQ CX, R14
+ CMPQ R13, CX
+ JG copy_all_from_history
+ MOVQ R13, CX
+ SUBQ $0x10, CX
+ JB copy_4_small
+
+copy_4_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (R9)
+ ADDQ $0x10, R14
+ ADDQ $0x10, R9
+ SUBQ $0x10, CX
+ JAE copy_4_loop
+ LEAQ 16(R14)(CX*1), R14
+ LEAQ 16(R9)(CX*1), R9
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(R9)
+ JMP copy_4_end
+
+copy_4_small:
+ CMPQ R13, $0x03
+ JE copy_4_move_3
+ CMPQ R13, $0x08
+ JB copy_4_move_4through7
+ JMP copy_4_move_8through16
+
+copy_4_move_3:
+ MOVW (R14), CX
+ MOVB 2(R14), R12
+ MOVW CX, (R9)
+ MOVB R12, 2(R9)
+ ADDQ R13, R14
+ ADDQ R13, R9
+ JMP copy_4_end
+
+copy_4_move_4through7:
+ MOVL (R14), CX
+ MOVL -4(R14)(R13*1), R12
+ MOVL CX, (R9)
+ MOVL R12, -4(R9)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, R9
+ JMP copy_4_end
+
+copy_4_move_8through16:
+ MOVQ (R14), CX
+ MOVQ -8(R14)(R13*1), R12
+ MOVQ CX, (R9)
+ MOVQ R12, -8(R9)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, R9
+
+copy_4_end:
+ ADDQ R13, R11
+ JMP handle_loop
+ JMP loop_finished
+
+copy_all_from_history:
+ MOVQ CX, R15
+ SUBQ $0x10, R15
+ JB copy_5_small
+
+copy_5_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (R9)
+ ADDQ $0x10, R14
+ ADDQ $0x10, R9
+ SUBQ $0x10, R15
+ JAE copy_5_loop
+ LEAQ 16(R14)(R15*1), R14
+ LEAQ 16(R9)(R15*1), R9
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(R9)
+ JMP copy_5_end
+
+copy_5_small:
+ CMPQ CX, $0x03
+ JE copy_5_move_3
+ JB copy_5_move_1or2
+ CMPQ CX, $0x08
+ JB copy_5_move_4through7
+ JMP copy_5_move_8through16
+
+copy_5_move_1or2:
+ MOVB (R14), R15
+ MOVB -1(R14)(CX*1), BP
+ MOVB R15, (R9)
+ MOVB BP, -1(R9)(CX*1)
+ ADDQ CX, R14
+ ADDQ CX, R9
+ JMP copy_5_end
+
+copy_5_move_3:
+ MOVW (R14), R15
+ MOVB 2(R14), BP
+ MOVW R15, (R9)
+ MOVB BP, 2(R9)
+ ADDQ CX, R14
+ ADDQ CX, R9
+ JMP copy_5_end
+
+copy_5_move_4through7:
+ MOVL (R14), R15
+ MOVL -4(R14)(CX*1), BP
+ MOVL R15, (R9)
+ MOVL BP, -4(R9)(CX*1)
+ ADDQ CX, R14
+ ADDQ CX, R9
+ JMP copy_5_end
+
+copy_5_move_8through16:
+ MOVQ (R14), R15
+ MOVQ -8(R14)(CX*1), BP
+ MOVQ R15, (R9)
+ MOVQ BP, -8(R9)(CX*1)
+ ADDQ CX, R14
+ ADDQ CX, R9
+
+copy_5_end:
+ ADDQ CX, R11
+ SUBQ CX, R13
+
+ // Copy match from the current buffer
+copy_match:
+ MOVQ R9, CX
+ SUBQ R12, CX
+
+ // ml <= mo
+ CMPQ R13, R12
+ JA copy_overlapping_match
+
+ // Copy non-overlapping match
+ ADDQ R13, R11
+ MOVQ R9, R12
+ ADDQ R13, R9
+
+copy_2:
+ MOVUPS (CX), X0
+ MOVUPS X0, (R12)
+ ADDQ $0x10, CX
+ ADDQ $0x10, R12
+ SUBQ $0x10, R13
+ JHI copy_2
+ JMP handle_loop
+
+ // Copy overlapping match
+copy_overlapping_match:
+ ADDQ R13, R11
+
+copy_slow_3:
+ MOVB (CX), R12
+ MOVB R12, (R9)
+ INCQ CX
+ INCQ R9
+ DECQ R13
+ JNZ copy_slow_3
+
+handle_loop:
+ MOVQ ctx+16(FP), CX
+ DECQ 96(CX)
+ JNS sequenceDecs_decodeSync_bmi2_main_loop
+
+loop_finished:
+ MOVQ br+8(FP), CX
+ MOVQ AX, 32(CX)
+ MOVB DL, 40(CX)
+ MOVQ BX, 24(CX)
+
+ // Update the context
+ MOVQ ctx+16(FP), AX
+ MOVQ R11, 136(AX)
+ MOVQ 144(AX), CX
+ SUBQ CX, R10
+ MOVQ R10, 168(AX)
+
+ // Return success
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+ // Return with match length error
+sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch:
+ MOVQ 16(SP), AX
+ MOVQ ctx+16(FP), CX
+ MOVQ AX, 216(CX)
+ MOVQ $0x00000001, ret+24(FP)
+ RET
+
+ // Return with match too long error
+sequenceDecs_decodeSync_bmi2_error_match_len_too_big:
+ MOVQ ctx+16(FP), AX
+ MOVQ 16(SP), CX
+ MOVQ CX, 216(AX)
+ MOVQ $0x00000002, ret+24(FP)
+ RET
+
+ // Return with match offset too long error
+error_match_off_too_big:
+ MOVQ ctx+16(FP), AX
+ MOVQ 8(SP), CX
+ MOVQ CX, 224(AX)
+ MOVQ R11, 136(AX)
+ MOVQ $0x00000003, ret+24(FP)
+ RET
+
+ // Return with not enough literals error
+error_not_enough_literals:
+ MOVQ ctx+16(FP), AX
+ MOVQ 24(SP), CX
+ MOVQ CX, 208(AX)
+ MOVQ $0x00000004, ret+24(FP)
+ RET
+
+ // Return with not enough output space error
+error_not_enough_space:
+ MOVQ ctx+16(FP), AX
+ MOVQ 24(SP), CX
+ MOVQ CX, 208(AX)
+ MOVQ 16(SP), CX
+ MOVQ CX, 216(AX)
+ MOVQ R11, 136(AX)
+ MOVQ $0x00000005, ret+24(FP)
+ RET
+
+// func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+// Requires: CMOV, SSE
+TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32
+ MOVQ br+8(FP), AX
+ MOVQ 32(AX), DX
+ MOVBQZX 40(AX), BX
+ MOVQ 24(AX), SI
+ MOVQ (AX), AX
+ ADDQ SI, AX
+ MOVQ AX, (SP)
+ MOVQ ctx+16(FP), AX
+ MOVQ 72(AX), DI
+ MOVQ 80(AX), R8
+ MOVQ 88(AX), R9
+ XORQ CX, CX
+ MOVQ CX, 8(SP)
+ MOVQ CX, 16(SP)
+ MOVQ CX, 24(SP)
+ MOVQ 112(AX), R10
+ MOVQ 128(AX), CX
+ MOVQ CX, 32(SP)
+ MOVQ 144(AX), R11
+ MOVQ 136(AX), R12
+ MOVQ 200(AX), CX
+ MOVQ CX, 56(SP)
+ MOVQ 176(AX), CX
+ MOVQ CX, 48(SP)
+ MOVQ 184(AX), AX
+ MOVQ AX, 40(SP)
+ MOVQ 40(SP), AX
+ ADDQ AX, 48(SP)
+
+	// Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
+ ADDQ R10, 32(SP)
+
+ // outBase += outPosition
+ ADDQ R12, R10
+
+sequenceDecs_decodeSync_safe_amd64_main_loop:
+ MOVQ (SP), R13
+
+ // Fill bitreader to have enough for the offset and match length.
+ CMPQ SI, $0x08
+ JL sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte
+ MOVQ BX, AX
+ SHRQ $0x03, AX
+ SUBQ AX, R13
+ MOVQ (R13), DX
+ SUBQ AX, SI
+ ANDQ $0x07, BX
+ JMP sequenceDecs_decodeSync_safe_amd64_fill_end
+
+sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte:
+ CMPQ SI, $0x00
+ JLE sequenceDecs_decodeSync_safe_amd64_fill_end
+ CMPQ BX, $0x07
+ JLE sequenceDecs_decodeSync_safe_amd64_fill_end
+ SHLQ $0x08, DX
+ SUBQ $0x01, R13
+ SUBQ $0x01, SI
+ SUBQ $0x08, BX
+ MOVBQZX (R13), AX
+ ORQ AX, DX
+ JMP sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte
+
+sequenceDecs_decodeSync_safe_amd64_fill_end:
+ // Update offset
+ MOVQ R9, AX
+ MOVQ BX, CX
+ MOVQ DX, R14
+ SHLQ CL, R14
+ MOVB AH, CL
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ JZ sequenceDecs_decodeSync_safe_amd64_of_update_zero
+ ADDQ CX, BX
+ CMPQ BX, $0x40
+ JA sequenceDecs_decodeSync_safe_amd64_of_update_zero
+ CMPQ CX, $0x40
+ JAE sequenceDecs_decodeSync_safe_amd64_of_update_zero
+ NEGQ CX
+ SHRQ CL, R14
+ ADDQ R14, AX
+
+sequenceDecs_decodeSync_safe_amd64_of_update_zero:
+ MOVQ AX, 8(SP)
+
+ // Update match length
+ MOVQ R8, AX
+ MOVQ BX, CX
+ MOVQ DX, R14
+ SHLQ CL, R14
+ MOVB AH, CL
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ JZ sequenceDecs_decodeSync_safe_amd64_ml_update_zero
+ ADDQ CX, BX
+ CMPQ BX, $0x40
+ JA sequenceDecs_decodeSync_safe_amd64_ml_update_zero
+ CMPQ CX, $0x40
+ JAE sequenceDecs_decodeSync_safe_amd64_ml_update_zero
+ NEGQ CX
+ SHRQ CL, R14
+ ADDQ R14, AX
+
+sequenceDecs_decodeSync_safe_amd64_ml_update_zero:
+ MOVQ AX, 16(SP)
+
+ // Fill bitreader to have enough for the remaining
+ CMPQ SI, $0x08
+ JL sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte
+ MOVQ BX, AX
+ SHRQ $0x03, AX
+ SUBQ AX, R13
+ MOVQ (R13), DX
+ SUBQ AX, SI
+ ANDQ $0x07, BX
+ JMP sequenceDecs_decodeSync_safe_amd64_fill_2_end
+
+sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte:
+ CMPQ SI, $0x00
+ JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end
+ CMPQ BX, $0x07
+ JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end
+ SHLQ $0x08, DX
+ SUBQ $0x01, R13
+ SUBQ $0x01, SI
+ SUBQ $0x08, BX
+ MOVBQZX (R13), AX
+ ORQ AX, DX
+ JMP sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte
+
+sequenceDecs_decodeSync_safe_amd64_fill_2_end:
+ // Update literal length
+ MOVQ DI, AX
+ MOVQ BX, CX
+ MOVQ DX, R14
+ SHLQ CL, R14
+ MOVB AH, CL
+ SHRQ $0x20, AX
+ TESTQ CX, CX
+ JZ sequenceDecs_decodeSync_safe_amd64_ll_update_zero
+ ADDQ CX, BX
+ CMPQ BX, $0x40
+ JA sequenceDecs_decodeSync_safe_amd64_ll_update_zero
+ CMPQ CX, $0x40
+ JAE sequenceDecs_decodeSync_safe_amd64_ll_update_zero
+ NEGQ CX
+ SHRQ CL, R14
+ ADDQ R14, AX
+
+sequenceDecs_decodeSync_safe_amd64_ll_update_zero:
+ MOVQ AX, 24(SP)
+
+ // Fill bitreader for state updates
+ MOVQ R13, (SP)
+ MOVQ R9, AX
+ SHRQ $0x08, AX
+ MOVBQZX AL, AX
+ MOVQ ctx+16(FP), CX
+ CMPQ 96(CX), $0x00
+ JZ sequenceDecs_decodeSync_safe_amd64_skip_update
+
+ // Update Literal Length State
+ MOVBQZX DI, R13
+ SHRQ $0x10, DI
+ MOVWQZX DI, DI
+ LEAQ (BX)(R13*1), CX
+ MOVQ DX, R14
+ MOVQ CX, BX
+ ROLQ CL, R14
+ MOVL $0x00000001, R15
+ MOVB R13, CL
+ SHLL CL, R15
+ DECL R15
+ ANDQ R15, R14
+ ADDQ R14, DI
+
+ // Load ctx.llTable
+ MOVQ ctx+16(FP), CX
+ MOVQ (CX), CX
+ MOVQ (CX)(DI*8), DI
+
+ // Update Match Length State
+ MOVBQZX R8, R13
+ SHRQ $0x10, R8
+ MOVWQZX R8, R8
+ LEAQ (BX)(R13*1), CX
+ MOVQ DX, R14
+ MOVQ CX, BX
+ ROLQ CL, R14
+ MOVL $0x00000001, R15
+ MOVB R13, CL
+ SHLL CL, R15
+ DECL R15
+ ANDQ R15, R14
+ ADDQ R14, R8
+
+ // Load ctx.mlTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 24(CX), CX
+ MOVQ (CX)(R8*8), R8
+
+ // Update Offset State
+ MOVBQZX R9, R13
+ SHRQ $0x10, R9
+ MOVWQZX R9, R9
+ LEAQ (BX)(R13*1), CX
+ MOVQ DX, R14
+ MOVQ CX, BX
+ ROLQ CL, R14
+ MOVL $0x00000001, R15
+ MOVB R13, CL
+ SHLL CL, R15
+ DECL R15
+ ANDQ R15, R14
+ ADDQ R14, R9
+
+ // Load ctx.ofTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 48(CX), CX
+ MOVQ (CX)(R9*8), R9
+
+sequenceDecs_decodeSync_safe_amd64_skip_update:
+ // Adjust offset
+ MOVQ s+0(FP), CX
+ MOVQ 8(SP), R13
+ CMPQ AX, $0x01
+ JBE sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0
+ MOVUPS 144(CX), X0
+ MOVQ R13, 144(CX)
+ MOVUPS X0, 152(CX)
+ JMP sequenceDecs_decodeSync_safe_amd64_after_adjust
+
+sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0:
+ CMPQ 24(SP), $0x00000000
+ JNE sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero
+ INCQ R13
+ JMP sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero
+
+sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero:
+ TESTQ R13, R13
+ JNZ sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero
+ MOVQ 144(CX), R13
+ JMP sequenceDecs_decodeSync_safe_amd64_after_adjust
+
+sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero:
+ MOVQ R13, AX
+ XORQ R14, R14
+ MOVQ $-1, R15
+ CMPQ R13, $0x03
+ CMOVQEQ R14, AX
+ CMOVQEQ R15, R14
+ ADDQ 144(CX)(AX*8), R14
+ JNZ sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid
+ MOVQ $0x00000001, R14
+
+sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid:
+ CMPQ R13, $0x01
+ JZ sequenceDecs_decodeSync_safe_amd64_adjust_skip
+ MOVQ 152(CX), AX
+ MOVQ AX, 160(CX)
+
+sequenceDecs_decodeSync_safe_amd64_adjust_skip:
+ MOVQ 144(CX), AX
+ MOVQ AX, 152(CX)
+ MOVQ R14, 144(CX)
+ MOVQ R14, R13
+
+sequenceDecs_decodeSync_safe_amd64_after_adjust:
+ MOVQ R13, 8(SP)
+
+ // Check values
+ MOVQ 16(SP), AX
+ MOVQ 24(SP), CX
+ LEAQ (AX)(CX*1), R14
+ MOVQ s+0(FP), R15
+ ADDQ R14, 256(R15)
+ MOVQ ctx+16(FP), R14
+ SUBQ CX, 104(R14)
+ JS error_not_enough_literals
+ CMPQ AX, $0x00020002
+ JA sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big
+ TESTQ R13, R13
+ JNZ sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok
+ TESTQ AX, AX
+ JNZ sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch
+
+sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok:
+ MOVQ 24(SP), AX
+ MOVQ 8(SP), CX
+ MOVQ 16(SP), R13
+
+ // Check if we have enough space in s.out
+ LEAQ (AX)(R13*1), R14
+ ADDQ R10, R14
+ CMPQ R14, 32(SP)
+ JA error_not_enough_space
+
+ // Copy literals
+ TESTQ AX, AX
+ JZ check_offset
+ MOVQ AX, R14
+ SUBQ $0x10, R14
+ JB copy_1_small
+
+copy_1_loop:
+ MOVUPS (R11), X0
+ MOVUPS X0, (R10)
+ ADDQ $0x10, R11
+ ADDQ $0x10, R10
+ SUBQ $0x10, R14
+ JAE copy_1_loop
+ LEAQ 16(R11)(R14*1), R11
+ LEAQ 16(R10)(R14*1), R10
+ MOVUPS -16(R11), X0
+ MOVUPS X0, -16(R10)
+ JMP copy_1_end
+
+copy_1_small:
+ CMPQ AX, $0x03
+ JE copy_1_move_3
+ JB copy_1_move_1or2
+ CMPQ AX, $0x08
+ JB copy_1_move_4through7
+ JMP copy_1_move_8through16
+
+copy_1_move_1or2:
+ MOVB (R11), R14
+ MOVB -1(R11)(AX*1), R15
+ MOVB R14, (R10)
+ MOVB R15, -1(R10)(AX*1)
+ ADDQ AX, R11
+ ADDQ AX, R10
+ JMP copy_1_end
+
+copy_1_move_3:
+ MOVW (R11), R14
+ MOVB 2(R11), R15
+ MOVW R14, (R10)
+ MOVB R15, 2(R10)
+ ADDQ AX, R11
+ ADDQ AX, R10
+ JMP copy_1_end
+
+copy_1_move_4through7:
+ MOVL (R11), R14
+ MOVL -4(R11)(AX*1), R15
+ MOVL R14, (R10)
+ MOVL R15, -4(R10)(AX*1)
+ ADDQ AX, R11
+ ADDQ AX, R10
+ JMP copy_1_end
+
+copy_1_move_8through16:
+ MOVQ (R11), R14
+ MOVQ -8(R11)(AX*1), R15
+ MOVQ R14, (R10)
+ MOVQ R15, -8(R10)(AX*1)
+ ADDQ AX, R11
+ ADDQ AX, R10
+
+copy_1_end:
+ ADDQ AX, R12
+
+	// Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize
+check_offset:
+ MOVQ R12, AX
+ ADDQ 40(SP), AX
+ CMPQ CX, AX
+ JG error_match_off_too_big
+ CMPQ CX, 56(SP)
+ JG error_match_off_too_big
+
+ // Copy match from history
+ MOVQ CX, AX
+ SUBQ R12, AX
+ JLS copy_match
+ MOVQ 48(SP), R14
+ SUBQ AX, R14
+ CMPQ R13, AX
+ JG copy_all_from_history
+ MOVQ R13, AX
+ SUBQ $0x10, AX
+ JB copy_4_small
+
+copy_4_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (R10)
+ ADDQ $0x10, R14
+ ADDQ $0x10, R10
+ SUBQ $0x10, AX
+ JAE copy_4_loop
+ LEAQ 16(R14)(AX*1), R14
+ LEAQ 16(R10)(AX*1), R10
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(R10)
+ JMP copy_4_end
+
+copy_4_small:
+ CMPQ R13, $0x03
+ JE copy_4_move_3
+ CMPQ R13, $0x08
+ JB copy_4_move_4through7
+ JMP copy_4_move_8through16
+
+copy_4_move_3:
+ MOVW (R14), AX
+ MOVB 2(R14), CL
+ MOVW AX, (R10)
+ MOVB CL, 2(R10)
+ ADDQ R13, R14
+ ADDQ R13, R10
+ JMP copy_4_end
+
+copy_4_move_4through7:
+ MOVL (R14), AX
+ MOVL -4(R14)(R13*1), CX
+ MOVL AX, (R10)
+ MOVL CX, -4(R10)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, R10
+ JMP copy_4_end
+
+copy_4_move_8through16:
+ MOVQ (R14), AX
+ MOVQ -8(R14)(R13*1), CX
+ MOVQ AX, (R10)
+ MOVQ CX, -8(R10)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, R10
+
+copy_4_end:
+ ADDQ R13, R12
+ JMP handle_loop
+ JMP loop_finished
+
+copy_all_from_history:
+ MOVQ AX, R15
+ SUBQ $0x10, R15
+ JB copy_5_small
+
+copy_5_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (R10)
+ ADDQ $0x10, R14
+ ADDQ $0x10, R10
+ SUBQ $0x10, R15
+ JAE copy_5_loop
+ LEAQ 16(R14)(R15*1), R14
+ LEAQ 16(R10)(R15*1), R10
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(R10)
+ JMP copy_5_end
+
+copy_5_small:
+ CMPQ AX, $0x03
+ JE copy_5_move_3
+ JB copy_5_move_1or2
+ CMPQ AX, $0x08
+ JB copy_5_move_4through7
+ JMP copy_5_move_8through16
+
+copy_5_move_1or2:
+ MOVB (R14), R15
+ MOVB -1(R14)(AX*1), BP
+ MOVB R15, (R10)
+ MOVB BP, -1(R10)(AX*1)
+ ADDQ AX, R14
+ ADDQ AX, R10
+ JMP copy_5_end
+
+copy_5_move_3:
+ MOVW (R14), R15
+ MOVB 2(R14), BP
+ MOVW R15, (R10)
+ MOVB BP, 2(R10)
+ ADDQ AX, R14
+ ADDQ AX, R10
+ JMP copy_5_end
+
+copy_5_move_4through7:
+ MOVL (R14), R15
+ MOVL -4(R14)(AX*1), BP
+ MOVL R15, (R10)
+ MOVL BP, -4(R10)(AX*1)
+ ADDQ AX, R14
+ ADDQ AX, R10
+ JMP copy_5_end
+
+copy_5_move_8through16:
+ MOVQ (R14), R15
+ MOVQ -8(R14)(AX*1), BP
+ MOVQ R15, (R10)
+ MOVQ BP, -8(R10)(AX*1)
+ ADDQ AX, R14
+ ADDQ AX, R10
+
+copy_5_end:
+ ADDQ AX, R12
+ SUBQ AX, R13
+
+ // Copy match from the current buffer
+copy_match:
+ MOVQ R10, AX
+ SUBQ CX, AX
+
+ // ml <= mo
+ CMPQ R13, CX
+ JA copy_overlapping_match
+
+ // Copy non-overlapping match
+ ADDQ R13, R12
+ MOVQ R13, CX
+ SUBQ $0x10, CX
+ JB copy_2_small
+
+copy_2_loop:
+ MOVUPS (AX), X0
+ MOVUPS X0, (R10)
+ ADDQ $0x10, AX
+ ADDQ $0x10, R10
+ SUBQ $0x10, CX
+ JAE copy_2_loop
+ LEAQ 16(AX)(CX*1), AX
+ LEAQ 16(R10)(CX*1), R10
+ MOVUPS -16(AX), X0
+ MOVUPS X0, -16(R10)
+ JMP copy_2_end
+
+copy_2_small:
+ CMPQ R13, $0x03
+ JE copy_2_move_3
+ JB copy_2_move_1or2
+ CMPQ R13, $0x08
+ JB copy_2_move_4through7
+ JMP copy_2_move_8through16
+
+copy_2_move_1or2:
+ MOVB (AX), CL
+ MOVB -1(AX)(R13*1), R14
+ MOVB CL, (R10)
+ MOVB R14, -1(R10)(R13*1)
+ ADDQ R13, AX
+ ADDQ R13, R10
+ JMP copy_2_end
+
+copy_2_move_3:
+ MOVW (AX), CX
+ MOVB 2(AX), R14
+ MOVW CX, (R10)
+ MOVB R14, 2(R10)
+ ADDQ R13, AX
+ ADDQ R13, R10
+ JMP copy_2_end
+
+copy_2_move_4through7:
+ MOVL (AX), CX
+ MOVL -4(AX)(R13*1), R14
+ MOVL CX, (R10)
+ MOVL R14, -4(R10)(R13*1)
+ ADDQ R13, AX
+ ADDQ R13, R10
+ JMP copy_2_end
+
+copy_2_move_8through16:
+ MOVQ (AX), CX
+ MOVQ -8(AX)(R13*1), R14
+ MOVQ CX, (R10)
+ MOVQ R14, -8(R10)(R13*1)
+ ADDQ R13, AX
+ ADDQ R13, R10
+
+copy_2_end:
+ JMP handle_loop
+
+ // Copy overlapping match
+copy_overlapping_match:
+ ADDQ R13, R12
+
+copy_slow_3:
+ MOVB (AX), CL
+ MOVB CL, (R10)
+ INCQ AX
+ INCQ R10
+ DECQ R13
+ JNZ copy_slow_3
+
+handle_loop:
+ MOVQ ctx+16(FP), AX
+ DECQ 96(AX)
+ JNS sequenceDecs_decodeSync_safe_amd64_main_loop
+
+loop_finished:
+ MOVQ br+8(FP), AX
+ MOVQ DX, 32(AX)
+ MOVB BL, 40(AX)
+ MOVQ SI, 24(AX)
+
+ // Update the context
+ MOVQ ctx+16(FP), AX
+ MOVQ R12, 136(AX)
+ MOVQ 144(AX), CX
+ SUBQ CX, R11
+ MOVQ R11, 168(AX)
+
+ // Return success
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+ // Return with match length error
+sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch:
+ MOVQ 16(SP), AX
+ MOVQ ctx+16(FP), CX
+ MOVQ AX, 216(CX)
+ MOVQ $0x00000001, ret+24(FP)
+ RET
+
+ // Return with match too long error
+sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big:
+ MOVQ ctx+16(FP), AX
+ MOVQ 16(SP), CX
+ MOVQ CX, 216(AX)
+ MOVQ $0x00000002, ret+24(FP)
+ RET
+
+ // Return with match offset too long error
+error_match_off_too_big:
+ MOVQ ctx+16(FP), AX
+ MOVQ 8(SP), CX
+ MOVQ CX, 224(AX)
+ MOVQ R12, 136(AX)
+ MOVQ $0x00000003, ret+24(FP)
+ RET
+
+ // Return with not enough literals error
+error_not_enough_literals:
+ MOVQ ctx+16(FP), AX
+ MOVQ 24(SP), CX
+ MOVQ CX, 208(AX)
+ MOVQ $0x00000004, ret+24(FP)
+ RET
+
+ // Return with not enough output space error
+error_not_enough_space:
+ MOVQ ctx+16(FP), AX
+ MOVQ 24(SP), CX
+ MOVQ CX, 208(AX)
+ MOVQ 16(SP), CX
+ MOVQ CX, 216(AX)
+ MOVQ R12, 136(AX)
+ MOVQ $0x00000005, ret+24(FP)
+ RET
+
+// func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+// Requires: BMI, BMI2, CMOV, SSE
+TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32
+ MOVQ br+8(FP), CX
+ MOVQ 32(CX), AX
+ MOVBQZX 40(CX), DX
+ MOVQ 24(CX), BX
+ MOVQ (CX), CX
+ ADDQ BX, CX
+ MOVQ CX, (SP)
+ MOVQ ctx+16(FP), CX
+ MOVQ 72(CX), SI
+ MOVQ 80(CX), DI
+ MOVQ 88(CX), R8
+ XORQ R9, R9
+ MOVQ R9, 8(SP)
+ MOVQ R9, 16(SP)
+ MOVQ R9, 24(SP)
+ MOVQ 112(CX), R9
+ MOVQ 128(CX), R10
+ MOVQ R10, 32(SP)
+ MOVQ 144(CX), R10
+ MOVQ 136(CX), R11
+ MOVQ 200(CX), R12
+ MOVQ R12, 56(SP)
+ MOVQ 176(CX), R12
+ MOVQ R12, 48(SP)
+ MOVQ 184(CX), CX
+ MOVQ CX, 40(SP)
+ MOVQ 40(SP), CX
+ ADDQ CX, 48(SP)
+
+	// Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
+ ADDQ R9, 32(SP)
+
+ // outBase += outPosition
+ ADDQ R11, R9
+
+sequenceDecs_decodeSync_safe_bmi2_main_loop:
+ MOVQ (SP), R12
+
+ // Fill bitreader to have enough for the offset and match length.
+ CMPQ BX, $0x08
+ JL sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte
+ MOVQ DX, CX
+ SHRQ $0x03, CX
+ SUBQ CX, R12
+ MOVQ (R12), AX
+ SUBQ CX, BX
+ ANDQ $0x07, DX
+ JMP sequenceDecs_decodeSync_safe_bmi2_fill_end
+
+sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte:
+ CMPQ BX, $0x00
+ JLE sequenceDecs_decodeSync_safe_bmi2_fill_end
+ CMPQ DX, $0x07
+ JLE sequenceDecs_decodeSync_safe_bmi2_fill_end
+ SHLQ $0x08, AX
+ SUBQ $0x01, R12
+ SUBQ $0x01, BX
+ SUBQ $0x08, DX
+ MOVBQZX (R12), CX
+ ORQ CX, AX
+ JMP sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte
+
+sequenceDecs_decodeSync_safe_bmi2_fill_end:
+ // Update offset
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, R8, R13
+ MOVQ AX, R14
+ LEAQ (DX)(R13*1), CX
+ ROLQ CL, R14
+ BZHIQ R13, R14, R14
+ MOVQ CX, DX
+ MOVQ R8, CX
+ SHRQ $0x20, CX
+ ADDQ R14, CX
+ MOVQ CX, 8(SP)
+
+ // Update match length
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, DI, R13
+ MOVQ AX, R14
+ LEAQ (DX)(R13*1), CX
+ ROLQ CL, R14
+ BZHIQ R13, R14, R14
+ MOVQ CX, DX
+ MOVQ DI, CX
+ SHRQ $0x20, CX
+ ADDQ R14, CX
+ MOVQ CX, 16(SP)
+
+ // Fill bitreader to have enough for the remaining
+ CMPQ BX, $0x08
+ JL sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte
+ MOVQ DX, CX
+ SHRQ $0x03, CX
+ SUBQ CX, R12
+ MOVQ (R12), AX
+ SUBQ CX, BX
+ ANDQ $0x07, DX
+ JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_end
+
+sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte:
+ CMPQ BX, $0x00
+ JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end
+ CMPQ DX, $0x07
+ JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end
+ SHLQ $0x08, AX
+ SUBQ $0x01, R12
+ SUBQ $0x01, BX
+ SUBQ $0x08, DX
+ MOVBQZX (R12), CX
+ ORQ CX, AX
+ JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte
+
+sequenceDecs_decodeSync_safe_bmi2_fill_2_end:
+ // Update literal length
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, SI, R13
+ MOVQ AX, R14
+ LEAQ (DX)(R13*1), CX
+ ROLQ CL, R14
+ BZHIQ R13, R14, R14
+ MOVQ CX, DX
+ MOVQ SI, CX
+ SHRQ $0x20, CX
+ ADDQ R14, CX
+ MOVQ CX, 24(SP)
+
+ // Fill bitreader for state updates
+ MOVQ R12, (SP)
+ MOVQ $0x00000808, CX
+ BEXTRQ CX, R8, R12
+ MOVQ ctx+16(FP), CX
+ CMPQ 96(CX), $0x00
+ JZ sequenceDecs_decodeSync_safe_bmi2_skip_update
+ LEAQ (SI)(DI*1), R13
+ ADDQ R8, R13
+ MOVBQZX R13, R13
+ LEAQ (DX)(R13*1), CX
+ MOVQ AX, R14
+ MOVQ CX, DX
+ ROLQ CL, R14
+ BZHIQ R13, R14, R14
+
+ // Update Offset State
+ BZHIQ R8, R14, CX
+ SHRXQ R8, R14, R14
+ MOVQ $0x00001010, R13
+ BEXTRQ R13, R8, R8
+ ADDQ CX, R8
+
+ // Load ctx.ofTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 48(CX), CX
+ MOVQ (CX)(R8*8), R8
+
+ // Update Match Length State
+ BZHIQ DI, R14, CX
+ SHRXQ DI, R14, R14
+ MOVQ $0x00001010, R13
+ BEXTRQ R13, DI, DI
+ ADDQ CX, DI
+
+ // Load ctx.mlTable
+ MOVQ ctx+16(FP), CX
+ MOVQ 24(CX), CX
+ MOVQ (CX)(DI*8), DI
+
+ // Update Literal Length State
+ BZHIQ SI, R14, CX
+ MOVQ $0x00001010, R13
+ BEXTRQ R13, SI, SI
+ ADDQ CX, SI
+
+ // Load ctx.llTable
+ MOVQ ctx+16(FP), CX
+ MOVQ (CX), CX
+ MOVQ (CX)(SI*8), SI
+
+sequenceDecs_decodeSync_safe_bmi2_skip_update:
+ // Adjust offset
+ MOVQ s+0(FP), CX
+ MOVQ 8(SP), R13
+ CMPQ R12, $0x01
+ JBE sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0
+ MOVUPS 144(CX), X0
+ MOVQ R13, 144(CX)
+ MOVUPS X0, 152(CX)
+ JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust
+
+sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0:
+ CMPQ 24(SP), $0x00000000
+ JNE sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero
+ INCQ R13
+ JMP sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero
+
+sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero:
+ TESTQ R13, R13
+ JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero
+ MOVQ 144(CX), R13
+ JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust
+
+sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero:
+ MOVQ R13, R12
+ XORQ R14, R14
+ MOVQ $-1, R15
+ CMPQ R13, $0x03
+ CMOVQEQ R14, R12
+ CMOVQEQ R15, R14
+ ADDQ 144(CX)(R12*8), R14
+ JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid
+ MOVQ $0x00000001, R14
+
+sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid:
+ CMPQ R13, $0x01
+ JZ sequenceDecs_decodeSync_safe_bmi2_adjust_skip
+ MOVQ 152(CX), R12
+ MOVQ R12, 160(CX)
+
+sequenceDecs_decodeSync_safe_bmi2_adjust_skip:
+ MOVQ 144(CX), R12
+ MOVQ R12, 152(CX)
+ MOVQ R14, 144(CX)
+ MOVQ R14, R13
+
+sequenceDecs_decodeSync_safe_bmi2_after_adjust:
+ MOVQ R13, 8(SP)
+
+ // Check values
+ MOVQ 16(SP), CX
+ MOVQ 24(SP), R12
+ LEAQ (CX)(R12*1), R14
+ MOVQ s+0(FP), R15
+ ADDQ R14, 256(R15)
+ MOVQ ctx+16(FP), R14
+ SUBQ R12, 104(R14)
+ JS error_not_enough_literals
+ CMPQ CX, $0x00020002
+ JA sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big
+ TESTQ R13, R13
+ JNZ sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok
+ TESTQ CX, CX
+ JNZ sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch
+
+sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok:
+ MOVQ 24(SP), CX
+ MOVQ 8(SP), R12
+ MOVQ 16(SP), R13
+
+ // Check if we have enough space in s.out
+ LEAQ (CX)(R13*1), R14
+ ADDQ R9, R14
+ CMPQ R14, 32(SP)
+ JA error_not_enough_space
+
+ // Copy literals
+ TESTQ CX, CX
+ JZ check_offset
+ MOVQ CX, R14
+ SUBQ $0x10, R14
+ JB copy_1_small
+
+copy_1_loop:
+ MOVUPS (R10), X0
+ MOVUPS X0, (R9)
+ ADDQ $0x10, R10
+ ADDQ $0x10, R9
+ SUBQ $0x10, R14
+ JAE copy_1_loop
+ LEAQ 16(R10)(R14*1), R10
+ LEAQ 16(R9)(R14*1), R9
+ MOVUPS -16(R10), X0
+ MOVUPS X0, -16(R9)
+ JMP copy_1_end
+
+copy_1_small:
+ CMPQ CX, $0x03
+ JE copy_1_move_3
+ JB copy_1_move_1or2
+ CMPQ CX, $0x08
+ JB copy_1_move_4through7
+ JMP copy_1_move_8through16
+
+copy_1_move_1or2:
+ MOVB (R10), R14
+ MOVB -1(R10)(CX*1), R15
+ MOVB R14, (R9)
+ MOVB R15, -1(R9)(CX*1)
+ ADDQ CX, R10
+ ADDQ CX, R9
+ JMP copy_1_end
+
+copy_1_move_3:
+ MOVW (R10), R14
+ MOVB 2(R10), R15
+ MOVW R14, (R9)
+ MOVB R15, 2(R9)
+ ADDQ CX, R10
+ ADDQ CX, R9
+ JMP copy_1_end
+
+copy_1_move_4through7:
+ MOVL (R10), R14
+ MOVL -4(R10)(CX*1), R15
+ MOVL R14, (R9)
+ MOVL R15, -4(R9)(CX*1)
+ ADDQ CX, R10
+ ADDQ CX, R9
+ JMP copy_1_end
+
+copy_1_move_8through16:
+ MOVQ (R10), R14
+ MOVQ -8(R10)(CX*1), R15
+ MOVQ R14, (R9)
+ MOVQ R15, -8(R9)(CX*1)
+ ADDQ CX, R10
+ ADDQ CX, R9
+
+copy_1_end:
+ ADDQ CX, R11
+
+	// Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize
+check_offset:
+ MOVQ R11, CX
+ ADDQ 40(SP), CX
+ CMPQ R12, CX
+ JG error_match_off_too_big
+ CMPQ R12, 56(SP)
+ JG error_match_off_too_big
+
+ // Copy match from history
+ MOVQ R12, CX
+ SUBQ R11, CX
+ JLS copy_match
+ MOVQ 48(SP), R14
+ SUBQ CX, R14
+ CMPQ R13, CX
+ JG copy_all_from_history
+ MOVQ R13, CX
+ SUBQ $0x10, CX
+ JB copy_4_small
+
+copy_4_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (R9)
+ ADDQ $0x10, R14
+ ADDQ $0x10, R9
+ SUBQ $0x10, CX
+ JAE copy_4_loop
+ LEAQ 16(R14)(CX*1), R14
+ LEAQ 16(R9)(CX*1), R9
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(R9)
+ JMP copy_4_end
+
+copy_4_small:
+ CMPQ R13, $0x03
+ JE copy_4_move_3
+ CMPQ R13, $0x08
+ JB copy_4_move_4through7
+ JMP copy_4_move_8through16
+
+copy_4_move_3:
+ MOVW (R14), CX
+ MOVB 2(R14), R12
+ MOVW CX, (R9)
+ MOVB R12, 2(R9)
+ ADDQ R13, R14
+ ADDQ R13, R9
+ JMP copy_4_end
+
+copy_4_move_4through7:
+ MOVL (R14), CX
+ MOVL -4(R14)(R13*1), R12
+ MOVL CX, (R9)
+ MOVL R12, -4(R9)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, R9
+ JMP copy_4_end
+
+copy_4_move_8through16:
+ MOVQ (R14), CX
+ MOVQ -8(R14)(R13*1), R12
+ MOVQ CX, (R9)
+ MOVQ R12, -8(R9)(R13*1)
+ ADDQ R13, R14
+ ADDQ R13, R9
+
+copy_4_end:
+ ADDQ R13, R11
+ JMP handle_loop
+ JMP loop_finished
+
+copy_all_from_history:
+ MOVQ CX, R15
+ SUBQ $0x10, R15
+ JB copy_5_small
+
+copy_5_loop:
+ MOVUPS (R14), X0
+ MOVUPS X0, (R9)
+ ADDQ $0x10, R14
+ ADDQ $0x10, R9
+ SUBQ $0x10, R15
+ JAE copy_5_loop
+ LEAQ 16(R14)(R15*1), R14
+ LEAQ 16(R9)(R15*1), R9
+ MOVUPS -16(R14), X0
+ MOVUPS X0, -16(R9)
+ JMP copy_5_end
+
+copy_5_small:
+ CMPQ CX, $0x03
+ JE copy_5_move_3
+ JB copy_5_move_1or2
+ CMPQ CX, $0x08
+ JB copy_5_move_4through7
+ JMP copy_5_move_8through16
+
+copy_5_move_1or2:
+ MOVB (R14), R15
+ MOVB -1(R14)(CX*1), BP
+ MOVB R15, (R9)
+ MOVB BP, -1(R9)(CX*1)
+ ADDQ CX, R14
+ ADDQ CX, R9
+ JMP copy_5_end
+
+copy_5_move_3:
+ MOVW (R14), R15
+ MOVB 2(R14), BP
+ MOVW R15, (R9)
+ MOVB BP, 2(R9)
+ ADDQ CX, R14
+ ADDQ CX, R9
+ JMP copy_5_end
+
+copy_5_move_4through7:
+ MOVL (R14), R15
+ MOVL -4(R14)(CX*1), BP
+ MOVL R15, (R9)
+ MOVL BP, -4(R9)(CX*1)
+ ADDQ CX, R14
+ ADDQ CX, R9
+ JMP copy_5_end
+
+copy_5_move_8through16:
+ MOVQ (R14), R15
+ MOVQ -8(R14)(CX*1), BP
+ MOVQ R15, (R9)
+ MOVQ BP, -8(R9)(CX*1)
+ ADDQ CX, R14
+ ADDQ CX, R9
+
+copy_5_end:
+ ADDQ CX, R11
+ SUBQ CX, R13
+
+ // Copy match from the current buffer
+copy_match:
+ MOVQ R9, CX
+ SUBQ R12, CX
+
+ // ml <= mo
+ CMPQ R13, R12
+ JA copy_overlapping_match
+
+ // Copy non-overlapping match
+ ADDQ R13, R11
+ MOVQ R13, R12
+ SUBQ $0x10, R12
+ JB copy_2_small
+
+copy_2_loop:
+ MOVUPS (CX), X0
+ MOVUPS X0, (R9)
+ ADDQ $0x10, CX
+ ADDQ $0x10, R9
+ SUBQ $0x10, R12
+ JAE copy_2_loop
+ LEAQ 16(CX)(R12*1), CX
+ LEAQ 16(R9)(R12*1), R9
+ MOVUPS -16(CX), X0
+ MOVUPS X0, -16(R9)
+ JMP copy_2_end
+
+copy_2_small:
+ CMPQ R13, $0x03
+ JE copy_2_move_3
+ JB copy_2_move_1or2
+ CMPQ R13, $0x08
+ JB copy_2_move_4through7
+ JMP copy_2_move_8through16
+
+copy_2_move_1or2:
+ MOVB (CX), R12
+ MOVB -1(CX)(R13*1), R14
+ MOVB R12, (R9)
+ MOVB R14, -1(R9)(R13*1)
+ ADDQ R13, CX
+ ADDQ R13, R9
+ JMP copy_2_end
+
+copy_2_move_3:
+ MOVW (CX), R12
+ MOVB 2(CX), R14
+ MOVW R12, (R9)
+ MOVB R14, 2(R9)
+ ADDQ R13, CX
+ ADDQ R13, R9
+ JMP copy_2_end
+
+copy_2_move_4through7:
+ MOVL (CX), R12
+ MOVL -4(CX)(R13*1), R14
+ MOVL R12, (R9)
+ MOVL R14, -4(R9)(R13*1)
+ ADDQ R13, CX
+ ADDQ R13, R9
+ JMP copy_2_end
+
+copy_2_move_8through16:
+ MOVQ (CX), R12
+ MOVQ -8(CX)(R13*1), R14
+ MOVQ R12, (R9)
+ MOVQ R14, -8(R9)(R13*1)
+ ADDQ R13, CX
+ ADDQ R13, R9
+
+copy_2_end:
+ JMP handle_loop
+
+ // Copy overlapping match
+copy_overlapping_match:
+ ADDQ R13, R11
+
+copy_slow_3:
+ MOVB (CX), R12
+ MOVB R12, (R9)
+ INCQ CX
+ INCQ R9
+ DECQ R13
+ JNZ copy_slow_3
+
+handle_loop:
+ MOVQ ctx+16(FP), CX
+ DECQ 96(CX)
+ JNS sequenceDecs_decodeSync_safe_bmi2_main_loop
+
+loop_finished:
+ MOVQ br+8(FP), CX
+ MOVQ AX, 32(CX)
+ MOVB DL, 40(CX)
+ MOVQ BX, 24(CX)
+
+ // Update the context
+ MOVQ ctx+16(FP), AX
+ MOVQ R11, 136(AX)
+ MOVQ 144(AX), CX
+ SUBQ CX, R10
+ MOVQ R10, 168(AX)
+
+ // Return success
+ MOVQ $0x00000000, ret+24(FP)
+ RET
+
+ // Return with match length error
+sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch:
+ MOVQ 16(SP), AX
+ MOVQ ctx+16(FP), CX
+ MOVQ AX, 216(CX)
+ MOVQ $0x00000001, ret+24(FP)
+ RET
+
+ // Return with match too long error
+sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big:
+ MOVQ ctx+16(FP), AX
+ MOVQ 16(SP), CX
+ MOVQ CX, 216(AX)
+ MOVQ $0x00000002, ret+24(FP)
+ RET
+
+ // Return with match offset too long error
+error_match_off_too_big:
+ MOVQ ctx+16(FP), AX
+ MOVQ 8(SP), CX
+ MOVQ CX, 224(AX)
+ MOVQ R11, 136(AX)
+ MOVQ $0x00000003, ret+24(FP)
+ RET
+
+ // Return with not enough literals error
+error_not_enough_literals:
+ MOVQ ctx+16(FP), AX
+ MOVQ 24(SP), CX
+ MOVQ CX, 208(AX)
+ MOVQ $0x00000004, ret+24(FP)
+ RET
+
+ // Return with not enough output space error
+error_not_enough_space:
+ MOVQ ctx+16(FP), AX
+ MOVQ 24(SP), CX
+ MOVQ CX, 208(AX)
+ MOVQ 16(SP), CX
+ MOVQ CX, 216(AX)
+ MOVQ R11, 136(AX)
+ MOVQ $0x00000005, ret+24(FP)
+ RET
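Before the generic fallback in the next file, one note on the generated assembly above: every decoder variant validates the match offset in its check_offset block before copying. A standalone Go restatement of that guard (a sketch with made-up parameter names, not part of the vendored change) would look roughly like this:

package example

import "fmt"

// checkOffset restates the check_offset guard used by the assembly above:
// a match offset may not reach past the bytes already written plus the
// available history, and may not exceed the frame's window size.
func checkOffset(mo, outPos, histLen, windowSize int) error {
	if mo > outPos+histLen || mo > windowSize {
		return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, outPos+histLen)
	}
	return nil
}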
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go
new file mode 100644
index 0000000000000..ac2a80d291114
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go
@@ -0,0 +1,237 @@
+//go:build !amd64 || appengine || !gc || noasm
+// +build !amd64 appengine !gc noasm
+
+package zstd
+
+import (
+ "fmt"
+ "io"
+)
+
+// decode sequences from the stream with the provided history but without dictionary.
+func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
+ return false, nil
+}
+
+// decode sequences from the stream without the provided history.
+func (s *sequenceDecs) decode(seqs []seqVals) error {
+ br := s.br
+
+ // Grab full sizes tables, to avoid bounds checks.
+ llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
+ llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
+ s.seqSize = 0
+ litRemain := len(s.literals)
+
+ maxBlockSize := maxCompressedBlockSize
+ if s.windowSize < maxBlockSize {
+ maxBlockSize = s.windowSize
+ }
+ for i := range seqs {
+ var ll, mo, ml int
+ if br.off > 4+((maxOffsetBits+16+16)>>3) {
+ // inlined function:
+ // ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
+
+ // Final will not read from stream.
+ var llB, mlB, moB uint8
+ ll, llB = llState.final()
+ ml, mlB = mlState.final()
+ mo, moB = ofState.final()
+
+ // extra bits are stored in reverse order.
+ br.fillFast()
+ mo += br.getBits(moB)
+ if s.maxBits > 32 {
+ br.fillFast()
+ }
+ ml += br.getBits(mlB)
+ ll += br.getBits(llB)
+
+ if moB > 1 {
+ s.prevOffset[2] = s.prevOffset[1]
+ s.prevOffset[1] = s.prevOffset[0]
+ s.prevOffset[0] = mo
+ } else {
+ // mo = s.adjustOffset(mo, ll, moB)
+ // Inlined for rather big speedup
+ if ll == 0 {
+ // There is an exception though, when current sequence's literals_length = 0.
+ // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
+ // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
+ mo++
+ }
+
+ if mo == 0 {
+ mo = s.prevOffset[0]
+ } else {
+ var temp int
+ if mo == 3 {
+ temp = s.prevOffset[0] - 1
+ } else {
+ temp = s.prevOffset[mo]
+ }
+
+ if temp == 0 {
+ // 0 is not valid; input is corrupted; force offset to 1
+ println("WARNING: temp was 0")
+ temp = 1
+ }
+
+ if mo != 1 {
+ s.prevOffset[2] = s.prevOffset[1]
+ }
+ s.prevOffset[1] = s.prevOffset[0]
+ s.prevOffset[0] = temp
+ mo = temp
+ }
+ }
+ br.fillFast()
+ } else {
+ if br.overread() {
+ if debugDecoder {
+ printf("reading sequence %d, exceeded available data\n", i)
+ }
+ return io.ErrUnexpectedEOF
+ }
+ ll, mo, ml = s.next(br, llState, mlState, ofState)
+ br.fill()
+ }
+
+ if debugSequences {
+ println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml)
+ }
+ // Evaluate.
+ // We might be doing this async, so do it early.
+ if mo == 0 && ml > 0 {
+ return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
+ }
+ if ml > maxMatchLen {
+ return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
+ }
+ s.seqSize += ll + ml
+ if s.seqSize > maxBlockSize {
+ return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
+ }
+ litRemain -= ll
+ if litRemain < 0 {
+ return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll)
+ }
+ seqs[i] = seqVals{
+ ll: ll,
+ ml: ml,
+ mo: mo,
+ }
+ if i == len(seqs)-1 {
+ // This is the last sequence, so we shouldn't update state.
+ break
+ }
+
+ // Manually inlined, ~ 5-20% faster
+ // Update all 3 states at once. Approx 20% faster.
+ nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits()
+ if nBits == 0 {
+ llState = llTable[llState.newState()&maxTableMask]
+ mlState = mlTable[mlState.newState()&maxTableMask]
+ ofState = ofTable[ofState.newState()&maxTableMask]
+ } else {
+ bits := br.get32BitsFast(nBits)
+ lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31))
+ llState = llTable[(llState.newState()+lowBits)&maxTableMask]
+
+ lowBits = uint16(bits >> (ofState.nbBits() & 31))
+ lowBits &= bitMask[mlState.nbBits()&15]
+ mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask]
+
+ lowBits = uint16(bits) & bitMask[ofState.nbBits()&15]
+ ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask]
+ }
+ }
+ s.seqSize += litRemain
+ if s.seqSize > maxBlockSize {
+ return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
+ }
+ err := br.close()
+ if err != nil {
+ printf("Closing sequences: %v, %+v\n", err, *br)
+ }
+ return err
+}
+
+// executeSimple handles cases when a dictionary is not used.
+func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error {
+ // Ensure we have enough output size...
+ if len(s.out)+s.seqSize > cap(s.out) {
+ addBytes := s.seqSize + len(s.out)
+ s.out = append(s.out, make([]byte, addBytes)...)
+ s.out = s.out[:len(s.out)-addBytes]
+ }
+
+ if debugDecoder {
+ printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize)
+ }
+
+ var t = len(s.out)
+ out := s.out[:t+s.seqSize]
+
+ for _, seq := range seqs {
+ // Add literals
+ copy(out[t:], s.literals[:seq.ll])
+ t += seq.ll
+ s.literals = s.literals[seq.ll:]
+
+ // Malformed input
+ if seq.mo > t+len(hist) || seq.mo > s.windowSize {
+ return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist))
+ }
+
+ // Copy from history.
+ if v := seq.mo - t; v > 0 {
+ // v is the start position in history from end.
+ start := len(hist) - v
+ if seq.ml > v {
+ // Some goes into the current block.
+ // Copy remainder of history
+ copy(out[t:], hist[start:])
+ t += v
+ seq.ml -= v
+ } else {
+ copy(out[t:], hist[start:start+seq.ml])
+ t += seq.ml
+ continue
+ }
+ }
+
+ // We must be in the current buffer now
+ if seq.ml > 0 {
+ start := t - seq.mo
+ if seq.ml <= t-start {
+ // No overlap
+ copy(out[t:], out[start:start+seq.ml])
+ t += seq.ml
+ } else {
+ // Overlapping copy
+				// Extend destination slice and copy one byte at a time.
+ src := out[start : start+seq.ml]
+ dst := out[t:]
+ dst = dst[:len(src)]
+ t += len(src)
+ // Destination is the space we just added.
+ for i := range src {
+ dst[i] = src[i]
+ }
+ }
+ }
+ }
+ // Add final literals
+ copy(out[t:], s.literals)
+ if debugDecoder {
+ t += len(s.literals)
+ if t != len(out) {
+ panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize))
+ }
+ }
+ s.out = out
+
+ return nil
+}
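The inlined offset handling in decode() above is zstd's repeat-offset rule. Pulled out into a standalone helper purely for readability (a sketch that simply mirrors the inlined logic above, not an addition to the package; prev holds the three most recent offsets):

package example

// repeatOffset mirrors the repeat-offset branch of decode() above: with a
// zero literal length the repeat codes shift by one, a resulting value of 3
// means "most recent offset minus one", and the three-entry history is
// rotated so the chosen offset becomes the most recent one.
func repeatOffset(prev *[3]int, mo, ll int) int {
	if ll == 0 {
		mo++
	}
	if mo == 0 {
		return prev[0] // reuse the most recent offset without rotating
	}
	var temp int
	if mo == 3 {
		temp = prev[0] - 1
	} else {
		temp = prev[mo]
	}
	if temp == 0 {
		temp = 1 // corrupted input: zero is never a valid offset
	}
	if mo != 1 {
		prev[2] = prev[1]
	}
	prev[1] = prev[0]
	prev[0] = temp
	return temp
}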
diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go
index 967f29b3120e9..29c15c8c4efe5 100644
--- a/vendor/github.com/klauspost/compress/zstd/zip.go
+++ b/vendor/github.com/klauspost/compress/zstd/zip.go
@@ -18,36 +18,58 @@ const ZipMethodWinZip = 93
// See https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.9.TXT
const ZipMethodPKWare = 20
-var zipReaderPool sync.Pool
+// zipReaderPool is the default reader pool.
+var zipReaderPool = sync.Pool{New: func() interface{} {
+ z, err := NewReader(nil, WithDecoderLowmem(true), WithDecoderMaxWindow(128<<20), WithDecoderConcurrency(1))
+ if err != nil {
+ panic(err)
+ }
+ return z
+}}
-// newZipReader cannot be used since we would leak goroutines...
-func newZipReader(r io.Reader) io.ReadCloser {
- dec, ok := zipReaderPool.Get().(*Decoder)
- if ok {
- dec.Reset(r)
- } else {
- d, err := NewReader(r, WithDecoderConcurrency(1), WithDecoderLowmem(true))
- if err != nil {
- panic(err)
+// newZipReader creates a pooled zip decompressor.
+func newZipReader(opts ...DOption) func(r io.Reader) io.ReadCloser {
+ pool := &zipReaderPool
+ if len(opts) > 0 {
+ opts = append([]DOption{WithDecoderLowmem(true), WithDecoderMaxWindow(128 << 20)}, opts...)
+ // Force concurrency 1
+ opts = append(opts, WithDecoderConcurrency(1))
+ // Create our own pool
+ pool = &sync.Pool{}
+ }
+ return func(r io.Reader) io.ReadCloser {
+ dec, ok := pool.Get().(*Decoder)
+ if ok {
+ dec.Reset(r)
+ } else {
+ d, err := NewReader(r, opts...)
+ if err != nil {
+ panic(err)
+ }
+ dec = d
}
- dec = d
+ return &pooledZipReader{dec: dec, pool: pool}
}
- return &pooledZipReader{dec: dec}
}
type pooledZipReader struct {
- mu sync.Mutex // guards Close and Read
- dec *Decoder
+ mu sync.Mutex // guards Close and Read
+ pool *sync.Pool
+ dec *Decoder
}
func (r *pooledZipReader) Read(p []byte) (n int, err error) {
r.mu.Lock()
defer r.mu.Unlock()
if r.dec == nil {
- return 0, errors.New("Read after Close")
+ return 0, errors.New("read after close or EOF")
}
dec, err := r.dec.Read(p)
-
+ if err == io.EOF {
+ r.dec.Reset(nil)
+ r.pool.Put(r.dec)
+ r.dec = nil
+ }
return dec, err
}
@@ -57,7 +79,7 @@ func (r *pooledZipReader) Close() error {
var err error
if r.dec != nil {
err = r.dec.Reset(nil)
- zipReaderPool.Put(r.dec)
+ r.pool.Put(r.dec)
r.dec = nil
}
return err
@@ -111,12 +133,9 @@ func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) {
// ZipDecompressor returns a decompressor that can be registered with zip libraries.
// See ZipCompressor for example.
-func ZipDecompressor() func(r io.Reader) io.ReadCloser {
- return func(r io.Reader) io.ReadCloser {
- d, err := NewReader(r, WithDecoderConcurrency(1), WithDecoderLowmem(true))
- if err != nil {
- panic(err)
- }
- return d.IOReadCloser()
- }
+// Options can be specified. WithDecoderConcurrency(1) is forced,
+// and by default a 128MB maximum decompression window is specified.
+// The window size can be overridden if required.
+func ZipDecompressor(opts ...DOption) func(r io.Reader) io.ReadCloser {
+ return newZipReader(opts...)
}
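With this change ZipDecompressor accepts decoder options while still forcing single-goroutine decoding. A usage sketch (assuming the standard library archive/zip and an illustrative window override; this is not code from the diff):

package example

import (
	"archive/zip"

	"github.com/klauspost/compress/zstd"
)

// registerZstd wires the pooled zstd decompressor into an opened zip.Reader
// for WinZip-style entries (compression method 93). The larger window is an
// illustrative override of the 128MB default mentioned above.
func registerZstd(zr *zip.Reader) {
	zr.RegisterDecompressor(zstd.ZipMethodWinZip, zstd.ZipDecompressor(
		zstd.WithDecoderMaxWindow(256<<20),
	))
}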
diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go
index ef1d49a009cc7..3eb3f1c82661a 100644
--- a/vendor/github.com/klauspost/compress/zstd/zstd.go
+++ b/vendor/github.com/klauspost/compress/zstd/zstd.go
@@ -39,6 +39,9 @@ const zstdMinMatch = 3
// Reset the buffer offset when reaching this.
const bufferReset = math.MaxInt32 - MaxWindowSize
+// fcsUnknown is used for unknown frame content size.
+const fcsUnknown = math.MaxUint64
+
var (
// ErrReservedBlockType is returned when a reserved block type is found.
// Typically this indicates wrong or corrupted input.
@@ -52,6 +55,10 @@ var (
// Typically returned on invalid input.
ErrBlockTooSmall = errors.New("block too small")
+ // ErrUnexpectedBlockSize is returned when a block has unexpected size.
+ // Typically returned on invalid input.
+ ErrUnexpectedBlockSize = errors.New("unexpected block size")
+
// ErrMagicMismatch is returned when a "magic" number isn't what is expected.
// Typically this indicates wrong or corrupted input.
ErrMagicMismatch = errors.New("invalid input: magic number mismatch")
@@ -75,6 +82,10 @@ var (
// This is only returned if SingleSegment is specified on the frame.
ErrFrameSizeExceeded = errors.New("frame size exceeded")
+ // ErrFrameSizeMismatch is returned if the stated frame size does not match the expected size.
+ // This is only returned if SingleSegment is specified on the frame.
+ ErrFrameSizeMismatch = errors.New("frame size does not match size on stream")
+
// ErrCRCMismatch is returned if CRC mismatches.
ErrCRCMismatch = errors.New("CRC check failed")
@@ -99,17 +110,6 @@ func printf(format string, a ...interface{}) {
}
}
-// matchLenFast does matching, but will not match the last up to 7 bytes.
-func matchLenFast(a, b []byte) int {
- endI := len(a) & (math.MaxInt32 - 7)
- for i := 0; i < endI; i += 8 {
- if diff := load64(a, i) ^ load64(b, i); diff != 0 {
- return i + bits.TrailingZeros64(diff)>>3
- }
- }
- return endI
-}
-
// matchLen returns the maximum length.
// a must be the shortest of the two.
// The function also returns whether all bytes matched.
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 857d89bf4a762..c0d1664254619 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -713,13 +713,14 @@ github.com/json-iterator/go
# github.com/julienschmidt/httprouter v1.3.0
## explicit; go 1.7
github.com/julienschmidt/httprouter
-# github.com/klauspost/compress v1.14.1
-## explicit; go 1.15
+# github.com/klauspost/compress v1.15.11
+## explicit; go 1.17
github.com/klauspost/compress
github.com/klauspost/compress/flate
github.com/klauspost/compress/fse
github.com/klauspost/compress/gzip
github.com/klauspost/compress/huff0
+github.com/klauspost/compress/internal/cpuinfo
github.com/klauspost/compress/internal/snapref
github.com/klauspost/compress/s2
github.com/klauspost/compress/zstd
type: chunks
masked_commit_message: update klauspost/compress package to v1.15.11 (#7263)

hash: be24862f53fb44616662c924b7ef6d237297606c
date: 2024-11-20 21:39:27
author: renovate[bot]
commit_message: fix(deps): update module github.com/imdario/mergo to v1 (#15035)
is_merge: false
git_diff:
diff --git a/clients/pkg/promtail/targets/serverutils/config.go b/clients/pkg/promtail/targets/serverutils/config.go
index 451a3d0953c76..add94dc8157f4 100644
--- a/clients/pkg/promtail/targets/serverutils/config.go
+++ b/clients/pkg/promtail/targets/serverutils/config.go
@@ -3,8 +3,8 @@ package serverutils
import (
"flag"
+ "dario.cat/mergo"
"github.com/grafana/dskit/server"
- "github.com/imdario/mergo"
)
// MergeWithDefaults applies server.Config defaults to a given and different server.Config.
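Editor's note: the only change in this file is the import path, since `dario.cat/mergo` is the v1 home of the library. A minimal, self-contained sketch of the defaults-merging behaviour a helper like `MergeWithDefaults` relies on (the struct and field names below are illustrative stand-ins, not Loki's):

```go
package main

import (
	"fmt"

	"dario.cat/mergo"
)

// ServerConfig is an illustrative stand-in for the real server.Config.
type ServerConfig struct {
	HTTPListenPort int
	LogLevel       string
}

func main() {
	cfg := ServerConfig{HTTPListenPort: 9095} // user-provided; LogLevel left empty
	defaults := ServerConfig{HTTPListenPort: 3100, LogLevel: "info"}

	// mergo.Merge fills zero-valued fields of cfg from defaults, which is how
	// defaults get layered underneath a partially specified config.
	if err := mergo.Merge(&cfg, defaults); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg) // {HTTPListenPort:9095 LogLevel:info}
}
```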
diff --git a/go.mod b/go.mod
index b9ffb238ad501..ef640ddad17fc 100644
--- a/go.mod
+++ b/go.mod
@@ -8,6 +8,7 @@ require (
cloud.google.com/go/bigtable v1.33.0
cloud.google.com/go/pubsub v1.45.1
cloud.google.com/go/storage v1.47.0
+ dario.cat/mergo v1.0.1
github.com/Azure/azure-pipeline-go v0.2.3
github.com/Azure/azure-storage-blob-go v0.15.0
github.com/Azure/go-autorest/autorest/adal v0.9.24
@@ -59,7 +60,6 @@ require (
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645
github.com/hashicorp/consul/api v1.30.0
github.com/hashicorp/golang-lru/v2 v2.0.7
- github.com/imdario/mergo v0.3.16
github.com/influxdata/telegraf v1.16.3
github.com/jmespath/go-jmespath v0.4.0
github.com/joncrlsn/dque v0.0.0-20211108142734-c2ef48c5192a
@@ -159,7 +159,6 @@ require (
cloud.google.com/go/auth v0.10.2 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.5 // indirect
cloud.google.com/go/monitoring v1.21.2 // indirect
- dario.cat/mergo v1.0.1 // indirect
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1 // indirect
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 // indirect
@@ -173,6 +172,7 @@ require (
github.com/goccy/go-json v0.10.3 // indirect
github.com/gorilla/handlers v1.5.2 // indirect
github.com/hashicorp/golang-lru v0.6.0 // indirect
+ github.com/imdario/mergo v0.3.16 // indirect
github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/moby/sys/userns v0.1.0 // indirect
diff --git a/integration/util/merger.go b/integration/util/merger.go
index dbd88a4205fc9..5458c1ce3f125 100644
--- a/integration/util/merger.go
+++ b/integration/util/merger.go
@@ -3,7 +3,7 @@ package util
import (
"fmt"
- "github.com/imdario/mergo"
+ "dario.cat/mergo"
"gopkg.in/yaml.v2"
)
diff --git a/pkg/ruler/registry.go b/pkg/ruler/registry.go
index 29297fcab5a74..67ef1d9bbf32c 100644
--- a/pkg/ruler/registry.go
+++ b/pkg/ruler/registry.go
@@ -8,10 +8,10 @@ import (
"sync"
"time"
+ "dario.cat/mergo"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/grafana/dskit/user"
- "github.com/imdario/mergo"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
promConfig "github.com/prometheus/common/config"
type: fix
masked_commit_message: update module github.com/imdario/mergo to v1 (#15035)

hash: 66b2721cfa27f892abae0b8d024490cbeb77afa6
date: 2025-02-04 22:12:03
author: Robert Fratto
commit_message: chore(dataobj): do not panic on unrecognized compression type (#16088)
is_merge: false
git_diff:
diff --git a/pkg/dataobj/internal/dataset/page.go b/pkg/dataobj/internal/dataset/page.go
index c0b2653bbc65e..e1dff653b4fce 100644
--- a/pkg/dataobj/internal/dataset/page.go
+++ b/pkg/dataobj/internal/dataset/page.go
@@ -112,9 +112,12 @@ func (p *MemPage) reader(compression datasetmd.CompressionType) (presence io.Rea
case datasetmd.COMPRESSION_TYPE_ZSTD:
zr := &fixedZstdReader{page: p, data: compressedValuesData}
return bitmapReader, zr, nil
- }
- panic(fmt.Sprintf("dataset.MemPage.reader: unknown compression type %q", compression.String()))
+ default:
+ // We do *not* want to panic here, as we may be trying to read a page from
+ // a newer format.
+ return nil, nil, fmt.Errorf("unknown compression type %q", compression.String())
+ }
}
var snappyPool = sync.Pool{
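Editor's note on the hunk above: the new `default` branch returns an error so a reader built against an older format list fails gracefully instead of crashing the process. A toy, self-contained sketch of the same pattern; the type and constants below are stand-ins, not the dataobj package's own:

```go
package main

import (
	"errors"
	"fmt"
)

// CompressionType is a stand-in for the generated datasetmd enum.
type CompressionType int

const (
	CompressionNone CompressionType = iota
	CompressionSnappy
	CompressionZstd
)

var errUnknownCompression = errors.New("unknown compression type")

// newReader mirrors the shape of MemPage.reader above: unknown enum values may
// come from a newer writer, so they surface as an error rather than a panic.
func newReader(c CompressionType) (string, error) {
	switch c {
	case CompressionNone:
		return "plain reader", nil
	case CompressionSnappy:
		return "snappy reader", nil
	case CompressionZstd:
		return "zstd reader", nil
	default:
		return "", fmt.Errorf("%w: %d", errUnknownCompression, c)
	}
}

func main() {
	if _, err := newReader(CompressionType(42)); err != nil {
		fmt.Println("handled gracefully:", err) // no panic; the caller decides
	}
}
```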
type: chore
masked_commit_message: do not panic on unrecognized compression type (#16088)

hash: 1b6d0bf3a099e8dad0d7908e0c6eefa4a92f80de
date: 2022-08-20 19:50:02
author: Jonathan
commit_message: feat: add kms ecryption (#6926)
is_merge: false
git_diff:
diff --git a/tools/lambda-promtail/main.tf b/tools/lambda-promtail/main.tf
index ba71bb198a26c..f045ceddef9b9 100644
--- a/tools/lambda-promtail/main.tf
+++ b/tools/lambda-promtail/main.tf
@@ -66,6 +66,7 @@ resource "aws_lambda_function" "lambda_promtail" {
image_uri = var.lambda_promtail_image
function_name = "lambda_promtail"
role = aws_iam_role.iam_for_lambda.arn
+ kms_key_arn = var.kms_key_arn
timeout = 60
memory_size = 128
diff --git a/tools/lambda-promtail/variables.tf b/tools/lambda-promtail/variables.tf
index 8401622b65427..1ecaf1fe78d1b 100644
--- a/tools/lambda-promtail/variables.tf
+++ b/tools/lambda-promtail/variables.tf
@@ -70,3 +70,9 @@ variable "lambda_vpc_security_groups" {
description = "List of security group IDs associated with the Lambda function."
default = []
}
+
+variable "kms_key_arn" {
+ type = string
+ description = "kms key arn for encryp env vars."
+ default = ""
+}
type: feat
masked_commit_message: add kms ecryption (#6926)

hash: c1ca782dbd4fa3515ad7dcc107d1306224c64f33
date: 2023-04-04 20:27:02
author: Periklis Tsirakidis
commit_message: operator: Remove static placeholder suffix for openshift bundle (#8998)
is_merge: false
git_diff:
diff --git a/operator/CHANGELOG.md b/operator/CHANGELOG.md
index da3fe440e19ed..bcc54554e2a67 100644
--- a/operator/CHANGELOG.md
+++ b/operator/CHANGELOG.md
@@ -1,5 +1,6 @@
## Main
+- [8998](https://github.com/grafana/loki/pull/8998) **periklis**: Remove static placeholder suffix for openshift bundle
- [8930](https://github.com/grafana/loki/pull/8930) **periklis**: Fix makefile target operatorhub
- [8911](https://github.com/grafana/loki/pull/8911) **aminesnow**: Update LokiStack annotaion on RulerConfig delete
diff --git a/operator/Makefile b/operator/Makefile
index ff1594d2a7bff..ec0ce23b56e3d 100644
--- a/operator/Makefile
+++ b/operator/Makefile
@@ -41,7 +41,7 @@ ifeq ($(VARIANT), openshift)
ifeq ($(REGISTRY_BASE), $(REGISTRY_BASE_COMMUNITY))
REGISTRY_BASE = $(REGISTRY_BASE_OPENSHIFT)
endif
- VERSION = v0.1.0-placeholder
+ VERSION = v0.1.0
CHANNELS = stable
DEFAULT_CHANNEL = stable
LOKI_OPERATOR_NS = openshift-operators-redhat
diff --git a/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml
index a2a8cec876047..2e85a1bf7f8ad 100644
--- a/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml
+++ b/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml
@@ -150,7 +150,7 @@ metadata:
categories: OpenShift Optional, Logging & Tracing
certified: "false"
containerImage: quay.io/openshift-logging/loki-operator:v0.1.0
- createdAt: "2023-03-27T19:03:01Z"
+ createdAt: "2023-04-03T19:44:20Z"
description: |
The Loki Operator for OCP provides a means for configuring and managing a Loki stack for cluster logging.
## Prerequisites and Requirements
@@ -173,7 +173,7 @@ metadata:
operatorframework.io/arch.arm64: supported
operatorframework.io/arch.ppc64le: supported
operatorframework.io/arch.s390x: supported
- name: loki-operator.v0.1.0-placeholder
+ name: loki-operator.v0.1.0
namespace: placeholder
spec:
apiservicedefinitions: {}
@@ -1486,7 +1486,7 @@ spec:
value: quay.io/observatorium/api:latest
- name: RELATED_IMAGE_OPA
value: quay.io/observatorium/opa-openshift:latest
- image: quay.io/openshift-logging/loki-operator:v0.1.0-placeholder
+ image: quay.io/openshift-logging/loki-operator:v0.1.0
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
@@ -1608,7 +1608,7 @@ spec:
name: gateway
- image: quay.io/observatorium/opa-openshift:latest
name: opa
- version: 0.1.0-placeholder
+ version: 0.1.0
webhookdefinitions:
- admissionReviewVersions:
- v1
diff --git a/operator/config/manager/kustomization.yaml b/operator/config/manager/kustomization.yaml
index 44ceff8c779d1..86cb69733899b 100644
--- a/operator/config/manager/kustomization.yaml
+++ b/operator/config/manager/kustomization.yaml
@@ -6,4 +6,4 @@ kind: Kustomization
images:
- name: controller
newName: quay.io/openshift-logging/loki-operator
- newTag: v0.1.0-placeholder
+ newTag: v0.1.0
type: operator
masked_commit_message: Remove static placeholder suffix for openshift bundle (#8998)

hash: 88ef940ede3ecf965481a104b4fdb35cad7bb79f
date: 2021-11-16 06:22:17
author: Ed Welch
commit_message: loki: Set querier worker max concurrent regardless of run configuration. (#4761)
is_merge: false
git_diff:
diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go
index 18d30c5801573..c11b301c98fdc 100644
--- a/pkg/loki/modules.go
+++ b/pkg/loki/modules.go
@@ -212,6 +212,8 @@ func (t *Loki) initQuerier() (services.Service, error) {
if t.Cfg.Ingester.QueryStoreMaxLookBackPeriod != 0 {
t.Cfg.Querier.IngesterQueryStoreMaxLookback = t.Cfg.Ingester.QueryStoreMaxLookBackPeriod
}
+ // Querier worker's max concurrent requests must be the same as the querier setting
+ t.Cfg.Worker.MaxConcurrentRequests = t.Cfg.Querier.MaxConcurrent
var err error
t.Querier, err = querier.New(t.Cfg.Querier, t.Store, t.ingesterQuerier, t.overrides)
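Editor's note: the hunk above moves the concurrency wiring into querier initialization (and the worker-service copy of it is removed below), so the invariant holds no matter which target is run. A toy sketch of that invariant, using stand-in structs rather than Loki's real config types:

```go
package main

import "fmt"

// Stand-ins for the real querier and worker config structs.
type QuerierConfig struct{ MaxConcurrent int }
type WorkerConfig struct{ MaxConcurrentRequests int }
type Config struct {
	Querier QuerierConfig
	Worker  WorkerConfig
}

func main() {
	cfg := Config{Querier: QuerierConfig{MaxConcurrent: 42}}

	// Keep the worker's concurrency in lock-step with the querier setting,
	// regardless of run configuration (single binary or standalone querier).
	cfg.Worker.MaxConcurrentRequests = cfg.Querier.MaxConcurrent

	fmt.Println(cfg.Worker.MaxConcurrentRequests) // 42
}
```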
diff --git a/pkg/querier/worker_service.go b/pkg/querier/worker_service.go
index 6a3fa3afd89b1..9eeb6a8bcfb25 100644
--- a/pkg/querier/worker_service.go
+++ b/pkg/querier/worker_service.go
@@ -131,9 +131,6 @@ func InitWorkerService(
internalHandler = internalMiddleware.Wrap(internalHandler)
- //Querier worker's max concurrent requests must be the same as the querier setting
- (*cfg.QuerierWorkerConfig).MaxConcurrentRequests = cfg.QuerierMaxConcurrent
-
//Return a querier worker pointed to the internal querier HTTP handler so there is not a conflict in routes between the querier
//and the query frontend
return querier_worker.NewQuerierWorker(
diff --git a/pkg/querier/worker_service_test.go b/pkg/querier/worker_service_test.go
index 3eadfc82e1e9a..5a282a5d8f024 100644
--- a/pkg/querier/worker_service_test.go
+++ b/pkg/querier/worker_service_test.go
@@ -224,18 +224,6 @@ func Test_InitQuerierService(t *testing.T) {
}
})
- t.Run("set the worker's max concurrent request to the same as the max concurrent setting for the querier", func(t *testing.T) {
- for _, config := range nonStandaloneTargetPermutations {
- workerConfig := querier_worker.Config{}
- config.QuerierWorkerConfig = &workerConfig
- config.QuerierMaxConcurrent = 42
-
- testContext(config, nil)
-
- assert.Equal(t, 42, workerConfig.MaxConcurrentRequests)
- }
- })
-
t.Run("always return a query worker service", func(t *testing.T) {
for _, config := range nonStandaloneTargetPermutations {
workerConfig := querier_worker.Config{}
type: loki
masked_commit_message: Set querier worker max concurrent regardless of run configuration. (#4761)

hash: b8b3ed1784f936d7903382694679db11a246e6ca
date: 2025-02-13 02:09:17
author: J Stickler
commit_message: docs: adding Promtail deprecation banners (#16227)
is_merge: false
git_diff:
diff --git a/docs/sources/send-data/_index.md b/docs/sources/send-data/_index.md
index 08ef70b014ab0..19bbe5fc7a14d 100644
--- a/docs/sources/send-data/_index.md
+++ b/docs/sources/send-data/_index.md
@@ -20,13 +20,13 @@ The following clients are developed and supported (for those customers who have
- [Grafana Alloy](https://grafana.com/docs/alloy/latest/) - Grafana Alloy is a vendor-neutral distribution of the OpenTelemetry (OTel) Collector. Alloy offers native pipelines for OTel, Prometheus, Pyroscope, Loki, and many other metrics, logs, traces, and profile tools. In addition, you can use Alloy pipelines to do different tasks, such as configure alert rules in Loki and Mimir. Alloy is fully compatible with the OTel Collector, Prometheus Agent, and Promtail. You can use Alloy as an alternative to either of these solutions or combine it into a hybrid system of multiple collectors and agents. You can deploy Alloy anywhere within your IT infrastructure and pair it with your Grafana LGTM stack, a telemetry backend from Grafana Cloud, or any other compatible backend from any other vendor.
{{< docs/shared source="alloy" lookup="agent-deprecation.md" version="next" >}}
-- [Grafana Agent](/docs/agent/latest/) - The Grafana Agent is a client for the Grafana stack. It can collect telemetry data for metrics, logs, traces, and continuous profiles and is fully compatible with the Prometheus, OpenTelemetry, and Grafana open source ecosystems.
-- [Promtail](https://grafana.com/docs/loki/<LOKI_VERSION>/send-data/promtail/) - Promtail can be configured to automatically scrape logs from Kubernetes pods running on the same node that Promtail runs on. Promtail and Prometheus running together in Kubernetes enables powerful debugging: if Prometheus and Promtail use the same labels, users can use tools like Grafana to switch between metrics and logs based on the label set. Promtail can be configured to tail logs from all files given a host path. It is the easiest way to send logs to Loki from plain-text files (for example, things that log to `/var/log/*.log`).
-Promtail works well if you want to extract metrics from logs such as counting the occurrences of a particular message.
-{{< admonition type="note" >}}
-Promtail is feature complete. All future feature development will occur in Grafana Alloy.
-{{< /admonition >}}
- [xk6-loki extension](https://github.com/grafana/xk6-loki) - The k6-loki extension lets you perform [load testing on Loki](https://grafana.com/docs/loki/<LOKI_VERSION>/send-data/k6/).
+- [Grafana Agent](/docs/agent/latest/) (DEPRECATED) - The Grafana Agent is a client for the Grafana stack. It can collect telemetry data for metrics, logs, traces, and continuous profiles and is fully compatible with the Prometheus, OpenTelemetry, and Grafana open source ecosystems.
+- [Promtail](https://grafana.com/docs/loki/<LOKI_VERSION>/send-data/promtail/) (DEPRECATED) - Promtail can be configured to automatically scrape logs from Kubernetes pods running on the same node that Promtail runs on.
+{{< admonition type="caution" >}}
+Promtail is deprecated. If you are currently using Promtail, you should plan your [migration to Alloy](https://grafana.com/docs/loki/<LOKI_VERSION>/setup/migrate/migrate-to-alloy/). All future feature development will occur in Grafana Alloy.
+{{< /admonition >}}
+
## OpenTelemetry Collector
diff --git a/docs/sources/send-data/promtail/_index.md b/docs/sources/send-data/promtail/_index.md
index 05eec7dc43d38..545ba39e68970 100644
--- a/docs/sources/send-data/promtail/_index.md
+++ b/docs/sources/send-data/promtail/_index.md
@@ -8,14 +8,16 @@ weight: 300
---
# Promtail agent
+{{< admonition type="caution" >}}
+Promtail is now deprecated and will enter into Long-Term Support (LTS) beginning Feb. 13, 2025. This means that Promtail will no longer receive any new feature updates, but it will receive critical bug fixes and security fixes. Commercial support will end after the LTS phase, which we anticipate will extend for about 12 months until February 28, 2026. End-of-Life (EOL) phase for Promtail will begin once LTS ends. Promtail is expected to reach EOL on March 2, 2026, afterwards no future support or updates will be provided. All future feature development will occur in Grafana Alloy.
+
+If you are currently using Promtail, you should plan your [migration to Alloy](https://grafana.com/docs/loki/<LOKI_VERSION>/setup/migrate/migrate-to-alloy/). The Alloy migration documentation includes a migration tool for converting your Promtail configuration to an Alloy configuration with a single command.
+{{< /admonition >}}
+
Promtail is an agent which ships the contents of local logs to a private Grafana Loki
instance or [Grafana Cloud](/oss/loki). It is usually
deployed to every machine that runs applications which need to be monitored.
-{{< admonition type="note" >}}
-Promtail is feature complete. All future feature development will occur in Grafana Alloy.
-{{< /admonition >}}
-
It primarily:
- Discovers targets
@@ -74,12 +76,13 @@ scrape_configs:
```
Important details are:
-* It relies on the `\n` character to separate the data into different log lines.
-* The max expected log line is 2MB within the compressed file.
-* The data is decompressed in blocks of 4096 bytes. i.e: it first fetches a block of 4096 bytes
+
+- It relies on the `\n` character to separate the data into different log lines.
+- The max expected log line is 2MB within the compressed file.
+- The data is decompressed in blocks of 4096 bytes. i.e: it first fetches a block of 4096 bytes
from the compressed file and processes it. After processing this block and pushing the data to Loki,
it fetches the following 4096 bytes, and so on.
-* It supports the following extensions:
+- It supports the following extensions:
- `.gz`: Data will be decompressed with the native Gunzip Golang pkg (`pkg/compress/gzip`)
- `.z`: Data will be decompressed with the native Zlib Golang pkg (`pkg/compress/zlib`)
- `.bz2`: Data will be decompressed with the native Bzip2 Golang pkg (`pkg/compress/bzip2`)
@@ -88,25 +91,20 @@ Important details are:
compressed file, **the first parsed line will contains metadata together with
your log line**. It is illustrated at
`./clients/pkg/promtail/targets/file/decompresser_test.go`.
-* `.zip` extension isn't supported as of now because it doesn't support some of the interfaces
- Promtail requires. We have plans to add support for it in the near future.
-* The decompression is quite CPU intensive and a lot of allocations are expected
+- `.zip` extension isn't supported as of now because it doesn't support some of the interfaces
+ Promtail requires.
+- The decompression is quite CPU intensive and a lot of allocations are expected
to occur, especially depending on the size of the file. You can expect the number
of garbage collection runs and the CPU usage to skyrocket, but no memory leak is
expected.
-* Positions are supported. That means that, if you interrupt Promtail after
+- Positions are supported. That means that, if you interrupt Promtail after
parsing and pushing (for example) 45% of your compressed file data, you can expect Promtail
to resume work from the last scraped line and process the rest of the remaining 55%.
-* Since decompression and pushing can be very fast, depending on the size
+- Since decompression and pushing can be very fast, depending on the size
of your compressed file Loki will rate-limit your ingestion. In that case you
might configure Promtail's [`limits` stage](https://grafana.com/docs/loki/<LOKI_VERSION>/send-data/promtail/configuration/#limits_config) to slow the pace or increase [ingestion limits](https://grafana.com/docs/loki/<LOKI_VERSION>/configure/#limits_config) on Loki.
-
-* Log rotations on compressed files **are not supported as of now** (log rotation is fully supported for normal files), mostly because it requires us modifying Promtail to
- rely on file inodes instead of file names. If you'd like to see support for it, create a new
- issue on Github asking for it and explaining your use case.
-* If you compress a file under a folder being scraped, Promtail might try to ingest your file before you finish compressing it. To avoid it, pick a `initial_delay` that is enough to avoid it.
-* If you would like to see support for a compression protocol that isn't listed here, create a new issue on Github asking for it and explaining your use case.
-
+- Log rotations on compressed files are not supported (log rotation is fully supported for normal files), mostly because it requires us modifying Promtail to rely on file inodes instead of file names.
+- If you compress a file under a folder being scraped, Promtail might try to ingest your file before you finish compressing it. To avoid it, pick a `initial_delay` that is enough to avoid it.
## Loki Push API
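Editor's note: the list above describes how Promtail tails compressed files (newline-delimited lines, a 2MB line cap, reads in 4096-byte blocks). A rough, standalone Go sketch of that reading pattern, for illustration only; it is not Promtail's actual decompressor code:

```go
package main

import (
	"bufio"
	"compress/gzip"
	"fmt"
	"os"
)

func main() {
	f, err := os.Open("app.log.gz") // hypothetical input file
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Read the compressed stream through a 4096-byte buffer, mirroring the
	// block size mentioned above, and gunzip it on the fly.
	zr, err := gzip.NewReader(bufio.NewReaderSize(f, 4096))
	if err != nil {
		panic(err)
	}
	defer zr.Close()

	// Split on '\n' and allow lines up to the documented 2MB maximum.
	sc := bufio.NewScanner(zr)
	sc.Buffer(make([]byte, 0, 64*1024), 2<<20)
	for sc.Scan() {
		fmt.Println(sc.Text()) // in Promtail each line would be pushed to Loki
	}
	if err := sc.Err(); err != nil {
		panic(err)
	}
}
```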
diff --git a/docs/sources/send-data/promtail/cloud/_index.md b/docs/sources/send-data/promtail/cloud/_index.md
index 87b450be37634..45377aa16421a 100644
--- a/docs/sources/send-data/promtail/cloud/_index.md
+++ b/docs/sources/send-data/promtail/cloud/_index.md
@@ -8,6 +8,8 @@ weight: 300
# Sending logs from the cloud
+{{< docs/shared source="loki" lookup="promtail-deprecation.md" version="<LOKI_VERSION>" >}}
+
Sending logs from cloud services to Grafana Loki is a little different depending on the AWS service you are using. The following tutorials walk you through configuring cloud services to send logs to Loki.
- [Amazon Elastic Compute Cloud (EC2)]({{< relref "./ec2" >}})
diff --git a/docs/sources/send-data/promtail/cloud/ec2/_index.md b/docs/sources/send-data/promtail/cloud/ec2/_index.md
index 3c4dea7565400..a554c6289c62a 100644
--- a/docs/sources/send-data/promtail/cloud/ec2/_index.md
+++ b/docs/sources/send-data/promtail/cloud/ec2/_index.md
@@ -11,6 +11,8 @@ weight: 100
In this tutorial we're going to setup [Promtail]({{< relref "../../../../send-data/promtail" >}}) on an AWS EC2 instance and configure it to sends all its logs to a Grafana Loki instance.
+{{< docs/shared source="loki" lookup="promtail-deprecation.md" version="<LOKI_VERSION>" >}}
+
## Requirements
Before you start you'll need:
diff --git a/docs/sources/send-data/promtail/cloud/ecs/_index.md b/docs/sources/send-data/promtail/cloud/ecs/_index.md
index 1375efea1f16c..dfa69293f24b2 100644
--- a/docs/sources/send-data/promtail/cloud/ecs/_index.md
+++ b/docs/sources/send-data/promtail/cloud/ecs/_index.md
@@ -11,6 +11,8 @@ weight: 100
[ECS][ECS] is the fully managed container orchestration service by Amazon. Combined with [Fargate][Fargate] you can run your container workload without the need to provision your own compute resources. In this tutorial we will see how you can leverage [Firelens][Firelens] an AWS log router to forward all your logs and your workload metadata to a Grafana Loki instance.
+{{< docs/shared source="loki" lookup="promtail-deprecation.md" version="<LOKI_VERSION>" >}}
+
After this tutorial you will able to query all your logs in one place using Grafana.
## Requirements
diff --git a/docs/sources/send-data/promtail/cloud/eks/_index.md b/docs/sources/send-data/promtail/cloud/eks/_index.md
index 8811f41d08198..c867d1341b316 100644
--- a/docs/sources/send-data/promtail/cloud/eks/_index.md
+++ b/docs/sources/send-data/promtail/cloud/eks/_index.md
@@ -11,6 +11,8 @@ weight: 100
In this tutorial we'll see how to set up Promtail on [EKS][eks]. Amazon Elastic Kubernetes Service (Amazon [EKS][eks]) is a fully managed Kubernetes service, using Promtail we'll get full visibility into our cluster logs. We'll start by forwarding pods logs then nodes services and finally Kubernetes events.
+{{< docs/shared source="loki" lookup="promtail-deprecation.md" version="<LOKI_VERSION>" >}}
+
After this tutorial you will able to query all your logs in one place using Grafana.
## Requirements
diff --git a/docs/sources/send-data/promtail/cloud/gcp/_index.md b/docs/sources/send-data/promtail/cloud/gcp/_index.md
index 89ecec4a4be06..2267fdcccbac2 100644
--- a/docs/sources/send-data/promtail/cloud/gcp/_index.md
+++ b/docs/sources/send-data/promtail/cloud/gcp/_index.md
@@ -11,6 +11,8 @@ weight:
This document explains how one can setup Google Cloud Platform to forward its cloud resource logs from a particular GCP project into Google Pubsub topic so that is available for Promtail to consume.
+{{< docs/shared source="loki" lookup="promtail-deprecation.md" version="<LOKI_VERSION>" >}}
+
This document assumes, that reader have `gcloud` installed and have the required permissions (as mentioned in [Roles and Permission](#roles-and-permission) section).
There are two flavours of how to configure this:
diff --git a/docs/sources/send-data/promtail/configuration.md b/docs/sources/send-data/promtail/configuration.md
index 70696f32bc002..4b8f0affa7409 100644
--- a/docs/sources/send-data/promtail/configuration.md
+++ b/docs/sources/send-data/promtail/configuration.md
@@ -9,6 +9,8 @@ weight: 200
# Configure Promtail
+{{< docs/shared source="loki" lookup="promtail-deprecation.md" version="<LOKI_VERSION>" >}}
+
Promtail is configured in a YAML file (usually referred to as `config.yaml`)
which contains information on the Promtail server, where positions are stored,
and how to scrape logs from files.
diff --git a/docs/sources/send-data/promtail/installation.md b/docs/sources/send-data/promtail/installation.md
index 2de8d90371e10..b499660cecc4b 100644
--- a/docs/sources/send-data/promtail/installation.md
+++ b/docs/sources/send-data/promtail/installation.md
@@ -9,9 +9,7 @@ weight: 100
# Install Promtail
-{{< admonition type="note" >}}
-Promtail is feature complete. All future feature development will occur in Grafana Alloy.
-{{< /admonition >}}
+{{< docs/shared source="loki" lookup="promtail-deprecation.md" version="<LOKI_VERSION>" >}}
Promtail is distributed as a binary, in a Docker container,
or there is a Helm chart to install it in a Kubernetes cluster.
diff --git a/docs/sources/send-data/promtail/logrotation/_index.md b/docs/sources/send-data/promtail/logrotation/_index.md
index 24e93fbc2a8c4..2408f939586a7 100644
--- a/docs/sources/send-data/promtail/logrotation/_index.md
+++ b/docs/sources/send-data/promtail/logrotation/_index.md
@@ -9,6 +9,8 @@ weight: 500
# Promtail and Log Rotation
+{{< docs/shared source="loki" lookup="promtail-deprecation.md" version="<LOKI_VERSION>" >}}
+
## Why does log rotation matter?
At any point in time, there may be three processes working on a log file as shown in the image below.
diff --git a/docs/sources/send-data/promtail/pipelines.md b/docs/sources/send-data/promtail/pipelines.md
index cee217a88987a..db48f88161a64 100644
--- a/docs/sources/send-data/promtail/pipelines.md
+++ b/docs/sources/send-data/promtail/pipelines.md
@@ -9,6 +9,8 @@ weight: 600
# Pipelines
+{{< docs/shared source="loki" lookup="promtail-deprecation.md" version="<LOKI_VERSION>" >}}
+
A detailed look at how to set up Promtail to process your log lines, including
extracting metrics and labels.
diff --git a/docs/sources/send-data/promtail/scraping.md b/docs/sources/send-data/promtail/scraping.md
index 605bdeb5aaac4..f490d6a188660 100644
--- a/docs/sources/send-data/promtail/scraping.md
+++ b/docs/sources/send-data/promtail/scraping.md
@@ -9,6 +9,8 @@ weight: 400
# Configuring Promtail for service discovery
+{{< docs/shared source="loki" lookup="promtail-deprecation.md" version="<LOKI_VERSION>" >}}
+
Promtail currently supports scraping from the following sources:
- [Azure event hubs]({{< relref "#azure-event-hubs" >}})
diff --git a/docs/sources/send-data/promtail/stages/_index.md b/docs/sources/send-data/promtail/stages/_index.md
index 1530fedd4ad1b..b8275d5080094 100644
--- a/docs/sources/send-data/promtail/stages/_index.md
+++ b/docs/sources/send-data/promtail/stages/_index.md
@@ -9,6 +9,8 @@ weight: 700
# Promtail pipeline stages
+{{< docs/shared source="loki" lookup="promtail-deprecation.md" version="<LOKI_VERSION>" >}}
+
This section is a collection of all stages Promtail supports in a
[Pipeline]({{< relref "../pipelines" >}}).
diff --git a/docs/sources/send-data/promtail/stages/cri.md b/docs/sources/send-data/promtail/stages/cri.md
index c78c92c8029e7..8b7a75bd58a24 100644
--- a/docs/sources/send-data/promtail/stages/cri.md
+++ b/docs/sources/send-data/promtail/stages/cri.md
@@ -9,6 +9,8 @@ weight:
# cri
+{{< docs/shared source="loki" lookup="promtail-deprecation.md" version="<LOKI_VERSION>" >}}
+
The `cri` stage is a parsing stage that reads the log line using the standard CRI logging format.
## Schema
diff --git a/docs/sources/send-data/promtail/stages/decolorize.md b/docs/sources/send-data/promtail/stages/decolorize.md
index 0d7baae0efadc..7b653ffc82cb9 100644
--- a/docs/sources/send-data/promtail/stages/decolorize.md
+++ b/docs/sources/send-data/promtail/stages/decolorize.md
@@ -9,6 +9,8 @@ weight:
# decolorize
+{{< docs/shared source="loki" lookup="promtail-deprecation.md" version="<LOKI_VERSION>" >}}
+
The `decolorize` stage is a transform stage that lets you strip
ANSI color codes from the log line, thus making it easier to
parse logs further.
diff --git a/docs/sources/send-data/promtail/stages/docker.md b/docs/sources/send-data/promtail/stages/docker.md
index 4256b3304b057..b1d682525c279 100644
--- a/docs/sources/send-data/promtail/stages/docker.md
+++ b/docs/sources/send-data/promtail/stages/docker.md
@@ -9,6 +9,8 @@ weight:
# docker
+{{< docs/shared source="loki" lookup="promtail-deprecation.md" version="<LOKI_VERSION>" >}}
+
The `docker` stage is a parsing stage that reads log lines in the standard
format of Docker log files.
diff --git a/docs/sources/send-data/promtail/stages/drop.md b/docs/sources/send-data/promtail/stages/drop.md
index 0c6c15a1feda9..d6eba38b67b2d 100644
--- a/docs/sources/send-data/promtail/stages/drop.md
+++ b/docs/sources/send-data/promtail/stages/drop.md
@@ -9,6 +9,8 @@ weight:
# drop
+{{< docs/shared source="loki" lookup="promtail-deprecation.md" version="<LOKI_VERSION>" >}}
+
The `drop` stage is a filtering stage that lets you drop logs based on several options.
It's important to note that if you provide multiple options they will be treated like an AND clause,
diff --git a/docs/sources/send-data/promtail/stages/eventlogmessage.md b/docs/sources/send-data/promtail/stages/eventlogmessage.md
index 4323333942152..c3346f39c5bd3 100644
--- a/docs/sources/send-data/promtail/stages/eventlogmessage.md
+++ b/docs/sources/send-data/promtail/stages/eventlogmessage.md
@@ -9,6 +9,8 @@ weight:
# eventlogmessage
+{{< docs/shared source="loki" lookup="promtail-deprecation.md" version="<LOKI_VERSION>" >}}
+
The `eventlogmessage` stage is a parsing stage that extracts data from the Message string that appears in the Windows Event Log.
## Schema
diff --git a/docs/sources/send-data/promtail/stages/geoip.md b/docs/sources/send-data/promtail/stages/geoip.md
index 769bb3908b98e..2f8d8b7aab0a5 100644
--- a/docs/sources/send-data/promtail/stages/geoip.md
+++ b/docs/sources/send-data/promtail/stages/geoip.md
@@ -9,6 +9,8 @@ weight:
# geoip
+{{< docs/shared source="loki" lookup="promtail-deprecation.md" version="<LOKI_VERSION>" >}}
+
The `geoip` stage is a parsing stage that reads an ip address and populates the labelset with geoip fields. [Maxmind's GeoIP2 database](https://www.maxmind.com/en/home) is used for the lookup.
Populated fields for City db:
diff --git a/docs/sources/send-data/promtail/stages/json.md b/docs/sources/send-data/promtail/stages/json.md
index 79a4c2438fb8d..f8ccd65386322 100644
--- a/docs/sources/send-data/promtail/stages/json.md
+++ b/docs/sources/send-data/promtail/stages/json.md
@@ -9,6 +9,8 @@ weight:
# json
+{{< docs/shared source="loki" lookup="promtail-deprecation.md" version="<LOKI_VERSION>" >}}
+
The `json` stage is a parsing stage that reads the log line as JSON and accepts
[JMESPath](http://jmespath.org/) expressions to extract data.
diff --git a/docs/sources/send-data/promtail/stages/labelallow.md b/docs/sources/send-data/promtail/stages/labelallow.md
index a83659c3d451f..d6a55352e5237 100644
--- a/docs/sources/send-data/promtail/stages/labelallow.md
+++ b/docs/sources/send-data/promtail/stages/labelallow.md
@@ -9,6 +9,8 @@ weight:
# labelallow
+{{< docs/shared source="loki" lookup="promtail-deprecation.md" version="<LOKI_VERSION>" >}}
+
The labelallow stage is an action stage that allows only the provided labels
to be included in the label set that is sent to Loki with the log entry.
diff --git a/docs/sources/send-data/promtail/stages/labeldrop.md b/docs/sources/send-data/promtail/stages/labeldrop.md
index d552c9e34edec..0353fd671453b 100644
--- a/docs/sources/send-data/promtail/stages/labeldrop.md
+++ b/docs/sources/send-data/promtail/stages/labeldrop.md
@@ -9,6 +9,8 @@ weight:
# labeldrop
+{{< docs/shared source="loki" lookup="promtail-deprecation.md" version="<LOKI_VERSION>" >}}
+
The labeldrop stage is an action stage that drops labels from
the label set that is sent to Loki with the log entry.
diff --git a/docs/sources/send-data/promtail/stages/labels.md b/docs/sources/send-data/promtail/stages/labels.md
index d996f2154302f..61f2e8694e82b 100644
--- a/docs/sources/send-data/promtail/stages/labels.md
+++ b/docs/sources/send-data/promtail/stages/labels.md
@@ -9,6 +9,8 @@ weight:
# labels
+{{< docs/shared source="loki" lookup="promtail-deprecation.md" version="<LOKI_VERSION>" >}}
+
The labels stage is an action stage that takes data from the extracted map and
modifies the label set that is sent to Loki with the log entry.
diff --git a/docs/sources/send-data/promtail/stages/limit.md b/docs/sources/send-data/promtail/stages/limit.md
index c4612431b0f2a..675c9cf583758 100644
--- a/docs/sources/send-data/promtail/stages/limit.md
+++ b/docs/sources/send-data/promtail/stages/limit.md
@@ -9,6 +9,8 @@ weight:
# limit
+{{< docs/shared source="loki" lookup="promtail-deprecation.md" version="<LOKI_VERSION>" >}}
+
The `limit` stage is a rate-limiting stage that throttles logs based on several options.
## Limit stage schema
diff --git a/docs/sources/send-data/promtail/stages/logfmt.md b/docs/sources/send-data/promtail/stages/logfmt.md
index d625929983e2c..554941beb0a61 100644
--- a/docs/sources/send-data/promtail/stages/logfmt.md
+++ b/docs/sources/send-data/promtail/stages/logfmt.md
@@ -9,6 +9,8 @@ weight:
# logfmt
+{{< docs/shared source="loki" lookup="promtail-deprecation.md" version="<LOKI_VERSION>" >}}
+
The `logfmt` stage is a parsing stage that reads the log line as [logfmt](https://brandur.org/logfmt) and allows extraction of data into labels.
## Schema
diff --git a/docs/sources/send-data/promtail/stages/match.md b/docs/sources/send-data/promtail/stages/match.md
index c76a3a8dc8340..e4f3dca3198a4 100644
--- a/docs/sources/send-data/promtail/stages/match.md
+++ b/docs/sources/send-data/promtail/stages/match.md
@@ -9,6 +9,8 @@ weight:
# match
+{{< docs/shared source="loki" lookup="promtail-deprecation.md" version="<LOKI_VERSION>" >}}
+
The match stage is a filtering stage that conditionally applies a set of stages
or drop entries when a log entry matches a configurable LogQL
[stream selector]({{< relref "../../../query/log_queries#log-stream-selector" >}}) and
diff --git a/docs/sources/send-data/promtail/stages/metrics.md b/docs/sources/send-data/promtail/stages/metrics.md
index ea1c7b78150c5..b9572eae6504c 100644
--- a/docs/sources/send-data/promtail/stages/metrics.md
+++ b/docs/sources/send-data/promtail/stages/metrics.md
@@ -9,6 +9,8 @@ weight:
# metrics
+{{< docs/shared source="loki" lookup="promtail-deprecation.md" version="<LOKI_VERSION>" >}}
+
The `metrics` stage is an action stage that allows for defining and updating
metrics based on data from the extracted map. Note that created metrics are not
pushed to Loki and are instead exposed via Promtail's `/metrics` endpoint.
diff --git a/docs/sources/send-data/promtail/stages/multiline.md b/docs/sources/send-data/promtail/stages/multiline.md
index d22e90246da59..088e9c6e181bd 100644
--- a/docs/sources/send-data/promtail/stages/multiline.md
+++ b/docs/sources/send-data/promtail/stages/multiline.md
@@ -9,6 +9,8 @@ weight:
# multiline
+{{< docs/shared source="loki" lookup="promtail-deprecation.md" version="<LOKI_VERSION>" >}}
+
The `multiline` stage merges multiple lines into a multiline block before passing it on to the next stage in the pipeline.
A new block is identified by the `firstline` regular expression. Any line that does *not* match the expression is considered to be part of the block of the previous match.
diff --git a/docs/sources/send-data/promtail/stages/output.md b/docs/sources/send-data/promtail/stages/output.md
index e9e01023fd5b0..f997e8f6b8b35 100644
--- a/docs/sources/send-data/promtail/stages/output.md
+++ b/docs/sources/send-data/promtail/stages/output.md
@@ -9,6 +9,8 @@ weight:
# output
+{{< docs/shared source="loki" lookup="promtail-deprecation.md" version="<LOKI_VERSION>" >}}
+
The `output` stage is an action stage that takes data from the extracted map and
changes the log line that will be sent to Loki.
diff --git a/docs/sources/send-data/promtail/stages/pack.md b/docs/sources/send-data/promtail/stages/pack.md
index 405214f4774db..d7acac3563a74 100644
--- a/docs/sources/send-data/promtail/stages/pack.md
+++ b/docs/sources/send-data/promtail/stages/pack.md
@@ -9,6 +9,8 @@ weight:
# pack
+{{< docs/shared source="loki" lookup="promtail-deprecation.md" version="<LOKI_VERSION>" >}}
+
The `pack` stage is a transform stage which lets you embed extracted values and labels into the log line by packing the log line and labels inside a JSON object.
For example, if you wanted to remove the labels `container` and `pod` but still wanted to keep their values you could use this stage to create the following output:
diff --git a/docs/sources/send-data/promtail/stages/regex.md b/docs/sources/send-data/promtail/stages/regex.md
index 26800e020eadd..4294fb8c76991 100644
--- a/docs/sources/send-data/promtail/stages/regex.md
+++ b/docs/sources/send-data/promtail/stages/regex.md
@@ -9,6 +9,8 @@ weight:
# regex
+{{< docs/shared source="loki" lookup="promtail-deprecation.md" version="<LOKI_VERSION>" >}}
+
The `regex` stage is a parsing stage that parses a log line using a regular
expression. Named capture groups in the regex support adding data into the
extracted map.
diff --git a/docs/sources/send-data/promtail/stages/replace.md b/docs/sources/send-data/promtail/stages/replace.md
index 1d9585bcf441d..d12e6d3626eaf 100644
--- a/docs/sources/send-data/promtail/stages/replace.md
+++ b/docs/sources/send-data/promtail/stages/replace.md
@@ -9,6 +9,8 @@ weight:
# replace
+{{< docs/shared source="loki" lookup="promtail-deprecation.md" version="<LOKI_VERSION>" >}}
+
The `replace` stage is a parsing stage that parses a log line using a regular
expression and replaces the log line. Named capture groups in the regex support adding data into the
extracted map.
diff --git a/docs/sources/send-data/promtail/stages/sampling.md b/docs/sources/send-data/promtail/stages/sampling.md
index 00127b431dd57..bf2ed7fd6fe9d 100644
--- a/docs/sources/send-data/promtail/stages/sampling.md
+++ b/docs/sources/send-data/promtail/stages/sampling.md
@@ -9,6 +9,8 @@ weight:
# sampling
+{{< docs/shared source="loki" lookup="promtail-deprecation.md" version="<LOKI_VERSION>" >}}
+
The `sampling` stage is a stage that sampling logs.
## Sampling stage schema
diff --git a/docs/sources/send-data/promtail/stages/static_labels.md b/docs/sources/send-data/promtail/stages/static_labels.md
index deb638e95cdca..794d3197b5c5d 100644
--- a/docs/sources/send-data/promtail/stages/static_labels.md
+++ b/docs/sources/send-data/promtail/stages/static_labels.md
@@ -9,6 +9,8 @@ weight:
# static_labels
+{{< docs/shared source="loki" lookup="promtail-deprecation.md" version="<LOKI_VERSION>" >}}
+
The static_labels stage is an action stage that adds static-labels to the label set that is sent to Loki with the log entry.
## Schema
diff --git a/docs/sources/send-data/promtail/stages/structured_metadata.md b/docs/sources/send-data/promtail/stages/structured_metadata.md
index 129dd757c4a25..8952f55b9d239 100644
--- a/docs/sources/send-data/promtail/stages/structured_metadata.md
+++ b/docs/sources/send-data/promtail/stages/structured_metadata.md
@@ -5,6 +5,8 @@ description: The 'structured_metadata' Promtail pipeline stage
# structured_metadata
+{{< docs/shared source="loki" lookup="promtail-deprecation.md" version="<LOKI_VERSION>" >}}
+
The `structured_metadata` stage is an action stage that takes data from the extracted map and
modifies the [structured metadata]({{< relref "../../../get-started/labels/structured-metadata" >}}) that is sent to Loki with the log entry.
diff --git a/docs/sources/send-data/promtail/stages/template.md b/docs/sources/send-data/promtail/stages/template.md
index 7f7ae46928a85..e7af02500d903 100644
--- a/docs/sources/send-data/promtail/stages/template.md
+++ b/docs/sources/send-data/promtail/stages/template.md
@@ -9,6 +9,8 @@ weight:
# template
+{{< docs/shared source="loki" lookup="promtail-deprecation.md" version="<LOKI_VERSION>" >}}
+
The `template` stage is a transform stage that lets use manipulate the values in
the extracted map using [Go's template
syntax](https://golang.org/pkg/text/template/).
diff --git a/docs/sources/send-data/promtail/stages/tenant.md b/docs/sources/send-data/promtail/stages/tenant.md
index a5cc7dadba176..d36e7cb1334a1 100644
--- a/docs/sources/send-data/promtail/stages/tenant.md
+++ b/docs/sources/send-data/promtail/stages/tenant.md
@@ -9,6 +9,8 @@ weight:
# tenant
+{{< docs/shared source="loki" lookup="promtail-deprecation.md" version="<LOKI_VERSION>" >}}
+
The tenant stage is an action stage that sets the tenant ID for the log entry
picking it from a field in the extracted data map. If the field is missing, the
default promtail client [`tenant_id`]({{< relref "../configuration#clients" >}}) will
diff --git a/docs/sources/send-data/promtail/stages/timestamp.md b/docs/sources/send-data/promtail/stages/timestamp.md
index f1b08143fd579..64a957396152e 100644
--- a/docs/sources/send-data/promtail/stages/timestamp.md
+++ b/docs/sources/send-data/promtail/stages/timestamp.md
@@ -9,6 +9,8 @@ weight:
# timestamp
+{{< docs/shared source="loki" lookup="promtail-deprecation.md" version="<LOKI_VERSION>" >}}
+
The `timestamp` stage is an action stage that can change the timestamp of a log
line before it is sent to Loki. When a `timestamp` stage is not present, the
timestamp of a log line defaults to the time when the log entry is scraped.
diff --git a/docs/sources/send-data/promtail/troubleshooting/_index.md b/docs/sources/send-data/promtail/troubleshooting/_index.md
index b06f0af4e2af7..a97672c8f915f 100644
--- a/docs/sources/send-data/promtail/troubleshooting/_index.md
+++ b/docs/sources/send-data/promtail/troubleshooting/_index.md
@@ -8,8 +8,9 @@ weight: 800
---
# Troubleshooting Promtail
-This document describes known failure modes of Promtail on edge cases and the
-adopted trade-offs.
+This document describes known failure modes of Promtail on edge cases and the adopted trade-offs.
+
+{{< docs/shared source="loki" lookup="promtail-deprecation.md" version="<LOKI_VERSION>" >}}
## Dry running
diff --git a/docs/sources/shared/promtail-deprecation.md b/docs/sources/shared/promtail-deprecation.md
new file mode 100644
index 0000000000000..f86f0993afc51
--- /dev/null
+++ b/docs/sources/shared/promtail-deprecation.md
@@ -0,0 +1,14 @@
+---
+description: Deprecation notice for Promtail.
+headless: true
+labels:
+ products:
+ - enterprise
+ - oss
+---
+
+[//]: # 'This file provides an admonition caution for the deprecation of Promtail.'
+
+{{< admonition type="caution" >}}
+Promtail has been deprecated and is in Long-Term Support (LTS) through February 28, 2026. Promtail will reach an End-of-Life (EOL) on March 2, 2026. You can find migration resources [here](https://grafana.com/docs/alloy/latest/set-up/migrate/from-promtail/).
+{{< /admonition >}}
type: docs
masked_commit_message: adding Promtail deprecation banners (#16227)

hash: 923671a17e250542e10037f514e8f5c21fa205b9
date: 2021-02-04 22:20:52
author: Jacob Lisi
commit_message: chore: update cortex to latest and fix refs (#3295)
is_merge: false
git_diff:
diff --git a/cmd/docker-driver/config_test.go b/cmd/docker-driver/config_test.go
index 0dd1dd8fe1a0d..2338bb01decd6 100644
--- a/cmd/docker-driver/config_test.go
+++ b/cmd/docker-driver/config_test.go
@@ -7,7 +7,7 @@ import (
"reflect"
"testing"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/docker/docker/daemon/logger"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
@@ -117,7 +117,7 @@ func Test_parsePipeline(t *testing.T) {
// all configs are supposed to be valid
name := "foo"
- _, err = stages.NewPipeline(util.Logger, got.PipelineStages, &name, prometheus.DefaultRegisterer)
+ _, err = stages.NewPipeline(util_log.Logger, got.PipelineStages, &name, prometheus.DefaultRegisterer)
if err != nil {
t.Error(err)
}
diff --git a/cmd/docker-driver/main.go b/cmd/docker-driver/main.go
index 3f76ed7fc6876..f568715fa00f5 100644
--- a/cmd/docker-driver/main.go
+++ b/cmd/docker-driver/main.go
@@ -4,7 +4,7 @@ import (
"fmt"
"os"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/docker/go-plugins-helpers/sdk"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
@@ -29,7 +29,7 @@ func main() {
os.Exit(1)
}
logger := newLogger(logLevel)
- level.Info(util.Logger).Log("msg", "Starting docker-plugin", "version", version.Info())
+ level.Info(util_log.Logger).Log("msg", "Starting docker-plugin", "version", version.Info())
h := sdk.NewHandler(`{"Implements": ["LoggingDriver"]}`)
diff --git a/cmd/loki/main.go b/cmd/loki/main.go
index d69ac92113ada..4adcf1fcd37c2 100644
--- a/cmd/loki/main.go
+++ b/cmd/loki/main.go
@@ -18,7 +18,6 @@ import (
"github.com/grafana/loki/pkg/loki"
logutil "github.com/grafana/loki/pkg/util"
- "github.com/cortexproject/cortex/pkg/util"
util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/grafana/loki/pkg/util/validation"
@@ -76,35 +75,35 @@ func main() {
// Init the logger which will honor the log level set in config.Server
if reflect.DeepEqual(&config.Server.LogLevel, &logging.Level{}) {
- level.Error(util.Logger).Log("msg", "invalid log level")
+ level.Error(util_log.Logger).Log("msg", "invalid log level")
os.Exit(1)
}
- util.InitLogger(&config.Server)
+ util_log.InitLogger(&config.Server)
// Validate the config once both the config file has been loaded
// and CLI flags parsed.
- err := config.Validate(util.Logger)
+ err := config.Validate(util_log.Logger)
if err != nil {
- level.Error(util.Logger).Log("msg", "validating config", "err", err.Error())
+ level.Error(util_log.Logger).Log("msg", "validating config", "err", err.Error())
os.Exit(1)
}
if config.verifyConfig {
- level.Info(util.Logger).Log("msg", "config is valid")
+ level.Info(util_log.Logger).Log("msg", "config is valid")
os.Exit(0)
}
if config.printConfig {
err := logutil.PrintConfig(os.Stderr, &config)
if err != nil {
- level.Error(util.Logger).Log("msg", "failed to print config to stderr", "err", err.Error())
+ level.Error(util_log.Logger).Log("msg", "failed to print config to stderr", "err", err.Error())
}
}
if config.logConfig {
err := logutil.LogConfig(&config)
if err != nil {
- level.Error(util.Logger).Log("msg", "failed to log config object", "err", err.Error())
+ level.Error(util_log.Logger).Log("msg", "failed to log config object", "err", err.Error())
}
}
@@ -112,12 +111,12 @@ func main() {
// Setting the environment variable JAEGER_AGENT_HOST enables tracing
trace, err := tracing.NewFromEnv(fmt.Sprintf("loki-%s", config.Target))
if err != nil {
- level.Error(util.Logger).Log("msg", "error in initializing tracing. tracing will not be enabled", "err", err)
+ level.Error(util_log.Logger).Log("msg", "error in initializing tracing. tracing will not be enabled", "err", err)
}
defer func() {
if trace != nil {
if err := trace.Close(); err != nil {
- level.Error(util.Logger).Log("msg", "error closing tracing", "err", err)
+ level.Error(util_log.Logger).Log("msg", "error closing tracing", "err", err)
}
}
@@ -128,7 +127,7 @@ func main() {
t, err := loki.New(config.Config)
util_log.CheckFatal("initialising loki", err)
- level.Info(util.Logger).Log("msg", "Starting Loki", "version", version.Info())
+ level.Info(util_log.Logger).Log("msg", "Starting Loki", "version", version.Info())
err = t.Run()
util_log.CheckFatal("running loki", err)
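Editor's note: the mechanical change in this file is swapping the `util.Logger` import for `util_log.Logger`; the call style itself is unchanged. A small, self-contained sketch of that go-kit structured-logging style, using a locally built logger in place of the shared one:

```go
package main

import (
	"os"

	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/log/level"
)

func main() {
	// Locally constructed logfmt logger standing in for util_log.Logger.
	logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))

	// Leveled, key-value logging as used throughout the diff above.
	level.Info(logger).Log("msg", "Starting Loki", "version", "example")
	level.Error(logger).Log("msg", "validating config", "err", "example error")
}
```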
diff --git a/cmd/migrate/main.go b/cmd/migrate/main.go
index 9f8f3a06a4fa3..cb540f095e7ed 100644
--- a/cmd/migrate/main.go
+++ b/cmd/migrate/main.go
@@ -13,7 +13,7 @@ import (
"time"
cortex_storage "github.com/cortexproject/cortex/pkg/chunk/storage"
- cortex_util "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
@@ -93,7 +93,7 @@ func main() {
}
// Create a new registerer to avoid registering duplicate metrics
prometheus.DefaultRegisterer = prometheus.NewRegistry()
- sourceStore, err := cortex_storage.NewStore(sourceConfig.StorageConfig.Config, sourceConfig.ChunkStoreConfig, sourceConfig.SchemaConfig.SchemaConfig, limits, prometheus.DefaultRegisterer, nil, cortex_util.Logger)
+ sourceStore, err := cortex_storage.NewStore(sourceConfig.StorageConfig.Config, sourceConfig.ChunkStoreConfig, sourceConfig.SchemaConfig.SchemaConfig, limits, prometheus.DefaultRegisterer, nil, util_log.Logger)
if err != nil {
log.Println("Failed to create source store:", err)
os.Exit(1)
@@ -106,7 +106,7 @@ func main() {
// Create a new registerer to avoid registering duplicate metrics
prometheus.DefaultRegisterer = prometheus.NewRegistry()
- destStore, err := cortex_storage.NewStore(destConfig.StorageConfig.Config, destConfig.ChunkStoreConfig, destConfig.SchemaConfig.SchemaConfig, limits, prometheus.DefaultRegisterer, nil, cortex_util.Logger)
+ destStore, err := cortex_storage.NewStore(destConfig.StorageConfig.Config, destConfig.ChunkStoreConfig, destConfig.SchemaConfig.SchemaConfig, limits, prometheus.DefaultRegisterer, nil, util_log.Logger)
if err != nil {
log.Println("Failed to create destination store:", err)
os.Exit(1)
diff --git a/cmd/promtail/main.go b/cmd/promtail/main.go
index ea0ef529e41ad..3b21e43b26419 100644
--- a/cmd/promtail/main.go
+++ b/cmd/promtail/main.go
@@ -8,8 +8,8 @@ import (
"k8s.io/klog"
- "github.com/cortexproject/cortex/pkg/util"
"github.com/cortexproject/cortex/pkg/util/flagext"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/go-kit/kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/version"
@@ -76,7 +76,7 @@ func main() {
fmt.Println("Invalid log level")
os.Exit(1)
}
- util.InitLogger(&config.ServerConfig.Config)
+ util_log.InitLogger(&config.ServerConfig.Config)
// Use Stderr instead of files for the klog.
klog.SetOutput(os.Stderr)
@@ -90,28 +90,28 @@ func main() {
if config.printConfig {
err := logutil.PrintConfig(os.Stderr, &config)
if err != nil {
- level.Error(util.Logger).Log("msg", "failed to print config to stderr", "err", err.Error())
+ level.Error(util_log.Logger).Log("msg", "failed to print config to stderr", "err", err.Error())
}
}
if config.logConfig {
err := logutil.LogConfig(&config)
if err != nil {
- level.Error(util.Logger).Log("msg", "failed to log config object", "err", err.Error())
+ level.Error(util_log.Logger).Log("msg", "failed to log config object", "err", err.Error())
}
}
p, err := promtail.New(config.Config, config.dryRun)
if err != nil {
- level.Error(util.Logger).Log("msg", "error creating promtail", "error", err)
+ level.Error(util_log.Logger).Log("msg", "error creating promtail", "error", err)
os.Exit(1)
}
- level.Info(util.Logger).Log("msg", "Starting Promtail", "version", version.Info())
+ level.Info(util_log.Logger).Log("msg", "Starting Promtail", "version", version.Info())
defer p.Shutdown()
if err := p.Run(); err != nil {
- level.Error(util.Logger).Log("msg", "error starting promtail", "error", err)
+ level.Error(util_log.Logger).Log("msg", "error starting promtail", "error", err)
os.Exit(1)
}
}
diff --git a/cmd/querytee/main.go b/cmd/querytee/main.go
index 4f002764549fb..9fb4aef0915b0 100644
--- a/cmd/querytee/main.go
+++ b/cmd/querytee/main.go
@@ -9,7 +9,7 @@ import (
"github.com/weaveworks/common/logging"
"github.com/weaveworks/common/server"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/cortexproject/cortex/tools/querytee"
"github.com/grafana/loki/pkg/loghttp"
@@ -29,7 +29,7 @@ func main() {
cfg.ProxyConfig.RegisterFlags(flag.CommandLine)
flag.Parse()
- util.InitLogger(&server.Config{
+ util_log.InitLogger(&server.Config{
LogLevel: cfg.LogLevel,
})
@@ -39,19 +39,19 @@ func main() {
i := querytee.NewInstrumentationServer(cfg.ServerMetricsPort, registry)
if err := i.Start(); err != nil {
- level.Error(util.Logger).Log("msg", "Unable to start instrumentation server", "err", err.Error())
+ level.Error(util_log.Logger).Log("msg", "Unable to start instrumentation server", "err", err.Error())
os.Exit(1)
}
// Run the proxy.
- proxy, err := querytee.NewProxy(cfg.ProxyConfig, util.Logger, lokiReadRoutes(cfg), registry)
+ proxy, err := querytee.NewProxy(cfg.ProxyConfig, util_log.Logger, lokiReadRoutes(cfg), registry)
if err != nil {
- level.Error(util.Logger).Log("msg", "Unable to initialize the proxy", "err", err.Error())
+ level.Error(util_log.Logger).Log("msg", "Unable to initialize the proxy", "err", err.Error())
os.Exit(1)
}
if err := proxy.Start(); err != nil {
- level.Error(util.Logger).Log("msg", "Unable to start the proxy", "err", err.Error())
+ level.Error(util_log.Logger).Log("msg", "Unable to start the proxy", "err", err.Error())
os.Exit(1)
}
diff --git a/cmd/querytee/response_comparator.go b/cmd/querytee/response_comparator.go
index 9ac529142e1f1..85db997df79b1 100644
--- a/cmd/querytee/response_comparator.go
+++ b/cmd/querytee/response_comparator.go
@@ -4,7 +4,7 @@ import (
"encoding/json"
"fmt"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/go-kit/kit/log/level"
jsoniter "github.com/json-iterator/go"
@@ -46,7 +46,7 @@ func compareStreams(expectedRaw, actualRaw json.RawMessage, tolerance float64) e
err := fmt.Errorf("expected %d values for stream %s but got %d", expectedValuesLen,
expectedStream.Labels, actualValuesLen)
if expectedValuesLen > 0 && actualValuesLen > 0 {
- level.Error(util.Logger).Log("msg", err.Error(), "oldest-expected-ts", expectedStream.Entries[0].Timestamp.UnixNano(),
+ level.Error(util_log.Logger).Log("msg", err.Error(), "oldest-expected-ts", expectedStream.Entries[0].Timestamp.UnixNano(),
"newest-expected-ts", expectedStream.Entries[expectedValuesLen-1].Timestamp.UnixNano(),
"oldest-actual-ts", actualStream.Entries[0].Timestamp.UnixNano(), "newest-actual-ts", actualStream.Entries[actualValuesLen-1].Timestamp.UnixNano())
}
diff --git a/go.mod b/go.mod
index 650cd751efa8c..9c3fc5e024232 100644
--- a/go.mod
+++ b/go.mod
@@ -11,7 +11,7 @@ require (
github.com/cespare/xxhash/v2 v2.1.1
github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448 // indirect
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf
- github.com/cortexproject/cortex v1.6.1-0.20210129172402-0976147451ee
+ github.com/cortexproject/cortex v1.6.1-0.20210204145131-7dac81171c66
github.com/davecgh/go-spew v1.1.1
github.com/docker/docker v20.10.1+incompatible
github.com/docker/go-metrics v0.0.0-20181218153428-b84716841b82 // indirect
@@ -24,7 +24,7 @@ require (
github.com/go-logfmt/logfmt v0.5.0
github.com/gofrs/flock v0.7.1 // indirect
github.com/gogo/protobuf v1.3.1 // remember to update loki-build-image/Dockerfile too
- github.com/golang/snappy v0.0.2
+ github.com/golang/snappy v0.0.3-0.20201103224600-674baa8c7fc3
github.com/gorilla/mux v1.7.3
github.com/gorilla/websocket v1.4.2
github.com/grpc-ecosystem/go-grpc-middleware v1.1.0
diff --git a/go.sum b/go.sum
index a823e70b1689b..f437c3f93be18 100644
--- a/go.sum
+++ b/go.sum
@@ -336,8 +336,9 @@ github.com/cortexproject/cortex v1.2.1-0.20200805064754-d8edc95e2c91/go.mod h1:P
github.com/cortexproject/cortex v1.3.1-0.20200923145333-8587ea61fe17/go.mod h1:dJ9gpW7dzQ7z09cKtNN9PfebumgyO4dtNdFQ6eQEed0=
github.com/cortexproject/cortex v1.4.1-0.20201030080541-83ad6df2abea/go.mod h1:kXo5F3jlF7Ky3+I31jt/bXTzOlQjl2X/vGDpy0RY1gU=
github.com/cortexproject/cortex v1.5.1-0.20201111110551-ba512881b076/go.mod h1:zFBGVsvRBfVp6ARXZ7pmiLaGlbjda5ZnA4Y6qSJyrQg=
-github.com/cortexproject/cortex v1.6.1-0.20210129172402-0976147451ee h1:Lj7kPgeuMHzoejxD4QQjYNMDqPNB5Uiqj0GvYaINnG0=
-github.com/cortexproject/cortex v1.6.1-0.20210129172402-0976147451ee/go.mod h1:uwptskTaCiJPGHaEsIthCBtnOA1nN+KpLDezYvbvU8o=
+github.com/cortexproject/cortex v1.6.1-0.20210108144208-6c2dab103f20/go.mod h1:fOsaeeFSyWrjd9nFJO8KVUpsikcxnYsjEzQyjURBoQk=
+github.com/cortexproject/cortex v1.6.1-0.20210204145131-7dac81171c66 h1:ZCpJ2TGDLw5dmDyO0owQLod4f+Q3oRwoqT8WXa1445g=
+github.com/cortexproject/cortex v1.6.1-0.20210204145131-7dac81171c66/go.mod h1:hQ45oW8W7SKNBv4bkl1960kWyslLDbL2IWuzCQBCVGY=
github.com/couchbase/go-couchbase v0.0.0-20180501122049-16db1f1fe037/go.mod h1:TWI8EKQMs5u5jLKW/tsb9VwauIrMIxQG1r5fMsswK5U=
github.com/couchbase/gomemcached v0.0.0-20180502221210-0da75df14530/go.mod h1:srVSlQLB8iXBVXHgnqemxUXqN6FCvClgCMPCsjBDR7c=
github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a/go.mod h1:BQwMFlJzDjFDG3DJUdU0KORxn88UlsOULuxLExMh3Hs=
@@ -666,6 +667,8 @@ github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.2 h1:aeE13tS0IiQgFjYdoL8qN3K1N2bXXtI6Vi51/y7BpMw=
github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.3-0.20201103224600-674baa8c7fc3 h1:ur2rms48b3Ep1dxh7aUV2FZEQ8jEVO2F6ILKx8ofkAg=
+github.com/golang/snappy v0.0.3-0.20201103224600-674baa8c7fc3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/gomodule/redigo v2.0.0+incompatible h1:K/R+8tc58AaqLkqG2Ol3Qk+DR/TlNuhuh457pBFPtt0=
github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@@ -1373,6 +1376,7 @@ github.com/prometheus/prometheus v1.8.2-0.20201028100903-3245b3267b24/go.mod h1:
github.com/prometheus/prometheus v1.8.2-0.20201029103703-63be30dceed9 h1:T6pkPNGKXv21lLfgD/mnIABj9aOhmz8HphDmKllfKWs=
github.com/prometheus/prometheus v1.8.2-0.20201029103703-63be30dceed9/go.mod h1:MDRkz271loM/PrYN+wUNEaTMDGSP760MQzB0yEjdgSQ=
github.com/prometheus/prometheus v1.8.2-0.20201119142752-3ad25a6dc3d9/go.mod h1:1MDE/bXgu4gqd5w/otko6WQpXZX9vu8QX4KbitCmaPg=
+github.com/prometheus/prometheus v1.8.2-0.20201119181812-c8f810083d3f/go.mod h1:1MDE/bXgu4gqd5w/otko6WQpXZX9vu8QX4KbitCmaPg=
github.com/prometheus/prometheus v1.8.2-0.20210124145330-b5dfa2414b9e h1:AecjdAG+yqtpJXxsems6dOD8GT7st5qU9uvlV93G3hw=
github.com/prometheus/prometheus v1.8.2-0.20210124145330-b5dfa2414b9e/go.mod h1:pZyryEk2SoMVjRI6XFqZLW7B9vPevv8lqwESVYjP1WA=
github.com/rafaeljusto/redigomock v0.0.0-20190202135759-257e089e14a1 h1:+kGqA4dNN5hn7WwvKdzHl0rdN5AEkbNZd0VjRltAiZg=
@@ -1502,6 +1506,8 @@ github.com/thanos-io/thanos v0.13.1-0.20201030101306-47f9a225cc52 h1:z3hglXVwJ4H
github.com/thanos-io/thanos v0.13.1-0.20201030101306-47f9a225cc52/go.mod h1:OqqX4x21cg5N5MMHd/yGQAc/V3wg8a7Do4Jk8HfaFZQ=
github.com/thanos-io/thanos v0.13.1-0.20210108102609-f85e4003ba51 h1:cinCqkVci8c5Dg6uB3m3351EjLAXDbwJVFT+bgwu/Ew=
github.com/thanos-io/thanos v0.13.1-0.20210108102609-f85e4003ba51/go.mod h1:kPvI4H0AynFiHDN95ZB28/k70ZPGCx+pBrRh6RZPimw=
+github.com/thanos-io/thanos v0.13.1-0.20210204123931-82545cdd16fe h1:YMGaJuBKOK3XtCxxezHClrV2OTImnSdzpMQnXG9nqgw=
+github.com/thanos-io/thanos v0.13.1-0.20210204123931-82545cdd16fe/go.mod h1:ZLDGYRNkgM+FCwYNOD+6tOV+DE2fpjzfV6iqXyOgFIw=
github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab h1:7ZR3hmisBWw77ZpO1/o86g+JV3VKlk3d48jopJxzTjU=
github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab/go.mod h1:eheTFp954zcWZXCU8d0AT76ftsQOTo4DTqkN/h3k1MY=
github.com/tidwall/gjson v1.6.0/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls=
@@ -1549,6 +1555,7 @@ github.com/weaveworks/common v0.0.0-20200625145055-4b1847531bc9 h1:dNVIG9aKQHR9T
github.com/weaveworks/common v0.0.0-20200625145055-4b1847531bc9/go.mod h1:c98fKi5B9u8OsKGiWHLRKus6ToQ1Tubeow44ECO1uxY=
github.com/weaveworks/common v0.0.0-20200914083218-61ffdd448099 h1:MS5M2antM8wzMUqVxIfAi+yb6yjXvDINRFvLnmNXeIw=
github.com/weaveworks/common v0.0.0-20200914083218-61ffdd448099/go.mod h1:hz10LOsAdzC3K/iXaKoFxOKTDRgxJl+BTGX1GY+TzO4=
+github.com/weaveworks/common v0.0.0-20201119133501-0619918236ec/go.mod h1:ykzWac1LtVfOxdCK+jD754at1Ws9dKCwFeUzkFBffPs=
github.com/weaveworks/common v0.0.0-20210112142934-23c8d7fa6120 h1:zQtcwREXYNvW116ipgc0bRDg1avD2b6QP0RGPLlPWkc=
github.com/weaveworks/common v0.0.0-20210112142934-23c8d7fa6120/go.mod h1:ykzWac1LtVfOxdCK+jD754at1Ws9dKCwFeUzkFBffPs=
github.com/weaveworks/promrus v1.2.0 h1:jOLf6pe6/vss4qGHjXmGz4oDJQA+AOCqEL3FvvZGz7M=
diff --git a/pkg/chunkenc/memchunk.go b/pkg/chunkenc/memchunk.go
index 98d1007d1e6ae..151e16593b345 100644
--- a/pkg/chunkenc/memchunk.go
+++ b/pkg/chunkenc/memchunk.go
@@ -13,7 +13,7 @@ import (
"time"
"github.com/cespare/xxhash/v2"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/go-kit/kit/log/level"
"github.com/pkg/errors"
@@ -342,7 +342,7 @@ func NewByteChunk(b []byte, blockSize, targetSize int) (*MemChunk, error) {
// Verify checksums.
expCRC := binary.BigEndian.Uint32(b[blk.offset+l:])
if expCRC != crc32.Checksum(blk.b, castagnoliTable) {
- level.Error(util.Logger).Log("msg", "Checksum does not match for a block in chunk, this block will be skipped", "err", ErrInvalidChecksum)
+ level.Error(util_log.Logger).Log("msg", "Checksum does not match for a block in chunk, this block will be skipped", "err", ErrInvalidChecksum)
continue
}
diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go
index 3ee704b7e1ccc..eb88bc418713c 100644
--- a/pkg/distributor/distributor.go
+++ b/pkg/distributor/distributor.go
@@ -9,8 +9,8 @@ import (
cortex_distributor "github.com/cortexproject/cortex/pkg/distributor"
"github.com/cortexproject/cortex/pkg/ring"
ring_client "github.com/cortexproject/cortex/pkg/ring/client"
- cortex_util "github.com/cortexproject/cortex/pkg/util"
"github.com/cortexproject/cortex/pkg/util/limiter"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/cortexproject/cortex/pkg/util/services"
lru "github.com/hashicorp/golang-lru"
"github.com/pkg/errors"
@@ -124,7 +124,7 @@ func New(cfg Config, clientCfg client.Config, ingestersRing ring.ReadRing, overr
ingestersRing: ingestersRing,
distributorsRing: distributorsRing,
validator: validator,
- pool: cortex_distributor.NewPool(clientCfg.PoolConfig, ingestersRing, factory, cortex_util.Logger),
+ pool: cortex_distributor.NewPool(clientCfg.PoolConfig, ingestersRing, factory, util_log.Logger),
ingestionRateLimiter: limiter.NewRateLimiter(ingestionRateStrategy, 10*time.Second),
labelCache: labelCache,
}
@@ -234,10 +234,10 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log
}
const maxExpectedReplicationSet = 5 // typical replication factor 3 plus one for inactive plus one for luck
- var descs [maxExpectedReplicationSet]ring.IngesterDesc
+ var descs [maxExpectedReplicationSet]ring.InstanceDesc
samplesByIngester := map[string][]*streamTracker{}
- ingesterDescs := map[string]ring.IngesterDesc{}
+ ingesterDescs := map[string]ring.InstanceDesc{}
for i, key := range keys {
replicationSet, err := d.ingestersRing.Get(key, ring.Write, descs[:0], nil, nil)
if err != nil {
@@ -258,7 +258,7 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log
}
tracker.samplesPending.Store(int32(len(streams)))
for ingester, samples := range samplesByIngester {
- go func(ingester ring.IngesterDesc, samples []*streamTracker) {
+ go func(ingester ring.InstanceDesc, samples []*streamTracker) {
// Use a background context to make sure all ingesters get samples even if we return early
localCtx, cancel := context.WithTimeout(context.Background(), d.clientCfg.RemoteTimeout)
defer cancel()
@@ -280,7 +280,7 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log
}
// TODO taken from Cortex, see if we can refactor out an usable interface.
-func (d *Distributor) sendSamples(ctx context.Context, ingester ring.IngesterDesc, streamTrackers []*streamTracker, pushTracker *pushTracker) {
+func (d *Distributor) sendSamples(ctx context.Context, ingester ring.InstanceDesc, streamTrackers []*streamTracker, pushTracker *pushTracker) {
err := d.sendSamplesErr(ctx, ingester, streamTrackers)
// If we succeed, decrement each sample's pending count by one. If we reach
@@ -312,7 +312,7 @@ func (d *Distributor) sendSamples(ctx context.Context, ingester ring.IngesterDes
}
// TODO taken from Cortex, see if we can refactor out an usable interface.
-func (d *Distributor) sendSamplesErr(ctx context.Context, ingester ring.IngesterDesc, streams []*streamTracker) error {
+func (d *Distributor) sendSamplesErr(ctx context.Context, ingester ring.InstanceDesc, streams []*streamTracker) error {
c, err := d.pool.GetClientFor(ingester.Addr)
if err != nil {
return err
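
Besides the logger move, the Cortex bump renames the ring's per-instance descriptor: `ring.IngesterDesc` becomes `ring.InstanceDesc` with the same fields, so the distributor hunks above only touch type names in signatures and buffers. A hedged sketch of the rename; the helper and the address are made up, not Loki code:

```go
package main

import (
	"fmt"

	"github.com/cortexproject/cortex/pkg/ring"
)

// addrOf mirrors what the distributor needs from a ring entry: only the Addr
// field, which the IngesterDesc -> InstanceDesc rename leaves untouched.
func addrOf(desc ring.InstanceDesc) string {
	return desc.Addr
}

func main() {
	// Previously: ring.IngesterDesc{Addr: "ingester-1:9095"}
	d := ring.InstanceDesc{Addr: "ingester-1:9095"}
	fmt.Println(addrOf(d))
}
```

The same rename is applied to `descs`, `ingesterDescs`, the distributor test mocks, and the ingester transfer code further down.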
diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go
index 05b09885bf8aa..5903c701e0571 100644
--- a/pkg/distributor/distributor_test.go
+++ b/pkg/distributor/distributor_test.go
@@ -294,7 +294,7 @@ func prepare(t *testing.T, limits *validation.Limits, kvStore kv.Client, factory
replicationFactor: 3,
}
for addr := range ingesters {
- ingestersRing.ingesters = append(ingestersRing.ingesters, ring.IngesterDesc{
+ ingestersRing.ingesters = append(ingestersRing.ingesters, ring.InstanceDesc{
Addr: addr,
})
}
@@ -363,11 +363,11 @@ func (i *mockIngester) Close() error {
// ingesters.
type mockRing struct {
prometheus.Counter
- ingesters []ring.IngesterDesc
+ ingesters []ring.InstanceDesc
replicationFactor uint32
}
-func (r mockRing) Get(key uint32, op ring.Operation, buf []ring.IngesterDesc, _ []string, _ []string) (ring.ReplicationSet, error) {
+func (r mockRing) Get(key uint32, op ring.Operation, buf []ring.InstanceDesc, _ []string, _ []string) (ring.ReplicationSet, error) {
result := ring.ReplicationSet{
MaxErrors: 1,
Ingesters: buf[:0],
diff --git a/pkg/distributor/http.go b/pkg/distributor/http.go
index 4f8cdd355d078..661a74c9c4927 100644
--- a/pkg/distributor/http.go
+++ b/pkg/distributor/http.go
@@ -62,7 +62,7 @@ func (d *Distributor) PushHandler(w http.ResponseWriter, r *http.Request) {
func ParseRequest(r *http.Request) (*logproto.PushRequest, error) {
userID, _ := user.ExtractOrgID(r.Context())
- logger := util_log.WithContext(r.Context(), util.Logger)
+ logger := util_log.WithContext(r.Context(), util_log.Logger)
body := lokiutil.NewSizeReader(r.Body)
contentType := r.Header.Get(contentType)
var req logproto.PushRequest
diff --git a/pkg/helpers/logerror.go b/pkg/helpers/logerror.go
index b6c723ee4e7e4..7fc90291da1ed 100644
--- a/pkg/helpers/logerror.go
+++ b/pkg/helpers/logerror.go
@@ -3,7 +3,6 @@ package helpers
import (
"context"
- "github.com/cortexproject/cortex/pkg/util"
util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/go-kit/kit/log/level"
)
@@ -11,13 +10,13 @@ import (
// LogError logs any error returned by f; useful when deferring Close etc.
func LogError(message string, f func() error) {
if err := f(); err != nil {
- level.Error(util.Logger).Log("message", message, "error", err)
+ level.Error(util_log.Logger).Log("message", message, "error", err)
}
}
// LogError logs any error returned by f; useful when deferring Close etc.
func LogErrorWithContext(ctx context.Context, message string, f func() error) {
if err := f(); err != nil {
- level.Error(util_log.WithContext(ctx, util.Logger)).Log("message", message, "error", err)
+ level.Error(util_log.WithContext(ctx, util_log.Logger)).Log("message", message, "error", err)
}
}
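
The two helpers above keep their behavior — log any error returned by a deferred cleanup function — and only switch which package supplies the logger. A usage sketch under the assumption that callers defer them around `Close`; the file name and the `readConfig` function are hypothetical:

```go
package main

import (
	"context"
	"os"

	"github.com/grafana/loki/pkg/helpers"
)

func readConfig(ctx context.Context, path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	// Any error from Close is logged through util_log.Logger instead of being
	// silently dropped; LogError is the context-free variant of the same idea.
	defer helpers.LogErrorWithContext(ctx, "close config file", f.Close)

	// ... read and parse f ...
	return nil
}

func main() {
	_ = readConfig(context.Background(), "loki.yaml")
}
```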
diff --git a/pkg/ingester/checkpoint.go b/pkg/ingester/checkpoint.go
index cc50263e3252e..cebb90a1a6c20 100644
--- a/pkg/ingester/checkpoint.go
+++ b/pkg/ingester/checkpoint.go
@@ -11,7 +11,7 @@ import (
"time"
"github.com/cortexproject/cortex/pkg/ingester/client"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/dustin/go-humanize"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
@@ -329,13 +329,13 @@ func (w *WALCheckpointWriter) Advance() (bool, error) {
// Checkpoint is named after the last WAL segment present so that when replaying the WAL
// we can start from that particular WAL segment.
checkpointDir := filepath.Join(w.segmentWAL.Dir(), fmt.Sprintf(checkpointPrefix+"%06d", lastSegment))
- level.Info(util.Logger).Log("msg", "attempting checkpoint for", "dir", checkpointDir)
+ level.Info(util_log.Logger).Log("msg", "attempting checkpoint for", "dir", checkpointDir)
checkpointDirTemp := checkpointDir + ".tmp"
// cleanup any old partial checkpoints
if _, err := os.Stat(checkpointDirTemp); err == nil {
if err := os.RemoveAll(checkpointDirTemp); err != nil {
- level.Error(util.Logger).Log("msg", "unable to cleanup old tmp checkpoint", "dir", checkpointDirTemp)
+ level.Error(util_log.Logger).Log("msg", "unable to cleanup old tmp checkpoint", "dir", checkpointDirTemp)
return false, err
}
}
@@ -344,7 +344,7 @@ func (w *WALCheckpointWriter) Advance() (bool, error) {
return false, errors.Wrap(err, "create checkpoint dir")
}
- checkpoint, err := wal.NewSize(log.With(util.Logger, "component", "checkpoint_wal"), nil, checkpointDirTemp, walSegmentSize, false)
+ checkpoint, err := wal.NewSize(log.With(util_log.Logger, "component", "checkpoint_wal"), nil, checkpointDirTemp, walSegmentSize, false)
if err != nil {
return false, errors.Wrap(err, "open checkpoint")
}
@@ -370,7 +370,7 @@ func (w *WALCheckpointWriter) Write(s *Series) error {
w.recs = append(w.recs, b)
w.bufSize += len(b)
- level.Debug(util.Logger).Log("msg", "writing series", "size", humanize.Bytes(uint64(len(b))))
+ level.Debug(util_log.Logger).Log("msg", "writing series", "size", humanize.Bytes(uint64(len(b))))
// 1MB
if w.bufSize > 1<<20 {
@@ -382,7 +382,7 @@ func (w *WALCheckpointWriter) Write(s *Series) error {
}
func (w *WALCheckpointWriter) flush() error {
- level.Debug(util.Logger).Log("msg", "flushing series", "totalSize", humanize.Bytes(uint64(w.bufSize)), "series", len(w.recs))
+ level.Debug(util_log.Logger).Log("msg", "flushing series", "totalSize", humanize.Bytes(uint64(w.bufSize)), "series", len(w.recs))
if err := w.checkpointWAL.Log(w.recs...); err != nil {
return err
}
@@ -491,21 +491,21 @@ func (w *WALCheckpointWriter) Close(abort bool) error {
if err := fileutil.Replace(w.checkpointWAL.Dir(), w.final); err != nil {
return errors.Wrap(err, "rename checkpoint directory")
}
- level.Info(util.Logger).Log("msg", "atomic checkpoint finished", "old", w.checkpointWAL.Dir(), "new", w.final)
+ level.Info(util_log.Logger).Log("msg", "atomic checkpoint finished", "old", w.checkpointWAL.Dir(), "new", w.final)
// We delete the WAL segments which are before the previous checkpoint and not before the
// current checkpoint created. This is because if the latest checkpoint is corrupted for any reason, we
// should be able to recover from the older checkpoint which would need the older WAL segments.
if err := w.segmentWAL.Truncate(w.lastSegment + 1); err != nil {
// It is fine to have old WAL segments hanging around if deletion failed.
// We can try again next time.
- level.Error(util.Logger).Log("msg", "error deleting old WAL segments", "err", err, "lastSegment", w.lastSegment)
+ level.Error(util_log.Logger).Log("msg", "error deleting old WAL segments", "err", err, "lastSegment", w.lastSegment)
}
if w.lastSegment >= 0 {
if err := w.deleteCheckpoints(w.lastSegment); err != nil {
// It is fine to have old checkpoints hanging around if deletion failed.
// We can try again next time.
- level.Error(util.Logger).Log("msg", "error deleting old checkpoint", "err", err)
+ level.Error(util_log.Logger).Log("msg", "error deleting old checkpoint", "err", err)
}
}
@@ -562,7 +562,7 @@ func (c *Checkpointer) PerformCheckpoint() (err error) {
start := time.Now()
defer func() {
elapsed := time.Since(start)
- level.Info(util.Logger).Log("msg", "checkpoint done", "time", elapsed.String())
+ level.Info(util_log.Logger).Log("msg", "checkpoint done", "time", elapsed.String())
c.metrics.checkpointDuration.Observe(elapsed.Seconds())
}()
@@ -604,9 +604,9 @@ func (c *Checkpointer) Run() {
for {
select {
case <-ticker.C:
- level.Info(util.Logger).Log("msg", "starting checkpoint")
+ level.Info(util_log.Logger).Log("msg", "starting checkpoint")
if err := c.PerformCheckpoint(); err != nil {
- level.Error(util.Logger).Log("msg", "error checkpointing series", "err", err)
+ level.Error(util_log.Logger).Log("msg", "error checkpointing series", "err", err)
continue
}
case <-c.quit:
diff --git a/pkg/ingester/flush.go b/pkg/ingester/flush.go
index 3a9fc70410779..b3f207119f857 100644
--- a/pkg/ingester/flush.go
+++ b/pkg/ingester/flush.go
@@ -201,7 +201,7 @@ func (i *Ingester) sweepStream(instance *instance, stream *stream, immediate boo
func (i *Ingester) flushLoop(j int) {
defer func() {
- level.Debug(util.Logger).Log("msg", "Ingester.flushLoop() exited")
+ level.Debug(util_log.Logger).Log("msg", "Ingester.flushLoop() exited")
i.flushQueuesDone.Done()
}()
@@ -212,11 +212,11 @@ func (i *Ingester) flushLoop(j int) {
}
op := o.(*flushOp)
- level.Debug(util.Logger).Log("msg", "flushing stream", "userid", op.userID, "fp", op.fp, "immediate", op.immediate)
+ level.Debug(util_log.Logger).Log("msg", "flushing stream", "userid", op.userID, "fp", op.fp, "immediate", op.immediate)
err := i.flushUserSeries(op.userID, op.fp, op.immediate)
if err != nil {
- level.Error(util_log.WithUserID(op.userID, util.Logger)).Log("msg", "failed to flush user", "err", err)
+ level.Error(util_log.WithUserID(op.userID, util_log.Logger)).Log("msg", "failed to flush user", "err", err)
}
// If we're exiting & we failed to flush, put the failed operation
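
As in the flush path above, log lines keep being decorated with per-request information; only the base logger changes. A small sketch of the `WithUserID`/`WithContext` decoration, using a made-up tenant ID and pointing the global logger at stderr only so the output is visible:

```go
package main

import (
	"context"
	"os"

	util_log "github.com/cortexproject/cortex/pkg/util/log"
	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/log/level"
)

func main() {
	// In Loki the global logger is set up once at startup via InitLogger; for
	// this sketch we point it at stderr directly.
	util_log.Logger = log.NewLogfmtLogger(os.Stderr)

	// Attach a tenant ID, as flushLoop does before reporting a flush failure.
	logger := util_log.WithUserID("fake-tenant", util_log.Logger)
	level.Error(logger).Log("msg", "failed to flush user", "err", "example error")

	// Attach whatever request-scoped IDs are stored in the context (org ID, trace ID).
	ctx := context.Background()
	level.Debug(util_log.WithContext(ctx, util_log.Logger)).Log("msg", "flushing stream", "immediate", false)
}
```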
diff --git a/pkg/ingester/flush_test.go b/pkg/ingester/flush_test.go
index 5918237b62836..66d172f5cbb03 100644
--- a/pkg/ingester/flush_test.go
+++ b/pkg/ingester/flush_test.go
@@ -37,10 +37,6 @@ const (
samplesPerSeries = 100
)
-func init() {
- // util.Logger = log.NewLogfmtLogger(os.Stdout)
-}
-
func TestChunkFlushingIdle(t *testing.T) {
cfg := defaultIngesterTestConfig(t)
cfg.FlushCheckPeriod = 20 * time.Millisecond
diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go
index 4c0d9f56c02de..25047a454931c 100644
--- a/pkg/ingester/ingester.go
+++ b/pkg/ingester/ingester.go
@@ -12,6 +12,7 @@ import (
"github.com/cortexproject/cortex/pkg/chunk"
"github.com/cortexproject/cortex/pkg/ring"
"github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/cortexproject/cortex/pkg/util/services"
"github.com/go-kit/kit/log/level"
"github.com/pkg/errors"
@@ -236,7 +237,7 @@ func (i *Ingester) starting(ctx context.Context) error {
start := time.Now()
- level.Info(util.Logger).Log("msg", "recovering from checkpoint")
+ level.Info(util_log.Logger).Log("msg", "recovering from checkpoint")
checkpointReader, checkpointCloser, err := newCheckpointReader(i.cfg.WAL.Dir)
if err != nil {
return err
@@ -246,19 +247,19 @@ func (i *Ingester) starting(ctx context.Context) error {
checkpointRecoveryErr := RecoverCheckpoint(checkpointReader, recoverer)
if checkpointRecoveryErr != nil {
i.metrics.walCorruptionsTotal.WithLabelValues(walTypeCheckpoint).Inc()
- level.Error(util.Logger).Log(
+ level.Error(util_log.Logger).Log(
"msg",
`Recovered from checkpoint with errors. Some streams were likely not recovered due to WAL checkpoint file corruptions (or WAL file deletions while Loki is running). No administrator action is needed and data loss is only a possibility if more than (replication factor / 2 + 1) ingesters suffer from this.`,
"elapsed", time.Since(start).String(),
)
}
- level.Info(util.Logger).Log(
+ level.Info(util_log.Logger).Log(
"msg", "recovered WAL checkpoint recovery finished",
"elapsed", time.Since(start).String(),
"errors", checkpointRecoveryErr != nil,
)
- level.Info(util.Logger).Log("msg", "recovering from WAL")
+ level.Info(util_log.Logger).Log("msg", "recovering from WAL")
segmentReader, segmentCloser, err := newWalReader(i.cfg.WAL.Dir, -1)
if err != nil {
return err
@@ -268,13 +269,13 @@ func (i *Ingester) starting(ctx context.Context) error {
segmentRecoveryErr := RecoverWAL(segmentReader, recoverer)
if segmentRecoveryErr != nil {
i.metrics.walCorruptionsTotal.WithLabelValues(walTypeSegment).Inc()
- level.Error(util.Logger).Log(
+ level.Error(util_log.Logger).Log(
"msg",
"Recovered from WAL segments with errors. Some streams and/or entries were likely not recovered due to WAL segment file corruptions (or WAL file deletions while Loki is running). No administrator action is needed and data loss is only a possibility if more than (replication factor / 2 + 1) ingesters suffer from this.",
"elapsed", time.Since(start).String(),
)
}
- level.Info(util.Logger).Log(
+ level.Info(util_log.Logger).Log(
"msg", "WAL segment recovery finished",
"elapsed", time.Since(start).String(),
"errors", segmentRecoveryErr != nil,
@@ -282,7 +283,7 @@ func (i *Ingester) starting(ctx context.Context) error {
elapsed := time.Since(start)
i.metrics.walReplayDuration.Set(elapsed.Seconds())
- level.Info(util.Logger).Log("msg", "recovery finished", "time", elapsed.String())
+ level.Info(util_log.Logger).Log("msg", "recovery finished", "time", elapsed.String())
}
diff --git a/pkg/ingester/instance.go b/pkg/ingester/instance.go
index a2f83e4f5b993..6d1b0c7bd7552 100644
--- a/pkg/ingester/instance.go
+++ b/pkg/ingester/instance.go
@@ -19,8 +19,8 @@ import (
"github.com/cortexproject/cortex/pkg/ingester/client"
"github.com/cortexproject/cortex/pkg/ingester/index"
- "github.com/cortexproject/cortex/pkg/util"
cutil "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/grafana/loki/pkg/helpers"
"github.com/grafana/loki/pkg/iter"
@@ -175,7 +175,7 @@ func (i *instance) Push(ctx context.Context, req *logproto.PushRequest) error {
if e, ok := err.(*os.PathError); ok && e.Err == syscall.ENOSPC {
i.metrics.walDiskFullFailures.Inc()
i.flushOnShutdownSwitch.TriggerAnd(func() {
- level.Error(util.Logger).Log(
+ level.Error(util_log.Logger).Log(
"msg",
"Error writing to WAL, disk full, no further messages will be logged for this error",
)
diff --git a/pkg/ingester/mapper.go b/pkg/ingester/mapper.go
index ee1feb7b74e3b..3e1d99040f931 100644
--- a/pkg/ingester/mapper.go
+++ b/pkg/ingester/mapper.go
@@ -8,7 +8,7 @@ import (
"github.com/prometheus/prometheus/pkg/labels"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/go-kit/kit/log/level"
"github.com/prometheus/common/model"
"go.uber.org/atomic"
@@ -104,7 +104,7 @@ func (m *fpMapper) maybeAddMapping(fp model.Fingerprint, collidingMetric labels.
// A new mapping has to be created.
mappedFP = m.nextMappedFP()
mappedFPs[ms] = mappedFP
- level.Info(util.Logger).Log(
+ level.Info(util_log.Logger).Log(
"msg", "fingerprint collision detected, mapping to new fingerprint",
"old_fp", fp,
"new_fp", mappedFP,
@@ -118,7 +118,7 @@ func (m *fpMapper) maybeAddMapping(fp model.Fingerprint, collidingMetric labels.
m.mtx.Lock()
m.mappings[fp] = mappedFPs
m.mtx.Unlock()
- level.Info(util.Logger).Log(
+ level.Info(util_log.Logger).Log(
"msg", "fingerprint collision detected, mapping to new fingerprint",
"old_fp", fp,
"new_fp", mappedFP,
diff --git a/pkg/ingester/recovery.go b/pkg/ingester/recovery.go
index ef5ef062c96d4..9e63f82628762 100644
--- a/pkg/ingester/recovery.go
+++ b/pkg/ingester/recovery.go
@@ -6,7 +6,7 @@ import (
"sync"
"github.com/cortexproject/cortex/pkg/ingester/client"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/go-kit/kit/log/level"
"github.com/pkg/errors"
"github.com/prometheus/prometheus/tsdb/record"
@@ -70,7 +70,7 @@ func newCheckpointReader(dir string) (WALReader, io.Closer, error) {
return nil, nil, err
}
if idx < 0 {
- level.Info(util.Logger).Log("msg", "no checkpoint found, treating as no-op")
+ level.Info(util_log.Logger).Log("msg", "no checkpoint found, treating as no-op")
var reader NoopWALReader
return reader, reader, nil
}
diff --git a/pkg/ingester/stream.go b/pkg/ingester/stream.go
index 73aafd9afe4d6..a531cfc24553c 100644
--- a/pkg/ingester/stream.go
+++ b/pkg/ingester/stream.go
@@ -8,7 +8,6 @@ import (
"sync"
"time"
- "github.com/cortexproject/cortex/pkg/util"
util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/go-kit/kit/log/level"
"github.com/prometheus/client_golang/prometheus"
@@ -181,7 +180,7 @@ func (s *stream) Push(
if err != nil {
// This should be an unlikely situation, returning an error up the stack doesn't help much here
// so instead log this to help debug the issue if it ever arises.
- level.Error(util_log.WithContext(ctx, util.Logger)).Log("msg", "failed to Close chunk", "err", err)
+ level.Error(util_log.WithContext(ctx, util_log.Logger)).Log("msg", "failed to Close chunk", "err", err)
}
chunk.closed = true
diff --git a/pkg/ingester/tailer.go b/pkg/ingester/tailer.go
index 224ba122966cf..2822b0d562bff 100644
--- a/pkg/ingester/tailer.go
+++ b/pkg/ingester/tailer.go
@@ -6,7 +6,6 @@ import (
"sync"
"time"
- cortex_util "github.com/cortexproject/cortex/pkg/util"
util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/go-kit/kit/log/level"
"github.com/prometheus/prometheus/pkg/labels"
@@ -102,7 +101,7 @@ func (t *tailer) loop() {
if err != nil {
// Don't log any error due to tail client closing the connection
if !util.IsConnCanceled(err) {
- level.Error(util_log.WithContext(t.conn.Context(), cortex_util.Logger)).Log("msg", "Error writing to tail client", "err", err)
+ level.Error(util_log.WithContext(t.conn.Context(), util_log.Logger)).Log("msg", "Error writing to tail client", "err", err)
}
t.close()
return
diff --git a/pkg/ingester/transfer.go b/pkg/ingester/transfer.go
index 0080872eec9bb..545dc769d5fc8 100644
--- a/pkg/ingester/transfer.go
+++ b/pkg/ingester/transfer.go
@@ -37,7 +37,7 @@ var (
// TransferChunks receives all chunks from another ingester. The Ingester
// must be in PENDING state or else the call will fail.
func (i *Ingester) TransferChunks(stream logproto.Ingester_TransferChunksServer) error {
- logger := util_log.WithContext(stream.Context(), util.Logger)
+ logger := util_log.WithContext(stream.Context(), util_log.Logger)
// Prevent a shutdown from happening until we've completely finished a handoff
// from a leaving ingester.
i.shutdownMtx.Lock()
@@ -198,7 +198,7 @@ func (i *Ingester) TransferOut(ctx context.Context) error {
return nil
}
- level.Error(util_log.WithContext(ctx, util.Logger)).Log("msg", "transfer failed", "err", err)
+ level.Error(util_log.WithContext(ctx, util_log.Logger)).Log("msg", "transfer failed", "err", err)
backoff.Wait()
}
@@ -206,7 +206,7 @@ func (i *Ingester) TransferOut(ctx context.Context) error {
}
func (i *Ingester) transferOut(ctx context.Context) error {
- logger := util_log.WithContext(ctx, util.Logger)
+ logger := util_log.WithContext(ctx, util_log.Logger)
targetIngester, err := i.findTransferTarget(ctx)
if err != nil {
return fmt.Errorf("cannot find ingester to transfer chunks to: %v", err)
@@ -296,7 +296,7 @@ func (i *Ingester) transferOut(ctx context.Context) error {
// findTransferTarget finds an ingester in a PENDING state to use for transferring
// chunks to.
-func (i *Ingester) findTransferTarget(ctx context.Context) (*ring.IngesterDesc, error) {
+func (i *Ingester) findTransferTarget(ctx context.Context) (*ring.InstanceDesc, error) {
ringDesc, err := i.lifecycler.KVStore.Get(ctx, ring.IngesterRingKey)
if err != nil {
return nil, err
diff --git a/pkg/ingester/transfer_test.go b/pkg/ingester/transfer_test.go
index 0d33bdef87a96..28ec897282b0e 100644
--- a/pkg/ingester/transfer_test.go
+++ b/pkg/ingester/transfer_test.go
@@ -10,7 +10,7 @@ import (
"github.com/cortexproject/cortex/pkg/ring"
"github.com/cortexproject/cortex/pkg/ring/kv"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/cortexproject/cortex/pkg/util/services"
"github.com/go-kit/kit/log/level"
@@ -204,7 +204,7 @@ func (c *testIngesterClient) TransferChunks(context.Context, ...grpc.CallOption)
c.i.stopIncomingRequests() // used to be called from lifecycler, now it must be called *before* stopping lifecyler. (ingester does this on shutdown)
err := services.StopAndAwaitTerminated(context.Background(), c.i.lifecycler)
if err != nil {
- level.Error(util.Logger).Log("msg", "lifecycler failed", "err", err)
+ level.Error(util_log.Logger).Log("msg", "lifecycler failed", "err", err)
}
}()
diff --git a/pkg/ingester/wal.go b/pkg/ingester/wal.go
index 59bd6037a156b..83d69d3f5dede 100644
--- a/pkg/ingester/wal.go
+++ b/pkg/ingester/wal.go
@@ -5,7 +5,7 @@ import (
"sync"
"time"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/go-kit/kit/log/level"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
@@ -81,7 +81,7 @@ func newWAL(cfg WALConfig, registerer prometheus.Registerer, metrics *ingesterMe
return noopWAL{}, nil
}
- tsdbWAL, err := wal.NewSize(util.Logger, registerer, cfg.Dir, walSegmentSize, false)
+ tsdbWAL, err := wal.NewSize(util_log.Logger, registerer, cfg.Dir, walSegmentSize, false)
if err != nil {
return nil, err
}
@@ -138,7 +138,7 @@ func (w *walWrapper) Stop() error {
close(w.quit)
w.wait.Wait()
err := w.wal.Close()
- level.Info(util.Logger).Log("msg", "stopped", "component", "wal")
+ level.Info(util_log.Logger).Log("msg", "stopped", "component", "wal")
return err
}
@@ -150,7 +150,7 @@ func (w *walWrapper) checkpointWriter() *WALCheckpointWriter {
}
func (w *walWrapper) run() {
- level.Info(util.Logger).Log("msg", "started", "component", "wal")
+ level.Info(util_log.Logger).Log("msg", "started", "component", "wal")
defer w.wait.Done()
checkpointer := NewCheckpointer(
diff --git a/pkg/logcli/query/query.go b/pkg/logcli/query/query.go
index 43added3f72ab..a3403c4b02984 100644
--- a/pkg/logcli/query/query.go
+++ b/pkg/logcli/query/query.go
@@ -13,7 +13,7 @@ import (
"time"
cortex_storage "github.com/cortexproject/cortex/pkg/chunk/storage"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/fatih/color"
json "github.com/json-iterator/go"
"github.com/prometheus/client_golang/prometheus"
@@ -182,7 +182,7 @@ func (q *Query) DoLocalQuery(out output.LogOutput, statistics bool, orgID string
return err
}
- if err := conf.Validate(util.Logger); err != nil {
+ if err := conf.Validate(util_log.Logger); err != nil {
return err
}
@@ -191,7 +191,7 @@ func (q *Query) DoLocalQuery(out output.LogOutput, statistics bool, orgID string
return err
}
- chunkStore, err := cortex_storage.NewStore(conf.StorageConfig.Config, conf.ChunkStoreConfig, conf.SchemaConfig.SchemaConfig, limits, prometheus.DefaultRegisterer, nil, util.Logger)
+ chunkStore, err := cortex_storage.NewStore(conf.StorageConfig.Config, conf.ChunkStoreConfig, conf.SchemaConfig.SchemaConfig, limits, prometheus.DefaultRegisterer, nil, util_log.Logger)
if err != nil {
return err
}
diff --git a/pkg/logentry/stages/drop_test.go b/pkg/logentry/stages/drop_test.go
index b820662720138..5e2f3b9e4649d 100644
--- a/pkg/logentry/stages/drop_test.go
+++ b/pkg/logentry/stages/drop_test.go
@@ -6,7 +6,7 @@ import (
"testing"
"time"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/assert"
@@ -39,7 +39,7 @@ func Test_dropStage_Process(t *testing.T) {
// Enable debug logging
cfg := &ww.Config{}
require.Nil(t, cfg.LogLevel.Set("debug"))
- util.InitLogger(cfg)
+ util_log.InitLogger(cfg)
Debug = true
tests := []struct {
@@ -265,7 +265,7 @@ func Test_dropStage_Process(t *testing.T) {
if err != nil {
t.Error(err)
}
- m, err := newDropStage(util.Logger, tt.config, prometheus.DefaultRegisterer)
+ m, err := newDropStage(util_log.Logger, tt.config, prometheus.DefaultRegisterer)
require.NoError(t, err)
out := processEntries(m, newEntry(tt.extracted, tt.labels, tt.entry, tt.t))
if tt.shouldDrop {
@@ -285,7 +285,7 @@ func ptrFromString(str string) *string {
func TestDropPipeline(t *testing.T) {
registry := prometheus.NewRegistry()
plName := "test_pipeline"
- pl, err := NewPipeline(util.Logger, loadConfig(testDropYaml), &plName, registry)
+ pl, err := NewPipeline(util_log.Logger, loadConfig(testDropYaml), &plName, registry)
require.NoError(t, err)
out := processEntries(pl,
newEntry(nil, nil, testMatchLogLineApp1, time.Now()),
diff --git a/pkg/logentry/stages/extensions_test.go b/pkg/logentry/stages/extensions_test.go
index 1852db9780232..b9d66b0b3d9c2 100644
--- a/pkg/logentry/stages/extensions_test.go
+++ b/pkg/logentry/stages/extensions_test.go
@@ -4,7 +4,7 @@ import (
"testing"
"time"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/stretchr/testify/assert"
)
@@ -65,7 +65,7 @@ func TestNewDocker(t *testing.T) {
tt := tt
t.Run(tName, func(t *testing.T) {
t.Parallel()
- p, err := NewDocker(util.Logger, prometheus.DefaultRegisterer)
+ p, err := NewDocker(util_log.Logger, prometheus.DefaultRegisterer)
if err != nil {
t.Fatalf("failed to create Docker parser: %s", err)
}
@@ -139,7 +139,7 @@ func TestNewCri(t *testing.T) {
tt := tt
t.Run(tName, func(t *testing.T) {
t.Parallel()
- p, err := NewCRI(util.Logger, prometheus.DefaultRegisterer)
+ p, err := NewCRI(util_log.Logger, prometheus.DefaultRegisterer)
if err != nil {
t.Fatalf("failed to create CRI parser: %s", err)
}
diff --git a/pkg/logentry/stages/json_test.go b/pkg/logentry/stages/json_test.go
index 8b94215b4b62a..91fbdf05859a4 100644
--- a/pkg/logentry/stages/json_test.go
+++ b/pkg/logentry/stages/json_test.go
@@ -5,7 +5,7 @@ import (
"testing"
"time"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/stretchr/testify/assert"
@@ -82,7 +82,7 @@ func TestPipeline_JSON(t *testing.T) {
t.Run(testName, func(t *testing.T) {
t.Parallel()
- pl, err := NewPipeline(util.Logger, loadConfig(testData.config), nil, prometheus.DefaultRegisterer)
+ pl, err := NewPipeline(util_log.Logger, loadConfig(testData.config), nil, prometheus.DefaultRegisterer)
if err != nil {
t.Fatal(err)
}
@@ -355,7 +355,7 @@ func TestJSONParser_Parse(t *testing.T) {
tt := tt
t.Run(tName, func(t *testing.T) {
t.Parallel()
- p, err := New(util.Logger, nil, StageTypeJSON, tt.config, nil)
+ p, err := New(util_log.Logger, nil, StageTypeJSON, tt.config, nil)
if err != nil {
t.Fatalf("failed to create json parser: %s", err)
}
diff --git a/pkg/logentry/stages/labeldrop_test.go b/pkg/logentry/stages/labeldrop_test.go
index 1e7baeb911785..5111cc34108fb 100644
--- a/pkg/logentry/stages/labeldrop_test.go
+++ b/pkg/logentry/stages/labeldrop_test.go
@@ -4,7 +4,7 @@ import (
"testing"
"time"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -15,7 +15,7 @@ func Test_dropLabelStage_Process(t *testing.T) {
// Enable debug logging
cfg := &ww.Config{}
require.Nil(t, cfg.LogLevel.Set("debug"))
- util.InitLogger(cfg)
+ util_log.InitLogger(cfg)
Debug = true
tests := []struct {
diff --git a/pkg/logentry/stages/labels_test.go b/pkg/logentry/stages/labels_test.go
index 532d16e4be4c6..261d32ed3b443 100644
--- a/pkg/logentry/stages/labels_test.go
+++ b/pkg/logentry/stages/labels_test.go
@@ -8,7 +8,7 @@ import (
"testing"
"time"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/go-kit/kit/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
@@ -43,7 +43,7 @@ var testLabelsLogLineWithMissingKey = `
`
func TestLabelsPipeline_Labels(t *testing.T) {
- pl, err := NewPipeline(util.Logger, loadConfig(testLabelsYaml), nil, prometheus.DefaultRegisterer)
+ pl, err := NewPipeline(util_log.Logger, loadConfig(testLabelsYaml), nil, prometheus.DefaultRegisterer)
if err != nil {
t.Fatal(err)
}
@@ -178,7 +178,7 @@ func TestLabelStage_Process(t *testing.T) {
test := test
t.Run(name, func(t *testing.T) {
t.Parallel()
- st, err := newLabelStage(util.Logger, test.config)
+ st, err := newLabelStage(util_log.Logger, test.config)
if err != nil {
t.Fatal(err)
}
diff --git a/pkg/logentry/stages/match_test.go b/pkg/logentry/stages/match_test.go
index ee285dbc65231..80eac40612549 100644
--- a/pkg/logentry/stages/match_test.go
+++ b/pkg/logentry/stages/match_test.go
@@ -5,7 +5,7 @@ import (
"testing"
"time"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/stretchr/testify/assert"
)
@@ -57,7 +57,7 @@ var testMatchLogLineApp2 = `
func TestMatchPipeline(t *testing.T) {
registry := prometheus.NewRegistry()
plName := "test_pipeline"
- pl, err := NewPipeline(util.Logger, loadConfig(testMatchYaml), &plName, registry)
+ pl, err := NewPipeline(util_log.Logger, loadConfig(testMatchYaml), &plName, registry)
if err != nil {
t.Fatal(err)
}
@@ -150,7 +150,7 @@ func TestMatcher(t *testing.T) {
tt.action,
nil,
}
- s, err := newMatcherStage(util.Logger, nil, matchConfig, prometheus.DefaultRegisterer)
+ s, err := newMatcherStage(util_log.Logger, nil, matchConfig, prometheus.DefaultRegisterer)
if (err != nil) != tt.wantErr {
t.Errorf("withMatcher() error = %v, wantErr %v", err, tt.wantErr)
return
diff --git a/pkg/logentry/stages/metrics_test.go b/pkg/logentry/stages/metrics_test.go
index 0505cf99af227..54b2439887fa0 100644
--- a/pkg/logentry/stages/metrics_test.go
+++ b/pkg/logentry/stages/metrics_test.go
@@ -6,7 +6,7 @@ import (
"testing"
"time"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/go-kit/kit/log"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
@@ -110,7 +110,7 @@ promtail_custom_total_lines_count{test="app"} 2
func TestMetricsPipeline(t *testing.T) {
registry := prometheus.NewRegistry()
- pl, err := NewPipeline(util.Logger, loadConfig(testMetricYaml), nil, registry)
+ pl, err := NewPipeline(util_log.Logger, loadConfig(testMetricYaml), nil, registry)
if err != nil {
t.Fatal(err)
}
@@ -169,7 +169,7 @@ promtail_custom_loki_count 1
func TestMetricsWithDropInPipeline(t *testing.T) {
registry := prometheus.NewRegistry()
- pl, err := NewPipeline(util.Logger, loadConfig(testMetricWithDropYaml), nil, registry)
+ pl, err := NewPipeline(util_log.Logger, loadConfig(testMetricWithDropYaml), nil, registry)
if err != nil {
t.Fatal(err)
}
@@ -260,7 +260,7 @@ func TestDefaultIdleDuration(t *testing.T) {
},
},
}
- ms, err := New(util.Logger, nil, StageTypeMetric, metricsConfig, registry)
+ ms, err := New(util_log.Logger, nil, StageTypeMetric, metricsConfig, registry)
if err != nil {
t.Fatalf("failed to create stage with metrics: %v", err)
}
@@ -358,15 +358,15 @@ func TestMetricStage_Process(t *testing.T) {
}
registry := prometheus.NewRegistry()
- jsonStage, err := New(util.Logger, nil, StageTypeJSON, jsonConfig, registry)
+ jsonStage, err := New(util_log.Logger, nil, StageTypeJSON, jsonConfig, registry)
if err != nil {
t.Fatalf("failed to create stage with metrics: %v", err)
}
- regexStage, err := New(util.Logger, nil, StageTypeRegex, regexConfig, registry)
+ regexStage, err := New(util_log.Logger, nil, StageTypeRegex, regexConfig, registry)
if err != nil {
t.Fatalf("failed to create stage with metrics: %v", err)
}
- metricStage, err := New(util.Logger, nil, StageTypeMetric, metricsConfig, registry)
+ metricStage, err := New(util_log.Logger, nil, StageTypeMetric, metricsConfig, registry)
if err != nil {
t.Fatalf("failed to create stage with metrics: %v", err)
}
diff --git a/pkg/logentry/stages/multiline_test.go b/pkg/logentry/stages/multiline_test.go
index 52f8724741557..69fb4e66dd363 100644
--- a/pkg/logentry/stages/multiline_test.go
+++ b/pkg/logentry/stages/multiline_test.go
@@ -6,7 +6,7 @@ import (
"testing"
"time"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
ww "github.com/weaveworks/common/server"
@@ -19,7 +19,7 @@ func Test_multilineStage_Process(t *testing.T) {
// Enable debug logging
cfg := &ww.Config{}
require.Nil(t, cfg.LogLevel.Set("debug"))
- util.InitLogger(cfg)
+ util_log.InitLogger(cfg)
Debug = true
mcfg := &MultilineConfig{Expression: ptrFromString("^START"), MaxWaitTime: ptrFromString("3s")}
@@ -28,7 +28,7 @@ func Test_multilineStage_Process(t *testing.T) {
stage := &multilineStage{
cfg: mcfg,
- logger: util.Logger,
+ logger: util_log.Logger,
}
out := processEntries(stage,
@@ -51,7 +51,7 @@ func Test_multilineStage_MultiStreams(t *testing.T) {
// Enable debug logging
cfg := &ww.Config{}
require.Nil(t, cfg.LogLevel.Set("debug"))
- util.InitLogger(cfg)
+ util_log.InitLogger(cfg)
Debug = true
mcfg := &MultilineConfig{Expression: ptrFromString("^START"), MaxWaitTime: ptrFromString("3s")}
@@ -60,7 +60,7 @@ func Test_multilineStage_MultiStreams(t *testing.T) {
stage := &multilineStage{
cfg: mcfg,
- logger: util.Logger,
+ logger: util_log.Logger,
}
out := processEntries(stage,
@@ -96,7 +96,7 @@ func Test_multilineStage_MaxWaitTime(t *testing.T) {
// Enable debug logging
cfg := &ww.Config{}
require.Nil(t, cfg.LogLevel.Set("debug"))
- util.InitLogger(cfg)
+ util_log.InitLogger(cfg)
Debug = true
maxWait := 2 * time.Second
@@ -106,7 +106,7 @@ func Test_multilineStage_MaxWaitTime(t *testing.T) {
stage := &multilineStage{
cfg: mcfg,
- logger: util.Logger,
+ logger: util_log.Logger,
}
in := make(chan Entry, 2)
diff --git a/pkg/logentry/stages/output_test.go b/pkg/logentry/stages/output_test.go
index 760617b0ca673..8175c0a06a9bf 100644
--- a/pkg/logentry/stages/output_test.go
+++ b/pkg/logentry/stages/output_test.go
@@ -6,7 +6,7 @@ import (
"testing"
"time"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/go-kit/kit/log"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
@@ -43,7 +43,7 @@ var testOutputLogLineWithMissingKey = `
`
func TestPipeline_Output(t *testing.T) {
- pl, err := NewPipeline(util.Logger, loadConfig(testOutputYaml), nil, prometheus.DefaultRegisterer)
+ pl, err := NewPipeline(util_log.Logger, loadConfig(testOutputYaml), nil, prometheus.DefaultRegisterer)
if err != nil {
t.Fatal(err)
}
@@ -122,7 +122,7 @@ func TestOutputStage_Process(t *testing.T) {
test := test
t.Run(name, func(t *testing.T) {
t.Parallel()
- st, err := newOutputStage(util.Logger, test.config)
+ st, err := newOutputStage(util_log.Logger, test.config)
if err != nil {
t.Fatal(err)
}
diff --git a/pkg/logentry/stages/pipeline_test.go b/pkg/logentry/stages/pipeline_test.go
index f3ee08aaa047a..68e05dab06bb1 100644
--- a/pkg/logentry/stages/pipeline_test.go
+++ b/pkg/logentry/stages/pipeline_test.go
@@ -4,7 +4,7 @@ import (
"testing"
"time"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/prometheus/client_golang/prometheus"
@@ -88,7 +88,7 @@ func loadConfig(yml string) PipelineStages {
func TestNewPipeline(t *testing.T) {
- p, err := NewPipeline(util.Logger, loadConfig(testMultiStageYaml), nil, prometheus.DefaultRegisterer)
+ p, err := NewPipeline(util_log.Logger, loadConfig(testMultiStageYaml), nil, prometheus.DefaultRegisterer)
if err != nil {
panic(err)
}
@@ -200,7 +200,7 @@ func TestPipeline_Process(t *testing.T) {
err := yaml.Unmarshal([]byte(tt.config), &config)
require.NoError(t, err)
- p, err := NewPipeline(util.Logger, config["pipeline_stages"].([]interface{}), nil, prometheus.DefaultRegisterer)
+ p, err := NewPipeline(util_log.Logger, config["pipeline_stages"].([]interface{}), nil, prometheus.DefaultRegisterer)
require.NoError(t, err)
out := processEntries(p, newEntry(nil, tt.initialLabels, tt.entry, tt.t))[0]
@@ -273,7 +273,7 @@ func TestPipeline_Wrap(t *testing.T) {
if err != nil {
panic(err)
}
- p, err := NewPipeline(util.Logger, config["pipeline_stages"].([]interface{}), nil, prometheus.DefaultRegisterer)
+ p, err := NewPipeline(util_log.Logger, config["pipeline_stages"].([]interface{}), nil, prometheus.DefaultRegisterer)
if err != nil {
panic(err)
}
diff --git a/pkg/logentry/stages/regex_test.go b/pkg/logentry/stages/regex_test.go
index 8773b4d564c58..5e8c683693fad 100644
--- a/pkg/logentry/stages/regex_test.go
+++ b/pkg/logentry/stages/regex_test.go
@@ -7,7 +7,7 @@ import (
"testing"
"time"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/go-kit/kit/log"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
@@ -101,7 +101,7 @@ func TestPipeline_Regex(t *testing.T) {
t.Run(testName, func(t *testing.T) {
t.Parallel()
- pl, err := NewPipeline(util.Logger, loadConfig(testData.config), nil, prometheus.DefaultRegisterer)
+ pl, err := NewPipeline(util_log.Logger, loadConfig(testData.config), nil, prometheus.DefaultRegisterer)
if err != nil {
t.Fatal(err)
}
@@ -322,7 +322,7 @@ func TestRegexParser_Parse(t *testing.T) {
tt := tt
t.Run(tName, func(t *testing.T) {
t.Parallel()
- p, err := New(util.Logger, nil, StageTypeRegex, tt.config, nil)
+ p, err := New(util_log.Logger, nil, StageTypeRegex, tt.config, nil)
if err != nil {
t.Fatalf("failed to create regex parser: %s", err)
}
@@ -361,7 +361,7 @@ func BenchmarkRegexStage(b *testing.B) {
}
for _, bm := range benchmarks {
b.Run(bm.name, func(b *testing.B) {
- stage, err := New(util.Logger, nil, StageTypeRegex, bm.config, nil)
+ stage, err := New(util_log.Logger, nil, StageTypeRegex, bm.config, nil)
if err != nil {
panic(err)
}
diff --git a/pkg/logentry/stages/replace_test.go b/pkg/logentry/stages/replace_test.go
index 4e2176ed82de2..eb2253019897f 100644
--- a/pkg/logentry/stages/replace_test.go
+++ b/pkg/logentry/stages/replace_test.go
@@ -5,7 +5,7 @@ import (
"testing"
"time"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/stretchr/testify/assert"
@@ -121,7 +121,7 @@ func TestPipeline_Replace(t *testing.T) {
t.Run(testName, func(t *testing.T) {
t.Parallel()
- pl, err := NewPipeline(util.Logger, loadConfig(testData.config), nil, prometheus.DefaultRegisterer)
+ pl, err := NewPipeline(util_log.Logger, loadConfig(testData.config), nil, prometheus.DefaultRegisterer)
if err != nil {
t.Fatal(err)
}
diff --git a/pkg/logentry/stages/template_test.go b/pkg/logentry/stages/template_test.go
index 0c09e90d91751..4049110a71eb0 100644
--- a/pkg/logentry/stages/template_test.go
+++ b/pkg/logentry/stages/template_test.go
@@ -7,7 +7,7 @@ import (
"testing"
"time"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/go-kit/kit/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
@@ -56,7 +56,7 @@ var testTemplateLogLineWithMissingKey = `
`
func TestPipeline_Template(t *testing.T) {
- pl, err := NewPipeline(util.Logger, loadConfig(testTemplateYaml), nil, prometheus.DefaultRegisterer)
+ pl, err := NewPipeline(util_log.Logger, loadConfig(testTemplateYaml), nil, prometheus.DefaultRegisterer)
if err != nil {
t.Fatal(err)
}
@@ -365,7 +365,7 @@ func TestTemplateStage_Process(t *testing.T) {
test := test
t.Run(name, func(t *testing.T) {
t.Parallel()
- st, err := newTemplateStage(util.Logger, test.config)
+ st, err := newTemplateStage(util_log.Logger, test.config)
if err != nil {
t.Fatal(err)
}
diff --git a/pkg/logentry/stages/tenant_test.go b/pkg/logentry/stages/tenant_test.go
index 0999ec71ad6ac..2a65c41f7db4f 100644
--- a/pkg/logentry/stages/tenant_test.go
+++ b/pkg/logentry/stages/tenant_test.go
@@ -6,7 +6,7 @@ import (
"testing"
"time"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/go-kit/kit/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
@@ -100,7 +100,7 @@ func TestTenantStage_Validation(t *testing.T) {
testData := testData
t.Run(testName, func(t *testing.T) {
- stage, err := newTenantStage(util.Logger, testData.config)
+ stage, err := newTenantStage(util_log.Logger, testData.config)
if testData.expectedErr != nil {
assert.EqualError(t, err, *testData.expectedErr)
@@ -170,7 +170,7 @@ func TestTenantStage_Process(t *testing.T) {
testData := testData
t.Run(testName, func(t *testing.T) {
- stage, err := newTenantStage(util.Logger, testData.config)
+ stage, err := newTenantStage(util_log.Logger, testData.config)
require.NoError(t, err)
// Process and dummy line and ensure nothing has changed except
diff --git a/pkg/logentry/stages/timestamp_test.go b/pkg/logentry/stages/timestamp_test.go
index 422b8a0f1abdf..7e37d1de26ed7 100644
--- a/pkg/logentry/stages/timestamp_test.go
+++ b/pkg/logentry/stages/timestamp_test.go
@@ -7,7 +7,7 @@ import (
"testing"
"time"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/go-kit/kit/log"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
@@ -46,7 +46,7 @@ var testTimestampLogLineWithMissingKey = `
`
func TestTimestampPipeline(t *testing.T) {
- pl, err := NewPipeline(util.Logger, loadConfig(testTimestampYaml), nil, prometheus.DefaultRegisterer)
+ pl, err := NewPipeline(util_log.Logger, loadConfig(testTimestampYaml), nil, prometheus.DefaultRegisterer)
if err != nil {
t.Fatal(err)
}
@@ -298,7 +298,7 @@ func TestTimestampStage_Process(t *testing.T) {
test := test
t.Run(name, func(t *testing.T) {
t.Parallel()
- st, err := newTimestampStage(util.Logger, test.config)
+ st, err := newTimestampStage(util_log.Logger, test.config)
if err != nil {
t.Fatal(err)
}
@@ -439,7 +439,7 @@ func TestTimestampStage_ProcessActionOnFailure(t *testing.T) {
// Ensure the test has been correctly set
require.Equal(t, len(testData.inputEntries), len(testData.expectedTimestamps))
- s, err := newTimestampStage(util.Logger, testData.config)
+ s, err := newTimestampStage(util_log.Logger, testData.config)
require.NoError(t, err)
for i, inputEntry := range testData.inputEntries {
diff --git a/pkg/logql/metrics.go b/pkg/logql/metrics.go
index 50489c7235522..d992691aeb7de 100644
--- a/pkg/logql/metrics.go
+++ b/pkg/logql/metrics.go
@@ -5,7 +5,6 @@ import (
"strings"
"time"
- "github.com/cortexproject/cortex/pkg/util"
util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/dustin/go-humanize"
"github.com/go-kit/kit/log/level"
@@ -66,7 +65,7 @@ var (
)
func RecordMetrics(ctx context.Context, p Params, status string, stats stats.Result) {
- logger := util_log.WithContext(ctx, util.Logger)
+ logger := util_log.WithContext(ctx, util_log.Logger)
queryType, err := QueryType(p.Query())
if err != nil {
level.Warn(logger).Log("msg", "error parsing query type", "err", err)
diff --git a/pkg/logql/metrics_test.go b/pkg/logql/metrics_test.go
index c6cb39336d4b9..dd4d42d11a69c 100644
--- a/pkg/logql/metrics_test.go
+++ b/pkg/logql/metrics_test.go
@@ -7,7 +7,7 @@ import (
"testing"
"time"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/go-kit/kit/log"
"github.com/opentracing/opentracing-go"
"github.com/stretchr/testify/require"
@@ -53,7 +53,7 @@ func TestQueryType(t *testing.T) {
func TestLogSlowQuery(t *testing.T) {
buf := bytes.NewBufferString("")
- util.Logger = log.NewLogfmtLogger(buf)
+ util_log.Logger = log.NewLogfmtLogger(buf)
tr, c := jaeger.NewTracer("foo", jaeger.NewConstSampler(true), jaeger.NewInMemoryReporter())
defer c.Close()
opentracing.SetGlobalTracer(tr)
@@ -80,5 +80,5 @@ func TestLogSlowQuery(t *testing.T) {
sp.Context().(jaeger.SpanContext).SpanID().String(),
),
buf.String())
- util.Logger = log.NewNopLogger()
+ util_log.Logger = log.NewNopLogger()
}
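
The test above relies on `util_log.Logger` being a plain package-level variable: it can be pointed at an in-memory buffer for assertions and restored to a no-op logger afterwards. A standalone sketch of that pattern; the logged message is an example, not Loki's actual slow-query line:

```go
package main

import (
	"bytes"
	"fmt"

	util_log "github.com/cortexproject/cortex/pkg/util/log"
	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/log/level"
)

func main() {
	buf := bytes.NewBufferString("")
	util_log.Logger = log.NewLogfmtLogger(buf)
	defer func() { util_log.Logger = log.NewNopLogger() }() // restore, as the test does

	level.Info(util_log.Logger).Log("msg", "slow query", "duration", "11s")

	fmt.Print(buf.String()) // level=info msg="slow query" duration=11s
}
```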
diff --git a/pkg/logql/sharding.go b/pkg/logql/sharding.go
index 4f060d3d4d517..f315000893b64 100644
--- a/pkg/logql/sharding.go
+++ b/pkg/logql/sharding.go
@@ -7,7 +7,7 @@ import (
"time"
"github.com/cortexproject/cortex/pkg/querier/astmapper"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/go-kit/kit/log/level"
"github.com/prometheus/prometheus/promql"
@@ -168,7 +168,7 @@ func (ev DownstreamEvaluator) Downstream(ctx context.Context, queries []Downstre
for _, res := range results {
if err := stats.JoinResults(ctx, res.Statistics); err != nil {
- level.Warn(util.Logger).Log("msg", "unable to merge downstream results", "err", err)
+ level.Warn(util_log.Logger).Log("msg", "unable to merge downstream results", "err", err)
}
}
@@ -241,7 +241,7 @@ func (ev *DownstreamEvaluator) StepEvaluator(
for i, res := range results {
stepper, err := ResultStepEvaluator(res, params)
if err != nil {
- level.Warn(util.Logger).Log(
+ level.Warn(util_log.Logger).Log(
"msg", "could not extract StepEvaluator",
"err", err,
"expr", queries[i].Expr.String(),
@@ -306,7 +306,7 @@ func (ev *DownstreamEvaluator) Iterator(
for i, res := range results {
iter, err := ResultIterator(res, params)
if err != nil {
- level.Warn(util.Logger).Log(
+ level.Warn(util_log.Logger).Log(
"msg", "could not extract Iterator",
"err", err,
"expr", queries[i].Expr.String(),
diff --git a/pkg/logql/shardmapper.go b/pkg/logql/shardmapper.go
index 628fa18956665..893febe8b09de 100644
--- a/pkg/logql/shardmapper.go
+++ b/pkg/logql/shardmapper.go
@@ -4,7 +4,7 @@ import (
"fmt"
"github.com/cortexproject/cortex/pkg/querier/astmapper"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/go-kit/kit/log/level"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
@@ -271,7 +271,7 @@ func (m ShardMapper) mapVectorAggregationExpr(expr *vectorAggregationExpr, r *sh
default:
// this should not be reachable. If an operation is shardable it should
// have an optimization listed.
- level.Warn(util.Logger).Log(
+ level.Warn(util_log.Logger).Log(
"msg", "unexpected operation which appears shardable, ignoring",
"operation", expr.operation,
)
diff --git a/pkg/logql/stats/context_test.go b/pkg/logql/stats/context_test.go
index 8a688494ff572..b791597772e27 100644
--- a/pkg/logql/stats/context_test.go
+++ b/pkg/logql/stats/context_test.go
@@ -5,7 +5,7 @@ import (
"testing"
"time"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
jsoniter "github.com/json-iterator/go"
"github.com/stretchr/testify/require"
)
@@ -28,7 +28,7 @@ func TestSnapshot(t *testing.T) {
fakeIngesterQuery(ctx)
res := Snapshot(ctx, 2*time.Second)
- res.Log(util.Logger)
+ res.Log(util_log.Logger)
expected := Result{
Ingester: Ingester{
TotalChunksMatched: 200,
diff --git a/pkg/logql/stats/grpc.go b/pkg/logql/stats/grpc.go
index 0dcd8c403c8e0..0c9239987b7e1 100644
--- a/pkg/logql/stats/grpc.go
+++ b/pkg/logql/stats/grpc.go
@@ -4,7 +4,6 @@ import (
"context"
"sync"
- "github.com/cortexproject/cortex/pkg/util"
util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/go-kit/kit/log/level"
jsoniter "github.com/json-iterator/go"
@@ -48,7 +47,7 @@ func CollectTrailer(ctx context.Context) grpc.CallOption {
func SendAsTrailer(ctx context.Context, stream grpc.ServerStream) {
trailer, err := encodeTrailer(ctx)
if err != nil {
- level.Warn(util_log.WithContext(ctx, util.Logger)).Log("msg", "failed to encode trailer", "err", err)
+ level.Warn(util_log.WithContext(ctx, util_log.Logger)).Log("msg", "failed to encode trailer", "err", err)
return
}
stream.SetTrailer(trailer)
@@ -111,7 +110,7 @@ func decodeTrailers(ctx context.Context) Result {
}
func decodeTrailer(ctx context.Context, meta *metadata.MD) Result {
- logger := util_log.WithContext(ctx, util.Logger)
+ logger := util_log.WithContext(ctx, util_log.Logger)
var ingData IngesterData
values := meta.Get(ingesterDataKey)
if len(values) == 1 {
diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go
index 7634809d8ce0d..59a97f2e6c088 100644
--- a/pkg/loki/loki.go
+++ b/pkg/loki/loki.go
@@ -23,6 +23,7 @@ import (
cortex_ruler "github.com/cortexproject/cortex/pkg/ruler"
"github.com/cortexproject/cortex/pkg/ruler/rules"
"github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/cortexproject/cortex/pkg/util/runtimeconfig"
"github.com/cortexproject/cortex/pkg/util/services"
@@ -239,8 +240,8 @@ func (t *Loki) Run() error {
t.Server.HTTP.Path("/config").HandlerFunc(configHandler(t.cfg, newDefaultConfig()))
// Let's listen for events from this manager, and log them.
- healthy := func() { level.Info(util.Logger).Log("msg", "Loki started") }
- stopped := func() { level.Info(util.Logger).Log("msg", "Loki stopped") }
+ healthy := func() { level.Info(util_log.Logger).Log("msg", "Loki started") }
+ stopped := func() { level.Info(util_log.Logger).Log("msg", "Loki stopped") }
serviceFailed := func(service services.Service) {
// if any service fails, stop entire Loki
sm.StopAsync()
@@ -249,15 +250,15 @@ func (t *Loki) Run() error {
for m, s := range serviceMap {
if s == service {
if service.FailureCase() == util.ErrStopProcess {
- level.Info(util.Logger).Log("msg", "received stop signal via return error", "module", m, "error", service.FailureCase())
+ level.Info(util_log.Logger).Log("msg", "received stop signal via return error", "module", m, "error", service.FailureCase())
} else {
- level.Error(util.Logger).Log("msg", "module failed", "module", m, "error", service.FailureCase())
+ level.Error(util_log.Logger).Log("msg", "module failed", "module", m, "error", service.FailureCase())
}
return
}
}
- level.Error(util.Logger).Log("msg", "module failed", "module", "unknown", "error", service.FailureCase())
+ level.Error(util_log.Logger).Log("msg", "module failed", "module", "unknown", "error", service.FailureCase())
}
sm.AddListener(services.NewManagerListener(healthy, stopped, serviceFailed))
diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go
index 6260148e38caf..2f5449bbe9888 100644
--- a/pkg/loki/modules.go
+++ b/pkg/loki/modules.go
@@ -28,7 +28,6 @@ import (
"github.com/cortexproject/cortex/pkg/ring/kv/codec"
"github.com/cortexproject/cortex/pkg/ring/kv/memberlist"
cortex_ruler "github.com/cortexproject/cortex/pkg/ruler"
- "github.com/cortexproject/cortex/pkg/util"
util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/cortexproject/cortex/pkg/util/runtimeconfig"
"github.com/cortexproject/cortex/pkg/util/services"
@@ -174,8 +173,8 @@ func (t *Loki) initQuerier() (services.Service, error) {
// In case someone set scheduler address, we ignore it.
t.cfg.Worker.SchedulerAddress = ""
t.cfg.Worker.MaxConcurrentRequests = t.cfg.Querier.MaxConcurrent
- level.Debug(util.Logger).Log("msg", "initializing querier worker", "config", fmt.Sprintf("%+v", t.cfg.Worker))
- worker, err = cortex_querier_worker.NewQuerierWorker(t.cfg.Worker, httpgrpc_server.NewServer(t.Server.HTTPServer.Handler), util.Logger, prometheus.DefaultRegisterer)
+ level.Debug(util_log.Logger).Log("msg", "initializing querier worker", "config", fmt.Sprintf("%+v", t.cfg.Worker))
+ worker, err = cortex_querier_worker.NewQuerierWorker(t.cfg.Worker, httpgrpc_server.NewServer(t.Server.HTTPServer.Handler), util_log.Logger, prometheus.DefaultRegisterer)
if err != nil {
return nil, err
}
@@ -250,7 +249,7 @@ func (t *Loki) initTableManager() (services.Service, error) {
t.cfg.TableManager.ChunkTables.InactiveReadScale.Enabled ||
t.cfg.TableManager.IndexTables.InactiveReadScale.Enabled) &&
t.cfg.StorageConfig.AWSStorageConfig.Metrics.URL == "" {
- level.Error(util.Logger).Log("msg", "WriteScale is enabled but no Metrics URL has been provided")
+ level.Error(util_log.Logger).Log("msg", "WriteScale is enabled but no Metrics URL has been provided")
os.Exit(1)
}
@@ -306,7 +305,7 @@ func (t *Loki) initStore() (_ services.Service, err error) {
}
}
- chunkStore, err := cortex_storage.NewStore(t.cfg.StorageConfig.Config, t.cfg.ChunkStoreConfig, t.cfg.SchemaConfig.SchemaConfig, t.overrides, prometheus.DefaultRegisterer, nil, util.Logger)
+ chunkStore, err := cortex_storage.NewStore(t.cfg.StorageConfig.Config, t.cfg.ChunkStoreConfig, t.cfg.SchemaConfig.SchemaConfig, t.overrides, prometheus.DefaultRegisterer, nil, util_log.Logger)
if err != nil {
return
}
@@ -364,7 +363,7 @@ type disabledShuffleShardingLimits struct{}
func (disabledShuffleShardingLimits) MaxQueriersPerUser(userID string) int { return 0 }
func (t *Loki) initQueryFrontend() (_ services.Service, err error) {
- level.Debug(util.Logger).Log("msg", "initializing query frontend", "config", fmt.Sprintf("%+v", t.cfg.Frontend))
+ level.Debug(util_log.Logger).Log("msg", "initializing query frontend", "config", fmt.Sprintf("%+v", t.cfg.Frontend))
roundTripper, frontendV1, _, err := frontend.InitFrontend(frontend.CombinedFrontendConfig{
// Don't set FrontendV2 field to make sure that only frontendV1 can be initialized.
@@ -372,7 +371,7 @@ func (t *Loki) initQueryFrontend() (_ services.Service, err error) {
FrontendV1: t.cfg.Frontend.FrontendV1,
CompressResponses: t.cfg.Frontend.CompressResponses,
DownstreamURL: t.cfg.Frontend.DownstreamURL,
- }, disabledShuffleShardingLimits{}, t.cfg.Server.GRPCListenPort, util.Logger, prometheus.DefaultRegisterer)
+ }, disabledShuffleShardingLimits{}, t.cfg.Server.GRPCListenPort, util_log.Logger, prometheus.DefaultRegisterer)
if err != nil {
return nil, err
}
@@ -381,13 +380,13 @@ func (t *Loki) initQueryFrontend() (_ services.Service, err error) {
frontendv1pb.RegisterFrontendServer(t.Server.GRPC, t.frontend)
}
- level.Debug(util.Logger).Log("msg", "initializing query range tripperware",
+ level.Debug(util_log.Logger).Log("msg", "initializing query range tripperware",
"config", fmt.Sprintf("%+v", t.cfg.QueryRange),
"limits", fmt.Sprintf("%+v", t.cfg.LimitsConfig),
)
tripperware, stopper, err := queryrange.NewTripperware(
t.cfg.QueryRange,
- util.Logger,
+ util_log.Logger,
t.overrides,
t.cfg.SchemaConfig.SchemaConfig,
t.cfg.Querier.QueryIngestersWithin,
@@ -399,7 +398,7 @@ func (t *Loki) initQueryFrontend() (_ services.Service, err error) {
t.stopper = stopper
roundTripper = tripperware(roundTripper)
- frontendHandler := transport.NewHandler(t.cfg.Frontend.Handler, roundTripper, util.Logger, prometheus.DefaultRegisterer)
+ frontendHandler := transport.NewHandler(t.cfg.Frontend.Handler, roundTripper, util_log.Logger, prometheus.DefaultRegisterer)
if t.cfg.Frontend.CompressResponses {
frontendHandler = gziphandler.GzipHandler(frontendHandler)
}
@@ -464,7 +463,7 @@ func (t *Loki) initRulerStorage() (_ services.Service, err error) {
// to determine if it's unconfigured. the following check, however, correctly tests this.
// Single binary integration tests will break if this ever drifts
if t.cfg.Target == All && t.cfg.Ruler.StoreConfig.IsDefaults() {
- level.Info(util.Logger).Log("msg", "RulerStorage is not configured in single binary mode and will not be started.")
+ level.Info(util_log.Logger).Log("msg", "RulerStorage is not configured in single binary mode and will not be started.")
return
}
@@ -489,7 +488,7 @@ func (t *Loki) initRulerStorage() (_ services.Service, err error) {
func (t *Loki) initRuler() (_ services.Service, err error) {
if t.RulerStorage == nil {
- level.Info(util.Logger).Log("msg", "RulerStorage is nil. Not starting the ruler.")
+ level.Info(util_log.Logger).Log("msg", "RulerStorage is nil. Not starting the ruler.")
return nil, nil
}
@@ -506,7 +505,7 @@ func (t *Loki) initRuler() (_ services.Service, err error) {
t.cfg.Ruler,
engine,
prometheus.DefaultRegisterer,
- util.Logger,
+ util_log.Logger,
t.RulerStorage,
t.overrides,
)
@@ -553,7 +552,7 @@ func (t *Loki) initMemberlistKV() (services.Service, error) {
ring.GetCodec(),
}
- t.memberlistKV = memberlist.NewKVInitService(&t.cfg.MemberlistKV, util.Logger)
+ t.memberlistKV = memberlist.NewKVInitService(&t.cfg.MemberlistKV, util_log.Logger)
return t.memberlistKV, nil
}
diff --git a/pkg/promtail/client/logger_test.go b/pkg/promtail/client/logger_test.go
index 825a2a7bd8fbc..7bee8d7a7eccf 100644
--- a/pkg/promtail/client/logger_test.go
+++ b/pkg/promtail/client/logger_test.go
@@ -5,8 +5,8 @@ import (
"testing"
"time"
- "github.com/cortexproject/cortex/pkg/util"
cortexflag "github.com/cortexproject/cortex/pkg/util/flagext"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
@@ -16,10 +16,10 @@ import (
)
func TestNewLogger(t *testing.T) {
- _, err := NewLogger(nil, util.Logger, flagext.LabelSet{}, []Config{}...)
+ _, err := NewLogger(nil, util_log.Logger, flagext.LabelSet{}, []Config{}...)
require.Error(t, err)
- l, err := NewLogger(nil, util.Logger, flagext.LabelSet{}, []Config{{URL: cortexflag.URLValue{URL: &url.URL{Host: "string"}}}}...)
+ l, err := NewLogger(nil, util_log.Logger, flagext.LabelSet{}, []Config{{URL: cortexflag.URLValue{URL: &url.URL{Host: "string"}}}}...)
require.NoError(t, err)
l.Chan() <- api.Entry{Labels: model.LabelSet{"foo": "bar"}, Entry: logproto.Entry{Timestamp: time.Now(), Line: "entry"}}
l.Stop()
diff --git a/pkg/promtail/client/multi_test.go b/pkg/promtail/client/multi_test.go
index 903252203d6e8..9d4e0a82d0a07 100644
--- a/pkg/promtail/client/multi_test.go
+++ b/pkg/promtail/client/multi_test.go
@@ -6,8 +6,8 @@ import (
"testing"
"time"
- "github.com/cortexproject/cortex/pkg/util"
"github.com/cortexproject/cortex/pkg/util/flagext"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
@@ -19,7 +19,7 @@ import (
)
func TestNewMulti(t *testing.T) {
- _, err := NewMulti(nil, util.Logger, lokiflag.LabelSet{}, []Config{}...)
+ _, err := NewMulti(nil, util_log.Logger, lokiflag.LabelSet{}, []Config{}...)
if err == nil {
t.Fatal("expected err but got nil")
}
@@ -38,7 +38,7 @@ func TestNewMulti(t *testing.T) {
ExternalLabels: lokiflag.LabelSet{LabelSet: model.LabelSet{"hi": "there"}},
}
- clients, err := NewMulti(prometheus.DefaultRegisterer, util.Logger, lokiflag.LabelSet{LabelSet: model.LabelSet{"order": "command"}}, cc1, cc2)
+ clients, err := NewMulti(prometheus.DefaultRegisterer, util_log.Logger, lokiflag.LabelSet{LabelSet: model.LabelSet{"order": "command"}}, cc1, cc2)
if err != nil {
t.Fatalf("expected err: nil got:%v", err)
}
diff --git a/pkg/promtail/positions/positions_test.go b/pkg/promtail/positions/positions_test.go
index 0919c62cf9122..a3fe6ca9f1ed8 100644
--- a/pkg/promtail/positions/positions_test.go
+++ b/pkg/promtail/positions/positions_test.go
@@ -7,7 +7,7 @@ import (
"testing"
"time"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/go-kit/kit/log"
"github.com/stretchr/testify/require"
)
@@ -151,7 +151,7 @@ func Test_ReadOnly(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- p, err := New(util.Logger, Config{
+ p, err := New(util_log.Logger, Config{
SyncPeriod: 20 * time.Nanosecond,
PositionsFile: temp,
ReadOnly: true,
diff --git a/pkg/promtail/promtail.go b/pkg/promtail/promtail.go
index 777df8c1cc4dc..cb59014914eda 100644
--- a/pkg/promtail/promtail.go
+++ b/pkg/promtail/promtail.go
@@ -3,7 +3,7 @@ package promtail
import (
"sync"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/go-kit/kit/log"
"github.com/prometheus/client_golang/prometheus"
@@ -48,7 +48,7 @@ func New(cfg config.Config, dryRun bool, opts ...Option) (*Promtail, error) {
// Initialize promtail with some defaults and allow the options to override
// them.
promtail := &Promtail{
- logger: util.Logger,
+ logger: util_log.Logger,
reg: prometheus.DefaultRegisterer,
}
for _, o := range opts {
diff --git a/pkg/promtail/promtail_test.go b/pkg/promtail/promtail_test.go
index da148086dd8db..d284f4d821422 100644
--- a/pkg/promtail/promtail_test.go
+++ b/pkg/promtail/promtail_test.go
@@ -16,6 +16,7 @@ import (
"github.com/cortexproject/cortex/pkg/util"
"github.com/cortexproject/cortex/pkg/util/flagext"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/pkg/errors"
@@ -46,7 +47,7 @@ func TestPromtail(t *testing.T) {
w := log.NewSyncWriter(os.Stderr)
logger := log.NewLogfmtLogger(w)
logger = level.NewFilter(logger, level.AllowInfo())
- util.Logger = logger
+ util_log.Logger = logger
initRandom()
dirName := "/tmp/promtail_test_" + randName()
@@ -428,7 +429,7 @@ func waitForEntries(timeoutSec int, handler *testServerHandler, expectedCounts m
if rcvd, ok := handler.receivedMap[file]; !ok || len(rcvd) != expectedCount {
waiting = waiting + " " + file
for _, e := range rcvd {
- level.Info(util.Logger).Log("file", file, "entry", e.Line)
+ level.Info(util_log.Logger).Log("file", file, "entry", e.Line)
}
}
}
diff --git a/pkg/promtail/targets/lokipush/pushtarget.go b/pkg/promtail/targets/lokipush/pushtarget.go
index fce793dcebe5f..9c3ea6d936cae 100644
--- a/pkg/promtail/targets/lokipush/pushtarget.go
+++ b/pkg/promtail/targets/lokipush/pushtarget.go
@@ -6,7 +6,7 @@ import (
"strings"
"time"
- cortex_util "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/imdario/mergo"
@@ -81,7 +81,7 @@ func (t *PushTarget) run() error {
// We don't want the /debug and /metrics endpoints running
t.config.Server.RegisterInstrumentation = false
- cortex_util.InitLogger(&t.config.Server)
+ util_log.InitLogger(&t.config.Server)
srv, err := server.New(t.config.Server)
if err != nil {
diff --git a/pkg/promtail/targets/stdin/stdin_target_manager_test.go b/pkg/promtail/targets/stdin/stdin_target_manager_test.go
index b23e33ca75461..9dd05aa05af12 100644
--- a/pkg/promtail/targets/stdin/stdin_target_manager_test.go
+++ b/pkg/promtail/targets/stdin/stdin_target_manager_test.go
@@ -7,7 +7,7 @@ import (
"strings"
"testing"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
@@ -80,7 +80,7 @@ func Test_newReaderTarget(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := fake.New(func() {})
- got, err := newReaderTarget(prometheus.DefaultRegisterer, util.Logger, tt.in, c, tt.cfg)
+ got, err := newReaderTarget(prometheus.DefaultRegisterer, util_log.Logger, tt.in, c, tt.cfg)
if (err != nil) != tt.wantErr {
t.Errorf("newReaderTarget() error = %v, wantErr %v", err, tt.wantErr)
return
@@ -120,7 +120,7 @@ func Test_Shutdown(t *testing.T) {
stdIn = newFakeStdin("line")
appMock := &mockShutdownable{called: make(chan bool, 1)}
recorder := fake.New(func() {})
- manager, err := NewStdinTargetManager(prometheus.DefaultRegisterer, util.Logger, appMock, recorder, []scrapeconfig.Config{{}})
+ manager, err := NewStdinTargetManager(prometheus.DefaultRegisterer, util_log.Logger, appMock, recorder, []scrapeconfig.Config{{}})
require.NoError(t, err)
require.NotNil(t, manager)
require.Equal(t, true, <-appMock.called)
@@ -140,12 +140,12 @@ func compareEntries(t *testing.T, expected, actual []api.Entry) {
func Test_StdinConfigs(t *testing.T) {
// should take the first config
- require.Equal(t, scrapeconfig.DefaultScrapeConfig, getStdinConfig(util.Logger, []scrapeconfig.Config{
+ require.Equal(t, scrapeconfig.DefaultScrapeConfig, getStdinConfig(util_log.Logger, []scrapeconfig.Config{
scrapeconfig.DefaultScrapeConfig,
{},
}))
// or use the default if none if provided
- require.Equal(t, defaultStdInCfg, getStdinConfig(util.Logger, []scrapeconfig.Config{}))
+ require.Equal(t, defaultStdInCfg, getStdinConfig(util_log.Logger, []scrapeconfig.Config{}))
}
var stagesConfig = `
diff --git a/pkg/promtail/targets/windows/target.go b/pkg/promtail/targets/windows/target.go
index 30734ef977c61..0048a378d7583 100755
--- a/pkg/promtail/targets/windows/target.go
+++ b/pkg/promtail/targets/windows/target.go
@@ -8,7 +8,7 @@ import (
"sync"
"time"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/prometheus/common/model"
@@ -112,7 +112,7 @@ func (t *Target) loop() {
if err != nil {
if err != win_eventlog.ERROR_NO_MORE_ITEMS {
t.err = err
- level.Error(util.Logger).Log("msg", "error fetching events", "err", err)
+ level.Error(util_log.Logger).Log("msg", "error fetching events", "err", err)
}
break loop
}
@@ -122,7 +122,7 @@ func (t *Target) loop() {
t.handler.Chan() <- entry
if err := t.bm.save(handles[i]); err != nil {
t.err = err
- level.Error(util.Logger).Log("msg", "error saving bookmark", "err", err)
+ level.Error(util_log.Logger).Log("msg", "error saving bookmark", "err", err)
}
}
win_eventlog.Close(handles)
diff --git a/pkg/promtail/targets/windows/target_test.go b/pkg/promtail/targets/windows/target_test.go
index b23dfc5ca8167..be857c73141b1 100755
--- a/pkg/promtail/targets/windows/target_test.go
+++ b/pkg/promtail/targets/windows/target_test.go
@@ -6,7 +6,7 @@ import (
"testing"
"time"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
jsoniter "github.com/json-iterator/go"
"github.com/prometheus/common/model"
"github.com/spf13/afero"
@@ -26,7 +26,7 @@ func init() {
// Enable debug logging
cfg := &server.Config{}
_ = cfg.LogLevel.Set("debug")
- util.InitLogger(cfg)
+ util_log.InitLogger(cfg)
}
// Test that you can use to generate event logs locally.
@@ -57,7 +57,7 @@ func Test_GetCreateBookrmark(t *testing.T) {
}
client := fake.New(func() {})
defer client.Stop()
- ta, err := New(util.Logger, client, nil, &scrapeconfig.WindowsEventsTargetConfig{
+ ta, err := New(util_log.Logger, client, nil, &scrapeconfig.WindowsEventsTargetConfig{
BoorkmarkPath: "c:foo.xml",
PollInterval: time.Microsecond,
Query: `<QueryList>
@@ -91,7 +91,7 @@ func Test_GetCreateBookrmark(t *testing.T) {
client = fake.New(func() {})
defer client.Stop()
- ta, err = New(util.Logger, client, nil, &scrapeconfig.WindowsEventsTargetConfig{
+ ta, err = New(util_log.Logger, client, nil, &scrapeconfig.WindowsEventsTargetConfig{
BoorkmarkPath: "c:foo.xml",
PollInterval: time.Microsecond,
Query: `<QueryList>
@@ -120,7 +120,7 @@ func Test_GetCreateBookrmark(t *testing.T) {
func Test_renderEntries(t *testing.T) {
client := fake.New(func() {})
defer client.Stop()
- ta, err := New(util.Logger, client, nil, &scrapeconfig.WindowsEventsTargetConfig{
+ ta, err := New(util_log.Logger, client, nil, &scrapeconfig.WindowsEventsTargetConfig{
Labels: model.LabelSet{"job": "windows-events"},
EventlogName: "Application",
Query: "*",
diff --git a/pkg/promtail/targets/windows/win_eventlog/win_eventlog.go b/pkg/promtail/targets/windows/win_eventlog/win_eventlog.go
index 0ccf0fa6ae5a4..982e2ace6697b 100755
--- a/pkg/promtail/targets/windows/win_eventlog/win_eventlog.go
+++ b/pkg/promtail/targets/windows/win_eventlog/win_eventlog.go
@@ -32,7 +32,7 @@ import (
"strings"
"syscall"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/go-kit/kit/log/level"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
@@ -365,7 +365,7 @@ func EvtSubscribe(logName, xquery string) (EvtHandle, error) {
if err != nil {
return 0, err
}
- level.Debug(util.Logger).Log("msg", "Subcribed with handle id", "id", subsHandle)
+ level.Debug(util_log.Logger).Log("msg", "Subcribed with handle id", "id", subsHandle)
return subsHandle, nil
}
@@ -394,7 +394,7 @@ func EvtSubscribeWithBookmark(logName, xquery string, bookMark EvtHandle) (EvtHa
if err != nil {
return 0, err
}
- level.Debug(util.Logger).Log("msg", "Subcribed with handle id", "id", subsHandle)
+ level.Debug(util_log.Logger).Log("msg", "Subcribed with handle id", "id", subsHandle)
return subsHandle, nil
}
diff --git a/pkg/querier/http.go b/pkg/querier/http.go
index b7db2456172a2..e265099a67a40 100644
--- a/pkg/querier/http.go
+++ b/pkg/querier/http.go
@@ -5,7 +5,6 @@ import (
"net/http"
"time"
- "github.com/cortexproject/cortex/pkg/util"
util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/go-kit/kit/log/level"
"github.com/gorilla/websocket"
@@ -199,7 +198,7 @@ func (q *Querier) TailHandler(w http.ResponseWriter, r *http.Request) {
upgrader := websocket.Upgrader{
CheckOrigin: func(r *http.Request) bool { return true },
}
- logger := util_log.WithContext(r.Context(), util.Logger)
+ logger := util_log.WithContext(r.Context(), util_log.Logger)
req, err := loghttp.ParseTailQuery(r)
if err != nil {
diff --git a/pkg/querier/ingester_querier.go b/pkg/querier/ingester_querier.go
index b7cf704f22cd9..81c7c7de22496 100644
--- a/pkg/querier/ingester_querier.go
+++ b/pkg/querier/ingester_querier.go
@@ -9,7 +9,7 @@ import (
cortex_distributor "github.com/cortexproject/cortex/pkg/distributor"
"github.com/cortexproject/cortex/pkg/ring"
ring_client "github.com/cortexproject/cortex/pkg/ring/client"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/cortexproject/cortex/pkg/util/services"
"github.com/pkg/errors"
"github.com/prometheus/common/model"
@@ -48,7 +48,7 @@ func NewIngesterQuerier(clientCfg client.Config, ring ring.ReadRing, extraQueryD
func newIngesterQuerier(clientCfg client.Config, ring ring.ReadRing, extraQueryDelay time.Duration, clientFactory ring_client.PoolFactory) (*IngesterQuerier, error) {
iq := IngesterQuerier{
ring: ring,
- pool: cortex_distributor.NewPool(clientCfg.PoolConfig, ring, clientFactory, util.Logger),
+ pool: cortex_distributor.NewPool(clientCfg.PoolConfig, ring, clientFactory, util_log.Logger),
extraQueryDelay: extraQueryDelay,
}
@@ -74,7 +74,7 @@ func (q *IngesterQuerier) forAllIngesters(ctx context.Context, f func(logproto.Q
// forGivenIngesters runs f, in parallel, for given ingesters
// TODO taken from Cortex, see if we can refactor out an usable interface.
func (q *IngesterQuerier) forGivenIngesters(ctx context.Context, replicationSet ring.ReplicationSet, f func(logproto.QuerierClient) (interface{}, error)) ([]responseFromIngesters, error) {
- results, err := replicationSet.Do(ctx, q.extraQueryDelay, func(ctx context.Context, ingester *ring.IngesterDesc) (interface{}, error) {
+ results, err := replicationSet.Do(ctx, q.extraQueryDelay, func(ctx context.Context, ingester *ring.InstanceDesc) (interface{}, error) {
client, err := q.pool.GetClientFor(ingester.Addr)
if err != nil {
return nil, err
@@ -175,7 +175,7 @@ func (q *IngesterQuerier) TailDisconnectedIngesters(ctx context.Context, req *lo
}
// Look for disconnected ingesters or new one we should (re)connect to
- reconnectIngesters := []ring.IngesterDesc{}
+ reconnectIngesters := []ring.InstanceDesc{}
for _, ingester := range replicationSet.Ingesters {
if _, ok := connected[ingester.Addr]; ok {
@@ -232,7 +232,7 @@ func (q *IngesterQuerier) TailersCount(ctx context.Context) ([]uint32, error) {
}
// we want to check count of active tailers with only active ingesters
- ingesters := make([]ring.IngesterDesc, 0, 1)
+ ingesters := make([]ring.InstanceDesc, 0, 1)
for i := range replicationSet.Ingesters {
if replicationSet.Ingesters[i].State == ring.ACTIVE {
ingesters = append(ingesters, replicationSet.Ingesters[i])
diff --git a/pkg/querier/ingester_querier_test.go b/pkg/querier/ingester_querier_test.go
index 5800e225cb304..0c52a26cea49d 100644
--- a/pkg/querier/ingester_querier_test.go
+++ b/pkg/querier/ingester_querier_test.go
@@ -19,47 +19,47 @@ func TestQuerier_tailDisconnectedIngesters(t *testing.T) {
tests := map[string]struct {
connectedIngestersAddr []string
- ringIngesters []ring.IngesterDesc
+ ringIngesters []ring.InstanceDesc
expectedClientsAddr []string
}{
"no connected ingesters and empty ring": {
connectedIngestersAddr: []string{},
- ringIngesters: []ring.IngesterDesc{},
+ ringIngesters: []ring.InstanceDesc{},
expectedClientsAddr: []string{},
},
"no connected ingesters and ring containing new ingesters": {
connectedIngestersAddr: []string{},
- ringIngesters: []ring.IngesterDesc{mockIngesterDesc("1.1.1.1", ring.ACTIVE)},
+ ringIngesters: []ring.InstanceDesc{mockInstanceDesc("1.1.1.1", ring.ACTIVE)},
expectedClientsAddr: []string{"1.1.1.1"},
},
"connected ingesters and ring contain the same ingesters": {
connectedIngestersAddr: []string{"1.1.1.1", "2.2.2.2"},
- ringIngesters: []ring.IngesterDesc{mockIngesterDesc("2.2.2.2", ring.ACTIVE), mockIngesterDesc("1.1.1.1", ring.ACTIVE)},
+ ringIngesters: []ring.InstanceDesc{mockInstanceDesc("2.2.2.2", ring.ACTIVE), mockInstanceDesc("1.1.1.1", ring.ACTIVE)},
expectedClientsAddr: []string{},
},
"ring contains new ingesters compared to the connected one": {
connectedIngestersAddr: []string{"1.1.1.1"},
- ringIngesters: []ring.IngesterDesc{mockIngesterDesc("1.1.1.1", ring.ACTIVE), mockIngesterDesc("2.2.2.2", ring.ACTIVE), mockIngesterDesc("3.3.3.3", ring.ACTIVE)},
+ ringIngesters: []ring.InstanceDesc{mockInstanceDesc("1.1.1.1", ring.ACTIVE), mockInstanceDesc("2.2.2.2", ring.ACTIVE), mockInstanceDesc("3.3.3.3", ring.ACTIVE)},
expectedClientsAddr: []string{"2.2.2.2", "3.3.3.3"},
},
"connected ingesters contain ingesters not in the ring anymore": {
connectedIngestersAddr: []string{"1.1.1.1", "2.2.2.2", "3.3.3.3"},
- ringIngesters: []ring.IngesterDesc{mockIngesterDesc("1.1.1.1", ring.ACTIVE), mockIngesterDesc("3.3.3.3", ring.ACTIVE)},
+ ringIngesters: []ring.InstanceDesc{mockInstanceDesc("1.1.1.1", ring.ACTIVE), mockInstanceDesc("3.3.3.3", ring.ACTIVE)},
expectedClientsAddr: []string{},
},
"connected ingesters contain ingesters not in the ring anymore and the ring contains new ingesters too": {
connectedIngestersAddr: []string{"1.1.1.1", "2.2.2.2", "3.3.3.3"},
- ringIngesters: []ring.IngesterDesc{mockIngesterDesc("1.1.1.1", ring.ACTIVE), mockIngesterDesc("3.3.3.3", ring.ACTIVE), mockIngesterDesc("4.4.4.4", ring.ACTIVE)},
+ ringIngesters: []ring.InstanceDesc{mockInstanceDesc("1.1.1.1", ring.ACTIVE), mockInstanceDesc("3.3.3.3", ring.ACTIVE), mockInstanceDesc("4.4.4.4", ring.ACTIVE)},
expectedClientsAddr: []string{"4.4.4.4"},
},
"ring contains ingester in LEAVING state not listed in the connected ingesters": {
connectedIngestersAddr: []string{"1.1.1.1"},
- ringIngesters: []ring.IngesterDesc{mockIngesterDesc("1.1.1.1", ring.ACTIVE), mockIngesterDesc("2.2.2.2", ring.LEAVING)},
+ ringIngesters: []ring.InstanceDesc{mockInstanceDesc("1.1.1.1", ring.ACTIVE), mockInstanceDesc("2.2.2.2", ring.LEAVING)},
expectedClientsAddr: []string{},
},
"ring contains ingester in PENDING state not listed in the connected ingesters": {
connectedIngestersAddr: []string{"1.1.1.1"},
- ringIngesters: []ring.IngesterDesc{mockIngesterDesc("1.1.1.1", ring.ACTIVE), mockIngesterDesc("2.2.2.2", ring.PENDING)},
+ ringIngesters: []ring.InstanceDesc{mockInstanceDesc("1.1.1.1", ring.ACTIVE), mockInstanceDesc("2.2.2.2", ring.PENDING)},
expectedClientsAddr: []string{},
},
}
diff --git a/pkg/querier/querier_mock_test.go b/pkg/querier/querier_mock_test.go
index 4afe8369615e3..6a452c7ab237c 100644
--- a/pkg/querier/querier_mock_test.go
+++ b/pkg/querier/querier_mock_test.go
@@ -287,7 +287,7 @@ type readRingMock struct {
replicationSet ring.ReplicationSet
}
-func newReadRingMock(ingesters []ring.IngesterDesc) *readRingMock {
+func newReadRingMock(ingesters []ring.InstanceDesc) *readRingMock {
return &readRingMock{
replicationSet: ring.ReplicationSet{
Ingesters: ingesters,
@@ -302,7 +302,7 @@ func (r *readRingMock) Describe(ch chan<- *prometheus.Desc) {
func (r *readRingMock) Collect(ch chan<- prometheus.Metric) {
}
-func (r *readRingMock) Get(key uint32, op ring.Operation, buf []ring.IngesterDesc, _ []string, _ []string) (ring.ReplicationSet, error) {
+func (r *readRingMock) Get(key uint32, op ring.Operation, buf []ring.InstanceDesc, _ []string, _ []string) (ring.ReplicationSet, error) {
return r.replicationSet, nil
}
@@ -352,13 +352,13 @@ func (r *readRingMock) ShuffleShardWithLookback(identifier string, size int, loo
}
func mockReadRingWithOneActiveIngester() *readRingMock {
- return newReadRingMock([]ring.IngesterDesc{
+ return newReadRingMock([]ring.InstanceDesc{
{Addr: "test", Timestamp: time.Now().UnixNano(), State: ring.ACTIVE, Tokens: []uint32{1, 2, 3}},
})
}
-func mockIngesterDesc(addr string, state ring.IngesterState) ring.IngesterDesc {
- return ring.IngesterDesc{
+func mockInstanceDesc(addr string, state ring.IngesterState) ring.InstanceDesc {
+ return ring.InstanceDesc{
Addr: addr,
Timestamp: time.Now().UnixNano(),
State: state,
diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go
index 6fcbd05c3eca7..66c654fada96b 100644
--- a/pkg/querier/querier_test.go
+++ b/pkg/querier/querier_test.go
@@ -453,31 +453,31 @@ func TestQuerier_concurrentTailLimits(t *testing.T) {
t.Parallel()
tests := map[string]struct {
- ringIngesters []ring.IngesterDesc
+ ringIngesters []ring.InstanceDesc
expectedError error
tailersCount uint32
}{
"empty ring": {
- ringIngesters: []ring.IngesterDesc{},
+ ringIngesters: []ring.InstanceDesc{},
expectedError: httpgrpc.Errorf(http.StatusInternalServerError, "no active ingester found"),
},
"ring containing one pending ingester": {
- ringIngesters: []ring.IngesterDesc{mockIngesterDesc("1.1.1.1", ring.PENDING)},
+ ringIngesters: []ring.InstanceDesc{mockInstanceDesc("1.1.1.1", ring.PENDING)},
expectedError: httpgrpc.Errorf(http.StatusInternalServerError, "no active ingester found"),
},
"ring containing one active ingester and 0 active tailers": {
- ringIngesters: []ring.IngesterDesc{mockIngesterDesc("1.1.1.1", ring.ACTIVE)},
+ ringIngesters: []ring.InstanceDesc{mockInstanceDesc("1.1.1.1", ring.ACTIVE)},
},
"ring containing one active ingester and 1 active tailer": {
- ringIngesters: []ring.IngesterDesc{mockIngesterDesc("1.1.1.1", ring.ACTIVE)},
+ ringIngesters: []ring.InstanceDesc{mockInstanceDesc("1.1.1.1", ring.ACTIVE)},
tailersCount: 1,
},
"ring containing one pending and active ingester with 1 active tailer": {
- ringIngesters: []ring.IngesterDesc{mockIngesterDesc("1.1.1.1", ring.PENDING), mockIngesterDesc("2.2.2.2", ring.ACTIVE)},
+ ringIngesters: []ring.InstanceDesc{mockInstanceDesc("1.1.1.1", ring.PENDING), mockInstanceDesc("2.2.2.2", ring.ACTIVE)},
tailersCount: 1,
},
"ring containing one active ingester and max active tailers": {
- ringIngesters: []ring.IngesterDesc{mockIngesterDesc("1.1.1.1", ring.ACTIVE)},
+ ringIngesters: []ring.InstanceDesc{mockInstanceDesc("1.1.1.1", ring.ACTIVE)},
expectedError: httpgrpc.Errorf(http.StatusBadRequest,
"max concurrent tail requests limit exceeded, count > limit (%d > %d)", 6, 5),
tailersCount: 5,
diff --git a/pkg/querier/queryrange/limits_test.go b/pkg/querier/queryrange/limits_test.go
index cbdaaa562d436..0b42ba8f5f493 100644
--- a/pkg/querier/queryrange/limits_test.go
+++ b/pkg/querier/queryrange/limits_test.go
@@ -10,7 +10,7 @@ import (
"github.com/cortexproject/cortex/pkg/chunk"
"github.com/cortexproject/cortex/pkg/querier/queryrange"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/promql"
"github.com/stretchr/testify/require"
@@ -62,7 +62,7 @@ func Test_seriesLimiter(t *testing.T) {
cfg.SplitQueriesByInterval = time.Hour
cfg.CacheResults = false
// split in 6 with 4 in // max.
- tpw, stopper, err := NewTripperware(cfg, util.Logger, fakeLimits{maxSeries: 1, maxQueryParallelism: 2}, chunk.SchemaConfig{}, 0, nil)
+ tpw, stopper, err := NewTripperware(cfg, util_log.Logger, fakeLimits{maxSeries: 1, maxQueryParallelism: 2}, chunk.SchemaConfig{}, 0, nil)
if stopper != nil {
defer stopper.Stop()
}
diff --git a/pkg/querier/queryrange/roundtrip_test.go b/pkg/querier/queryrange/roundtrip_test.go
index aaaafc302adea..d15a8ea1488b2 100644
--- a/pkg/querier/queryrange/roundtrip_test.go
+++ b/pkg/querier/queryrange/roundtrip_test.go
@@ -16,7 +16,7 @@ import (
"github.com/cortexproject/cortex/pkg/chunk"
"github.com/cortexproject/cortex/pkg/chunk/cache"
"github.com/cortexproject/cortex/pkg/querier/queryrange"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/promql/parser"
@@ -92,7 +92,7 @@ var (
// those tests are mostly for testing the glue between all component and make sure they activate correctly.
func TestMetricsTripperware(t *testing.T) {
- tpw, stopper, err := NewTripperware(testConfig, util.Logger, fakeLimits{maxSeries: math.MaxInt32}, chunk.SchemaConfig{}, 0, nil)
+ tpw, stopper, err := NewTripperware(testConfig, util_log.Logger, fakeLimits{maxSeries: math.MaxInt32}, chunk.SchemaConfig{}, 0, nil)
if stopper != nil {
defer stopper.Stop()
}
@@ -156,7 +156,7 @@ func TestMetricsTripperware(t *testing.T) {
func TestLogFilterTripperware(t *testing.T) {
- tpw, stopper, err := NewTripperware(testConfig, util.Logger, fakeLimits{}, chunk.SchemaConfig{}, 0, nil)
+ tpw, stopper, err := NewTripperware(testConfig, util_log.Logger, fakeLimits{}, chunk.SchemaConfig{}, 0, nil)
if stopper != nil {
defer stopper.Stop()
}
@@ -204,7 +204,7 @@ func TestLogFilterTripperware(t *testing.T) {
func TestSeriesTripperware(t *testing.T) {
- tpw, stopper, err := NewTripperware(testConfig, util.Logger, fakeLimits{}, chunk.SchemaConfig{}, 0, nil)
+ tpw, stopper, err := NewTripperware(testConfig, util_log.Logger, fakeLimits{}, chunk.SchemaConfig{}, 0, nil)
if stopper != nil {
defer stopper.Stop()
}
@@ -246,7 +246,7 @@ func TestSeriesTripperware(t *testing.T) {
func TestLabelsTripperware(t *testing.T) {
- tpw, stopper, err := NewTripperware(testConfig, util.Logger, fakeLimits{}, chunk.SchemaConfig{}, 0, nil)
+ tpw, stopper, err := NewTripperware(testConfig, util_log.Logger, fakeLimits{}, chunk.SchemaConfig{}, 0, nil)
if stopper != nil {
defer stopper.Stop()
}
@@ -292,7 +292,7 @@ func TestLabelsTripperware(t *testing.T) {
}
func TestLogNoRegex(t *testing.T) {
- tpw, stopper, err := NewTripperware(testConfig, util.Logger, fakeLimits{}, chunk.SchemaConfig{}, 0, nil)
+ tpw, stopper, err := NewTripperware(testConfig, util_log.Logger, fakeLimits{}, chunk.SchemaConfig{}, 0, nil)
if stopper != nil {
defer stopper.Stop()
}
@@ -326,7 +326,7 @@ func TestLogNoRegex(t *testing.T) {
}
func TestUnhandledPath(t *testing.T) {
- tpw, stopper, err := NewTripperware(testConfig, util.Logger, fakeLimits{}, chunk.SchemaConfig{}, 0, nil)
+ tpw, stopper, err := NewTripperware(testConfig, util_log.Logger, fakeLimits{}, chunk.SchemaConfig{}, 0, nil)
if stopper != nil {
defer stopper.Stop()
}
@@ -350,7 +350,7 @@ func TestUnhandledPath(t *testing.T) {
}
func TestRegexpParamsSupport(t *testing.T) {
- tpw, stopper, err := NewTripperware(testConfig, util.Logger, fakeLimits{}, chunk.SchemaConfig{}, 0, nil)
+ tpw, stopper, err := NewTripperware(testConfig, util_log.Logger, fakeLimits{}, chunk.SchemaConfig{}, 0, nil)
if stopper != nil {
defer stopper.Stop()
}
@@ -429,7 +429,7 @@ func TestPostQueries(t *testing.T) {
}
func TestEntriesLimitsTripperware(t *testing.T) {
- tpw, stopper, err := NewTripperware(testConfig, util.Logger, fakeLimits{maxEntriesLimitPerQuery: 5000}, chunk.SchemaConfig{}, 0, nil)
+ tpw, stopper, err := NewTripperware(testConfig, util_log.Logger, fakeLimits{maxEntriesLimitPerQuery: 5000}, chunk.SchemaConfig{}, 0, nil)
if stopper != nil {
defer stopper.Stop()
}
@@ -460,7 +460,7 @@ func TestEntriesLimitsTripperware(t *testing.T) {
}
func TestEntriesLimitWithZeroTripperware(t *testing.T) {
- tpw, stopper, err := NewTripperware(testConfig, util.Logger, fakeLimits{}, chunk.SchemaConfig{}, 0, nil)
+ tpw, stopper, err := NewTripperware(testConfig, util_log.Logger, fakeLimits{}, chunk.SchemaConfig{}, 0, nil)
if stopper != nil {
defer stopper.Stop()
}
diff --git a/pkg/querier/tail.go b/pkg/querier/tail.go
index 97ef6119dd057..78dd6e5d96461 100644
--- a/pkg/querier/tail.go
+++ b/pkg/querier/tail.go
@@ -5,7 +5,6 @@ import (
"sync"
"time"
- "github.com/cortexproject/cortex/pkg/util"
util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/go-kit/kit/log/level"
"github.com/pkg/errors"
@@ -82,11 +81,11 @@ func (t *Tailer) loop() {
case <-checkConnectionTicker.C:
// Try to reconnect dropped ingesters and connect to new ingesters
if err := t.checkIngesterConnections(); err != nil {
- level.Error(util.Logger).Log("msg", "Error reconnecting to disconnected ingesters", "err", err)
+ level.Error(util_log.Logger).Log("msg", "Error reconnecting to disconnected ingesters", "err", err)
}
case <-tailMaxDurationTicker.C:
if err := t.close(); err != nil {
- level.Error(util.Logger).Log("msg", "Error closing Tailer", "err", err)
+ level.Error(util_log.Logger).Log("msg", "Error closing Tailer", "err", err)
}
t.closeErrChan <- errors.New("reached tail max duration limit")
return
@@ -128,12 +127,12 @@ func (t *Tailer) loop() {
if numClients == 0 {
// All the connections to ingesters are dropped, try reconnecting or return error
if err := t.checkIngesterConnections(); err != nil {
- level.Error(util.Logger).Log("msg", "Error reconnecting to ingesters", "err", err)
+ level.Error(util_log.Logger).Log("msg", "Error reconnecting to ingesters", "err", err)
} else {
continue
}
if err := t.close(); err != nil {
- level.Error(util.Logger).Log("msg", "Error closing Tailer", "err", err)
+ level.Error(util_log.Logger).Log("msg", "Error closing Tailer", "err", err)
}
t.closeErrChan <- errors.New("all ingesters closed the connection")
return
@@ -199,7 +198,7 @@ func (t *Tailer) readTailClient(addr string, querierTailClient logproto.Querier_
var err error
defer t.dropTailClient(addr)
- logger := util_log.WithContext(querierTailClient.Context(), util.Logger)
+ logger := util_log.WithContext(querierTailClient.Context(), util_log.Logger)
for {
if t.stopped {
if err := querierTailClient.CloseSend(); err != nil {
diff --git a/pkg/storage/async_store.go b/pkg/storage/async_store.go
index 35fc38a08a10a..e80bacb537596 100644
--- a/pkg/storage/async_store.go
+++ b/pkg/storage/async_store.go
@@ -5,7 +5,7 @@ import (
"fmt"
"time"
- pkg_util "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/cortexproject/cortex/pkg/util/spanlogger"
"github.com/go-kit/kit/log/level"
@@ -55,7 +55,7 @@ func (a *AsyncStore) GetChunkRefs(ctx context.Context, userID string, from, thro
if a.queryIngestersWithin != 0 {
// don't query ingesters if the query does not overlap with queryIngestersWithin.
if !through.After(model.Now().Add(-a.queryIngestersWithin)) {
- level.Debug(pkg_util.Logger).Log("msg", "skipping querying ingesters for chunk ids", "query-from", from, "query-through", through)
+ level.Debug(util_log.Logger).Log("msg", "skipping querying ingesters for chunk ids", "query-from", from, "query-through", through)
errs <- nil
return
}
@@ -66,7 +66,7 @@ func (a *AsyncStore) GetChunkRefs(ctx context.Context, userID string, from, thro
if err == nil {
level.Debug(spanLogger).Log("ingester-chunks-count", len(ingesterChunks))
- level.Debug(pkg_util.Logger).Log("msg", "got chunk ids from ingester", "count", len(ingesterChunks))
+ level.Debug(util_log.Logger).Log("msg", "got chunk ids from ingester", "count", len(ingesterChunks))
}
errs <- err
}()
@@ -87,7 +87,7 @@ func (a *AsyncStore) GetChunkRefs(ctx context.Context, userID string, from, thro
func (a *AsyncStore) mergeIngesterAndStoreChunks(userID string, storeChunks [][]chunk.Chunk, fetchers []*chunk.Fetcher, ingesterChunkIDs []string) ([][]chunk.Chunk, []*chunk.Fetcher, error) {
ingesterChunkIDs = filterDuplicateChunks(storeChunks, ingesterChunkIDs)
- level.Debug(pkg_util.Logger).Log("msg", "post-filtering ingester chunks", "count", len(ingesterChunkIDs))
+ level.Debug(util_log.Logger).Log("msg", "post-filtering ingester chunks", "count", len(ingesterChunkIDs))
fetcherToChunksGroupIdx := make(map[*chunk.Fetcher]int, len(fetchers))
diff --git a/pkg/storage/batch.go b/pkg/storage/batch.go
index aacbdcf1cd068..00a2bc9690722 100644
--- a/pkg/storage/batch.go
+++ b/pkg/storage/batch.go
@@ -7,7 +7,7 @@ import (
"github.com/cortexproject/cortex/pkg/chunk"
"github.com/cortexproject/cortex/pkg/querier/astmapper"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/cortexproject/cortex/pkg/util/spanlogger"
"github.com/go-kit/kit/log/level"
"github.com/pkg/errors"
@@ -664,9 +664,9 @@ func fetchLazyChunks(ctx context.Context, chunks []*LazyChunk) error {
}
chks, err := fetcher.FetchChunks(ctx, chks, keys)
if err != nil {
- level.Error(util.Logger).Log("msg", "error fetching chunks", "err", err)
+ level.Error(util_log.Logger).Log("msg", "error fetching chunks", "err", err)
if isInvalidChunkError(err) {
- level.Error(util.Logger).Log("msg", "checksum of chunks does not match", "err", chunk.ErrInvalidChecksum)
+ level.Error(util_log.Logger).Log("msg", "checksum of chunks does not match", "err", chunk.ErrInvalidChecksum)
errChan <- nil
return
}
diff --git a/pkg/storage/hack/main.go b/pkg/storage/hack/main.go
index 0a086b3368128..2b05362ad3ae5 100644
--- a/pkg/storage/hack/main.go
+++ b/pkg/storage/hack/main.go
@@ -18,7 +18,7 @@ import (
"github.com/cortexproject/cortex/pkg/chunk/local"
"github.com/cortexproject/cortex/pkg/chunk/storage"
"github.com/cortexproject/cortex/pkg/ingester/client"
- cortex_util "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/grafana/loki/pkg/chunkenc"
"github.com/grafana/loki/pkg/logproto"
@@ -74,7 +74,7 @@ func getStore() (lstore.Store, error) {
&validation.Overrides{},
prometheus.DefaultRegisterer,
nil,
- cortex_util.Logger,
+ util_log.Logger,
)
if err != nil {
return nil, err
diff --git a/pkg/storage/store_test.go b/pkg/storage/store_test.go
index 132f33de6b4e1..5bd9ff441525c 100644
--- a/pkg/storage/store_test.go
+++ b/pkg/storage/store_test.go
@@ -12,7 +12,7 @@ import (
"testing"
"time"
- cortex_util "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/stretchr/testify/assert"
@@ -219,7 +219,7 @@ func getLocalStore() Store {
chunkStore, err := storage.NewStore(
storeConfig.Config,
chunk.StoreConfig{},
- schemaConfig.SchemaConfig, limits, nil, nil, cortex_util.Logger)
+ schemaConfig.SchemaConfig, limits, nil, nil, util_log.Logger)
if err != nil {
panic(err)
@@ -808,7 +808,7 @@ func TestStore_MultipleBoltDBShippersInConfig(t *testing.T) {
limits,
nil,
nil,
- cortex_util.Logger,
+ util_log.Logger,
)
require.NoError(t, err)
store, err := NewStore(config, schemaConfig, chunkStore, nil)
@@ -854,7 +854,7 @@ func TestStore_MultipleBoltDBShippersInConfig(t *testing.T) {
limits,
nil,
nil,
- cortex_util.Logger,
+ util_log.Logger,
)
require.NoError(t, err)
diff --git a/pkg/storage/stores/shipper/compactor/compactor.go b/pkg/storage/stores/shipper/compactor/compactor.go
index 7953af52189ff..56c04902a26dd 100644
--- a/pkg/storage/stores/shipper/compactor/compactor.go
+++ b/pkg/storage/stores/shipper/compactor/compactor.go
@@ -12,7 +12,7 @@ import (
"github.com/cortexproject/cortex/pkg/chunk"
"github.com/cortexproject/cortex/pkg/chunk/storage"
chunk_util "github.com/cortexproject/cortex/pkg/chunk/util"
- pkg_util "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/cortexproject/cortex/pkg/util/services"
"github.com/go-kit/kit/log/level"
"github.com/prometheus/client_golang/prometheus"
@@ -80,7 +80,7 @@ func (c *Compactor) loop(ctx context.Context) error {
runCompaction := func() {
err := c.Run(ctx)
if err != nil {
- level.Error(pkg_util.Logger).Log("msg", "failed to run compaction", "err", err)
+ level.Error(util_log.Logger).Log("msg", "failed to run compaction", "err", err)
}
}
@@ -126,14 +126,14 @@ func (c *Compactor) Run(ctx context.Context) error {
table, err := newTable(ctx, filepath.Join(c.cfg.WorkingDirectory, tableName), c.objectClient)
if err != nil {
status = statusFailure
- level.Error(pkg_util.Logger).Log("msg", "failed to initialize table for compaction", "table", tableName, "err", err)
+ level.Error(util_log.Logger).Log("msg", "failed to initialize table for compaction", "table", tableName, "err", err)
continue
}
err = table.compact()
if err != nil {
status = statusFailure
- level.Error(pkg_util.Logger).Log("msg", "failed to compact files", "table", tableName, "err", err)
+ level.Error(util_log.Logger).Log("msg", "failed to compact files", "table", tableName, "err", err)
}
// check if context was cancelled before going for next table.
diff --git a/pkg/storage/stores/shipper/compactor/table.go b/pkg/storage/stores/shipper/compactor/table.go
index b0c1394260983..598bad2f2c50f 100644
--- a/pkg/storage/stores/shipper/compactor/table.go
+++ b/pkg/storage/stores/shipper/compactor/table.go
@@ -10,7 +10,7 @@ import (
"github.com/cortexproject/cortex/pkg/chunk"
chunk_util "github.com/cortexproject/cortex/pkg/chunk/util"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
util_math "github.com/cortexproject/cortex/pkg/util/math"
"github.com/go-kit/kit/log/level"
"go.etcd.io/bbolt"
@@ -68,17 +68,17 @@ func (t *table) compact() error {
return err
}
- level.Info(util.Logger).Log("msg", "listed files", "count", len(objects))
+ level.Info(util_log.Logger).Log("msg", "listed files", "count", len(objects))
if len(objects) < compactMinDBs {
- level.Info(util.Logger).Log("msg", fmt.Sprintf("skipping compaction since we have just %d files in storage", len(objects)))
+ level.Info(util_log.Logger).Log("msg", fmt.Sprintf("skipping compaction since we have just %d files in storage", len(objects)))
return nil
}
defer func() {
err := t.cleanup()
if err != nil {
- level.Error(util.Logger).Log("msg", "failed to cleanup table", "name", t.name)
+ level.Error(util_log.Logger).Log("msg", "failed to cleanup table", "name", t.name)
}
}()
@@ -87,7 +87,7 @@ func (t *table) compact() error {
return err
}
- level.Info(util.Logger).Log("msg", "starting compaction of dbs")
+ level.Info(util_log.Logger).Log("msg", "starting compaction of dbs")
errChan := make(chan error)
readObjectChan := make(chan string)
@@ -123,7 +123,7 @@ func (t *table) compact() error {
err = t.readFile(downloadAt)
if err != nil {
- level.Error(util.Logger).Log("msg", "error reading file", "err", err)
+ level.Error(util_log.Logger).Log("msg", "error reading file", "err", err)
return
}
case <-t.quit:
@@ -148,7 +148,7 @@ func (t *table) compact() error {
}
}
- level.Debug(util.Logger).Log("msg", "closing readObjectChan")
+ level.Debug(util_log.Logger).Log("msg", "closing readObjectChan")
close(readObjectChan)
}()
@@ -175,7 +175,7 @@ func (t *table) compact() error {
default:
}
- level.Info(util.Logger).Log("msg", "finished compacting the dbs")
+ level.Info(util_log.Logger).Log("msg", "finished compacting the dbs")
// upload the compacted db
err = t.upload()
@@ -219,7 +219,7 @@ func (t *table) writeBatch(batch []indexEntry) error {
// readFile reads a boltdb file from a path and writes the index in batched mode to compactedDB
func (t *table) readFile(path string) error {
- level.Debug(util.Logger).Log("msg", "reading file for compaction", "path", path)
+ level.Debug(util_log.Logger).Log("msg", "reading file for compaction", "path", path)
db, err := shipper_util.SafeOpenBoltdbFile(path)
if err != nil {
@@ -228,11 +228,11 @@ func (t *table) readFile(path string) error {
defer func() {
if err := db.Close(); err != nil {
- level.Error(util.Logger).Log("msg", "failed to close db", "path", path, "err", err)
+ level.Error(util_log.Logger).Log("msg", "failed to close db", "path", path, "err", err)
}
if err = os.Remove(path); err != nil {
- level.Error(util.Logger).Log("msg", "failed to remove file", "path", path, "err", err)
+ level.Error(util_log.Logger).Log("msg", "failed to remove file", "path", path, "err", err)
}
}()
@@ -305,23 +305,23 @@ func (t *table) upload() error {
defer func() {
if err := compressedDB.Close(); err != nil {
- level.Error(util.Logger).Log("msg", "failed to close file", "path", compactedDBPath, "err", err)
+ level.Error(util_log.Logger).Log("msg", "failed to close file", "path", compactedDBPath, "err", err)
}
if err := os.Remove(compressedDBPath); err != nil {
- level.Error(util.Logger).Log("msg", "failed to remove file", "path", compressedDBPath, "err", err)
+ level.Error(util_log.Logger).Log("msg", "failed to remove file", "path", compressedDBPath, "err", err)
}
}()
objectKey := fmt.Sprintf("%s.gz", shipper_util.BuildObjectKey(t.name, uploaderName, fmt.Sprint(time.Now().Unix())))
- level.Info(util.Logger).Log("msg", "uploading the compacted file", "objectKey", objectKey)
+ level.Info(util_log.Logger).Log("msg", "uploading the compacted file", "objectKey", objectKey)
return t.storageClient.PutObject(t.ctx, objectKey, compressedDB)
}
// removeObjectsFromStorage deletes objects from storage.
func (t *table) removeObjectsFromStorage(objects []chunk.StorageObject) error {
- level.Info(util.Logger).Log("msg", "removing source db files from storage", "count", len(objects))
+ level.Info(util_log.Logger).Log("msg", "removing source db files from storage", "count", len(objects))
for _, object := range objects {
err := t.storageClient.DeleteObject(t.ctx, object.Key)
diff --git a/pkg/storage/stores/shipper/downloads/table.go b/pkg/storage/stores/shipper/downloads/table.go
index 5fb869bbabb3b..87ddb64e02e76 100644
--- a/pkg/storage/stores/shipper/downloads/table.go
+++ b/pkg/storage/stores/shipper/downloads/table.go
@@ -14,7 +14,7 @@ import (
"github.com/cortexproject/cortex/pkg/chunk"
chunk_util "github.com/cortexproject/cortex/pkg/chunk/util"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
util_math "github.com/cortexproject/cortex/pkg/util/math"
"github.com/cortexproject/cortex/pkg/util/spanlogger"
"github.com/go-kit/kit/log"
@@ -90,7 +90,7 @@ func NewTable(spanCtx context.Context, name, cacheLocation string, storageClient
// Using background context to avoid cancellation of download when request times out.
// We would anyways need the files for serving next requests.
if err := table.init(ctx, log); err != nil {
- level.Error(util.Logger).Log("msg", "failed to download table", "name", table.name)
+ level.Error(util_log.Logger).Log("msg", "failed to download table", "name", table.name)
}
}()
@@ -134,7 +134,7 @@ func LoadTable(ctx context.Context, name, cacheLocation string, storageClient St
cancelFunc: func() {},
}
- level.Debug(util.Logger).Log("msg", fmt.Sprintf("opening locally present files for table %s", name), "files", fmt.Sprint(filesInfo))
+ level.Debug(util_log.Logger).Log("msg", fmt.Sprintf("opening locally present files for table %s", name), "files", fmt.Sprint(filesInfo))
for _, fileInfo := range filesInfo {
if fileInfo.IsDir() {
@@ -144,14 +144,14 @@ func LoadTable(ctx context.Context, name, cacheLocation string, storageClient St
// if we fail to open a boltdb file, lets skip it and let sync operation re-download the file from storage.
boltdb, err := shipper_util.SafeOpenBoltdbFile(filepath.Join(folderPath, fileInfo.Name()))
if err != nil {
- level.Error(util.Logger).Log("msg", fmt.Sprintf("failed to open existing boltdb file %s, continuing without it to let the sync operation catch up", filepath.Join(folderPath, fileInfo.Name())), "err", err)
+ level.Error(util_log.Logger).Log("msg", fmt.Sprintf("failed to open existing boltdb file %s, continuing without it to let the sync operation catch up", filepath.Join(folderPath, fileInfo.Name())), "err", err)
continue
}
table.dbs[fileInfo.Name()] = boltdb
}
- level.Debug(util.Logger).Log("msg", fmt.Sprintf("syncing files for table %s", name))
+ level.Debug(util_log.Logger).Log("msg", fmt.Sprintf("syncing files for table %s", name))
// sync the table to get new files and remove the deleted ones from storage.
err = table.Sync(ctx)
if err != nil {
@@ -173,12 +173,12 @@ func (t *Table) init(ctx context.Context, spanLogger log.Logger) (err error) {
status = statusFailure
t.err = err
- level.Error(util.Logger).Log("msg", fmt.Sprintf("failed to initialize table %s, cleaning it up", t.name), "err", err)
+ level.Error(util_log.Logger).Log("msg", fmt.Sprintf("failed to initialize table %s, cleaning it up", t.name), "err", err)
// cleaning up files due to error to avoid returning invalid results.
for fileName := range t.dbs {
if err := t.cleanupDB(fileName); err != nil {
- level.Error(util.Logger).Log("msg", "failed to cleanup partially downloaded file", "filename", fileName, "err", err)
+ level.Error(util_log.Logger).Log("msg", "failed to cleanup partially downloaded file", "filename", fileName, "err", err)
}
}
}
@@ -195,7 +195,7 @@ func (t *Table) init(ctx context.Context, spanLogger log.Logger) (err error) {
return
}
- level.Debug(util.Logger).Log("msg", fmt.Sprintf("list of files to download for period %s: %s", t.name, objects))
+ level.Debug(util_log.Logger).Log("msg", fmt.Sprintf("list of files to download for period %s: %s", t.name, objects))
folderPath, err := t.folderPathForTable(true)
if err != nil {
@@ -252,7 +252,7 @@ func (t *Table) Close() {
for name, db := range t.dbs {
if err := db.Close(); err != nil {
- level.Error(util.Logger).Log("msg", fmt.Sprintf("failed to close file %s for table %s", name, t.name), "err", err)
+ level.Error(util_log.Logger).Log("msg", fmt.Sprintf("failed to close file %s for table %s", name, t.name), "err", err)
}
}
@@ -363,14 +363,14 @@ func (t *Table) cleanupDB(fileName string) error {
// Sync downloads updated and new files from the storage relevant for the table and removes the deleted ones
func (t *Table) Sync(ctx context.Context) error {
- level.Debug(util.Logger).Log("msg", fmt.Sprintf("syncing files for table %s", t.name))
+ level.Debug(util_log.Logger).Log("msg", fmt.Sprintf("syncing files for table %s", t.name))
toDownload, toDelete, err := t.checkStorageForUpdates(ctx)
if err != nil {
return err
}
- level.Debug(util.Logger).Log("msg", fmt.Sprintf("updates for table %s. toDownload: %s, toDelete: %s", t.name, toDownload, toDelete))
+ level.Debug(util_log.Logger).Log("msg", fmt.Sprintf("updates for table %s. toDownload: %s, toDelete: %s", t.name, toDownload, toDelete))
for _, storageObject := range toDownload {
err = t.downloadFile(ctx, storageObject)
@@ -435,7 +435,7 @@ func (t *Table) checkStorageForUpdates(ctx context.Context) (toDownload []chunk.
// downloadFile first downloads file to a temp location so that we can close the existing db(if already exists), replace it with new one and then reopen it.
func (t *Table) downloadFile(ctx context.Context, storageObject chunk.StorageObject) error {
- level.Info(util.Logger).Log("msg", fmt.Sprintf("downloading object from storage with key %s", storageObject.Key))
+ level.Info(util_log.Logger).Log("msg", fmt.Sprintf("downloading object from storage with key %s", storageObject.Key))
dbName, err := getDBNameFromObjectKey(storageObject.Key)
if err != nil {
diff --git a/pkg/storage/stores/shipper/downloads/table_manager.go b/pkg/storage/stores/shipper/downloads/table_manager.go
index b8549e2b781d2..548306b5c47f1 100644
--- a/pkg/storage/stores/shipper/downloads/table_manager.go
+++ b/pkg/storage/stores/shipper/downloads/table_manager.go
@@ -14,7 +14,7 @@ import (
"github.com/cortexproject/cortex/pkg/chunk"
chunk_util "github.com/cortexproject/cortex/pkg/chunk/util"
- pkg_util "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/cortexproject/cortex/pkg/util/spanlogger"
"github.com/go-kit/kit/log/level"
"github.com/prometheus/client_golang/prometheus"
@@ -99,18 +99,18 @@ func (tm *TableManager) loop() {
case <-syncTicker.C:
err := tm.syncTables(tm.ctx)
if err != nil {
- level.Error(pkg_util.Logger).Log("msg", "error syncing local boltdb files with storage", "err", err)
+ level.Error(util_log.Logger).Log("msg", "error syncing local boltdb files with storage", "err", err)
}
// we need to keep ensuring query readiness to download every days new table which would otherwise be downloaded only during queries.
err = tm.ensureQueryReadiness()
if err != nil {
- level.Error(pkg_util.Logger).Log("msg", "error ensuring query readiness of tables", "err", err)
+ level.Error(util_log.Logger).Log("msg", "error ensuring query readiness of tables", "err", err)
}
case <-cacheCleanupTicker.C:
err := tm.cleanupCache()
if err != nil {
- level.Error(pkg_util.Logger).Log("msg", "error cleaning up expired tables", "err", err)
+ level.Error(util_log.Logger).Log("msg", "error cleaning up expired tables", "err", err)
}
case <-tm.ctx.Done():
return
@@ -157,7 +157,7 @@ func (tm *TableManager) query(ctx context.Context, tableName string, queries []c
tm.tablesMtx.Lock()
defer tm.tablesMtx.Unlock()
- level.Error(pkg_util.Logger).Log("msg", fmt.Sprintf("table %s has some problem, cleaning it up", tableName), "err", table.Err())
+ level.Error(util_log.Logger).Log("msg", fmt.Sprintf("table %s has some problem, cleaning it up", tableName), "err", table.Err())
delete(tm.tables, tableName)
return table.Err()
@@ -179,7 +179,7 @@ func (tm *TableManager) getOrCreateTable(spanCtx context.Context, tableName stri
table, ok = tm.tables[tableName]
if !ok {
// table not found, creating one.
- level.Info(pkg_util.Logger).Log("msg", fmt.Sprintf("downloading all files for table %s", tableName))
+ level.Info(util_log.Logger).Log("msg", fmt.Sprintf("downloading all files for table %s", tableName))
table = NewTable(spanCtx, tableName, tm.cfg.CacheDir, tm.storageClient, tm.boltIndexClient, tm.metrics)
tm.tables[tableName] = table
@@ -205,7 +205,7 @@ func (tm *TableManager) syncTables(ctx context.Context) error {
tm.metrics.tablesSyncOperationTotal.WithLabelValues(status).Inc()
}()
- level.Info(pkg_util.Logger).Log("msg", "syncing tables")
+ level.Info(util_log.Logger).Log("msg", "syncing tables")
for _, table := range tm.tables {
err = table.Sync(ctx)
@@ -221,12 +221,12 @@ func (tm *TableManager) cleanupCache() error {
tm.tablesMtx.Lock()
defer tm.tablesMtx.Unlock()
- level.Info(pkg_util.Logger).Log("msg", "cleaning tables cache")
+ level.Info(util_log.Logger).Log("msg", "cleaning tables cache")
for name, table := range tm.tables {
lastUsedAt := table.LastUsedAt()
if lastUsedAt.Add(tm.cfg.CacheTTL).Before(time.Now()) {
- level.Info(pkg_util.Logger).Log("msg", fmt.Sprintf("cleaning up expired table %s", name))
+ level.Info(util_log.Logger).Log("msg", fmt.Sprintf("cleaning up expired table %s", name))
err := table.CleanupAllDBs()
if err != nil {
return err
@@ -237,7 +237,7 @@ func (tm *TableManager) cleanupCache() error {
// remove the directory where files for the table were downloaded.
err = os.RemoveAll(path.Join(tm.cfg.CacheDir, name))
if err != nil {
- level.Error(pkg_util.Logger).Log("msg", fmt.Sprintf("failed to remove directory for table %s", name), "err", err)
+ level.Error(util_log.Logger).Log("msg", fmt.Sprintf("failed to remove directory for table %s", name), "err", err)
}
}
}
@@ -262,7 +262,7 @@ func (tm *TableManager) ensureQueryReadiness() error {
return err
}
- level.Debug(pkg_util.Logger).Log("msg", fmt.Sprintf("list of tables required for query-readiness %s", tableNames))
+ level.Debug(util_log.Logger).Log("msg", fmt.Sprintf("list of tables required for query-readiness %s", tableNames))
for _, tableName := range tableNames {
tm.tablesMtx.RLock()
@@ -274,7 +274,7 @@ func (tm *TableManager) ensureQueryReadiness() error {
continue
}
- level.Info(pkg_util.Logger).Log("msg", "table required for query readiness does not exist locally, downloading it", "table-name", tableName)
+ level.Info(util_log.Logger).Log("msg", "table required for query readiness does not exist locally, downloading it", "table-name", tableName)
// table doesn't exist, download it.
table, err := LoadTable(tm.ctx, tableName, tm.cfg.CacheDir, tm.storageClient, tm.boltIndexClient, tm.metrics)
if err != nil {
@@ -340,7 +340,7 @@ func (tm *TableManager) loadLocalTables() error {
continue
}
- level.Info(pkg_util.Logger).Log("msg", fmt.Sprintf("loading local table %s", fileInfo.Name()))
+ level.Info(util_log.Logger).Log("msg", fmt.Sprintf("loading local table %s", fileInfo.Name()))
table, err := LoadTable(tm.ctx, fileInfo.Name(), tm.cfg.CacheDir, tm.storageClient, tm.boltIndexClient, tm.metrics)
if err != nil {
diff --git a/pkg/storage/stores/shipper/shipper_index_client.go b/pkg/storage/stores/shipper/shipper_index_client.go
index d2a3882e200c6..a9118dfa26e1b 100644
--- a/pkg/storage/stores/shipper/shipper_index_client.go
+++ b/pkg/storage/stores/shipper/shipper_index_client.go
@@ -13,7 +13,7 @@ import (
"github.com/cortexproject/cortex/pkg/chunk"
"github.com/cortexproject/cortex/pkg/chunk/local"
chunk_util "github.com/cortexproject/cortex/pkg/chunk/util"
- pkg_util "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/cortexproject/cortex/pkg/util/spanlogger"
"github.com/go-kit/kit/log/level"
"github.com/prometheus/client_golang/prometheus"
@@ -96,7 +96,7 @@ func NewShipper(cfg Config, storageClient chunk.ObjectClient, registerer prometh
return nil, err
}
- level.Info(pkg_util.Logger).Log("msg", fmt.Sprintf("starting boltdb shipper in %d mode", cfg.Mode))
+ level.Info(util_log.Logger).Log("msg", fmt.Sprintf("starting boltdb shipper in %d mode", cfg.Mode))
return &shipper, nil
}
diff --git a/pkg/storage/stores/shipper/table_client.go b/pkg/storage/stores/shipper/table_client.go
index aa72c41e18ea8..6d2f0661e40ee 100644
--- a/pkg/storage/stores/shipper/table_client.go
+++ b/pkg/storage/stores/shipper/table_client.go
@@ -8,7 +8,7 @@ import (
"github.com/go-kit/kit/log/level"
"github.com/cortexproject/cortex/pkg/chunk"
- cortex_util "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/grafana/loki/pkg/storage/stores/util"
)
@@ -54,7 +54,7 @@ func (b *boltDBShipperTableClient) DeleteTable(ctx context.Context, name string)
}
if len(dirs) != 0 {
- level.Error(cortex_util.Logger).Log("msg", fmt.Sprintf("unexpected directories in %s folder, not touching them", name), "directories", fmt.Sprint(dirs))
+ level.Error(util_log.Logger).Log("msg", fmt.Sprintf("unexpected directories in %s folder, not touching them", name), "directories", fmt.Sprint(dirs))
}
for _, object := range objects {
diff --git a/pkg/storage/stores/shipper/uploads/table.go b/pkg/storage/stores/shipper/uploads/table.go
index 8520517e8f9fa..6d1f9d9612328 100644
--- a/pkg/storage/stores/shipper/uploads/table.go
+++ b/pkg/storage/stores/shipper/uploads/table.go
@@ -16,7 +16,7 @@ import (
"github.com/cortexproject/cortex/pkg/chunk"
"github.com/cortexproject/cortex/pkg/chunk/local"
chunk_util "github.com/cortexproject/cortex/pkg/chunk/util"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/go-kit/kit/log/level"
"go.etcd.io/bbolt"
@@ -116,10 +116,10 @@ func (lt *Table) Snapshot() error {
lt.dbSnapshotsMtx.Lock()
defer lt.dbSnapshotsMtx.Unlock()
- level.Debug(util.Logger).Log("msg", fmt.Sprintf("snapshotting table %s", lt.name))
+ level.Debug(util_log.Logger).Log("msg", fmt.Sprintf("snapshotting table %s", lt.name))
for name, db := range lt.dbs {
- level.Debug(util.Logger).Log("msg", fmt.Sprintf("checking db %s for snapshot", name))
+ level.Debug(util_log.Logger).Log("msg", fmt.Sprintf("checking db %s for snapshot", name))
srcWriteCount := 0
err := db.View(func(tx *bbolt.Tx) error {
srcWriteCount = db.Stats().TxStats.Write
@@ -176,10 +176,10 @@ func (lt *Table) Snapshot() error {
snapshot.writesCount = srcWriteCount
lt.dbSnapshots[name] = snapshot
- level.Debug(util.Logger).Log("msg", fmt.Sprintf("finished snapshotting db %s", name))
+ level.Debug(util_log.Logger).Log("msg", fmt.Sprintf("finished snapshotting db %s", name))
}
- level.Debug(util.Logger).Log("msg", fmt.Sprintf("finished snapshotting table %s", lt.name))
+ level.Debug(util_log.Logger).Log("msg", fmt.Sprintf("finished snapshotting table %s", lt.name))
return nil
}
@@ -270,7 +270,7 @@ func (lt *Table) Stop() {
for name, db := range lt.dbs {
if err := db.Close(); err != nil {
- level.Error(util.Logger).Log("msg", fmt.Errorf("failed to close file %s for table %s", name, lt.name))
+ level.Error(util_log.Logger).Log("msg", fmt.Errorf("failed to close file %s for table %s", name, lt.name))
}
}
@@ -337,7 +337,7 @@ func (lt *Table) Upload(ctx context.Context, force bool) error {
return err
}
- level.Info(util.Logger).Log("msg", fmt.Sprintf("uploading table %s", lt.name))
+ level.Info(util_log.Logger).Log("msg", fmt.Sprintf("uploading table %s", lt.name))
for name, db := range lt.dbs {
// doing string comparison between unix timestamps in string form since they are anyways of same length
@@ -364,13 +364,13 @@ func (lt *Table) Upload(ctx context.Context, force bool) error {
lt.dbUploadTimeMtx.Unlock()
}
- level.Info(util.Logger).Log("msg", fmt.Sprintf("finished uploading table %s", lt.name))
+ level.Info(util_log.Logger).Log("msg", fmt.Sprintf("finished uploading table %s", lt.name))
return nil
}
func (lt *Table) uploadDB(ctx context.Context, name string, db *bbolt.DB) error {
- level.Debug(util.Logger).Log("msg", fmt.Sprintf("uploading db %s from table %s", name, lt.name))
+ level.Debug(util_log.Logger).Log("msg", fmt.Sprintf("uploading db %s from table %s", name, lt.name))
filePath := path.Join(lt.path, fmt.Sprintf("%s%s", name, tempFileSuffix))
f, err := os.Create(filePath)
@@ -380,11 +380,11 @@ func (lt *Table) uploadDB(ctx context.Context, name string, db *bbolt.DB) error
defer func() {
if err := f.Close(); err != nil {
- level.Error(util.Logger).Log("msg", "failed to close temp file", "path", filePath, "err", err)
+ level.Error(util_log.Logger).Log("msg", "failed to close temp file", "path", filePath, "err", err)
}
if err := os.Remove(filePath); err != nil {
- level.Error(util.Logger).Log("msg", "failed to remove temp file", "path", filePath, "err", err)
+ level.Error(util_log.Logger).Log("msg", "failed to remove temp file", "path", filePath, "err", err)
}
}()
@@ -422,7 +422,7 @@ func (lt *Table) uploadDB(ctx context.Context, name string, db *bbolt.DB) error
// Cleanup removes dbs which are already uploaded and have not been modified for period longer than dbRetainPeriod.
// This is to avoid keeping all the files forever in the ingesters.
func (lt *Table) Cleanup(dbRetainPeriod time.Duration) error {
- level.Info(util.Logger).Log("msg", fmt.Sprintf("cleaning up unwanted dbs from table %s", lt.name))
+ level.Info(util_log.Logger).Log("msg", fmt.Sprintf("cleaning up unwanted dbs from table %s", lt.name))
var filesToCleanup []string
cutoffTime := time.Now().Add(-dbRetainPeriod)
@@ -443,14 +443,14 @@ func (lt *Table) Cleanup(dbRetainPeriod time.Duration) error {
lt.dbsMtx.RUnlock()
for i := range filesToCleanup {
- level.Debug(util.Logger).Log("msg", fmt.Sprintf("removing db %s from table %s", filesToCleanup[i], lt.name))
+ level.Debug(util_log.Logger).Log("msg", fmt.Sprintf("removing db %s from table %s", filesToCleanup[i], lt.name))
if err := lt.RemoveDB(filesToCleanup[i]); err != nil {
return err
}
if err := lt.RemoveSnapshotDB(filesToCleanup[i]); err != nil {
- level.Error(util.Logger).Log("msg", fmt.Sprintf("failed to remove snapshot db %s", filesToCleanup[i]))
+ level.Error(util_log.Logger).Log("msg", fmt.Sprintf("failed to remove snapshot db %s", filesToCleanup[i]))
}
}
@@ -485,7 +485,7 @@ func loadBoltDBsFromDir(dir string) (map[string]*bbolt.DB, error) {
// If an ingester is killed abruptly in the middle of an upload operation it could leave out a temp file which holds the snapshot of db for uploading.
// Cleaning up those temp files to avoid problems.
if err := os.Remove(filepath.Join(dir, fileInfo.Name())); err != nil {
- level.Error(util.Logger).Log("msg", "failed to remove temp file", "name", fileInfo.Name(), "err", err)
+ level.Error(util_log.Logger).Log("msg", "failed to remove temp file", "name", fileInfo.Name(), "err", err)
}
continue
}
diff --git a/pkg/storage/stores/shipper/uploads/table_manager.go b/pkg/storage/stores/shipper/uploads/table_manager.go
index aff13667fbf5b..9860d6b639f6e 100644
--- a/pkg/storage/stores/shipper/uploads/table_manager.go
+++ b/pkg/storage/stores/shipper/uploads/table_manager.go
@@ -14,7 +14,7 @@ import (
"github.com/cortexproject/cortex/pkg/chunk"
"github.com/cortexproject/cortex/pkg/chunk/local"
chunk_util "github.com/cortexproject/cortex/pkg/chunk/util"
- pkg_util "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/cortexproject/cortex/pkg/util/spanlogger"
"github.com/go-kit/kit/log/level"
"github.com/prometheus/client_golang/prometheus"
@@ -84,7 +84,7 @@ func (tm *TableManager) loop() {
}
func (tm *TableManager) Stop() {
- level.Info(pkg_util.Logger).Log("msg", "stopping table manager")
+ level.Info(util_log.Logger).Log("msg", "stopping table manager")
tm.cancel()
tm.wg.Wait()
@@ -168,21 +168,21 @@ func (tm *TableManager) uploadTables(ctx context.Context, force bool) {
tm.tablesMtx.RLock()
defer tm.tablesMtx.RUnlock()
- level.Info(pkg_util.Logger).Log("msg", "uploading tables")
+ level.Info(util_log.Logger).Log("msg", "uploading tables")
status := statusSuccess
for _, table := range tm.tables {
err := table.Snapshot()
if err != nil {
// we do not want to stop uploading of dbs due to failures in snapshotting them so logging just the error here.
- level.Error(pkg_util.Logger).Log("msg", "failed to snapshot table for reads", "table", table.name, "err", err)
+ level.Error(util_log.Logger).Log("msg", "failed to snapshot table for reads", "table", table.name, "err", err)
}
err = table.Upload(ctx, force)
if err != nil {
// continue uploading other tables while skipping cleanup for a failed one.
status = statusFailure
- level.Error(pkg_util.Logger).Log("msg", "failed to upload dbs", "table", table.name, "err", err)
+ level.Error(util_log.Logger).Log("msg", "failed to upload dbs", "table", table.name, "err", err)
continue
}
@@ -190,7 +190,7 @@ func (tm *TableManager) uploadTables(ctx context.Context, force bool) {
err = table.Cleanup(tm.cfg.DBRetainPeriod)
if err != nil {
// we do not want to stop uploading of dbs due to failures in cleaning them up so logging just the error here.
- level.Error(pkg_util.Logger).Log("msg", "failed to cleanup uploaded dbs past their retention period", "table", table.name, "err", err)
+ level.Error(util_log.Logger).Log("msg", "failed to cleanup uploaded dbs past their retention period", "table", table.name, "err", err)
}
}
@@ -218,7 +218,7 @@ func (tm *TableManager) loadTables() (map[string]*Table, error) {
// since we are moving to keeping files for same table in a folder, if current element is a file we need to move it inside a directory with the same name
// i.e file index_123 would be moved to path index_123/index_123.
if !fileInfo.IsDir() {
- level.Info(pkg_util.Logger).Log("msg", fmt.Sprintf("found a legacy file %s, moving it to folder with same name", fileInfo.Name()))
+ level.Info(util_log.Logger).Log("msg", fmt.Sprintf("found a legacy file %s, moving it to folder with same name", fileInfo.Name()))
filePath := filepath.Join(tm.cfg.IndexDir, fileInfo.Name())
// create a folder with .temp suffix since we can't create a directory with same name as file.
@@ -238,7 +238,7 @@ func (tm *TableManager) loadTables() (map[string]*Table, error) {
}
}
- level.Info(pkg_util.Logger).Log("msg", fmt.Sprintf("loading table %s", fileInfo.Name()))
+ level.Info(util_log.Logger).Log("msg", fmt.Sprintf("loading table %s", fileInfo.Name()))
table, err := LoadTable(filepath.Join(tm.cfg.IndexDir, fileInfo.Name()), tm.cfg.Uploader, tm.storageClient, tm.boltIndexClient)
if err != nil {
return nil, err
@@ -248,7 +248,7 @@ func (tm *TableManager) loadTables() (map[string]*Table, error) {
// if table is nil it means it has no files in it so remove the folder for that table.
err := os.Remove(filepath.Join(tm.cfg.IndexDir, fileInfo.Name()))
if err != nil {
- level.Error(pkg_util.Logger).Log("msg", "failed to remove empty table folder", "table", fileInfo.Name(), "err", err)
+ level.Error(util_log.Logger).Log("msg", "failed to remove empty table folder", "table", fileInfo.Name(), "err", err)
}
continue
}
diff --git a/pkg/storage/stores/shipper/util/util.go b/pkg/storage/stores/shipper/util/util.go
index 8861043948d1b..82c99cf1b147b 100644
--- a/pkg/storage/stores/shipper/util/util.go
+++ b/pkg/storage/stores/shipper/util/util.go
@@ -13,7 +13,7 @@ import (
"github.com/grafana/loki/pkg/chunkenc"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/go-kit/kit/log/level"
)
@@ -30,7 +30,7 @@ func GetFileFromStorage(ctx context.Context, storageClient StorageClient, object
defer func() {
if err := readCloser.Close(); err != nil {
- level.Error(util.Logger)
+ level.Error(util_log.Logger)
}
}()
@@ -52,7 +52,7 @@ func GetFileFromStorage(ctx context.Context, storageClient StorageClient, object
return err
}
- level.Info(util.Logger).Log("msg", fmt.Sprintf("downloaded file %s", objectKey))
+ level.Info(util_log.Logger).Log("msg", fmt.Sprintf("downloaded file %s", objectKey))
return f.Sync()
}
@@ -82,7 +82,7 @@ func BuildObjectKey(tableName, uploader, dbName string) string {
}
func CompressFile(src, dest string) error {
- level.Info(util.Logger).Log("msg", "compressing the file", "src", src, "dest", dest)
+ level.Info(util_log.Logger).Log("msg", "compressing the file", "src", src, "dest", dest)
uncompressedFile, err := os.Open(src)
if err != nil {
return err
@@ -90,7 +90,7 @@ func CompressFile(src, dest string) error {
defer func() {
if err := uncompressedFile.Close(); err != nil {
- level.Error(util.Logger).Log("msg", "failed to close uncompressed file", "path", src, "err", err)
+ level.Error(util_log.Logger).Log("msg", "failed to close uncompressed file", "path", src, "err", err)
}
}()
@@ -101,7 +101,7 @@ func CompressFile(src, dest string) error {
defer func() {
if err := compressedFile.Close(); err != nil {
- level.Error(util.Logger).Log("msg", "failed to close compressed file", "path", dest, "err", err)
+ level.Error(util_log.Logger).Log("msg", "failed to close compressed file", "path", dest, "err", err)
}
}()
diff --git a/pkg/storage/util_test.go b/pkg/storage/util_test.go
index dd480d805cf57..ecc0cdf1b36c2 100644
--- a/pkg/storage/util_test.go
+++ b/pkg/storage/util_test.go
@@ -6,7 +6,7 @@ import (
"testing"
"time"
- pkg_util "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/cortexproject/cortex/pkg/chunk"
"github.com/cortexproject/cortex/pkg/chunk/cache"
@@ -202,7 +202,7 @@ func (m *mockChunkStore) GetChunkRefs(ctx context.Context, userID string, from,
refs = append(refs, r)
}
- cache, err := cache.New(cache.Config{Prefix: "chunks"}, nil, pkg_util.Logger)
+ cache, err := cache.New(cache.Config{Prefix: "chunks"}, nil, util_log.Logger)
if err != nil {
panic(err)
}
diff --git a/pkg/util/config.go b/pkg/util/config.go
index 481d16c6ca7ec..9b803d9640ea7 100644
--- a/pkg/util/config.go
+++ b/pkg/util/config.go
@@ -5,7 +5,7 @@ import (
"io"
"strings"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/go-kit/kit/log/level"
"github.com/prometheus/common/version"
"gopkg.in/yaml.v2"
@@ -22,7 +22,7 @@ func LogConfig(cfg interface{}) error {
cfgStr := string(lc)
cfgStrs := strings.Split(cfgStr, "\n")
for i := len(cfgStrs) - 1; i >= 0; i-- {
- level.Info(util.Logger).Log("type", "config", "msg", cfgStrs[i])
+ level.Info(util_log.Logger).Log("type", "config", "msg", cfgStrs[i])
}
return nil
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager.go
index 39b3cc7d67187..a93379bc7bba1 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager.go
@@ -14,7 +14,6 @@ import (
"time"
"github.com/go-kit/kit/log"
- "github.com/go-kit/kit/log/level"
"github.com/prometheus/alertmanager/api"
"github.com/prometheus/alertmanager/cluster"
"github.com/prometheus/alertmanager/config"
@@ -80,9 +79,6 @@ type Alertmanager struct {
// Further, in upstream AM, this metric is handled using the config coordinator which we don't use
// hence we need to generate the metric ourselves.
configHashMetric prometheus.Gauge
-
- activeMtx sync.Mutex
- active bool
}
var (
@@ -102,11 +98,9 @@ func init() {
// New creates a new Alertmanager.
func New(cfg *Config, reg *prometheus.Registry) (*Alertmanager, error) {
am := &Alertmanager{
- cfg: cfg,
- logger: log.With(cfg.Logger, "user", cfg.UserID),
- stop: make(chan struct{}),
- active: false,
- activeMtx: sync.Mutex{},
+ cfg: cfg,
+ logger: log.With(cfg.Logger, "user", cfg.UserID),
+ stop: make(chan struct{}),
configHashMetric: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "alertmanager_config_hash",
Help: "Hash of the currently loaded alertmanager configuration.",
@@ -269,55 +263,10 @@ func (am *Alertmanager) ApplyConfig(userID string, conf *config.Config, rawCfg s
go am.dispatcher.Run()
go am.inhibitor.Run()
- // Ensure the alertmanager is set to active
- am.activeMtx.Lock()
- am.active = true
- am.activeMtx.Unlock()
-
am.configHashMetric.Set(md5HashAsMetricValue([]byte(rawCfg)))
return nil
}
-// IsActive returns if the alertmanager is currently running
-// or is paused
-func (am *Alertmanager) IsActive() bool {
- am.activeMtx.Lock()
- defer am.activeMtx.Unlock()
- return am.active
-}
-
-// Pause running jobs in the alertmanager that are able to be restarted and sets
-// to inactives
-func (am *Alertmanager) Pause() {
- // Set to inactive
- am.activeMtx.Lock()
- am.active = false
- am.activeMtx.Unlock()
-
- // Stop the inhibitor and dispatcher which will be recreated when
- // a new config is applied
- if am.inhibitor != nil {
- am.inhibitor.Stop()
- am.inhibitor = nil
- }
- if am.dispatcher != nil {
- am.dispatcher.Stop()
- am.dispatcher = nil
- }
-
- // Remove all of the active silences from the alertmanager
- silences, _, err := am.silences.Query()
- if err != nil {
- level.Warn(am.logger).Log("msg", "unable to retrieve silences for removal", "err", err)
- }
- for _, si := range silences {
- err = am.silences.Expire(si.Id)
- if err != nil {
- level.Warn(am.logger).Log("msg", "unable to remove silence", "err", err, "silence", si.Id)
- }
- }
-}
-
// Stop stops the Alertmanager.
func (am *Alertmanager) Stop() {
if am.inhibitor != nil {
@@ -330,6 +279,10 @@ func (am *Alertmanager) Stop() {
am.alerts.Close()
close(am.stop)
+}
+
+func (am *Alertmanager) StopAndWait() {
+ am.Stop()
am.wg.Wait()
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_http.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_http.go
index 2617c58f3c483..0efeebde15ab8 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_http.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_http.go
@@ -6,7 +6,7 @@ import (
"github.com/go-kit/kit/log/level"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/cortexproject/cortex/pkg/util/services"
)
@@ -32,7 +32,7 @@ func writeMessage(w http.ResponseWriter, message string) {
}{Message: message})
if err != nil {
- level.Error(util.Logger).Log("msg", "unable to serve alertmanager ring page", "err", err)
+ level.Error(util_log.Logger).Log("msg", "unable to serve alertmanager ring page", "err", err)
}
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_metrics.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_metrics.go
index d500bcafde5be..2d2a3de7a161d 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_metrics.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_metrics.go
@@ -154,6 +154,12 @@ func (m *alertmanagerMetrics) addUserRegistry(user string, reg *prometheus.Regis
m.regs.AddUserRegistry(user, reg)
}
+func (m *alertmanagerMetrics) removeUserRegistry(user string) {
+ // We need to go for a soft deletion here, as hard deletion requires
+ // that _all_ metrics except gauges are per-user.
+ m.regs.RemoveUserRegistry(user, false)
+}
+
func (m *alertmanagerMetrics) Describe(out chan<- *prometheus.Desc) {
out <- m.alertsReceived
out <- m.alertsInvalid
diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_ring.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_ring.go
index 0a7bb17c5b09c..9fe2d9dea1fdb 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_ring.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_ring.go
@@ -10,8 +10,8 @@ import (
"github.com/cortexproject/cortex/pkg/ring"
"github.com/cortexproject/cortex/pkg/ring/kv"
- "github.com/cortexproject/cortex/pkg/util"
"github.com/cortexproject/cortex/pkg/util/flagext"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
)
const (
@@ -60,7 +60,7 @@ type RingConfig struct {
func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet) {
hostname, err := os.Hostname()
if err != nil {
- level.Error(util.Logger).Log("msg", "failed to get hostname", "err", err)
+ level.Error(util_log.Logger).Log("msg", "failed to get hostname", "err", err)
os.Exit(1)
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alerts/objectclient/store.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alerts/objectclient/store.go
index 02c1d4d733d53..8b00eb79aa223 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alerts/objectclient/store.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alerts/objectclient/store.go
@@ -10,7 +10,7 @@ import (
"github.com/cortexproject/cortex/pkg/alertmanager/alerts"
"github.com/cortexproject/cortex/pkg/chunk"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
)
// Object Alert Storage Schema
@@ -60,7 +60,7 @@ func (a *AlertStore) getAlertConfig(ctx context.Context, key string) (alerts.Ale
return alerts.AlertConfigDesc{}, err
}
- defer runutil.CloseWithLogOnErr(util.Logger, readCloser, "close alert config reader")
+ defer runutil.CloseWithLogOnErr(util_log.Logger, readCloser, "close alert config reader")
buf, err := ioutil.ReadAll(readCloser)
if err != nil {
diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/lifecycle.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/lifecycle.go
index 27f1784eb6e62..b80a508b8d7c6 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/lifecycle.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/lifecycle.go
@@ -4,7 +4,7 @@ import (
"github.com/cortexproject/cortex/pkg/ring"
)
-func (r *MultitenantAlertmanager) OnRingInstanceRegister(_ *ring.BasicLifecycler, ringDesc ring.Desc, instanceExists bool, instanceID string, instanceDesc ring.IngesterDesc) (ring.IngesterState, ring.Tokens) {
+func (r *MultitenantAlertmanager) OnRingInstanceRegister(_ *ring.BasicLifecycler, ringDesc ring.Desc, instanceExists bool, instanceID string, instanceDesc ring.InstanceDesc) (ring.IngesterState, ring.Tokens) {
// When we initialize the alertmanager instance in the ring we want to start from
// a clean situation, so whatever is the state we set it JOINING, while we keep existing
// tokens (if any).
@@ -24,5 +24,5 @@ func (r *MultitenantAlertmanager) OnRingInstanceRegister(_ *ring.BasicLifecycler
func (r *MultitenantAlertmanager) OnRingInstanceTokens(_ *ring.BasicLifecycler, _ ring.Tokens) {}
func (r *MultitenantAlertmanager) OnRingInstanceStopping(_ *ring.BasicLifecycler) {}
-func (r *MultitenantAlertmanager) OnRingInstanceHeartbeat(_ *ring.BasicLifecycler, _ *ring.Desc, _ *ring.IngesterDesc) {
+func (r *MultitenantAlertmanager) OnRingInstanceHeartbeat(_ *ring.BasicLifecycler, _ *ring.Desc, _ *ring.InstanceDesc) {
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/multitenant.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/multitenant.go
index a636122ba15e9..d74e5edfcc5c2 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/multitenant.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/multitenant.go
@@ -28,6 +28,7 @@ import (
"github.com/cortexproject/cortex/pkg/tenant"
"github.com/cortexproject/cortex/pkg/util"
"github.com/cortexproject/cortex/pkg/util/flagext"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/cortexproject/cortex/pkg/util/services"
)
@@ -244,11 +245,11 @@ type MultitenantAlertmanager struct {
// effect here.
fallbackConfig string
- // All the organization configurations that we have. Only used for instrumentation.
- cfgs map[string]alerts.AlertConfigDesc
-
alertmanagersMtx sync.Mutex
alertmanagers map[string]*Alertmanager
+ // Stores the current set of configurations we're running in each tenant's Alertmanager.
+ // Used for comparing configurations as we synchronize them.
+ cfgs map[string]alerts.AlertConfigDesc
logger log.Logger
alertmanagerMetrics *alertmanagerMetrics
@@ -522,7 +523,7 @@ func (am *MultitenantAlertmanager) loadAndSyncConfigs(ctx context.Context, syncR
func (am *MultitenantAlertmanager) stopping(_ error) error {
am.alertmanagersMtx.Lock()
for _, am := range am.alertmanagers {
- am.Stop()
+ am.StopAndWait()
}
am.alertmanagersMtx.Unlock()
if am.peer != nil { // Tests don't setup any peer.
@@ -604,17 +605,16 @@ func (am *MultitenantAlertmanager) syncConfigs(cfgs map[string]alerts.AlertConfi
am.alertmanagersMtx.Lock()
defer am.alertmanagersMtx.Unlock()
- for user, userAM := range am.alertmanagers {
- if _, exists := cfgs[user]; !exists {
- // The user alertmanager is only paused in order to retain the prometheus metrics
- // it has reported to its registry. If a new config for this user appears, this structure
- // will be reused.
- level.Info(am.logger).Log("msg", "deactivating per-tenant alertmanager", "user", user)
- userAM.Pause()
- delete(am.cfgs, user)
- am.multitenantMetrics.lastReloadSuccessful.DeleteLabelValues(user)
- am.multitenantMetrics.lastReloadSuccessfulTimestamp.DeleteLabelValues(user)
- level.Info(am.logger).Log("msg", "deactivated per-tenant alertmanager", "user", user)
+ for userID, userAM := range am.alertmanagers {
+ if _, exists := cfgs[userID]; !exists {
+ level.Info(am.logger).Log("msg", "deactivating per-tenant alertmanager", "user", userID)
+ userAM.Stop()
+ delete(am.alertmanagers, userID)
+ delete(am.cfgs, userID)
+ am.multitenantMetrics.lastReloadSuccessful.DeleteLabelValues(userID)
+ am.multitenantMetrics.lastReloadSuccessfulTimestamp.DeleteLabelValues(userID)
+ am.alertmanagerMetrics.removeUserRegistry(userID)
+ level.Info(am.logger).Log("msg", "deactivated per-tenant alertmanager", "user", userID)
}
}
}
@@ -622,9 +622,6 @@ func (am *MultitenantAlertmanager) syncConfigs(cfgs map[string]alerts.AlertConfi
// setConfig applies the given configuration to the alertmanager for `userID`,
// creating an alertmanager if it doesn't already exist.
func (am *MultitenantAlertmanager) setConfig(cfg alerts.AlertConfigDesc) error {
- am.alertmanagersMtx.Lock()
- existing, hasExisting := am.alertmanagers[cfg.User]
- am.alertmanagersMtx.Unlock()
var userAmConfig *amconfig.Config
var err error
var hasTemplateChanges bool
@@ -642,6 +639,10 @@ func (am *MultitenantAlertmanager) setConfig(cfg alerts.AlertConfigDesc) error {
level.Debug(am.logger).Log("msg", "setting config", "user", cfg.User)
+ am.alertmanagersMtx.Lock()
+ defer am.alertmanagersMtx.Unlock()
+ existing, hasExisting := am.alertmanagers[cfg.User]
+
rawCfg := cfg.RawConfig
if cfg.RawConfig == "" {
if am.fallbackConfig == "" {
@@ -694,9 +695,7 @@ func (am *MultitenantAlertmanager) setConfig(cfg alerts.AlertConfigDesc) error {
if err != nil {
return err
}
- am.alertmanagersMtx.Lock()
am.alertmanagers[cfg.User] = newAM
- am.alertmanagersMtx.Unlock()
} else if am.cfgs[cfg.User].RawConfig != cfg.RawConfig || hasTemplateChanges {
level.Info(am.logger).Log("msg", "updating new per-tenant alertmanager", "user", cfg.User)
// If the config changed, apply the new one.
@@ -714,7 +713,7 @@ func (am *MultitenantAlertmanager) newAlertmanager(userID string, amConfig *amco
newAM, err := New(&Config{
UserID: userID,
DataDir: am.cfg.DataDir,
- Logger: util.Logger,
+ Logger: util_log.Logger,
Peer: am.peer,
PeerTimeout: am.cfg.Cluster.PeerTimeout,
Retention: am.cfg.Retention,
@@ -749,11 +748,6 @@ func (am *MultitenantAlertmanager) ServeHTTP(w http.ResponseWriter, req *http.Re
am.alertmanagersMtx.Unlock()
if ok {
- if !userAM.IsActive() {
- level.Debug(am.logger).Log("msg", "the Alertmanager is not active", "user", userID)
- http.Error(w, "the Alertmanager is not configured", http.StatusNotFound)
- return
- }
userAM.mux.ServeHTTP(w, req)
return
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_storage_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_storage_client.go
index 84564fb4f0d64..d3c67f710542d 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_storage_client.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_storage_client.go
@@ -790,7 +790,7 @@ func awsSessionFromURL(awsURL *url.URL) (client.ConfigProvider, error) {
}
path := strings.TrimPrefix(awsURL.Path, "/")
if len(path) > 0 {
- level.Warn(util.Logger).Log("msg", "ignoring DynamoDB URL path", "path", path)
+ level.Warn(log.Logger).Log("msg", "ignoring DynamoDB URL path", "path", path)
}
config, err := awscommon.ConfigFromURL(awsURL)
if err != nil {
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/metrics_autoscaling.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/metrics_autoscaling.go
index fea098c82334a..b8aae77f5235b 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/metrics_autoscaling.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/metrics_autoscaling.go
@@ -14,7 +14,7 @@ import (
"github.com/weaveworks/common/mtime"
"github.com/cortexproject/cortex/pkg/chunk"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
)
const (
@@ -112,7 +112,7 @@ func (m *metricsData) UpdateTable(ctx context.Context, current chunk.TableDesc,
throttleRate := m.throttleRates[expected.Name]
usageRate := m.usageRates[expected.Name]
- level.Info(util.Logger).Log("msg", "checking write metrics", "table", current.Name, "queueLengths", fmt.Sprint(m.queueLengths), "throttleRate", throttleRate, "usageRate", usageRate)
+ level.Info(util_log.Logger).Log("msg", "checking write metrics", "table", current.Name, "queueLengths", fmt.Sprint(m.queueLengths), "throttleRate", throttleRate, "usageRate", usageRate)
switch {
case throttleRate < throttleFractionScaledown*float64(current.ProvisionedWrite) &&
@@ -170,7 +170,7 @@ func (m *metricsData) UpdateTable(ctx context.Context, current chunk.TableDesc,
readUsageRate := m.usageReadRates[expected.Name]
readErrorRate := m.readErrorRates[expected.Name]
- level.Info(util.Logger).Log("msg", "checking read metrics", "table", current.Name, "errorRate", readErrorRate, "readUsageRate", readUsageRate)
+ level.Info(util_log.Logger).Log("msg", "checking read metrics", "table", current.Name, "errorRate", readErrorRate, "readUsageRate", readUsageRate)
// Read Scaling
switch {
// the table is at low/minimum capacity and it is being used -> scale up
@@ -235,14 +235,14 @@ func scaleDown(tableName string, currentValue, minValue int64, newValue int64, l
earliest := lastUpdated[tableName].Add(time.Duration(coolDown) * time.Second)
if earliest.After(mtime.Now()) {
- level.Info(util.Logger).Log("msg", "deferring "+msg, "table", tableName, "till", earliest, "op", operation)
+ level.Info(util_log.Logger).Log("msg", "deferring "+msg, "table", tableName, "till", earliest, "op", operation)
return currentValue
}
// Reject a change that is less than 20% - AWS rate-limits scale-downs so save
// our chances until it makes a bigger difference
if newValue > currentValue*4/5 {
- level.Info(util.Logger).Log("msg", "rejected de minimis "+msg, "table", tableName, "current", currentValue, "proposed", newValue, "op", operation)
+ level.Info(util_log.Logger).Log("msg", "rejected de minimis "+msg, "table", tableName, "current", currentValue, "proposed", newValue, "op", operation)
return currentValue
}
@@ -254,12 +254,12 @@ func scaleDown(tableName string, currentValue, minValue int64, newValue int64, l
totalUsage += u
}
if totalUsage < minUsageForScaledown {
- level.Info(util.Logger).Log("msg", "rejected low usage "+msg, "table", tableName, "totalUsage", totalUsage, "op", operation)
+ level.Info(util_log.Logger).Log("msg", "rejected low usage "+msg, "table", tableName, "totalUsage", totalUsage, "op", operation)
return currentValue
}
}
- level.Info(util.Logger).Log("msg", msg, "table", tableName, operation, newValue)
+ level.Info(util_log.Logger).Log("msg", msg, "table", tableName, operation, newValue)
lastUpdated[tableName] = mtime.Now()
return newValue
}
@@ -270,12 +270,12 @@ func scaleUp(tableName string, currentValue, maxValue int64, newValue int64, las
}
earliest := lastUpdated[tableName].Add(time.Duration(coolDown) * time.Second)
if !earliest.After(mtime.Now()) && newValue > currentValue {
- level.Info(util.Logger).Log("msg", msg, "table", tableName, operation, newValue)
+ level.Info(util_log.Logger).Log("msg", msg, "table", tableName, operation, newValue)
lastUpdated[tableName] = mtime.Now()
return newValue
}
- level.Info(util.Logger).Log("msg", "deferring "+msg, "table", tableName, "till", earliest)
+ level.Info(util_log.Logger).Log("msg", "deferring "+msg, "table", tableName, "till", earliest)
return currentValue
}
@@ -362,7 +362,7 @@ func promQuery(ctx context.Context, promAPI promV1.API, query string, duration,
return nil, err
}
if wrngs != nil {
- level.Warn(util.Logger).Log(
+ level.Warn(util_log.Logger).Log(
"query", query,
"start", queryRange.Start,
"end", queryRange.End,
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/mock.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/mock.go
index 864e410dd5c13..a6e09b361498c 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/mock.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/mock.go
@@ -18,7 +18,7 @@ import (
"github.com/aws/aws-sdk-go/service/s3/s3iface"
"github.com/go-kit/kit/log/level"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
)
const arnPrefix = "arn:"
@@ -234,7 +234,7 @@ func (m *mockDynamoDBClient) QueryPagesWithContext(ctx aws.Context, input *dynam
continue
}
} else {
- level.Warn(util.Logger).Log("msg", "unsupported FilterExpression", "expression", *input.FilterExpression)
+ level.Warn(util_log.Logger).Log("msg", "unsupported FilterExpression", "expression", *input.FilterExpression)
}
}
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/storage_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/storage_client.go
index 1e638f6091f0f..2c802b764fab8 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/storage_client.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/storage_client.go
@@ -19,8 +19,8 @@ import (
"github.com/cortexproject/cortex/pkg/chunk"
"github.com/cortexproject/cortex/pkg/chunk/util"
- pkgutil "github.com/cortexproject/cortex/pkg/util"
"github.com/cortexproject/cortex/pkg/util/flagext"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
)
// Config for a StorageClient
@@ -109,7 +109,7 @@ func (cfg *Config) session(name string, reg prometheus.Registerer) (*gocql.Sessi
cluster.ConnectTimeout = cfg.ConnectTimeout
cluster.ReconnectInterval = cfg.ReconnectInterval
cluster.NumConns = cfg.NumConnections
- cluster.Logger = log.With(pkgutil.Logger, "module", "gocql", "client", name)
+ cluster.Logger = log.With(util_log.Logger, "module", "gocql", "client", name)
cluster.Registerer = prometheus.WrapRegistererWith(
prometheus.Labels{"client": name}, reg)
if cfg.Retries > 0 {
@@ -536,7 +536,7 @@ type noopConvictionPolicy struct{}
// Convicted means connections are removed - we don't want that.
// Implements gocql.ConvictionPolicy.
func (noopConvictionPolicy) AddFailure(err error, host *gocql.HostInfo) bool {
- level.Error(pkgutil.Logger).Log("msg", "Cassandra host failure", "err", err, "host", host.String())
+ level.Error(util_log.Logger).Log("msg", "Cassandra host failure", "err", err, "host", host.String())
return false
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go
index 15f566ffbb40d..0fc096d3c801e 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go
@@ -540,7 +540,7 @@ func (c *baseStore) lookupEntriesByQueries(ctx context.Context, queries []IndexQ
return true
})
if err != nil {
- level.Error(util_log.WithContext(ctx, util.Logger)).Log("msg", "error querying storage", "err", err)
+ level.Error(util_log.WithContext(ctx, util_log.Logger)).Log("msg", "error querying storage", "err", err)
}
return entries, err
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store_utils.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store_utils.go
index 061a9b1c638a1..aeb9048854097 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store_utils.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store_utils.go
@@ -10,7 +10,7 @@ import (
"github.com/prometheus/prometheus/promql"
"github.com/cortexproject/cortex/pkg/chunk/cache"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/cortexproject/cortex/pkg/util/spanlogger"
)
@@ -211,7 +211,7 @@ func (c *Fetcher) processCacheResponse(ctx context.Context, chunks []Chunk, keys
missing = append(missing, chunks[i])
i++
} else if chunkKey > keys[j] {
- level.Warn(util.Logger).Log("msg", "got chunk from cache we didn't ask for")
+ level.Warn(util_log.Logger).Log("msg", "got chunk from cache we didn't ask for")
j++
} else {
requests = append(requests, decodeRequest{
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/local/boltdb_index_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/local/boltdb_index_client.go
index 6d76abf9e3d09..bbb814badadf2 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/local/boltdb_index_client.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/local/boltdb_index_client.go
@@ -17,7 +17,7 @@ import (
"github.com/cortexproject/cortex/pkg/chunk"
chunk_util "github.com/cortexproject/cortex/pkg/chunk/util"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
)
var (
@@ -94,7 +94,7 @@ func (b *BoltIndexClient) reload() {
for name := range b.dbs {
if _, err := os.Stat(path.Join(b.cfg.Directory, name)); err != nil && os.IsNotExist(err) {
removedDBs = append(removedDBs, name)
- level.Debug(util.Logger).Log("msg", "boltdb file got removed", "filename", name)
+ level.Debug(util_log.Logger).Log("msg", "boltdb file got removed", "filename", name)
continue
}
}
@@ -106,7 +106,7 @@ func (b *BoltIndexClient) reload() {
for _, name := range removedDBs {
if err := b.dbs[name].Close(); err != nil {
- level.Error(util.Logger).Log("msg", "failed to close removed boltdb", "filename", name, "err", err)
+ level.Error(util_log.Logger).Log("msg", "failed to close removed boltdb", "filename", name, "err", err)
continue
}
delete(b.dbs, name)
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/local/fs_object_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/local/fs_object_client.go
index f64776943481c..ff9b5e44b2c9d 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/local/fs_object_client.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/local/fs_object_client.go
@@ -14,7 +14,7 @@ import (
"github.com/cortexproject/cortex/pkg/chunk"
"github.com/cortexproject/cortex/pkg/chunk/util"
- pkgUtil "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
)
// FSConfig is the config for a FSObjectClient.
@@ -80,7 +80,7 @@ func (f *FSObjectClient) PutObject(_ context.Context, objectKey string, object i
return err
}
- defer runutil.CloseWithLogOnErr(pkgUtil.Logger, fl, "fullPath: %s", fullPath)
+ defer runutil.CloseWithLogOnErr(util_log.Logger, fl, "fullPath: %s", fullPath)
_, err = io.Copy(fl, object)
if err != nil {
@@ -187,7 +187,7 @@ func (f *FSObjectClient) DeleteObject(ctx context.Context, objectKey string) err
func (f *FSObjectClient) DeleteChunksBefore(ctx context.Context, ts time.Time) error {
return filepath.Walk(f.cfg.Directory, func(path string, info os.FileInfo, err error) error {
if !info.IsDir() && info.ModTime().Before(ts) {
- level.Info(pkgUtil.Logger).Log("msg", "file has exceeded the retention period, removing it", "filepath", info.Name())
+ level.Info(util_log.Logger).Log("msg", "file has exceeded the retention period, removing it", "filepath", info.Name())
if err := os.Remove(path); err != nil {
return err
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/request_handler.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/request_handler.go
index 0799716afad54..d8fc70d788d51 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/request_handler.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/request_handler.go
@@ -15,6 +15,7 @@ import (
"github.com/cortexproject/cortex/pkg/tenant"
"github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
)
type deleteRequestHandlerMetrics struct {
@@ -107,7 +108,7 @@ func (dm *DeleteRequestHandler) AddDeleteRequestHandler(w http.ResponseWriter, r
}
if err := dm.deleteStore.AddDeleteRequest(ctx, userID, model.Time(startTime), model.Time(endTime), match); err != nil {
- level.Error(util.Logger).Log("msg", "error adding delete request to the store", "err", err)
+ level.Error(util_log.Logger).Log("msg", "error adding delete request to the store", "err", err)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
@@ -127,13 +128,13 @@ func (dm *DeleteRequestHandler) GetAllDeleteRequestsHandler(w http.ResponseWrite
deleteRequests, err := dm.deleteStore.GetAllDeleteRequestsForUser(ctx, userID)
if err != nil {
- level.Error(util.Logger).Log("msg", "error getting delete requests from the store", "err", err)
+ level.Error(util_log.Logger).Log("msg", "error getting delete requests from the store", "err", err)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
if err := json.NewEncoder(w).Encode(deleteRequests); err != nil {
- level.Error(util.Logger).Log("msg", "error marshalling response", "err", err)
+ level.Error(util_log.Logger).Log("msg", "error marshalling response", "err", err)
http.Error(w, fmt.Sprintf("Error marshalling response: %v", err), http.StatusInternalServerError)
}
}
@@ -152,7 +153,7 @@ func (dm *DeleteRequestHandler) CancelDeleteRequestHandler(w http.ResponseWriter
deleteRequest, err := dm.deleteStore.GetDeleteRequest(ctx, userID, requestID)
if err != nil {
- level.Error(util.Logger).Log("msg", "error getting delete request from the store", "err", err)
+ level.Error(util_log.Logger).Log("msg", "error getting delete request from the store", "err", err)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
@@ -173,7 +174,7 @@ func (dm *DeleteRequestHandler) CancelDeleteRequestHandler(w http.ResponseWriter
}
if err := dm.deleteStore.RemoveDeleteRequest(ctx, userID, requestID, deleteRequest.CreatedAt, deleteRequest.StartTime, deleteRequest.EndTime); err != nil {
- level.Error(util.Logger).Log("msg", "error cancelling the delete request", "err", err)
+ level.Error(util_log.Logger).Log("msg", "error cancelling the delete request", "err", err)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/tombstones.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/tombstones.go
index 73348bf40ada2..fdf2cc0914de1 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/tombstones.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/tombstones.go
@@ -15,7 +15,7 @@ import (
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/promql/parser"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
)
const tombstonesReloadDuration = 5 * time.Minute
@@ -97,7 +97,7 @@ func (tl *TombstonesLoader) loop() {
case <-tombstonesReloadTimer.C:
err := tl.reloadTombstones()
if err != nil {
- level.Error(util.Logger).Log("msg", "error reloading tombstones", "err", err)
+ level.Error(util_log.Logger).Log("msg", "error reloading tombstones", "err", err)
}
case <-tl.quit:
return
@@ -285,7 +285,7 @@ func (tl *TombstonesLoader) getCacheGenNumbersPerTenants(tenantIDs []string) *ca
if numbers.results != "" {
results, err := strconv.Atoi(numbers.results)
if err != nil {
- level.Error(util.Logger).Log("msg", "error parsing resultsCacheGenNumber", "user", tenantID, "err", err)
+ level.Error(util_log.Logger).Log("msg", "error parsing resultsCacheGenNumber", "user", tenantID, "err", err)
} else if maxResults < results {
maxResults = results
result.results = numbers.results
@@ -296,7 +296,7 @@ func (tl *TombstonesLoader) getCacheGenNumbersPerTenants(tenantIDs []string) *ca
if numbers.store != "" {
store, err := strconv.Atoi(numbers.store)
if err != nil {
- level.Error(util.Logger).Log("msg", "error parsing storeCacheGenNumber", "user", tenantID, "err", err)
+ level.Error(util_log.Logger).Log("msg", "error parsing storeCacheGenNumber", "user", tenantID, "err", err)
} else if maxStore < store {
maxStore = store
result.store = numbers.store
@@ -326,7 +326,7 @@ func (tl *TombstonesLoader) getCacheGenNumbers(userID string) *cacheGenNumbers {
genNumbers, err := tl.deleteStore.getCacheGenerationNumbers(context.Background(), userID)
if err != nil {
- level.Error(util.Logger).Log("msg", "error loading cache generation numbers", "err", err)
+ level.Error(util_log.Logger).Log("msg", "error loading cache generation numbers", "err", err)
tl.metrics.cacheGenLoadFailures.Inc()
return &cacheGenNumbers{}
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/schema.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/schema.go
index 441fc5f84cdd9..6f6cc8f4d3018 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/schema.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/schema.go
@@ -14,7 +14,7 @@ import (
"github.com/prometheus/prometheus/pkg/labels"
"github.com/cortexproject/cortex/pkg/querier/astmapper"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
)
const (
@@ -882,7 +882,7 @@ func (v10Entries) FilterReadQueries(queries []IndexQuery, shard *astmapper.Shard
s := strings.Split(query.HashValue, ":")[0]
n, err := strconv.Atoi(s)
if err != nil {
- level.Error(util.Logger).Log(
+ level.Error(util_log.Logger).Log(
"msg",
"Unable to determine shard from IndexQuery",
"HashValue",
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go
index 6097b434b1033..310404cd13352 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go
@@ -23,7 +23,7 @@ import (
"github.com/cortexproject/cortex/pkg/chunk/objectclient"
"github.com/cortexproject/cortex/pkg/chunk/openstack"
"github.com/cortexproject/cortex/pkg/chunk/purger"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
)
// Supported storage engines
@@ -105,7 +105,7 @@ func (cfg *Config) Validate() error {
if err := cfg.CassandraStorageConfig.Validate(); err != nil {
return errors.Wrap(err, "invalid Cassandra Storage config")
}
- if err := cfg.GCPStorageConfig.Validate(util.Logger); err != nil {
+ if err := cfg.GCPStorageConfig.Validate(util_log.Logger); err != nil {
return errors.Wrap(err, "invalid GCP Storage Storage config")
}
if err := cfg.Swift.Validate(); err != nil {
@@ -222,7 +222,7 @@ func NewIndexClient(name string, cfg Config, schemaCfg chunk.SchemaConfig, regis
}
path := strings.TrimPrefix(cfg.AWSStorageConfig.DynamoDB.URL.Path, "/")
if len(path) > 0 {
- level.Warn(util.Logger).Log("msg", "ignoring DynamoDB URL path", "path", path)
+ level.Warn(util_log.Logger).Log("msg", "ignoring DynamoDB URL path", "path", path)
}
return aws.NewDynamoDBIndexClient(cfg.AWSStorageConfig.DynamoDBConfig, schemaCfg, registerer)
case "gcp":
@@ -256,7 +256,7 @@ func NewChunkClient(name string, cfg Config, schemaCfg chunk.SchemaConfig, regis
}
path := strings.TrimPrefix(cfg.AWSStorageConfig.DynamoDB.URL.Path, "/")
if len(path) > 0 {
- level.Warn(util.Logger).Log("msg", "ignoring DynamoDB URL path", "path", path)
+ level.Warn(util_log.Logger).Log("msg", "ignoring DynamoDB URL path", "path", path)
}
return aws.NewDynamoDBChunkClient(cfg.AWSStorageConfig.DynamoDBConfig, schemaCfg, registerer)
case "azure":
@@ -308,7 +308,7 @@ func NewTableClient(name string, cfg Config, registerer prometheus.Registerer) (
}
path := strings.TrimPrefix(cfg.AWSStorageConfig.DynamoDB.URL.Path, "/")
if len(path) > 0 {
- level.Warn(util.Logger).Log("msg", "ignoring DynamoDB URL path", "path", path)
+ level.Warn(util_log.Logger).Log("msg", "ignoring DynamoDB URL path", "path", path)
}
return aws.NewDynamoDBTableClient(cfg.AWSStorageConfig.DynamoDBConfig, registerer)
case "gcp", "gcp-columnkey", "bigtable", "bigtable-hashed":
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/table_manager.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/table_manager.go
index eda8a83f753fd..c4f46830471ea 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/table_manager.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/table_manager.go
@@ -18,7 +18,7 @@ import (
"github.com/weaveworks/common/instrument"
"github.com/weaveworks/common/mtime"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/cortexproject/cortex/pkg/util/services"
)
@@ -215,7 +215,7 @@ func (m *TableManager) loop(ctx context.Context) error {
if err := instrument.CollectedRequest(context.Background(), "TableManager.SyncTables", instrument.NewHistogramCollector(m.metrics.syncTableDuration), instrument.ErrorCode, func(ctx context.Context) error {
return m.SyncTables(ctx)
}); err != nil {
- level.Error(util.Logger).Log("msg", "error syncing tables", "err", err)
+ level.Error(util_log.Logger).Log("msg", "error syncing tables", "err", err)
}
// Sleep for a bit to spread the sync load across different times if the tablemanagers are all started at once.
@@ -231,7 +231,7 @@ func (m *TableManager) loop(ctx context.Context) error {
if err := instrument.CollectedRequest(context.Background(), "TableManager.SyncTables", instrument.NewHistogramCollector(m.metrics.syncTableDuration), instrument.ErrorCode, func(ctx context.Context) error {
return m.SyncTables(ctx)
}); err != nil {
- level.Error(util.Logger).Log("msg", "error syncing tables", "err", err)
+ level.Error(util_log.Logger).Log("msg", "error syncing tables", "err", err)
}
case <-ctx.Done():
return nil
@@ -254,7 +254,7 @@ func (m *TableManager) checkAndCreateExtraTables() error {
for _, tableDesc := range extraTables.Tables {
if _, ok := existingTablesMap[tableDesc.Name]; !ok {
// creating table
- level.Info(util.Logger).Log("msg", "creating extra table",
+ level.Info(util_log.Logger).Log("msg", "creating extra table",
"tableName", tableDesc.Name,
"provisionedRead", tableDesc.ProvisionedRead,
"provisionedWrite", tableDesc.ProvisionedWrite,
@@ -272,7 +272,7 @@ func (m *TableManager) checkAndCreateExtraTables() error {
continue
}
- level.Info(util.Logger).Log("msg", "checking throughput of extra table", "table", tableDesc.Name)
+ level.Info(util_log.Logger).Log("msg", "checking throughput of extra table", "table", tableDesc.Name)
// table already exists, lets check actual throughput for tables is same as what is in configurations, if not let us update it
current, _, err := extraTables.TableClient.DescribeTable(context.Background(), tableDesc.Name)
if err != nil {
@@ -280,7 +280,7 @@ func (m *TableManager) checkAndCreateExtraTables() error {
}
if !current.Equals(tableDesc) {
- level.Info(util.Logger).Log("msg", "updating throughput of extra table",
+ level.Info(util_log.Logger).Log("msg", "updating throughput of extra table",
"table", tableDesc.Name,
"tableName", tableDesc.Name,
"provisionedRead", tableDesc.ProvisionedRead,
@@ -305,7 +305,7 @@ func (m *TableManager) bucketRetentionIteration(ctx context.Context) error {
err := m.bucketClient.DeleteChunksBefore(ctx, mtime.Now().Add(-m.cfg.RetentionPeriod))
if err != nil {
- level.Error(util.Logger).Log("msg", "error enforcing filesystem retention", "err", err)
+ level.Error(util_log.Logger).Log("msg", "error enforcing filesystem retention", "err", err)
}
// don't return error, otherwise timer service would stop.
@@ -321,7 +321,7 @@ func (m *TableManager) SyncTables(ctx context.Context) error {
}
expected := m.calculateExpectedTables()
- level.Info(util.Logger).Log("msg", "synching tables", "expected_tables", len(expected))
+ level.Info(util_log.Logger).Log("msg", "synching tables", "expected_tables", len(expected))
toCreate, toCheckThroughput, toDelete, err := m.partitionTables(ctx, expected)
if err != nil {
@@ -473,7 +473,7 @@ func (m *TableManager) createTables(ctx context.Context, descriptions []TableDes
merr := tsdb_errors.NewMulti()
for _, desc := range descriptions {
- level.Info(util.Logger).Log("msg", "creating table", "table", desc.Name)
+ level.Info(util_log.Logger).Log("msg", "creating table", "table", desc.Name)
err := m.client.CreateTable(ctx, desc)
if err != nil {
numFailures++
@@ -490,12 +490,12 @@ func (m *TableManager) deleteTables(ctx context.Context, descriptions []TableDes
merr := tsdb_errors.NewMulti()
for _, desc := range descriptions {
- level.Info(util.Logger).Log("msg", "table has exceeded the retention period", "table", desc.Name)
+ level.Info(util_log.Logger).Log("msg", "table has exceeded the retention period", "table", desc.Name)
if !m.cfg.RetentionDeletesEnabled {
continue
}
- level.Info(util.Logger).Log("msg", "deleting table", "table", desc.Name)
+ level.Info(util_log.Logger).Log("msg", "deleting table", "table", desc.Name)
err := m.client.DeleteTable(ctx, desc.Name)
if err != nil {
numFailures++
@@ -509,7 +509,7 @@ func (m *TableManager) deleteTables(ctx context.Context, descriptions []TableDes
func (m *TableManager) updateTables(ctx context.Context, descriptions []TableDesc) error {
for _, expected := range descriptions {
- level.Debug(util.Logger).Log("msg", "checking provisioned throughput on table", "table", expected.Name)
+ level.Debug(util_log.Logger).Log("msg", "checking provisioned throughput on table", "table", expected.Name)
current, isActive, err := m.client.DescribeTable(ctx, expected.Name)
if err != nil {
return err
@@ -523,12 +523,12 @@ func (m *TableManager) updateTables(ctx context.Context, descriptions []TableDes
}
if !isActive {
- level.Info(util.Logger).Log("msg", "skipping update on table, not yet ACTIVE", "table", expected.Name)
+ level.Info(util_log.Logger).Log("msg", "skipping update on table, not yet ACTIVE", "table", expected.Name)
continue
}
if expected.Equals(current) {
- level.Info(util.Logger).Log("msg", "provisioned throughput on table, skipping", "table", current.Name, "read", current.ProvisionedRead, "write", current.ProvisionedWrite)
+ level.Info(util_log.Logger).Log("msg", "provisioned throughput on table, skipping", "table", current.Name, "read", current.ProvisionedRead, "write", current.ProvisionedWrite)
continue
}
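
The hunks above, and most of the vendored Cortex changes that follow, are a mechanical migration of the shared logger from pkg/util to pkg/util/log. A minimal sketch of the resulting call-site pattern, using the same aliased import seen throughout this diff:

package example

import (
	"github.com/go-kit/kit/log/level"

	util_log "github.com/cortexproject/cortex/pkg/util/log"
)

// logSyncError shows the post-migration shape of a call site: the util_log
// alias replaces the old util package, while the Logger value and the
// level.Error(...).Log(...) chain are unchanged.
func logSyncError(err error) {
	level.Error(util_log.Logger).Log("msg", "error syncing tables", "err", err)
}
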
diff --git a/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor.go b/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor.go
index 583b38e3cb088..78e2e9a6d684a 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor.go
@@ -34,8 +34,48 @@ import (
var (
errInvalidBlockRanges = "compactor block range periods should be divisible by the previous one, but %s is not divisible by %s"
RingOp = ring.NewOp([]ring.IngesterState{ring.ACTIVE}, nil)
+
+ DefaultBlocksGrouperFactory = func(ctx context.Context, cfg Config, bkt objstore.Bucket, logger log.Logger, reg prometheus.Registerer, blocksMarkedForDeletion prometheus.Counter, garbageCollectedBlocks prometheus.Counter) compact.Grouper {
+ return compact.NewDefaultGrouper(
+ logger,
+ bkt,
+ false, // Do not accept malformed indexes
+ true, // Enable vertical compaction
+ reg,
+ blocksMarkedForDeletion,
+ garbageCollectedBlocks)
+ }
+
+ DefaultBlocksCompactorFactory = func(ctx context.Context, cfg Config, logger log.Logger, reg prometheus.Registerer) (compact.Compactor, compact.Planner, error) {
+ compactor, err := tsdb.NewLeveledCompactor(ctx, reg, logger, cfg.BlockRanges.ToMilliseconds(), downsample.NewPool())
+ if err != nil {
+ return nil, nil, err
+ }
+
+ planner := compact.NewTSDBBasedPlanner(logger, cfg.BlockRanges.ToMilliseconds())
+ return compactor, planner, nil
+ }
)
+// BlocksGrouperFactory builds and returns the grouper to use to compact a tenant's blocks.
+type BlocksGrouperFactory func(
+ ctx context.Context,
+ cfg Config,
+ bkt objstore.Bucket,
+ logger log.Logger,
+ reg prometheus.Registerer,
+ blocksMarkedForDeletion prometheus.Counter,
+ garbageCollectedBlocks prometheus.Counter,
+) compact.Grouper
+
+// BlocksCompactorFactory builds and returns the compactor and planner to use to compact a tenant's blocks.
+type BlocksCompactorFactory func(
+ ctx context.Context,
+ cfg Config,
+ logger log.Logger,
+ reg prometheus.Registerer,
+) (compact.Compactor, compact.Planner, error)
+
// Config holds the Compactor config.
type Config struct {
BlockRanges cortex_tsdb.DurationList `yaml:"block_ranges"`
@@ -66,6 +106,10 @@ type Config struct {
// it in tests.
retryMinBackoff time.Duration `yaml:"-"`
retryMaxBackoff time.Duration `yaml:"-"`
+
+ // Allow downstream projects to customise the blocks compactor.
+ BlocksGrouperFactory BlocksGrouperFactory `yaml:"-"`
+ BlocksCompactorFactory BlocksCompactorFactory `yaml:"-"`
}
// RegisterFlags registers the Compactor flags.
@@ -124,9 +168,11 @@ type Compactor struct {
// If empty, no users are disabled. If not empty, users in the map are disabled (not owned by this compactor).
disabledUsers map[string]struct{}
- // Function that creates bucket client, TSDB planner and compactor using the context.
+ // Functions that creates bucket client, grouper, planner and compactor using the context.
// Useful for injecting mock objects from tests.
- createDependencies func(ctx context.Context) (objstore.Bucket, tsdb.Compactor, compact.Planner, error)
+ bucketClientFactory func(ctx context.Context) (objstore.Bucket, error)
+ blocksGrouperFactory BlocksGrouperFactory
+ blocksCompactorFactory BlocksCompactorFactory
// Users scanner, used to discover users from the bucket.
usersScanner *cortex_tsdb.UsersScanner
@@ -135,8 +181,8 @@ type Compactor struct {
blocksCleaner *BlocksCleaner
// Underlying compactor and planner used to compact TSDB blocks.
- tsdbCompactor tsdb.Compactor
- tsdbPlanner compact.Planner
+ blocksCompactor compact.Compactor
+ blocksPlanner compact.Planner
// Client used to run operations on the bucket storing blocks.
bucketClient objstore.Bucket
@@ -165,22 +211,21 @@ type Compactor struct {
// NewCompactor makes a new Compactor.
func NewCompactor(compactorCfg Config, storageCfg cortex_tsdb.BlocksStorageConfig, logger log.Logger, registerer prometheus.Registerer) (*Compactor, error) {
- createDependencies := func(ctx context.Context) (objstore.Bucket, tsdb.Compactor, compact.Planner, error) {
- bucketClient, err := bucket.NewClient(ctx, storageCfg.Bucket, "compactor", logger, registerer)
- if err != nil {
- return nil, nil, nil, errors.Wrap(err, "failed to create the bucket client")
- }
+ bucketClientFactory := func(ctx context.Context) (objstore.Bucket, error) {
+ return bucket.NewClient(ctx, storageCfg.Bucket, "compactor", logger, registerer)
+ }
- compactor, err := tsdb.NewLeveledCompactor(ctx, registerer, logger, compactorCfg.BlockRanges.ToMilliseconds(), downsample.NewPool())
- if err != nil {
- return nil, nil, nil, err
- }
+ blocksGrouperFactory := compactorCfg.BlocksGrouperFactory
+ if blocksGrouperFactory == nil {
+ blocksGrouperFactory = DefaultBlocksGrouperFactory
+ }
- planner := compact.NewTSDBBasedPlanner(logger, compactorCfg.BlockRanges.ToMilliseconds())
- return bucketClient, compactor, planner, nil
+ blocksCompactorFactory := compactorCfg.BlocksCompactorFactory
+ if blocksCompactorFactory == nil {
+ blocksCompactorFactory = DefaultBlocksCompactorFactory
}
- cortexCompactor, err := newCompactor(compactorCfg, storageCfg, logger, registerer, createDependencies)
+ cortexCompactor, err := newCompactor(compactorCfg, storageCfg, logger, registerer, bucketClientFactory, blocksGrouperFactory, blocksCompactorFactory)
if err != nil {
return nil, errors.Wrap(err, "failed to create Cortex blocks compactor")
}
@@ -193,16 +238,20 @@ func newCompactor(
storageCfg cortex_tsdb.BlocksStorageConfig,
logger log.Logger,
registerer prometheus.Registerer,
- createDependencies func(ctx context.Context) (objstore.Bucket, tsdb.Compactor, compact.Planner, error),
+ bucketClientFactory func(ctx context.Context) (objstore.Bucket, error),
+ blocksGrouperFactory BlocksGrouperFactory,
+ blocksCompactorFactory BlocksCompactorFactory,
) (*Compactor, error) {
c := &Compactor{
- compactorCfg: compactorCfg,
- storageCfg: storageCfg,
- parentLogger: logger,
- logger: log.With(logger, "component", "compactor"),
- registerer: registerer,
- syncerMetrics: newSyncerMetrics(registerer),
- createDependencies: createDependencies,
+ compactorCfg: compactorCfg,
+ storageCfg: storageCfg,
+ parentLogger: logger,
+ logger: log.With(logger, "component", "compactor"),
+ registerer: registerer,
+ syncerMetrics: newSyncerMetrics(registerer),
+ bucketClientFactory: bucketClientFactory,
+ blocksGrouperFactory: blocksGrouperFactory,
+ blocksCompactorFactory: blocksCompactorFactory,
compactionRunsStarted: promauto.With(registerer).NewCounter(prometheus.CounterOpts{
Name: "cortex_compactor_runs_started_total",
@@ -273,10 +322,16 @@ func newCompactor(
func (c *Compactor) starting(ctx context.Context) error {
var err error
- // Create bucket client and compactor.
- c.bucketClient, c.tsdbCompactor, c.tsdbPlanner, err = c.createDependencies(ctx)
+ // Create bucket client.
+ c.bucketClient, err = c.bucketClientFactory(ctx)
if err != nil {
- return errors.Wrap(err, "failed to initialize compactor objects")
+ return errors.Wrap(err, "failed to create bucket client")
+ }
+
+ // Create blocks compactor dependencies.
+ c.blocksCompactor, c.blocksPlanner, err = c.blocksCompactorFactory(ctx, c.compactorCfg, c.logger, c.registerer)
+ if err != nil {
+ return errors.Wrap(err, "failed to initialize compactor dependencies")
}
// Wrap the bucket client to write block deletion marks in the global location too.
@@ -545,22 +600,12 @@ func (c *Compactor) compactUser(ctx context.Context, userID string) error {
return errors.Wrap(err, "failed to create syncer")
}
- grouper := compact.NewDefaultGrouper(
- ulogger,
- bucket,
- false, // Do not accept malformed indexes
- true, // Enable vertical compaction
- reg,
- c.blocksMarkedForDeletion,
- c.garbageCollectedBlocks,
- )
-
compactor, err := compact.NewBucketCompactor(
ulogger,
syncer,
- grouper,
- c.tsdbPlanner,
- c.tsdbCompactor,
+ c.blocksGrouperFactory(ctx, c.compactorCfg, bucket, ulogger, reg, c.blocksMarkedForDeletion, c.garbageCollectedBlocks),
+ c.blocksPlanner,
+ c.blocksCompactor,
path.Join(c.compactorCfg.DataDir, "compact"),
bucket,
c.compactorCfg.CompactionConcurrency,
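
The compactor change above replaces the single createDependencies closure with two injectable hooks, BlocksGrouperFactory and BlocksCompactorFactory, so downstream projects can swap the grouper, planner, or compactor without forking. A hedged sketch of overriding one hook; the body simply mirrors DefaultBlocksCompactorFactory from the diff, and the import paths not shown in this diff (prometheus/tsdb, thanos compact and downsample) are assumptions about the upstream packages:

package example

import (
	"context"

	"github.com/go-kit/kit/log"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/prometheus/tsdb"
	"github.com/thanos-io/thanos/pkg/compact"
	"github.com/thanos-io/thanos/pkg/compact/downsample"

	"github.com/cortexproject/cortex/pkg/compactor"
)

// newConfig plugs a custom (here: default-equivalent) BlocksCompactorFactory
// into the compactor config; a real override would return its own compactor
// and planner instead.
func newConfig() compactor.Config {
	compactorCfg := compactor.Config{}
	compactorCfg.BlocksCompactorFactory = func(ctx context.Context, cfg compactor.Config, logger log.Logger, reg prometheus.Registerer) (compact.Compactor, compact.Planner, error) {
		c, err := tsdb.NewLeveledCompactor(ctx, reg, logger, cfg.BlockRanges.ToMilliseconds(), downsample.NewPool())
		if err != nil {
			return nil, nil, err
		}
		return c, compact.NewTSDBBasedPlanner(logger, cfg.BlockRanges.ToMilliseconds()), nil
	}
	return compactorCfg
}
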
diff --git a/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor_http.go b/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor_http.go
index 2c0608ea056b9..d73c7a6890670 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor_http.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor_http.go
@@ -6,7 +6,7 @@ import (
"github.com/go-kit/kit/log/level"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/cortexproject/cortex/pkg/util/services"
)
@@ -32,7 +32,7 @@ func writeMessage(w http.ResponseWriter, message string) {
}{Message: message})
if err != nil {
- level.Error(util.Logger).Log("msg", "unable to serve compactor ring page", "err", err)
+ level.Error(util_log.Logger).Log("msg", "unable to serve compactor ring page", "err", err)
}
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor_ring.go b/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor_ring.go
index a39d76ef88368..d10f675f5e002 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor_ring.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor_ring.go
@@ -9,8 +9,8 @@ import (
"github.com/cortexproject/cortex/pkg/ring"
"github.com/cortexproject/cortex/pkg/ring/kv"
- "github.com/cortexproject/cortex/pkg/util"
"github.com/cortexproject/cortex/pkg/util/flagext"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
)
// RingConfig masks the ring lifecycler config which contains
@@ -40,7 +40,7 @@ type RingConfig struct {
func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet) {
hostname, err := os.Hostname()
if err != nil {
- level.Error(util.Logger).Log("msg", "failed to get hostname", "err", err)
+ level.Error(util_log.Logger).Log("msg", "failed to get hostname", "err", err)
os.Exit(1)
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/compactor/syncer_metrics.go b/vendor/github.com/cortexproject/cortex/pkg/compactor/syncer_metrics.go
index f3ba2f491a711..43873c2f0df7b 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/compactor/syncer_metrics.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/compactor/syncer_metrics.go
@@ -6,6 +6,7 @@ import (
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
)
// Copied from Thanos, pkg/compact/compact.go.
@@ -95,13 +96,13 @@ func (m *syncerMetrics) gatherThanosSyncerMetrics(reg *prometheus.Registry) {
mf, err := reg.Gather()
if err != nil {
- level.Warn(util.Logger).Log("msg", "failed to gather metrics from syncer registry after compaction", "err", err)
+ level.Warn(util_log.Logger).Log("msg", "failed to gather metrics from syncer registry after compaction", "err", err)
return
}
mfm, err := util.NewMetricFamilyMap(mf)
if err != nil {
- level.Warn(util.Logger).Log("msg", "failed to gather metrics from syncer registry after compaction", "err", err)
+ level.Warn(util_log.Logger).Log("msg", "failed to gather metrics from syncer registry after compaction", "err", err)
return
}
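
The pattern preserved above is worth noting: Thanos syncer metrics are registered on a private registry, gathered after each compaction, and re-exported. A rough sketch of the gather step, reusing only the calls visible in the diff (reg.Gather and util.NewMetricFamilyMap):

package example

import (
	"github.com/go-kit/kit/log/level"
	"github.com/prometheus/client_golang/prometheus"

	"github.com/cortexproject/cortex/pkg/util"
	util_log "github.com/cortexproject/cortex/pkg/util/log"
)

// gather collects metric families from a throwaway registry and indexes them
// by name, mirroring gatherThanosSyncerMetrics above.
func gather(reg *prometheus.Registry) error {
	mf, err := reg.Gather()
	if err != nil {
		level.Warn(util_log.Logger).Log("msg", "failed to gather metrics", "err", err)
		return err
	}
	mfm, err := util.NewMetricFamilyMap(mf)
	if err != nil {
		level.Warn(util_log.Logger).Log("msg", "failed to map metrics", "err", err)
		return err
	}
	_ = mfm // in the real code, individual families are re-exported from here
	return nil
}
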
diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/api/api.go b/vendor/github.com/cortexproject/cortex/pkg/configs/api/api.go
index 9058ae722197b..127292bc69de3 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/configs/api/api.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/configs/api/api.go
@@ -24,7 +24,7 @@ import (
"github.com/cortexproject/cortex/pkg/configs/userconfig"
"github.com/cortexproject/cortex/pkg/tenant"
"github.com/cortexproject/cortex/pkg/util"
- "github.com/cortexproject/cortex/pkg/util/log"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
)
var (
@@ -114,7 +114,7 @@ func (a *API) getConfig(w http.ResponseWriter, r *http.Request) {
http.Error(w, err.Error(), http.StatusUnauthorized)
return
}
- logger := log.WithContext(r.Context(), util.Logger)
+ logger := util_log.WithContext(r.Context(), util_log.Logger)
cfg, err := a.db.GetConfig(r.Context(), userID)
if err == sql.ErrNoRows {
@@ -152,7 +152,7 @@ func (a *API) setConfig(w http.ResponseWriter, r *http.Request) {
http.Error(w, err.Error(), http.StatusUnauthorized)
return
}
- logger := log.WithContext(r.Context(), util.Logger)
+ logger := util_log.WithContext(r.Context(), util_log.Logger)
var cfg userconfig.Config
switch parseConfigFormat(r.Header.Get("Content-Type"), FormatJSON) {
@@ -202,7 +202,7 @@ func (a *API) setConfig(w http.ResponseWriter, r *http.Request) {
}
func (a *API) validateAlertmanagerConfig(w http.ResponseWriter, r *http.Request) {
- logger := log.WithContext(r.Context(), util.Logger)
+ logger := util_log.WithContext(r.Context(), util_log.Logger)
cfg, err := ioutil.ReadAll(r.Body)
if err != nil {
level.Error(logger).Log("msg", "error reading request body", "err", err)
@@ -266,7 +266,7 @@ type ConfigsView struct {
func (a *API) getConfigs(w http.ResponseWriter, r *http.Request) {
var cfgs map[string]userconfig.View
var cfgErr error
- logger := log.WithContext(r.Context(), util.Logger)
+ logger := util_log.WithContext(r.Context(), util_log.Logger)
rawSince := r.FormValue("since")
if rawSince == "" {
cfgs, cfgErr = a.db.GetAllConfigs(r.Context())
@@ -302,7 +302,7 @@ func (a *API) deactivateConfig(w http.ResponseWriter, r *http.Request) {
http.Error(w, err.Error(), http.StatusUnauthorized)
return
}
- logger := log.WithContext(r.Context(), util.Logger)
+ logger := util_log.WithContext(r.Context(), util_log.Logger)
if err := a.db.DeactivateConfig(r.Context(), userID); err != nil {
if err == sql.ErrNoRows {
@@ -324,7 +324,7 @@ func (a *API) restoreConfig(w http.ResponseWriter, r *http.Request) {
http.Error(w, err.Error(), http.StatusUnauthorized)
return
}
- logger := log.WithContext(r.Context(), util.Logger)
+ logger := util_log.WithContext(r.Context(), util_log.Logger)
if err := a.db.RestoreConfig(r.Context(), userID); err != nil {
if err == sql.ErrNoRows {
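
The configs API hunks above swap the package-level logger for util_log.WithContext, which derives a request-scoped logger from the HTTP request context (carrying fields such as trace and tenant identifiers, as used here). A minimal sketch of a handler following the same pattern:

package example

import (
	"net/http"

	"github.com/go-kit/kit/log/level"

	util_log "github.com/cortexproject/cortex/pkg/util/log"
)

// handler builds a per-request logger before emitting any log lines, exactly
// as the getConfig/setConfig handlers do above.
func handler(w http.ResponseWriter, r *http.Request) {
	logger := util_log.WithContext(r.Context(), util_log.Logger)
	level.Info(logger).Log("msg", "handling request", "path", r.URL.Path)
}
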
diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/client/client.go b/vendor/github.com/cortexproject/cortex/pkg/configs/client/client.go
index cc57ad82f6def..5517d1cb5b559 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/configs/client/client.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/configs/client/client.go
@@ -18,8 +18,8 @@ import (
"github.com/weaveworks/common/instrument"
"github.com/cortexproject/cortex/pkg/configs/userconfig"
- "github.com/cortexproject/cortex/pkg/util"
"github.com/cortexproject/cortex/pkg/util/flagext"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
tls_cfg "github.com/cortexproject/cortex/pkg/util/tls"
)
@@ -155,7 +155,7 @@ func doRequest(endpoint string, timeout time.Duration, tlsConfig *tls.Config, si
var config ConfigsResponse
if err := json.NewDecoder(resp.Body).Decode(&config); err != nil {
- level.Error(util.Logger).Log("msg", "configs: couldn't decode JSON body", "err", err)
+ level.Error(util_log.Logger).Log("msg", "configs: couldn't decode JSON body", "err", err)
return nil, err
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/db/postgres/postgres.go b/vendor/github.com/cortexproject/cortex/pkg/configs/db/postgres/postgres.go
index 2bd3bbd3e7ce2..c66ff90fd61bb 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/configs/db/postgres/postgres.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/configs/db/postgres/postgres.go
@@ -9,6 +9,7 @@ import (
"time"
"github.com/cortexproject/cortex/pkg/configs/userconfig"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/Masterminds/squirrel"
"github.com/go-kit/kit/log/level"
@@ -18,8 +19,6 @@ import (
"github.com/lib/pq"
_ "github.com/lib/pq" // Import the postgres sql driver
"github.com/pkg/errors"
-
- "github.com/cortexproject/cortex/pkg/util"
)
const (
@@ -60,7 +59,7 @@ func dbWait(db *sql.DB) error {
if err == nil {
return nil
}
- level.Warn(util.Logger).Log("msg", "db connection not established, retrying...", "err", err)
+ level.Warn(util_log.Logger).Log("msg", "db connection not established, retrying...", "err", err)
time.Sleep(time.Second << uint(tries))
}
return errors.Wrapf(err, "db connection not established after %s", dbTimeout)
@@ -88,13 +87,13 @@ func New(uri, migrationsDir string) (DB, error) {
return DB{}, errors.Wrap(err, "database migrations initialization failed")
}
- level.Info(util.Logger).Log("msg", "running database migrations...")
+ level.Info(util_log.Logger).Log("msg", "running database migrations...")
if err := m.Up(); err != nil {
if err != migrate.ErrNoChange {
return DB{}, errors.Wrap(err, "database migrations failed")
}
- level.Debug(util.Logger).Log("msg", "no change in schema, error (ignored)", "err", err)
+ level.Debug(util_log.Logger).Log("msg", "no change in schema, error (ignored)", "err", err)
}
}
@@ -354,7 +353,7 @@ func (d DB) Transaction(f func(DB) error) error {
if err != nil {
// Rollback error is ignored as we already have one in progress
if err2 := tx.Rollback(); err2 != nil {
- level.Warn(util.Logger).Log("msg", "transaction rollback error (ignored)", "err", err2)
+ level.Warn(util_log.Logger).Log("msg", "transaction rollback error (ignored)", "err", err2)
}
return err
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/db/traced.go b/vendor/github.com/cortexproject/cortex/pkg/configs/db/traced.go
index 7a2cc3aac6130..5ae94c01d3004 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/configs/db/traced.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/configs/db/traced.go
@@ -5,10 +5,9 @@ import (
"fmt"
"github.com/cortexproject/cortex/pkg/configs/userconfig"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/go-kit/kit/log/level"
-
- "github.com/cortexproject/cortex/pkg/util"
)
// traced adds log trace lines on each db call
@@ -17,7 +16,7 @@ type traced struct {
}
func (t traced) trace(name string, args ...interface{}) {
- level.Debug(util.Logger).Log("msg", fmt.Sprintf("%s: %#v", name, args))
+ level.Debug(util_log.Logger).Log("msg", fmt.Sprintf("%s: %#v", name, args))
}
func (t traced) GetConfig(ctx context.Context, userID string) (cfg userconfig.View, err error) {
diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/userconfig/config.go b/vendor/github.com/cortexproject/cortex/pkg/configs/userconfig/config.go
index 0b36801012ced..e55542fd443b5 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/configs/userconfig/config.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/configs/userconfig/config.go
@@ -17,7 +17,7 @@ import (
"github.com/prometheus/prometheus/rules"
legacy_promql "github.com/cortexproject/cortex/pkg/configs/legacy_promql"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
)
// An ID is the ID of a single users's Cortex configuration. When a
@@ -370,7 +370,7 @@ func (c RulesConfig) parseV2() (map[string][]rules.Rule, error) {
labels.FromMap(rl.Annotations),
nil,
true,
- log.With(util.Logger, "alert", rl.Alert.Value),
+ log.With(util_log.Logger, "alert", rl.Alert.Value),
))
continue
}
@@ -418,7 +418,7 @@ func (c RulesConfig) parseV1() (map[string][]rules.Rule, error) {
rule = rules.NewAlertingRule(
r.Name, expr, r.Duration, r.Labels, r.Annotations, nil, true,
- log.With(util.Logger, "alert", r.Name),
+ log.With(util_log.Logger, "alert", r.Name),
)
case *legacy_promql.RecordStmt:
diff --git a/vendor/github.com/cortexproject/cortex/pkg/cortex/modules.go b/vendor/github.com/cortexproject/cortex/pkg/cortex/modules.go
index d92d18dbc5c34..aa06abf0019b2 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/cortex/modules.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/cortex/modules.go
@@ -487,10 +487,11 @@ func (t *Cortex) initQueryFrontendTripperware() (serv services.Service, err erro
queryrange.PrometheusResponseExtractor{},
t.Cfg.Schema,
promql.EngineOpts{
- Logger: util_log.Logger,
- Reg: prometheus.DefaultRegisterer,
- MaxSamples: t.Cfg.Querier.MaxSamples,
- Timeout: t.Cfg.Querier.Timeout,
+ Logger: util_log.Logger,
+ Reg: prometheus.DefaultRegisterer,
+ MaxSamples: t.Cfg.Querier.MaxSamples,
+ Timeout: t.Cfg.Querier.Timeout,
+ EnableAtModifier: t.Cfg.Querier.AtModifierEnabled,
NoStepSubqueryIntervalFn: func(int64) int64 {
return t.Cfg.Querier.DefaultEvaluationInterval.Milliseconds()
},
diff --git a/vendor/github.com/cortexproject/cortex/pkg/cortex/server_service.go b/vendor/github.com/cortexproject/cortex/pkg/cortex/server_service.go
index 7a565fbfb0174..850aedcd32b26 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/cortex/server_service.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/cortex/server_service.go
@@ -7,7 +7,7 @@ import (
"github.com/go-kit/kit/log/level"
"github.com/weaveworks/common/server"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/cortexproject/cortex/pkg/util/services"
)
@@ -47,7 +47,7 @@ func NewServerService(serv *server.Server, servicesToWaitFor func() []services.S
// if not closed yet, wait until server stops.
<-serverDone
- level.Info(util.Logger).Log("msg", "server stopped")
+ level.Info(util_log.Logger).Log("msg", "server stopped")
return nil
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go b/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go
index 1b4679ec58005..45a3c3c3f3462 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go
@@ -170,10 +170,6 @@ type Config struct {
// for testing and for extending the ingester by adding calls to the client
IngesterClientFactory ring_client.PoolFactory `yaml:"-"`
- // when true the distributor does not validate the label name, Cortex doesn't directly use
- // this (and should never use it) but this feature is used by other projects built on top of it
- SkipLabelNameValidation bool `yaml:"-"`
-
// This config is dynamically injected because defined in the querier config.
ShuffleShardingLookbackPeriod time.Duration `yaml:"-"`
}
@@ -496,8 +492,7 @@ func (d *Distributor) Push(ctx context.Context, req *ingester_client.WriteReques
return nil, err
}
- skipLabelNameValidation := d.cfg.SkipLabelNameValidation || req.GetSkipLabelNameValidation()
- validatedSeries, err := d.validateSeries(ts, userID, skipLabelNameValidation)
+ validatedSeries, err := d.validateSeries(ts, userID, req.GetSkipLabelNameValidation())
// Errors in validation are considered non-fatal, as one series in a request may contain
// invalid data but all the remaining series could be perfectly valid.
@@ -568,7 +563,7 @@ func (d *Distributor) Push(ctx context.Context, req *ingester_client.WriteReques
op = ring.Write
}
- err = ring.DoBatch(ctx, op, subRing, keys, func(ingester ring.IngesterDesc, indexes []int) error {
+ err = ring.DoBatch(ctx, op, subRing, keys, func(ingester ring.InstanceDesc, indexes []int) error {
timeseries := make([]ingester_client.PreallocTimeseries, 0, len(indexes))
var metadata []*ingester_client.MetricMetadata
@@ -621,7 +616,7 @@ func sortLabelsIfNeeded(labels []ingester_client.LabelAdapter) {
})
}
-func (d *Distributor) send(ctx context.Context, ingester ring.IngesterDesc, timeseries []ingester_client.PreallocTimeseries, metadata []*ingester_client.MetricMetadata, source ingester_client.WriteRequest_SourceEnum) error {
+func (d *Distributor) send(ctx context.Context, ingester ring.InstanceDesc, timeseries []ingester_client.PreallocTimeseries, metadata []*ingester_client.MetricMetadata, source ingester_client.WriteRequest_SourceEnum) error {
h, err := d.ingesterPool.GetClientFor(ingester.Addr)
if err != nil {
return err
@@ -653,7 +648,7 @@ func (d *Distributor) send(ctx context.Context, ingester ring.IngesterDesc, time
// ForReplicationSet runs f, in parallel, for all ingesters in the input replication set.
func (d *Distributor) ForReplicationSet(ctx context.Context, replicationSet ring.ReplicationSet, f func(context.Context, ingester_client.IngesterClient) (interface{}, error)) ([]interface{}, error) {
- return replicationSet.Do(ctx, d.cfg.ExtraQueryDelay, func(ctx context.Context, ing *ring.IngesterDesc) (interface{}, error) {
+ return replicationSet.Do(ctx, d.cfg.ExtraQueryDelay, func(ctx context.Context, ing *ring.InstanceDesc) (interface{}, error) {
client, err := d.ingesterPool.GetClientFor(ing.Addr)
if err != nil {
return nil, err
diff --git a/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor_ring.go b/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor_ring.go
index af77a5d98bced..78296341f78ab 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor_ring.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor_ring.go
@@ -9,8 +9,8 @@ import (
"github.com/cortexproject/cortex/pkg/ring"
"github.com/cortexproject/cortex/pkg/ring/kv"
- "github.com/cortexproject/cortex/pkg/util"
"github.com/cortexproject/cortex/pkg/util/flagext"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
)
// RingConfig masks the ring lifecycler config which contains
@@ -36,7 +36,7 @@ type RingConfig struct {
func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet) {
hostname, err := os.Hostname()
if err != nil {
- level.Error(util.Logger).Log("msg", "failed to get hostname", "err", err)
+ level.Error(util_log.Logger).Log("msg", "failed to get hostname", "err", err)
os.Exit(1)
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/distributor/ha_tracker.go b/vendor/github.com/cortexproject/cortex/pkg/distributor/ha_tracker.go
index d7b526b497f2b..6aef3e7fd317c 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/distributor/ha_tracker.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/distributor/ha_tracker.go
@@ -21,7 +21,7 @@ import (
"github.com/cortexproject/cortex/pkg/ingester/client"
"github.com/cortexproject/cortex/pkg/ring/kv"
"github.com/cortexproject/cortex/pkg/ring/kv/codec"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/cortexproject/cortex/pkg/util/services"
)
@@ -155,7 +155,7 @@ func newClusterTracker(cfg HATrackerConfig, limits haTrackerLimits, reg promethe
}
t := &haTracker{
- logger: util.Logger,
+ logger: util_log.Logger,
cfg: cfg,
updateTimeoutJitter: jitter,
limits: limits,
@@ -259,7 +259,7 @@ func (c *haTracker) checkReplica(ctx context.Context, userID, cluster, replica s
// The callback within checkKVStore will return a replicasNotMatchError if the sample is being deduped,
// otherwise there may have been an actual error CAS'ing that we should log.
if !errors.Is(err, replicasNotMatchError{}) {
- level.Error(util.Logger).Log("msg", "rejecting sample", "err", err)
+ level.Error(util_log.Logger).Log("msg", "rejecting sample", "err", err)
}
}
return err
diff --git a/vendor/github.com/cortexproject/cortex/pkg/distributor/query.go b/vendor/github.com/cortexproject/cortex/pkg/distributor/query.go
index 501ce4a42236c..b8a214ab98ceb 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/distributor/query.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/distributor/query.go
@@ -129,7 +129,7 @@ func (d *Distributor) GetIngestersForMetadata(ctx context.Context) (ring.Replica
func (d *Distributor) queryIngesters(ctx context.Context, replicationSet ring.ReplicationSet, req *ingester_client.QueryRequest) (model.Matrix, error) {
// Fetch samples from multiple ingesters in parallel, using the replicationSet
// to deal with consistency.
- results, err := replicationSet.Do(ctx, d.cfg.ExtraQueryDelay, func(ctx context.Context, ing *ring.IngesterDesc) (interface{}, error) {
+ results, err := replicationSet.Do(ctx, d.cfg.ExtraQueryDelay, func(ctx context.Context, ing *ring.InstanceDesc) (interface{}, error) {
client, err := d.ingesterPool.GetClientFor(ing.Addr)
if err != nil {
return nil, err
@@ -174,7 +174,7 @@ func (d *Distributor) queryIngesters(ctx context.Context, replicationSet ring.Re
// queryIngesterStream queries the ingesters using the new streaming API.
func (d *Distributor) queryIngesterStream(ctx context.Context, replicationSet ring.ReplicationSet, req *ingester_client.QueryRequest) (*ingester_client.QueryStreamResponse, error) {
// Fetch samples from multiple ingesters
- results, err := replicationSet.Do(ctx, d.cfg.ExtraQueryDelay, func(ctx context.Context, ing *ring.IngesterDesc) (interface{}, error) {
+ results, err := replicationSet.Do(ctx, d.cfg.ExtraQueryDelay, func(ctx context.Context, ing *ring.InstanceDesc) (interface{}, error) {
client, err := d.ingesterPool.GetClientFor(ing.Addr)
if err != nil {
return nil, err
diff --git a/vendor/github.com/cortexproject/cortex/pkg/flusher/flusher.go b/vendor/github.com/cortexproject/cortex/pkg/flusher/flusher.go
index 6157599a87584..4cf4495a783cd 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/flusher/flusher.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/flusher/flusher.go
@@ -11,6 +11,7 @@ import (
"github.com/cortexproject/cortex/pkg/ingester"
"github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/cortexproject/cortex/pkg/util/services"
"github.com/cortexproject/cortex/pkg/util/validation"
)
@@ -87,7 +88,7 @@ func (f *Flusher) running(ctx context.Context) error {
// Sleeping to give a chance to Prometheus
// to collect the metrics.
- level.Info(util.Logger).Log("msg", "sleeping to give chance for collection of metrics", "duration", postFlushSleepTime.String())
+ level.Info(util_log.Logger).Log("msg", "sleeping to give chance for collection of metrics", "duration", postFlushSleepTime.String())
time.Sleep(postFlushSleepTime)
if err := services.StopAndAwaitTerminated(ctx, ing); err != nil {
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester_v2.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester_v2.go
index 0e2fa8270451f..139acb8019324 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester_v2.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester_v2.go
@@ -1578,7 +1578,7 @@ func (i *Ingester) shipBlocks(ctx context.Context) {
uploaded, err := userDB.shipper.Sync(ctx)
if err != nil {
- level.Warn(util.Logger).Log("msg", "shipper failed to synchronize TSDB blocks with the storage", "user", userID, "uploaded", uploaded, "err", err)
+ level.Warn(log.Logger).Log("msg", "shipper failed to synchronize TSDB blocks with the storage", "user", userID, "uploaded", uploaded, "err", err)
} else {
level.Debug(log.Logger).Log("msg", "shipper successfully synchronized TSDB blocks with storage", "user", userID, "uploaded", uploaded)
}
@@ -1590,7 +1590,7 @@ func (i *Ingester) shipBlocks(ctx context.Context) {
// the cached list of blocks in such case, so we're not handling it.
if uploaded > 0 {
if err := userDB.updateCachedShippedBlocks(); err != nil {
- level.Error(util.Logger).Log("msg", "failed to update cached shipped blocks after shipper synchronisation", "user", userID, "err", err)
+ level.Error(log.Logger).Log("msg", "failed to update cached shipped blocks after shipper synchronisation", "user", userID, "err", err)
}
}
@@ -1811,7 +1811,7 @@ func (i *Ingester) v2FlushHandler(w http.ResponseWriter, _ *http.Request) {
}
if i.cfg.BlocksStorageConfig.TSDB.IsBlocksShippingEnabled() {
- level.Info(util.Logger).Log("msg", "flushing TSDB blocks: triggering shipping")
+ level.Info(log.Logger).Log("msg", "flushing TSDB blocks: triggering shipping")
select {
case i.TSDBState.shipTrigger <- ch:
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/mapper.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/mapper.go
index 18977e7176e7a..bf3a2a1bfae6c 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ingester/mapper.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/mapper.go
@@ -10,7 +10,7 @@ import (
"github.com/prometheus/common/model"
"go.uber.org/atomic"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
)
const maxMappedFP = 1 << 20 // About 1M fingerprints reserved for mapping.
@@ -105,7 +105,7 @@ func (m *fpMapper) maybeAddMapping(
// A new mapping has to be created.
mappedFP = m.nextMappedFP()
mappedFPs[ms] = mappedFP
- level.Debug(util.Logger).Log(
+ level.Debug(util_log.Logger).Log(
"msg", "fingerprint collision detected, mapping to new fingerprint",
"old_fp", fp,
"new_fp", mappedFP,
@@ -119,7 +119,7 @@ func (m *fpMapper) maybeAddMapping(
m.mtx.Lock()
m.mappings[fp] = mappedFPs
m.mtx.Unlock()
- level.Debug(util.Logger).Log(
+ level.Debug(util_log.Logger).Log(
"msg", "fingerprint collision detected, mapping to new fingerprint",
"old_fp", fp,
"new_fp", mappedFP,
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/transfer.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/transfer.go
index b9b87bda7e0d4..305aacb0c48d2 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ingester/transfer.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/transfer.go
@@ -17,6 +17,7 @@ import (
"github.com/cortexproject/cortex/pkg/ingester/client"
"github.com/cortexproject/cortex/pkg/ring"
"github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
)
var (
@@ -53,7 +54,7 @@ func (i *Ingester) fillUserStatesFromStream(userStates *userStates, stream clien
// round this loop.
if fromIngesterID == "" {
fromIngesterID = wireSeries.FromIngesterId
- level.Info(util.Logger).Log("msg", "processing TransferChunks request", "from_ingester", fromIngesterID)
+ level.Info(util_log.Logger).Log("msg", "processing TransferChunks request", "from_ingester", fromIngesterID)
// Before transfer, make sure 'from' ingester is in correct state to call ClaimTokensFor later
err := i.checkFromIngesterIsInLeavingState(stream.Context(), fromIngesterID)
@@ -90,13 +91,13 @@ func (i *Ingester) fillUserStatesFromStream(userStates *userStates, stream clien
}
if seriesReceived == 0 {
- level.Error(util.Logger).Log("msg", "received TransferChunks request with no series", "from_ingester", fromIngesterID)
+ level.Error(util_log.Logger).Log("msg", "received TransferChunks request with no series", "from_ingester", fromIngesterID)
retErr = fmt.Errorf("TransferChunks: no series")
return
}
if fromIngesterID == "" {
- level.Error(util.Logger).Log("msg", "received TransferChunks request with no ID from ingester")
+ level.Error(util_log.Logger).Log("msg", "received TransferChunks request with no ID from ingester")
retErr = fmt.Errorf("no ingester id")
return
}
@@ -139,10 +140,10 @@ func (i *Ingester) TransferChunks(stream client.Ingester_TransferChunksServer) e
// Close the stream last, as this is what tells the "from" ingester that
// it's OK to shut down.
if err := stream.SendAndClose(&client.TransferChunksResponse{}); err != nil {
- level.Error(util.Logger).Log("msg", "Error closing TransferChunks stream", "from_ingester", fromIngesterID, "err", err)
+ level.Error(util_log.Logger).Log("msg", "Error closing TransferChunks stream", "from_ingester", fromIngesterID, "err", err)
return err
}
- level.Info(util.Logger).Log("msg", "Successfully transferred chunks", "from_ingester", fromIngesterID, "series_received", seriesReceived)
+ level.Info(util_log.Logger).Log("msg", "Successfully transferred chunks", "from_ingester", fromIngesterID, "series_received", seriesReceived)
return nil
}
@@ -186,12 +187,12 @@ func (i *Ingester) transfer(ctx context.Context, xfer func() error) error {
return
}
- level.Error(util.Logger).Log("msg", "TransferChunks failed, not in ACTIVE state.", "state", state)
+ level.Error(util_log.Logger).Log("msg", "TransferChunks failed, not in ACTIVE state.", "state", state)
// Enter PENDING state (only valid from JOINING)
if i.lifecycler.GetState() == ring.JOINING {
if err := i.lifecycler.ChangeState(ctx, ring.PENDING); err != nil {
- level.Error(util.Logger).Log("msg", "error rolling back failed TransferChunks", "err", err)
+ level.Error(util_log.Logger).Log("msg", "error rolling back failed TransferChunks", "err", err)
os.Exit(1)
}
}
@@ -267,7 +268,7 @@ func fromWireChunks(wireChunks []client.Chunk) ([]*desc, error) {
func (i *Ingester) TransferOut(ctx context.Context) error {
// The blocks storage doesn't support blocks transferring.
if i.cfg.BlocksStorageEnabled {
- level.Info(util.Logger).Log("msg", "transfer between a LEAVING ingester and a PENDING one is not supported for the blocks storage")
+ level.Info(util_log.Logger).Log("msg", "transfer between a LEAVING ingester and a PENDING one is not supported for the blocks storage")
return ring.ErrTransferDisabled
}
@@ -287,23 +288,23 @@ func (i *Ingester) TransferOut(ctx context.Context) error {
for backoff.Ongoing() {
err = i.transferOut(ctx)
if err == nil {
- level.Info(util.Logger).Log("msg", "transfer successfully completed")
+ level.Info(util_log.Logger).Log("msg", "transfer successfully completed")
return nil
}
- level.Warn(util.Logger).Log("msg", "transfer attempt failed", "err", err, "attempt", backoff.NumRetries()+1, "max_retries", i.cfg.MaxTransferRetries)
+ level.Warn(util_log.Logger).Log("msg", "transfer attempt failed", "err", err, "attempt", backoff.NumRetries()+1, "max_retries", i.cfg.MaxTransferRetries)
backoff.Wait()
}
- level.Error(util.Logger).Log("msg", "all transfer attempts failed", "err", err)
+ level.Error(util_log.Logger).Log("msg", "all transfer attempts failed", "err", err)
return backoff.Err()
}
func (i *Ingester) transferOut(ctx context.Context) error {
userStatesCopy := i.userStates.cp()
if len(userStatesCopy) == 0 {
- level.Info(util.Logger).Log("msg", "nothing to transfer")
+ level.Info(util_log.Logger).Log("msg", "nothing to transfer")
return nil
}
@@ -312,7 +313,7 @@ func (i *Ingester) transferOut(ctx context.Context) error {
return fmt.Errorf("cannot find ingester to transfer chunks to: %w", err)
}
- level.Info(util.Logger).Log("msg", "sending chunks", "to_ingester", targetIngester.Addr)
+ level.Info(util_log.Logger).Log("msg", "sending chunks", "to_ingester", targetIngester.Addr)
c, err := i.cfg.ingesterClientFactory(targetIngester.Addr, i.clientConfig)
if err != nil {
return err
@@ -367,12 +368,12 @@ func (i *Ingester) transferOut(ctx context.Context) error {
}
i.flushQueuesDone.Wait()
- level.Info(util.Logger).Log("msg", "successfully sent chunks", "to_ingester", targetIngester.Addr)
+ level.Info(util_log.Logger).Log("msg", "successfully sent chunks", "to_ingester", targetIngester.Addr)
return nil
}
// findTargetIngester finds an ingester in PENDING state.
-func (i *Ingester) findTargetIngester(ctx context.Context) (*ring.IngesterDesc, error) {
+func (i *Ingester) findTargetIngester(ctx context.Context) (*ring.InstanceDesc, error) {
ringDesc, err := i.lifecycler.KVStore.Get(ctx, i.lifecycler.RingKey)
if err != nil {
return nil, err
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/wal.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/wal.go
index 8ec6b6abd9f5f..15d93d6894735 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ingester/wal.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/wal.go
@@ -26,7 +26,7 @@ import (
"github.com/prometheus/prometheus/tsdb/wal"
"github.com/cortexproject/cortex/pkg/ingester/client"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
)
// WALConfig is config for the Write Ahead Log.
@@ -109,7 +109,7 @@ func newWAL(cfg WALConfig, userStatesFunc func() map[string]*userState, register
if registerer != nil {
walRegistry = prometheus.WrapRegistererWith(prometheus.Labels{"kind": "wal"}, registerer)
}
- tsdbWAL, err := wal.NewSize(util.Logger, walRegistry, cfg.Dir, wal.DefaultSegmentSize/4, false)
+ tsdbWAL, err := wal.NewSize(util_log.Logger, walRegistry, cfg.Dir, wal.DefaultSegmentSize/4, false)
if err != nil {
return nil, err
}
@@ -219,19 +219,19 @@ func (w *walWrapper) run() {
select {
case <-ticker.C:
start := time.Now()
- level.Info(util.Logger).Log("msg", "starting checkpoint")
+ level.Info(util_log.Logger).Log("msg", "starting checkpoint")
if err := w.performCheckpoint(false); err != nil {
- level.Error(util.Logger).Log("msg", "error checkpointing series", "err", err)
+ level.Error(util_log.Logger).Log("msg", "error checkpointing series", "err", err)
continue
}
elapsed := time.Since(start)
- level.Info(util.Logger).Log("msg", "checkpoint done", "time", elapsed.String())
+ level.Info(util_log.Logger).Log("msg", "checkpoint done", "time", elapsed.String())
w.checkpointDuration.Observe(elapsed.Seconds())
case <-w.quit:
if w.cfg.checkpointDuringShutdown {
- level.Info(util.Logger).Log("msg", "creating checkpoint before shutdown")
+ level.Info(util_log.Logger).Log("msg", "creating checkpoint before shutdown")
if err := w.performCheckpoint(true); err != nil {
- level.Error(util.Logger).Log("msg", "error checkpointing series during shutdown", "err", err)
+ level.Error(util_log.Logger).Log("msg", "error checkpointing series during shutdown", "err", err)
}
}
return
@@ -292,7 +292,7 @@ func (w *walWrapper) performCheckpoint(immediate bool) (err error) {
// Checkpoint is named after the last WAL segment present so that when replaying the WAL
// we can start from that particular WAL segment.
checkpointDir := filepath.Join(w.wal.Dir(), fmt.Sprintf(checkpointPrefix+"%06d", lastSegment))
- level.Info(util.Logger).Log("msg", "attempting checkpoint for", "dir", checkpointDir)
+ level.Info(util_log.Logger).Log("msg", "attempting checkpoint for", "dir", checkpointDir)
checkpointDirTemp := checkpointDir + ".tmp"
if err := os.MkdirAll(checkpointDirTemp, 0777); err != nil {
@@ -389,14 +389,14 @@ func (w *walWrapper) performCheckpoint(immediate bool) (err error) {
if err := w.wal.Truncate(lastCh); err != nil {
// It is fine to have old WAL segments hanging around if deletion failed.
// We can try again next time.
- level.Error(util.Logger).Log("msg", "error deleting old WAL segments", "err", err)
+ level.Error(util_log.Logger).Log("msg", "error deleting old WAL segments", "err", err)
}
if lastCh >= 0 {
if err := w.deleteCheckpoints(lastCh); err != nil {
// It is fine to have old checkpoints hanging around if deletion failed.
// We can try again next time.
- level.Error(util.Logger).Log("msg", "error deleting old checkpoint", "err", err)
+ level.Error(util_log.Logger).Log("msg", "error deleting old checkpoint", "err", err)
}
}
@@ -520,17 +520,17 @@ func recoverFromWAL(ingester *Ingester) error {
params.seriesCache[i] = make(map[string]map[uint64]*memorySeries)
}
- level.Info(util.Logger).Log("msg", "recovering from checkpoint")
+ level.Info(util_log.Logger).Log("msg", "recovering from checkpoint")
start := time.Now()
userStates, idx, err := processCheckpointWithRepair(params)
if err != nil {
return err
}
elapsed := time.Since(start)
- level.Info(util.Logger).Log("msg", "recovered from checkpoint", "time", elapsed.String())
+ level.Info(util_log.Logger).Log("msg", "recovered from checkpoint", "time", elapsed.String())
if segExists, err := segmentsExist(params.walDir); err == nil && !segExists {
- level.Info(util.Logger).Log("msg", "no segments found, skipping recover from segments")
+ level.Info(util_log.Logger).Log("msg", "no segments found, skipping recover from segments")
ingester.userStatesMtx.Lock()
ingester.userStates = userStates
ingester.userStatesMtx.Unlock()
@@ -539,13 +539,13 @@ func recoverFromWAL(ingester *Ingester) error {
return err
}
- level.Info(util.Logger).Log("msg", "recovering from WAL", "dir", params.walDir, "start_segment", idx)
+ level.Info(util_log.Logger).Log("msg", "recovering from WAL", "dir", params.walDir, "start_segment", idx)
start = time.Now()
if err := processWALWithRepair(idx, userStates, params); err != nil {
return err
}
elapsed = time.Since(start)
- level.Info(util.Logger).Log("msg", "recovered from WAL", "time", elapsed.String())
+ level.Info(util_log.Logger).Log("msg", "recovered from WAL", "time", elapsed.String())
ingester.userStatesMtx.Lock()
ingester.userStates = userStates
@@ -563,11 +563,11 @@ func processCheckpointWithRepair(params walRecoveryParameters) (*userStates, int
return nil, -1, err
}
if idx < 0 {
- level.Info(util.Logger).Log("msg", "no checkpoint found")
+ level.Info(util_log.Logger).Log("msg", "no checkpoint found")
return userStates, -1, nil
}
- level.Info(util.Logger).Log("msg", fmt.Sprintf("recovering from %s", lastCheckpointDir))
+ level.Info(util_log.Logger).Log("msg", fmt.Sprintf("recovering from %s", lastCheckpointDir))
err = processCheckpoint(lastCheckpointDir, userStates, params)
if err == nil {
@@ -577,7 +577,7 @@ func processCheckpointWithRepair(params walRecoveryParameters) (*userStates, int
// We don't call repair on checkpoint as losing even a single record is like losing the entire data of a series.
// We try recovering from the older checkpoint instead.
params.ingester.metrics.walCorruptionsTotal.Inc()
- level.Error(util.Logger).Log("msg", "checkpoint recovery failed, deleting this checkpoint and trying to recover from old checkpoint", "err", err)
+ level.Error(util_log.Logger).Log("msg", "checkpoint recovery failed, deleting this checkpoint and trying to recover from old checkpoint", "err", err)
// Deleting this checkpoint to try the previous checkpoint.
if err := os.RemoveAll(lastCheckpointDir); err != nil {
@@ -599,7 +599,7 @@ func processCheckpointWithRepair(params walRecoveryParameters) (*userStates, int
return userStates, -1, nil
}
- level.Info(util.Logger).Log("msg", fmt.Sprintf("attempting recovery from %s", lastCheckpointDir))
+ level.Info(util_log.Logger).Log("msg", fmt.Sprintf("attempting recovery from %s", lastCheckpointDir))
if err := processCheckpoint(lastCheckpointDir, userStates, params); err != nil {
// We won't attempt the repair again even if its the old checkpoint.
params.ingester.metrics.walCorruptionsTotal.Inc()
@@ -782,18 +782,18 @@ func processWALWithRepair(startSegment int, userStates *userStates, params walRe
}
params.ingester.metrics.walCorruptionsTotal.Inc()
- level.Error(util.Logger).Log("msg", "error in replaying from WAL", "err", corruptErr)
+ level.Error(util_log.Logger).Log("msg", "error in replaying from WAL", "err", corruptErr)
// Attempt repair.
- level.Info(util.Logger).Log("msg", "attempting repair of the WAL")
- w, err := wal.New(util.Logger, nil, params.walDir, true)
+ level.Info(util_log.Logger).Log("msg", "attempting repair of the WAL")
+ w, err := wal.New(util_log.Logger, nil, params.walDir, true)
if err != nil {
return err
}
err = w.Repair(corruptErr)
if err != nil {
- level.Error(util.Logger).Log("msg", "error in repairing WAL", "err", err)
+ level.Error(util_log.Logger).Log("msg", "error in repairing WAL", "err", err)
}
return tsdb_errors.NewMulti(err, w.Close()).Err()
@@ -970,7 +970,7 @@ func processWALSamples(userStates *userStates, stateCache map[string]*userState,
// If the series was not created in recovering checkpoint or
// from the labels of any records previous to this, there
// is no way to get the labels for this fingerprint.
- level.Warn(util.Logger).Log("msg", "series not found for sample during wal recovery", "userid", samples.userID, "fingerprint", model.Fingerprint(samples.samples[i].Ref).String())
+ level.Warn(util_log.Logger).Log("msg", "series not found for sample during wal recovery", "userid", samples.userID, "fingerprint", model.Fingerprint(samples.samples[i].Ref).String())
continue
}
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/astmapper/parallel.go b/vendor/github.com/cortexproject/cortex/pkg/querier/astmapper/parallel.go
index 989c6bdb92f08..d9f309e1112c4 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/querier/astmapper/parallel.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/querier/astmapper/parallel.go
@@ -6,7 +6,7 @@ import (
"github.com/go-kit/kit/log/level"
"github.com/prometheus/prometheus/promql/parser"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
)
var summableAggregates = map[parser.ItemType]struct{}{
@@ -90,7 +90,7 @@ func CanParallelize(node parser.Node) bool {
return true
default:
- level.Error(util.Logger).Log("err", fmt.Sprintf("CanParallel: unhandled node type %T", node))
+ level.Error(util_log.Logger).Log("err", fmt.Sprintf("CanParallel: unhandled node type %T", node))
return false
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_balanced_set.go b/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_balanced_set.go
index 0e408dcbf10b1..8c94ad4b64f2d 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_balanced_set.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_balanced_set.go
@@ -17,6 +17,7 @@ import (
"github.com/cortexproject/cortex/pkg/ring/client"
"github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/cortexproject/cortex/pkg/util/services"
)
@@ -52,7 +53,7 @@ func (s *blocksStoreBalancedSet) starting(ctx context.Context) error {
func (s *blocksStoreBalancedSet) resolve(ctx context.Context) error {
if err := s.dnsProvider.Resolve(ctx, s.serviceAddresses); err != nil {
- level.Error(util.Logger).Log("msg", "failed to resolve store-gateway addresses", "err", err, "addresses", s.serviceAddresses)
+ level.Error(util_log.Logger).Log("msg", "failed to resolve store-gateway addresses", "err", err, "addresses", s.serviceAddresses)
}
return nil
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/querier.go b/vendor/github.com/cortexproject/cortex/pkg/querier/querier.go
index ce8d12ee21e35..4599df2a5aec2 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/querier/querier.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/querier/querier.go
@@ -28,6 +28,7 @@ import (
"github.com/cortexproject/cortex/pkg/tenant"
"github.com/cortexproject/cortex/pkg/util"
"github.com/cortexproject/cortex/pkg/util/flagext"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/cortexproject/cortex/pkg/util/spanlogger"
"github.com/cortexproject/cortex/pkg/util/validation"
)
@@ -42,6 +43,7 @@ type Config struct {
MaxSamples int `yaml:"max_samples"`
QueryIngestersWithin time.Duration `yaml:"query_ingesters_within"`
QueryStoreForLabels bool `yaml:"query_store_for_labels_enabled"`
+ AtModifierEnabled bool `yaml:"at_modifier_enabled"`
// QueryStoreAfter the time after which queries should also be sent to the store and not just ingesters.
QueryStoreAfter time.Duration `yaml:"query_store_after"`
@@ -88,6 +90,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
f.IntVar(&cfg.MaxSamples, "querier.max-samples", 50e6, "Maximum number of samples a single query can load into memory.")
f.DurationVar(&cfg.QueryIngestersWithin, "querier.query-ingesters-within", 0, "Maximum lookback beyond which queries are not sent to ingester. 0 means all queries are sent to ingester.")
f.BoolVar(&cfg.QueryStoreForLabels, "querier.query-store-for-labels-enabled", false, "Query long-term store for series, label values and label names APIs. Works only with blocks engine.")
+ f.BoolVar(&cfg.AtModifierEnabled, "querier.at-modifier-enabled", false, "Enable the @ modifier in PromQL.")
f.DurationVar(&cfg.MaxQueryIntoFuture, "querier.max-query-into-future", 10*time.Minute, "Maximum duration into the future you can query. 0 to disable.")
f.DurationVar(&cfg.DefaultEvaluationInterval, "querier.default-evaluation-interval", time.Minute, "The default evaluation interval or step size for subqueries.")
f.DurationVar(&cfg.QueryStoreAfter, "querier.query-store-after", 0, "The time after which a metric should be queried from storage and not just ingesters. 0 means all queries are sent to store. When running the blocks storage, if this option is enabled, the time range of the query sent to the store will be manipulated to ensure the query end is not more recent than 'now - query-store-after'.")
@@ -164,12 +167,13 @@ func New(cfg Config, limits *validation.Overrides, distributor Distributor, stor
})
engine := promql.NewEngine(promql.EngineOpts{
- Logger: util.Logger,
+ Logger: util_log.Logger,
Reg: reg,
ActiveQueryTracker: createActiveQueryTracker(cfg),
MaxSamples: cfg.MaxSamples,
Timeout: cfg.Timeout,
LookbackDelta: cfg.LookbackDelta,
+ EnableAtModifier: cfg.AtModifierEnabled,
NoStepSubqueryIntervalFn: func(int64) int64 {
return cfg.DefaultEvaluationInterval.Milliseconds()
},
@@ -195,7 +199,7 @@ func createActiveQueryTracker(cfg Config) *promql.ActiveQueryTracker {
dir := cfg.ActiveQueryTrackerDir
if dir != "" {
- return promql.NewActiveQueryTracker(dir, cfg.MaxConcurrent, util.Logger)
+ return promql.NewActiveQueryTracker(dir, cfg.MaxConcurrent, util_log.Logger)
}
return nil
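
The querier hunks above add the -querier.at-modifier-enabled flag and thread it into the PromQL engine as EnableAtModifier. A minimal sketch of that wiring in isolation; values other than the ones shown in the diff are placeholders:

package example

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/prometheus/promql"

	util_log "github.com/cortexproject/cortex/pkg/util/log"
)

// newEngine builds a PromQL engine with @ modifier support toggled by the new
// querier flag.
func newEngine(atModifierEnabled bool) *promql.Engine {
	return promql.NewEngine(promql.EngineOpts{
		Logger:           util_log.Logger,
		Reg:              prometheus.DefaultRegisterer,
		MaxSamples:       50e6,
		Timeout:          2 * time.Minute,
		EnableAtModifier: atModifierEnabled,
		NoStepSubqueryIntervalFn: func(int64) int64 {
			return time.Minute.Milliseconds()
		},
	})
}
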
diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/results_cache.go b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/results_cache.go
index 0b38a181785ae..752b3ed463fd9 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/results_cache.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/results_cache.go
@@ -6,6 +6,7 @@ import (
"fmt"
"net/http"
"sort"
+ "strings"
"time"
"github.com/go-kit/kit/log"
@@ -17,6 +18,7 @@ import (
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
+ "github.com/prometheus/prometheus/promql/parser"
"github.com/uber/jaeger-client-go"
"github.com/weaveworks/common/httpgrpc"
@@ -209,9 +211,9 @@ func (s resultsCache) Do(ctx context.Context, r Request) (Response, error) {
cached, ok := s.get(ctx, key)
if ok {
- response, extents, err = s.handleHit(ctx, r, cached)
+ response, extents, err = s.handleHit(ctx, r, cached, maxCacheTime)
} else {
- response, extents, err = s.handleMiss(ctx, r)
+ response, extents, err = s.handleMiss(ctx, r, maxCacheTime)
}
if err == nil && len(extents) > 0 {
@@ -226,7 +228,7 @@ func (s resultsCache) Do(ctx context.Context, r Request) (Response, error) {
}
// shouldCacheResponse says whether the response should be cached or not.
-func (s resultsCache) shouldCacheResponse(ctx context.Context, r Response) bool {
+func (s resultsCache) shouldCacheResponse(ctx context.Context, req Request, r Response, maxCacheTime int64) bool {
headerValues := getHeaderValuesWithName(r, cacheControlHeader)
for _, v := range headerValues {
if v == noStoreValue {
@@ -235,6 +237,10 @@ func (s resultsCache) shouldCacheResponse(ctx context.Context, r Response) bool
}
}
+ if !s.isAtModifierCachable(req, maxCacheTime) {
+ return false
+ }
+
if s.cacheGenNumberLoader == nil {
return true
}
@@ -257,6 +263,55 @@ func (s resultsCache) shouldCacheResponse(ctx context.Context, r Response) bool
return true
}
+var errAtModifierAfterEnd = errors.New("at modifier after end")
+
+// isAtModifierCachable returns true if the @ modifier result
+// is safe to cache.
+func (s resultsCache) isAtModifierCachable(r Request, maxCacheTime int64) bool {
+ // There are 2 cases when @ modifier is not safe to cache:
+ // 1. When @ modifier points to time beyond the maxCacheTime.
+ // 2. If the @ modifier time is > the query range end while being
+ // below maxCacheTime. In such cases if any tenant is intentionally
+ // playing with old data, we could cache empty result if we look
+ // beyond query end.
+ query := r.GetQuery()
+ if !strings.Contains(query, "@") {
+ return true
+ }
+ expr, err := parser.ParseExpr(query)
+ if err != nil {
+ // We are being pessimistic in such cases.
+ level.Warn(s.logger).Log("msg", "failed to parse query, considering @ modifier as not cachable", "query", query, "err", err)
+ return false
+ }
+
+ end := r.GetEnd()
+ atModCachable := true
+ parser.Inspect(expr, func(n parser.Node, _ []parser.Node) error {
+ switch e := n.(type) {
+ case *parser.VectorSelector:
+ if e.Timestamp != nil && (*e.Timestamp > end || *e.Timestamp > maxCacheTime) {
+ atModCachable = false
+ return errAtModifierAfterEnd
+ }
+ case *parser.MatrixSelector:
+ ts := e.VectorSelector.(*parser.VectorSelector).Timestamp
+ if ts != nil && (*ts > end || *ts > maxCacheTime) {
+ atModCachable = false
+ return errAtModifierAfterEnd
+ }
+ case *parser.SubqueryExpr:
+ if e.Timestamp != nil && (*e.Timestamp > end || *e.Timestamp > maxCacheTime) {
+ atModCachable = false
+ return errAtModifierAfterEnd
+ }
+ }
+ return nil
+ })
+
+ return atModCachable
+}
+
func getHeaderValuesWithName(r Response, headerName string) (headerValues []string) {
for _, hv := range r.GetHeaders() {
if hv.GetName() != headerName {
@@ -269,13 +324,13 @@ func getHeaderValuesWithName(r Response, headerName string) (headerValues []stri
return
}
-func (s resultsCache) handleMiss(ctx context.Context, r Request) (Response, []Extent, error) {
+func (s resultsCache) handleMiss(ctx context.Context, r Request, maxCacheTime int64) (Response, []Extent, error) {
response, err := s.next.Do(ctx, r)
if err != nil {
return nil, nil, err
}
- if !s.shouldCacheResponse(ctx, response) {
+ if !s.shouldCacheResponse(ctx, r, response, maxCacheTime) {
return response, []Extent{}, nil
}
@@ -290,7 +345,7 @@ func (s resultsCache) handleMiss(ctx context.Context, r Request) (Response, []Ex
return response, extents, nil
}
-func (s resultsCache) handleHit(ctx context.Context, r Request, extents []Extent) (Response, []Extent, error) {
+func (s resultsCache) handleHit(ctx context.Context, r Request, extents []Extent, maxCacheTime int64) (Response, []Extent, error) {
var (
reqResps []RequestResponse
err error
@@ -315,7 +370,7 @@ func (s resultsCache) handleHit(ctx context.Context, r Request, extents []Extent
for _, reqResp := range reqResps {
responses = append(responses, reqResp.Response)
- if !s.shouldCacheResponse(ctx, reqResp.Response) {
+ if !s.shouldCacheResponse(ctx, r, reqResp.Response, maxCacheTime) {
continue
}
extent, err := toExtent(ctx, reqResp.Request, s.extractor.ResponseWithoutHeaders(reqResp.Response))
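
The cacheability check above leans on the PromQL parser to find `@` timestamps that point past the query end or the maximum cacheable time. The following is a stripped-down, standalone sketch of the same walk; the function name and the millisecond units are illustrative.

```go
package example

import (
	"strings"

	"github.com/prometheus/prometheus/promql/parser"
)

// atModifierCachable reports whether a query's @ modifiers stay within the
// query end and the maximum cacheable timestamp (both in milliseconds),
// mirroring the check added to the results cache above.
func atModifierCachable(query string, end, maxCacheTime int64) bool {
	if !strings.Contains(query, "@") {
		return true
	}
	expr, err := parser.ParseExpr(query)
	if err != nil {
		// Be pessimistic on parse errors, just like the middleware.
		return false
	}

	cachable := true
	parser.Inspect(expr, func(n parser.Node, _ []parser.Node) error {
		var ts *int64
		switch e := n.(type) {
		case *parser.VectorSelector:
			ts = e.Timestamp
		case *parser.MatrixSelector:
			ts = e.VectorSelector.(*parser.VectorSelector).Timestamp
		case *parser.SubqueryExpr:
			ts = e.Timestamp
		}
		if ts != nil && (*ts > end || *ts > maxCacheTime) {
			cachable = false
		}
		return nil
	})
	return cachable
}
```
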
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/basic_lifecycler.go b/vendor/github.com/cortexproject/cortex/pkg/ring/basic_lifecycler.go
index 102865ede1aaa..efe10149386be 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/basic_lifecycler.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/basic_lifecycler.go
@@ -13,7 +13,7 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/cortexproject/cortex/pkg/ring/kv"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/cortexproject/cortex/pkg/util/services"
)
@@ -21,7 +21,7 @@ type BasicLifecyclerDelegate interface {
// OnRingInstanceRegister is called while the lifecycler is registering the
// instance within the ring and should return the state and set of tokens to
// use for the instance itself.
- OnRingInstanceRegister(lifecycler *BasicLifecycler, ringDesc Desc, instanceExists bool, instanceID string, instanceDesc IngesterDesc) (IngesterState, Tokens)
+ OnRingInstanceRegister(lifecycler *BasicLifecycler, ringDesc Desc, instanceExists bool, instanceID string, instanceDesc InstanceDesc) (IngesterState, Tokens)
// OnRingInstanceTokens is called once the instance tokens are set and are
// stable within the ring (honoring the observe period, if set).
@@ -34,7 +34,7 @@ type BasicLifecyclerDelegate interface {
// OnRingInstanceHeartbeat is called while the instance is updating its heartbeat
// in the ring.
- OnRingInstanceHeartbeat(lifecycler *BasicLifecycler, ringDesc *Desc, instanceDesc *IngesterDesc)
+ OnRingInstanceHeartbeat(lifecycler *BasicLifecycler, ringDesc *Desc, instanceDesc *InstanceDesc)
}
type BasicLifecyclerConfig struct {
@@ -77,7 +77,7 @@ type BasicLifecycler struct {
// The current instance state.
currState sync.RWMutex
- currInstanceDesc *IngesterDesc
+ currInstanceDesc *InstanceDesc
}
// NewBasicLifecycler makes a new BasicLifecycler.
@@ -194,7 +194,7 @@ func (l *BasicLifecycler) running(ctx context.Context) error {
f()
case <-ctx.Done():
- level.Info(util.Logger).Log("msg", "ring lifecycler is shutting down", "ring", l.ringName)
+ level.Info(util_log.Logger).Log("msg", "ring lifecycler is shutting down", "ring", l.ringName)
return nil
}
}
@@ -239,7 +239,7 @@ heartbeatLoop:
// registerInstance registers the instance in the ring. The initial state and set of tokens
// depends on the OnRingInstanceRegister() delegate function.
func (l *BasicLifecycler) registerInstance(ctx context.Context) error {
- var instanceDesc IngesterDesc
+ var instanceDesc InstanceDesc
err := l.store.CAS(ctx, l.ringKey, func(in interface{}) (out interface{}, retry bool, err error) {
ringDesc := GetOrCreateRingDesc(in)
@@ -327,7 +327,7 @@ func (l *BasicLifecycler) waitStableTokens(ctx context.Context, period time.Dura
func (l *BasicLifecycler) verifyTokens(ctx context.Context) bool {
result := false
- err := l.updateInstance(ctx, func(r *Desc, i *IngesterDesc) bool {
+ err := l.updateInstance(ctx, func(r *Desc, i *InstanceDesc) bool {
// At this point, we should have the same tokens as we have registered before.
actualTokens, takenTokens := r.TokensFor(l.cfg.ID)
@@ -385,8 +385,8 @@ func (l *BasicLifecycler) unregisterInstance(ctx context.Context) error {
return nil
}
-func (l *BasicLifecycler) updateInstance(ctx context.Context, update func(*Desc, *IngesterDesc) bool) error {
- var instanceDesc IngesterDesc
+func (l *BasicLifecycler) updateInstance(ctx context.Context, update func(*Desc, *InstanceDesc) bool) error {
+ var instanceDesc InstanceDesc
err := l.store.CAS(ctx, l.ringKey, func(in interface{}) (out interface{}, retry bool, err error) {
ringDesc := GetOrCreateRingDesc(in)
@@ -431,7 +431,7 @@ func (l *BasicLifecycler) updateInstance(ctx context.Context, update func(*Desc,
// heartbeat updates the instance timestamp within the ring. This function is guaranteed
// to be called within the lifecycler main goroutine.
func (l *BasicLifecycler) heartbeat(ctx context.Context) {
- err := l.updateInstance(ctx, func(r *Desc, i *IngesterDesc) bool {
+ err := l.updateInstance(ctx, func(r *Desc, i *InstanceDesc) bool {
l.delegate.OnRingInstanceHeartbeat(l, r, i)
i.Timestamp = time.Now().Unix()
return true
@@ -448,7 +448,7 @@ func (l *BasicLifecycler) heartbeat(ctx context.Context) {
// changeState of the instance within the ring. This function is guaranteed
// to be called within the lifecycler main goroutine.
func (l *BasicLifecycler) changeState(ctx context.Context, state IngesterState) error {
- err := l.updateInstance(ctx, func(_ *Desc, i *IngesterDesc) bool {
+ err := l.updateInstance(ctx, func(_ *Desc, i *InstanceDesc) bool {
// No-op if the state hasn't changed.
if i.State == state {
return false
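
After the rename, `BasicLifecyclerDelegate` implementations receive `InstanceDesc` values in their hooks. A minimal no-op delegate, sketched against the method set visible in this diff (the exact `OnRingInstanceTokens` signature is assumed from the interface comment above):

```go
package example

import (
	"github.com/cortexproject/cortex/pkg/ring"
)

// noopDelegate keeps whatever state and tokens already exist for the instance
// and does nothing on the remaining lifecycle hooks. Purely illustrative.
type noopDelegate struct{}

var _ ring.BasicLifecyclerDelegate = noopDelegate{}

func (noopDelegate) OnRingInstanceRegister(_ *ring.BasicLifecycler, _ ring.Desc, instanceExists bool, _ string, instanceDesc ring.InstanceDesc) (ring.IngesterState, ring.Tokens) {
	if instanceExists {
		// Reuse the state and tokens already recorded in the ring.
		return instanceDesc.State, ring.Tokens(instanceDesc.Tokens)
	}
	return ring.PENDING, ring.Tokens{}
}

func (noopDelegate) OnRingInstanceTokens(_ *ring.BasicLifecycler, _ ring.Tokens) {}

func (noopDelegate) OnRingInstanceStopping(_ *ring.BasicLifecycler) {}

func (noopDelegate) OnRingInstanceHeartbeat(_ *ring.BasicLifecycler, _ *ring.Desc, _ *ring.InstanceDesc) {}
```
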
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/basic_lifecycler_delegates.go b/vendor/github.com/cortexproject/cortex/pkg/ring/basic_lifecycler_delegates.go
index 8006d350767eb..7126198e15045 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/basic_lifecycler_delegates.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/basic_lifecycler_delegates.go
@@ -21,7 +21,7 @@ func NewLeaveOnStoppingDelegate(next BasicLifecyclerDelegate, logger log.Logger)
}
}
-func (d *LeaveOnStoppingDelegate) OnRingInstanceRegister(lifecycler *BasicLifecycler, ringDesc Desc, instanceExists bool, instanceID string, instanceDesc IngesterDesc) (IngesterState, Tokens) {
+func (d *LeaveOnStoppingDelegate) OnRingInstanceRegister(lifecycler *BasicLifecycler, ringDesc Desc, instanceExists bool, instanceID string, instanceDesc InstanceDesc) (IngesterState, Tokens) {
return d.next.OnRingInstanceRegister(lifecycler, ringDesc, instanceExists, instanceID, instanceDesc)
}
@@ -37,7 +37,7 @@ func (d *LeaveOnStoppingDelegate) OnRingInstanceStopping(lifecycler *BasicLifecy
d.next.OnRingInstanceStopping(lifecycler)
}
-func (d *LeaveOnStoppingDelegate) OnRingInstanceHeartbeat(lifecycler *BasicLifecycler, ringDesc *Desc, instanceDesc *IngesterDesc) {
+func (d *LeaveOnStoppingDelegate) OnRingInstanceHeartbeat(lifecycler *BasicLifecycler, ringDesc *Desc, instanceDesc *InstanceDesc) {
d.next.OnRingInstanceHeartbeat(lifecycler, ringDesc, instanceDesc)
}
@@ -57,7 +57,7 @@ func NewTokensPersistencyDelegate(path string, state IngesterState, next BasicLi
}
}
-func (d *TokensPersistencyDelegate) OnRingInstanceRegister(lifecycler *BasicLifecycler, ringDesc Desc, instanceExists bool, instanceID string, instanceDesc IngesterDesc) (IngesterState, Tokens) {
+func (d *TokensPersistencyDelegate) OnRingInstanceRegister(lifecycler *BasicLifecycler, ringDesc Desc, instanceExists bool, instanceID string, instanceDesc InstanceDesc) (IngesterState, Tokens) {
// Skip if no path has been configured.
if d.tokensPath == "" {
level.Info(d.logger).Log("msg", "not loading tokens from file, tokens file path is empty")
@@ -82,7 +82,7 @@ func (d *TokensPersistencyDelegate) OnRingInstanceRegister(lifecycler *BasicLife
 	// Signal the next delegate that the tokens have been loaded, mimicking the
 	// case where the instance exists in the ring (which is OK because the lifecycler
// will correctly reconcile this case too).
- return d.next.OnRingInstanceRegister(lifecycler, ringDesc, true, lifecycler.GetInstanceID(), IngesterDesc{
+ return d.next.OnRingInstanceRegister(lifecycler, ringDesc, true, lifecycler.GetInstanceID(), InstanceDesc{
Addr: lifecycler.GetInstanceAddr(),
Timestamp: time.Now().Unix(),
RegisteredTimestamp: lifecycler.GetRegisteredAt().Unix(),
@@ -106,7 +106,7 @@ func (d *TokensPersistencyDelegate) OnRingInstanceStopping(lifecycler *BasicLife
d.next.OnRingInstanceStopping(lifecycler)
}
-func (d *TokensPersistencyDelegate) OnRingInstanceHeartbeat(lifecycler *BasicLifecycler, ringDesc *Desc, instanceDesc *IngesterDesc) {
+func (d *TokensPersistencyDelegate) OnRingInstanceHeartbeat(lifecycler *BasicLifecycler, ringDesc *Desc, instanceDesc *InstanceDesc) {
d.next.OnRingInstanceHeartbeat(lifecycler, ringDesc, instanceDesc)
}
@@ -126,7 +126,7 @@ func NewAutoForgetDelegate(forgetPeriod time.Duration, next BasicLifecyclerDeleg
}
}
-func (d *AutoForgetDelegate) OnRingInstanceRegister(lifecycler *BasicLifecycler, ringDesc Desc, instanceExists bool, instanceID string, instanceDesc IngesterDesc) (IngesterState, Tokens) {
+func (d *AutoForgetDelegate) OnRingInstanceRegister(lifecycler *BasicLifecycler, ringDesc Desc, instanceExists bool, instanceID string, instanceDesc InstanceDesc) (IngesterState, Tokens) {
return d.next.OnRingInstanceRegister(lifecycler, ringDesc, instanceExists, instanceID, instanceDesc)
}
@@ -138,7 +138,7 @@ func (d *AutoForgetDelegate) OnRingInstanceStopping(lifecycler *BasicLifecycler)
d.next.OnRingInstanceStopping(lifecycler)
}
-func (d *AutoForgetDelegate) OnRingInstanceHeartbeat(lifecycler *BasicLifecycler, ringDesc *Desc, instanceDesc *IngesterDesc) {
+func (d *AutoForgetDelegate) OnRingInstanceHeartbeat(lifecycler *BasicLifecycler, ringDesc *Desc, instanceDesc *InstanceDesc) {
for id, instance := range ringDesc.Ingesters {
lastHeartbeat := time.Unix(instance.GetTimestamp(), 0)
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/batch.go b/vendor/github.com/cortexproject/cortex/pkg/ring/batch.go
index 3990bdd4dd300..c24dc200deea0 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/batch.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/batch.go
@@ -16,7 +16,7 @@ type batchTracker struct {
}
type instance struct {
- desc IngesterDesc
+ desc InstanceDesc
itemTrackers []*itemTracker
indexes []int
}
@@ -38,7 +38,7 @@ type itemTracker struct {
// to send to that instance.
//
// Not implemented as a method on Ring so we can test separately.
-func DoBatch(ctx context.Context, op Operation, r ReadRing, keys []uint32, callback func(IngesterDesc, []int) error, cleanup func()) error {
+func DoBatch(ctx context.Context, op Operation, r ReadRing, keys []uint32, callback func(InstanceDesc, []int) error, cleanup func()) error {
if r.InstancesCount() <= 0 {
return fmt.Errorf("DoBatch: InstancesCount <= 0")
}
@@ -47,7 +47,7 @@ func DoBatch(ctx context.Context, op Operation, r ReadRing, keys []uint32, callb
instances := make(map[string]instance, r.InstancesCount())
var (
- bufDescs [GetBufferSize]IngesterDesc
+ bufDescs [GetBufferSize]InstanceDesc
bufHosts [GetBufferSize]string
bufZones [GetBufferSize]string
)
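
With this change, `DoBatch` callbacks receive an `InstanceDesc` instead of an `IngesterDesc`. A hedged sketch of the new call shape, where `sendToInstance` stands in for the real per-instance RPC and `ring.Write` is the standard write operation:

```go
package example

import (
	"context"

	"github.com/cortexproject/cortex/pkg/ring"
)

// pushToInstances is an illustrative wrapper around ring.DoBatch showing the
// renamed callback parameter type.
func pushToInstances(ctx context.Context, r ring.ReadRing, keys []uint32,
	sendToInstance func(context.Context, ring.InstanceDesc, []int) error) error {

	return ring.DoBatch(ctx, ring.Write, r, keys,
		func(instance ring.InstanceDesc, indexes []int) error {
			// indexes refer back to positions in keys owned by this instance.
			return sendToInstance(ctx, instance, indexes)
		},
		func() { /* cleanup hook invoked by DoBatch once all requests have completed */ },
	)
}
```
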
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/client/pool.go b/vendor/github.com/cortexproject/cortex/pkg/ring/client/pool.go
index 41bc8728abb36..39da1db688a9b 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/client/pool.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/client/pool.go
@@ -14,6 +14,7 @@ import (
"google.golang.org/grpc/health/grpc_health_v1"
"github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/cortexproject/cortex/pkg/util/services"
)
@@ -158,7 +159,7 @@ func (p *Pool) removeStaleClients() {
serviceAddrs, err := p.discovery()
if err != nil {
- level.Error(util.Logger).Log("msg", "error removing stale clients", "err", err)
+ level.Error(util_log.Logger).Log("msg", "error removing stale clients", "err", err)
return
}
@@ -166,7 +167,7 @@ func (p *Pool) removeStaleClients() {
if util.StringsContain(serviceAddrs, addr) {
continue
}
- level.Info(util.Logger).Log("msg", "removing stale client", "addr", addr)
+ level.Info(util_log.Logger).Log("msg", "removing stale client", "addr", addr)
p.RemoveClientFor(addr)
}
}
@@ -179,7 +180,7 @@ func (p *Pool) cleanUnhealthy() {
if ok {
err := healthCheck(client, p.cfg.HealthCheckTimeout)
if err != nil {
- level.Warn(util.Logger).Log("msg", fmt.Sprintf("removing %s failing healthcheck", p.clientName), "addr", addr, "reason", err)
+ level.Warn(util_log.Logger).Log("msg", fmt.Sprintf("removing %s failing healthcheck", p.clientName), "addr", addr, "reason", err)
p.RemoveClientFor(addr)
}
}
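
Throughout these files the vendored code switches from `util.Logger` to the logger exported by `pkg/util/log`. A one-line illustration of the new import path and the go-kit leveled call it is used with:

```go
package example

import (
	"github.com/go-kit/kit/log/level"

	util_log "github.com/cortexproject/cortex/pkg/util/log"
)

// logStaleClientRemoval shows the logging pattern used above after the move
// of the shared logger from pkg/util to pkg/util/log.
func logStaleClientRemoval(addr string) {
	level.Info(util_log.Logger).Log("msg", "removing stale client", "addr", addr)
}
```
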
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/client/ring_service_discovery.go b/vendor/github.com/cortexproject/cortex/pkg/ring/client/ring_service_discovery.go
index e0ab7ce64b93f..1706edb2a175e 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/client/ring_service_discovery.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/client/ring_service_discovery.go
@@ -1,12 +1,17 @@
package client
import (
+ "errors"
+
"github.com/cortexproject/cortex/pkg/ring"
)
func NewRingServiceDiscovery(r ring.ReadRing) PoolServiceDiscovery {
return func() ([]string, error) {
replicationSet, err := r.GetAllHealthy(ring.Reporting)
+ if errors.Is(err, ring.ErrEmptyRing) {
+ return nil, nil
+ }
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/client.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/client.go
index c1e12863e9573..1c39a473ced79 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/client.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/client.go
@@ -17,6 +17,7 @@ import (
"github.com/cortexproject/cortex/pkg/ring/kv/codec"
"github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
)
const (
@@ -143,14 +144,14 @@ func (c *Client) cas(ctx context.Context, key string, f func(in interface{}) (ou
options := &consul.QueryOptions{}
kvp, _, err := c.kv.Get(key, options.WithContext(ctx))
if err != nil {
- level.Error(util.Logger).Log("msg", "error getting key", "key", key, "err", err)
+ level.Error(util_log.Logger).Log("msg", "error getting key", "key", key, "err", err)
continue
}
var intermediate interface{}
if kvp != nil {
out, err := c.codec.Decode(kvp.Value)
if err != nil {
- level.Error(util.Logger).Log("msg", "error decoding key", "key", key, "err", err)
+ level.Error(util_log.Logger).Log("msg", "error decoding key", "key", key, "err", err)
continue
}
// If key doesn't exist, index will be 0.
@@ -174,7 +175,7 @@ func (c *Client) cas(ctx context.Context, key string, f func(in interface{}) (ou
bytes, err := c.codec.Encode(intermediate)
if err != nil {
- level.Error(util.Logger).Log("msg", "error serialising value", "key", key, "err", err)
+ level.Error(util_log.Logger).Log("msg", "error serialising value", "key", key, "err", err)
continue
}
ok, _, err := c.kv.CAS(&consul.KVPair{
@@ -183,11 +184,11 @@ func (c *Client) cas(ctx context.Context, key string, f func(in interface{}) (ou
ModifyIndex: index,
}, writeOptions.WithContext(ctx))
if err != nil {
- level.Error(util.Logger).Log("msg", "error CASing", "key", key, "err", err)
+ level.Error(util_log.Logger).Log("msg", "error CASing", "key", key, "err", err)
continue
}
if !ok {
- level.Debug(util.Logger).Log("msg", "error CASing, trying again", "key", key, "index", index)
+ level.Debug(util_log.Logger).Log("msg", "error CASing, trying again", "key", key, "index", index)
continue
}
return nil
@@ -213,7 +214,7 @@ func (c *Client) WatchKey(ctx context.Context, key string, f func(interface{}) b
if errors.Is(err, context.Canceled) {
break
}
- level.Error(util.Logger).Log("msg", "error while rate-limiting", "key", key, "err", err)
+ level.Error(util_log.Logger).Log("msg", "error while rate-limiting", "key", key, "err", err)
backoff.Wait()
continue
}
@@ -230,7 +231,7 @@ func (c *Client) WatchKey(ctx context.Context, key string, f func(interface{}) b
// Don't backoff if value is not found (kvp == nil). In that case, Consul still returns index value,
// and next call to Get will block as expected. We handle missing value below.
if err != nil {
- level.Error(util.Logger).Log("msg", "error getting path", "key", key, "err", err)
+ level.Error(util_log.Logger).Log("msg", "error getting path", "key", key, "err", err)
backoff.Wait()
continue
}
@@ -243,13 +244,13 @@ func (c *Client) WatchKey(ctx context.Context, key string, f func(interface{}) b
}
if kvp == nil {
- level.Info(util.Logger).Log("msg", "value is nil", "key", key, "index", index)
+ level.Info(util_log.Logger).Log("msg", "value is nil", "key", key, "index", index)
continue
}
out, err := c.codec.Decode(kvp.Value)
if err != nil {
- level.Error(util.Logger).Log("msg", "error decoding key", "key", key, "err", err)
+ level.Error(util_log.Logger).Log("msg", "error decoding key", "key", key, "err", err)
continue
}
if !f(out) {
@@ -273,7 +274,7 @@ func (c *Client) WatchPrefix(ctx context.Context, prefix string, f func(string,
if errors.Is(err, context.Canceled) {
break
}
- level.Error(util.Logger).Log("msg", "error while rate-limiting", "prefix", prefix, "err", err)
+ level.Error(util_log.Logger).Log("msg", "error while rate-limiting", "prefix", prefix, "err", err)
backoff.Wait()
continue
}
@@ -289,7 +290,7 @@ func (c *Client) WatchPrefix(ctx context.Context, prefix string, f func(string,
// kvps being nil here is not an error -- quite the opposite. Consul returns index,
// which makes next query blocking, so there is no need to detect this and act on it.
if err != nil {
- level.Error(util.Logger).Log("msg", "error getting path", "prefix", prefix, "err", err)
+ level.Error(util_log.Logger).Log("msg", "error getting path", "prefix", prefix, "err", err)
backoff.Wait()
continue
}
@@ -309,7 +310,7 @@ func (c *Client) WatchPrefix(ctx context.Context, prefix string, f func(string,
out, err := c.codec.Decode(kvp.Value)
if err != nil {
- level.Error(util.Logger).Log("msg", "error decoding list of values for prefix:key", "prefix", prefix, "key", kvp.Key, "err", err)
+ level.Error(util_log.Logger).Log("msg", "error decoding list of values for prefix:key", "prefix", prefix, "key", kvp.Key, "err", err)
continue
}
if !f(kvp.Key, out) {
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/mock.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/mock.go
index 708bea76205f2..5d1e4557395b4 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/mock.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/mock.go
@@ -10,7 +10,7 @@ import (
consul "github.com/hashicorp/consul/api"
"github.com/cortexproject/cortex/pkg/ring/kv/codec"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
)
type mockKV struct {
@@ -78,12 +78,12 @@ func (m *mockKV) Put(p *consul.KVPair, q *consul.WriteOptions) (*consul.WriteMet
m.cond.Broadcast()
- level.Debug(util.Logger).Log("msg", "Put", "key", p.Key, "value", fmt.Sprintf("%.40q", p.Value), "modify_index", m.current)
+ level.Debug(util_log.Logger).Log("msg", "Put", "key", p.Key, "value", fmt.Sprintf("%.40q", p.Value), "modify_index", m.current)
return nil, nil
}
func (m *mockKV) CAS(p *consul.KVPair, q *consul.WriteOptions) (bool, *consul.WriteMeta, error) {
- level.Debug(util.Logger).Log("msg", "CAS", "key", p.Key, "modify_index", p.ModifyIndex, "value", fmt.Sprintf("%.40q", p.Value))
+ level.Debug(util_log.Logger).Log("msg", "CAS", "key", p.Key, "modify_index", p.ModifyIndex, "value", fmt.Sprintf("%.40q", p.Value))
m.mtx.Lock()
defer m.mtx.Unlock()
@@ -110,14 +110,14 @@ func (m *mockKV) CAS(p *consul.KVPair, q *consul.WriteOptions) (bool, *consul.Wr
}
func (m *mockKV) Get(key string, q *consul.QueryOptions) (*consul.KVPair, *consul.QueryMeta, error) {
- level.Debug(util.Logger).Log("msg", "Get", "key", key, "wait_index", q.WaitIndex)
+ level.Debug(util_log.Logger).Log("msg", "Get", "key", key, "wait_index", q.WaitIndex)
m.mtx.Lock()
defer m.mtx.Unlock()
value := m.kvps[key]
if value == nil && q.WaitIndex == 0 {
- level.Debug(util.Logger).Log("msg", "Get - not found", "key", key)
+ level.Debug(util_log.Logger).Log("msg", "Get - not found", "key", key)
return nil, &consul.QueryMeta{LastIndex: m.current}, nil
}
@@ -146,17 +146,17 @@ func (m *mockKV) Get(key string, q *consul.QueryOptions) (*consul.KVPair, *consu
}
}
if time.Now().After(deadline) {
- level.Debug(util.Logger).Log("msg", "Get - deadline exceeded", "key", key)
+ level.Debug(util_log.Logger).Log("msg", "Get - deadline exceeded", "key", key)
return nil, &consul.QueryMeta{LastIndex: q.WaitIndex}, nil
}
}
if value == nil {
- level.Debug(util.Logger).Log("msg", "Get - not found", "key", key)
+ level.Debug(util_log.Logger).Log("msg", "Get - not found", "key", key)
return nil, &consul.QueryMeta{LastIndex: m.current}, nil
}
- level.Debug(util.Logger).Log("msg", "Get", "key", key, "modify_index", value.ModifyIndex, "value", fmt.Sprintf("%.40q", value.Value))
+ level.Debug(util_log.Logger).Log("msg", "Get", "key", key, "modify_index", value.ModifyIndex, "value", fmt.Sprintf("%.40q", value.Value))
return copyKVPair(value), &consul.QueryMeta{LastIndex: value.ModifyIndex}, nil
}
@@ -203,7 +203,7 @@ func (m *mockKV) ResetIndex() {
m.current = 0
m.cond.Broadcast()
- level.Debug(util.Logger).Log("msg", "Reset")
+ level.Debug(util_log.Logger).Log("msg", "Reset")
}
func (m *mockKV) ResetIndexForKey(key string) {
@@ -215,7 +215,7 @@ func (m *mockKV) ResetIndexForKey(key string) {
}
m.cond.Broadcast()
- level.Debug(util.Logger).Log("msg", "ResetIndexForKey", "key", key)
+ level.Debug(util_log.Logger).Log("msg", "ResetIndexForKey", "key", key)
}
// mockedMaxWaitTime returns the minimum duration between the input duration
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/etcd/etcd.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/etcd/etcd.go
index fa1e617326899..222fb4ee31989 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/etcd/etcd.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/etcd/etcd.go
@@ -15,6 +15,7 @@ import (
"github.com/cortexproject/cortex/pkg/ring/kv/codec"
"github.com/cortexproject/cortex/pkg/util"
"github.com/cortexproject/cortex/pkg/util/flagext"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
cortex_tls "github.com/cortexproject/cortex/pkg/util/tls"
)
@@ -106,7 +107,7 @@ func (c *Client) CAS(ctx context.Context, key string, f func(in interface{}) (ou
for i := 0; i < c.cfg.MaxRetries; i++ {
resp, err := c.cli.Get(ctx, key)
if err != nil {
- level.Error(util.Logger).Log("msg", "error getting key", "key", key, "err", err)
+ level.Error(util_log.Logger).Log("msg", "error getting key", "key", key, "err", err)
lastErr = err
continue
}
@@ -115,7 +116,7 @@ func (c *Client) CAS(ctx context.Context, key string, f func(in interface{}) (ou
if len(resp.Kvs) > 0 {
intermediate, err = c.codec.Decode(resp.Kvs[0].Value)
if err != nil {
- level.Error(util.Logger).Log("msg", "error decoding key", "key", key, "err", err)
+ level.Error(util_log.Logger).Log("msg", "error decoding key", "key", key, "err", err)
lastErr = err
continue
}
@@ -139,7 +140,7 @@ func (c *Client) CAS(ctx context.Context, key string, f func(in interface{}) (ou
buf, err := c.codec.Encode(intermediate)
if err != nil {
- level.Error(util.Logger).Log("msg", "error serialising value", "key", key, "err", err)
+ level.Error(util_log.Logger).Log("msg", "error serialising value", "key", key, "err", err)
lastErr = err
continue
}
@@ -149,13 +150,13 @@ func (c *Client) CAS(ctx context.Context, key string, f func(in interface{}) (ou
Then(clientv3.OpPut(key, string(buf))).
Commit()
if err != nil {
- level.Error(util.Logger).Log("msg", "error CASing", "key", key, "err", err)
+ level.Error(util_log.Logger).Log("msg", "error CASing", "key", key, "err", err)
lastErr = err
continue
}
 		// result is not Succeeded if the comparison was false, meaning the modify indexes did not match.
if !result.Succeeded {
- level.Debug(util.Logger).Log("msg", "failed to CAS, revision and version did not match in etcd", "key", key, "revision", revision)
+ level.Debug(util_log.Logger).Log("msg", "failed to CAS, revision and version did not match in etcd", "key", key, "revision", revision)
continue
}
@@ -183,7 +184,7 @@ outer:
for backoff.Ongoing() {
for resp := range c.cli.Watch(watchCtx, key) {
if err := resp.Err(); err != nil {
- level.Error(util.Logger).Log("msg", "watch error", "key", key, "err", err)
+ level.Error(util_log.Logger).Log("msg", "watch error", "key", key, "err", err)
continue outer
}
@@ -192,7 +193,7 @@ outer:
for _, event := range resp.Events {
out, err := c.codec.Decode(event.Kv.Value)
if err != nil {
- level.Error(util.Logger).Log("msg", "error decoding key", "key", key, "err", err)
+ level.Error(util_log.Logger).Log("msg", "error decoding key", "key", key, "err", err)
continue
}
@@ -219,7 +220,7 @@ outer:
for backoff.Ongoing() {
for resp := range c.cli.Watch(watchCtx, key, clientv3.WithPrefix()) {
if err := resp.Err(); err != nil {
- level.Error(util.Logger).Log("msg", "watch error", "key", key, "err", err)
+ level.Error(util_log.Logger).Log("msg", "watch error", "key", key, "err", err)
continue outer
}
@@ -228,7 +229,7 @@ outer:
for _, event := range resp.Events {
out, err := c.codec.Decode(event.Kv.Value)
if err != nil {
- level.Error(util.Logger).Log("msg", "error decoding key", "key", key, "err", err)
+ level.Error(util_log.Logger).Log("msg", "error decoding key", "key", key, "err", err)
continue
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/broadcast.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/broadcast.go
index c4687721233cd..f739b67241cd8 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/broadcast.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/broadcast.go
@@ -6,7 +6,7 @@ import (
"github.com/go-kit/kit/log/level"
"github.com/hashicorp/memberlist"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
)
// ringBroadcast implements memberlist.Broadcast interface, which is used by memberlist.TransmitLimitedQueue.
@@ -45,7 +45,7 @@ func (r ringBroadcast) Invalidates(old memberlist.Broadcast) bool {
// otherwise, we may be invalidating some older messages, which however covered different
// ingesters
if r.version >= oldb.version {
- level.Debug(util.Logger).Log("msg", "Invalidating forwarded broadcast", "key", r.key, "version", r.version, "oldVersion", oldb.version, "content", fmt.Sprintf("%v", r.content), "oldContent", fmt.Sprintf("%v", oldb.content))
+ level.Debug(util_log.Logger).Log("msg", "Invalidating forwarded broadcast", "key", r.key, "version", r.version, "oldVersion", oldb.version, "content", fmt.Sprintf("%v", r.content), "oldContent", fmt.Sprintf("%v", oldb.content))
return true
}
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/memberlist_client.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/memberlist_client.go
index 26495ff243bd0..056cc78171871 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/memberlist_client.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/memberlist_client.go
@@ -23,6 +23,7 @@ import (
"github.com/cortexproject/cortex/pkg/ring/kv/codec"
"github.com/cortexproject/cortex/pkg/util"
"github.com/cortexproject/cortex/pkg/util/flagext"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/cortexproject/cortex/pkg/util/services"
)
@@ -192,7 +193,7 @@ func generateRandomSuffix() string {
suffix := make([]byte, 4)
_, err := rand.Read(suffix)
if err != nil {
- level.Error(util.Logger).Log("msg", "failed to generate random suffix", "err", err)
+ level.Error(util_log.Logger).Log("msg", "failed to generate random suffix", "err", err)
return "error"
}
return fmt.Sprintf("%2x", suffix)
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/memberlist_logger.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/memberlist_logger.go
index f37d6fdc6656d..6b35a1e690417 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/memberlist_logger.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/memberlist_logger.go
@@ -11,7 +11,7 @@ import (
// loggerAdapter wraps a Logger and allows it to be passed to the stdlib
// logger's SetOutput. It understand and parses output produced by memberlist
-// library (esp. level). Timestamp from memberlist can be ignored (eg. util.Logger
+// library (esp. level). Timestamp from memberlist can be ignored (eg. pkg/util/log.Logger
// is set up to auto-include timestamp with every message already)
type loggerAdapter struct {
log.Logger
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/metrics.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/metrics.go
index e0fcf7c9964df..2cbcb6b15a983 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/metrics.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/metrics.go
@@ -8,7 +8,7 @@ import (
"github.com/go-kit/kit/log/level"
"github.com/prometheus/client_golang/prometheus"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/cortexproject/cortex/pkg/util/services"
)
@@ -191,7 +191,7 @@ func (m *KV) createAndRegisterMetrics() {
}
if err != nil {
- level.Error(util.Logger).Log("msg", "failed to register prometheus metrics for memberlist", "err", err)
+ level.Error(util_log.Logger).Log("msg", "failed to register prometheus metrics for memberlist", "err", err)
}
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/mock.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/mock.go
index c899b634326ea..ac7ae011df718 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/mock.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/mock.go
@@ -5,7 +5,7 @@ import (
"github.com/go-kit/kit/log/level"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
)
 // The mockClient does not do anything.
@@ -13,7 +13,7 @@ import (
type mockClient struct{}
func buildMockClient() (Client, error) {
- level.Warn(util.Logger).Log("msg", "created mockClient for testing only")
+ level.Warn(util_log.Logger).Log("msg", "created mockClient for testing only")
return mockClient{}, nil
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/multi.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/multi.go
index 3817725fe3a3b..3bfb1bcdbba71 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/multi.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/multi.go
@@ -11,7 +11,7 @@ import (
"github.com/prometheus/client_golang/prometheus"
"go.uber.org/atomic"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/go-kit/kit/log/level"
)
@@ -118,7 +118,7 @@ func NewMultiClient(cfg MultiConfig, clients []kvclient) *MultiClient {
mirrorTimeout: cfg.MirrorTimeout,
mirroringEnabled: atomic.NewBool(cfg.MirrorEnabled),
- logger: log.With(util.Logger, "component", "multikv"),
+ logger: log.With(util_log.Logger, "component", "multikv"),
}
ctx, cancelFn := context.WithCancel(context.Background())
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go b/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go
index bd3d4bf3958bb..4e82d645209b5 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go
@@ -528,7 +528,7 @@ func (i *Lifecycler) initRing(ctx context.Context) error {
ringDesc = in.(*Desc)
}
- ingesterDesc, ok := ringDesc.Ingesters[i.ID]
+ instanceDesc, ok := ringDesc.Ingesters[i.ID]
if !ok {
// The instance doesn't exist in the ring, so it's safe to set the registered timestamp
// as of now.
@@ -554,27 +554,27 @@ func (i *Lifecycler) initRing(ctx context.Context) error {
// The instance already exists in the ring, so we can't change the registered timestamp (even if it's zero)
// but we need to update the local state accordingly.
- i.setRegisteredAt(ingesterDesc.GetRegisteredAt())
+ i.setRegisteredAt(instanceDesc.GetRegisteredAt())
// If the ingester is in the JOINING state this means it crashed due to
// a failed token transfer or some other reason during startup. We want
// to set it back to PENDING in order to start the lifecycle from the
// beginning.
- if ingesterDesc.State == JOINING {
+ if instanceDesc.State == JOINING {
level.Warn(log.Logger).Log("msg", "instance found in ring as JOINING, setting to PENDING",
"ring", i.RingName)
- ingesterDesc.State = PENDING
+ instanceDesc.State = PENDING
return ringDesc, true, nil
}
 		// If the ingester failed to clean up its ring entry it can leave its state in LEAVING.
// Move it into ACTIVE to ensure the ingester joins the ring.
- if ingesterDesc.State == LEAVING && len(ingesterDesc.Tokens) == i.cfg.NumTokens {
- ingesterDesc.State = ACTIVE
+ if instanceDesc.State == LEAVING && len(instanceDesc.Tokens) == i.cfg.NumTokens {
+ instanceDesc.State = ACTIVE
}
// We exist in the ring, so assume the ring is right and copy out tokens & state out of there.
- i.setState(ingesterDesc.State)
+ i.setState(instanceDesc.State)
tokens, _ := ringDesc.TokensFor(i.ID)
i.setTokens(tokens)
@@ -705,18 +705,18 @@ func (i *Lifecycler) updateConsul(ctx context.Context) error {
ringDesc = in.(*Desc)
}
- ingesterDesc, ok := ringDesc.Ingesters[i.ID]
+ instanceDesc, ok := ringDesc.Ingesters[i.ID]
if !ok {
// consul must have restarted
level.Info(log.Logger).Log("msg", "found empty ring, inserting tokens", "ring", i.RingName)
ringDesc.AddIngester(i.ID, i.Addr, i.Zone, i.getTokens(), i.GetState(), i.getRegisteredAt())
} else {
- ingesterDesc.Timestamp = time.Now().Unix()
- ingesterDesc.State = i.GetState()
- ingesterDesc.Addr = i.Addr
- ingesterDesc.Zone = i.Zone
- ingesterDesc.RegisteredTimestamp = i.getRegisteredAt().Unix()
- ringDesc.Ingesters[i.ID] = ingesterDesc
+ instanceDesc.Timestamp = time.Now().Unix()
+ instanceDesc.State = i.GetState()
+ instanceDesc.Addr = i.Addr
+ instanceDesc.Zone = i.Zone
+ instanceDesc.RegisteredTimestamp = i.getRegisteredAt().Unix()
+ ringDesc.Ingesters[i.ID] = instanceDesc
}
return ringDesc, true, nil
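
The heartbeat and update paths above all follow the same compare-and-swap shape: read the ring `Desc` out of the KV store, mutate this instance's `InstanceDesc`, and write the whole descriptor back. A hedged sketch of that pattern against the `kv.Client` interface (the key name, function name, and mutation are illustrative):

```go
package example

import (
	"context"
	"fmt"
	"time"

	"github.com/cortexproject/cortex/pkg/ring"
	"github.com/cortexproject/cortex/pkg/ring/kv"
)

// bumpHeartbeat is an illustrative CAS loop: it updates the heartbeat
// timestamp of one instance inside the ring descriptor stored under ringKey.
func bumpHeartbeat(ctx context.Context, store kv.Client, ringKey, instanceID string) error {
	return store.CAS(ctx, ringKey, func(in interface{}) (out interface{}, retry bool, err error) {
		desc, ok := in.(*ring.Desc)
		if !ok || desc == nil {
			desc = ring.NewDesc()
		}

		instance, ok := desc.Ingesters[instanceID]
		if !ok {
			return nil, false, fmt.Errorf("instance %s not found in ring", instanceID)
		}

		// Update the copy and write it back into the descriptor before CASing.
		instance.Timestamp = time.Now().Unix()
		desc.Ingesters[instanceID] = instance
		return desc, true, nil
	})
}
```
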
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/model.go b/vendor/github.com/cortexproject/cortex/pkg/ring/model.go
index cd41039e57d6b..4187275184f4e 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/model.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/model.go
@@ -12,8 +12,8 @@ import (
"github.com/cortexproject/cortex/pkg/ring/kv/memberlist"
)
-// ByAddr is a sortable list of IngesterDesc.
-type ByAddr []IngesterDesc
+// ByAddr is a sortable list of InstanceDesc.
+type ByAddr []InstanceDesc
func (ts ByAddr) Len() int { return len(ts) }
func (ts ByAddr) Swap(i, j int) { ts[i], ts[j] = ts[j], ts[i] }
@@ -32,15 +32,15 @@ func GetCodec() codec.Codec {
// NewDesc returns an empty ring.Desc
func NewDesc() *Desc {
return &Desc{
- Ingesters: map[string]IngesterDesc{},
+ Ingesters: map[string]InstanceDesc{},
}
}
// AddIngester adds the given ingester to the ring. Ingester will only use supplied tokens,
// any other tokens are removed.
-func (d *Desc) AddIngester(id, addr, zone string, tokens []uint32, state IngesterState, registeredAt time.Time) IngesterDesc {
+func (d *Desc) AddIngester(id, addr, zone string, tokens []uint32, state IngesterState, registeredAt time.Time) InstanceDesc {
if d.Ingesters == nil {
- d.Ingesters = map[string]IngesterDesc{}
+ d.Ingesters = map[string]InstanceDesc{}
}
registeredTimestamp := int64(0)
@@ -48,7 +48,7 @@ func (d *Desc) AddIngester(id, addr, zone string, tokens []uint32, state Ingeste
registeredTimestamp = registeredAt.Unix()
}
- ingester := IngesterDesc{
+ ingester := InstanceDesc{
Addr: addr,
Timestamp: time.Now().Unix(),
RegisteredTimestamp: registeredTimestamp,
@@ -87,8 +87,8 @@ func (d *Desc) ClaimTokens(from, to string) Tokens {
}
// FindIngestersByState returns the list of ingesters in the given state
-func (d *Desc) FindIngestersByState(state IngesterState) []IngesterDesc {
- var result []IngesterDesc
+func (d *Desc) FindIngestersByState(state IngesterState) []InstanceDesc {
+ var result []InstanceDesc
for _, ing := range d.Ingesters {
if ing.State == state {
result = append(result, ing)
@@ -125,7 +125,7 @@ func (d *Desc) TokensFor(id string) (myTokens, allTokens Tokens) {
// GetRegisteredAt returns the timestamp when the instance has been registered to the ring
// or a zero value if unknown.
-func (i *IngesterDesc) GetRegisteredAt() time.Time {
+func (i *InstanceDesc) GetRegisteredAt() time.Time {
if i == nil || i.RegisteredTimestamp == 0 {
return time.Time{}
}
@@ -133,7 +133,7 @@ func (i *IngesterDesc) GetRegisteredAt() time.Time {
return time.Unix(i.RegisteredTimestamp, 0)
}
-func (i *IngesterDesc) IsHealthy(op Operation, heartbeatTimeout time.Duration, now time.Time) bool {
+func (i *InstanceDesc) IsHealthy(op Operation, heartbeatTimeout time.Duration, now time.Time) bool {
healthy := op.IsInstanceInStateHealthy(i.State)
return healthy && now.Unix()-i.Timestamp <= heartbeatTimeout.Milliseconds()/1000
@@ -245,8 +245,8 @@ func (d *Desc) MergeContent() []string {
// buildNormalizedIngestersMap will do the following:
// - sorts tokens and removes duplicates (only within single ingester)
// - it doesn't modify input ring
-func buildNormalizedIngestersMap(inputRing *Desc) map[string]IngesterDesc {
- out := map[string]IngesterDesc{}
+func buildNormalizedIngestersMap(inputRing *Desc) map[string]InstanceDesc {
+ out := map[string]InstanceDesc{}
// Make sure LEFT ingesters have no tokens
for n, ing := range inputRing.Ingesters {
@@ -284,7 +284,7 @@ func buildNormalizedIngestersMap(inputRing *Desc) map[string]IngesterDesc {
return out
}
-func conflictingTokensExist(normalizedIngesters map[string]IngesterDesc) bool {
+func conflictingTokensExist(normalizedIngesters map[string]InstanceDesc) bool {
count := 0
for _, ing := range normalizedIngesters {
count += len(ing.Tokens)
@@ -309,7 +309,7 @@ func conflictingTokensExist(normalizedIngesters map[string]IngesterDesc) bool {
// 2) otherwise node names are compared, and node with "lower" name wins the token
//
// Modifies ingesters map with updated tokens.
-func resolveConflicts(normalizedIngesters map[string]IngesterDesc) {
+func resolveConflicts(normalizedIngesters map[string]InstanceDesc) {
size := 0
for _, ing := range normalizedIngesters {
size += len(ing.Tokens)
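
A small sketch of building a ring descriptor with the renamed type, using the helpers touched above; the IDs, addresses, and tokens are made up.

```go
package example

import (
	"fmt"
	"time"

	"github.com/cortexproject/cortex/pkg/ring"
)

// describeActiveInstances builds a tiny descriptor and lists its ACTIVE
// entries; InstanceDesc is the type formerly named IngesterDesc.
func describeActiveInstances() []ring.InstanceDesc {
	desc := ring.NewDesc()
	desc.AddIngester("ingester-1", "10.0.0.1:9095", "zone-a", []uint32{1, 1000, 2000}, ring.ACTIVE, time.Now())
	desc.AddIngester("ingester-2", "10.0.0.2:9095", "zone-b", []uint32{500, 1500}, ring.LEAVING, time.Now())

	active := desc.FindIngestersByState(ring.ACTIVE)
	for _, in := range active {
		fmt.Println(in.Addr, in.Zone)
	}
	return active
}
```
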
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/replication_set.go b/vendor/github.com/cortexproject/cortex/pkg/ring/replication_set.go
index adc619e85cbfe..391773dff1503 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/replication_set.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/replication_set.go
@@ -9,7 +9,7 @@ import (
// ReplicationSet describes the ingesters to talk to for a given key, and how
// many errors to tolerate.
type ReplicationSet struct {
- Ingesters []IngesterDesc
+ Ingesters []InstanceDesc
// Maximum number of tolerated failing instances. Max errors and max unavailable zones are
// mutually exclusive.
@@ -22,11 +22,11 @@ type ReplicationSet struct {
 // Do function f in parallel for all replicas in the set, erroring if we exceed
// MaxErrors and returning early otherwise.
-func (r ReplicationSet) Do(ctx context.Context, delay time.Duration, f func(context.Context, *IngesterDesc) (interface{}, error)) ([]interface{}, error) {
+func (r ReplicationSet) Do(ctx context.Context, delay time.Duration, f func(context.Context, *InstanceDesc) (interface{}, error)) ([]interface{}, error) {
type instanceResult struct {
res interface{}
err error
- instance *IngesterDesc
+ instance *InstanceDesc
}
 	// Initialise the result tracker, which is used to keep track of successes and failures.
@@ -46,7 +46,7 @@ func (r ReplicationSet) Do(ctx context.Context, delay time.Duration, f func(cont
// Spawn a goroutine for each instance.
for i := range r.Ingesters {
- go func(i int, ing *IngesterDesc) {
+ go func(i int, ing *InstanceDesc) {
// Wait to send extra requests. Works only when zone-awareness is disabled.
if delay > 0 && r.MaxUnavailableZones == 0 && i >= len(r.Ingesters)-r.MaxErrors {
after := time.NewTimer(delay)
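
`ReplicationSet.Do` now hands each callback an `*InstanceDesc`. A hedged sketch of fanning a query out over a replication set, where `queryInstance` is a placeholder for the real per-instance call:

```go
package example

import (
	"context"

	"github.com/cortexproject/cortex/pkg/ring"
)

// queryReplicationSet fans queryInstance out to every instance in the set and
// returns the per-instance results, tolerating up to MaxErrors failures.
func queryReplicationSet(ctx context.Context, rs ring.ReplicationSet,
	queryInstance func(context.Context, *ring.InstanceDesc) (interface{}, error)) ([]interface{}, error) {

	// A zero delay means no extra-request staggering is applied.
	return rs.Do(ctx, 0, func(ctx context.Context, in *ring.InstanceDesc) (interface{}, error) {
		return queryInstance(ctx, in)
	})
}
```
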
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/replication_set_tracker.go b/vendor/github.com/cortexproject/cortex/pkg/ring/replication_set_tracker.go
index 09f12e3cebbf6..fcdf5441dd24e 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/replication_set_tracker.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/replication_set_tracker.go
@@ -3,7 +3,7 @@ package ring
type replicationSetResultTracker interface {
// Signals an instance has done the execution, either successful (no error)
// or failed (with error).
- done(instance *IngesterDesc, err error)
+ done(instance *InstanceDesc, err error)
// Returns true if the minimum number of successful results have been received.
succeeded() bool
@@ -19,7 +19,7 @@ type defaultResultTracker struct {
maxErrors int
}
-func newDefaultResultTracker(instances []IngesterDesc, maxErrors int) *defaultResultTracker {
+func newDefaultResultTracker(instances []InstanceDesc, maxErrors int) *defaultResultTracker {
return &defaultResultTracker{
minSucceeded: len(instances) - maxErrors,
numSucceeded: 0,
@@ -28,7 +28,7 @@ func newDefaultResultTracker(instances []IngesterDesc, maxErrors int) *defaultRe
}
}
-func (t *defaultResultTracker) done(_ *IngesterDesc, err error) {
+func (t *defaultResultTracker) done(_ *InstanceDesc, err error) {
if err == nil {
t.numSucceeded++
} else {
@@ -53,7 +53,7 @@ type zoneAwareResultTracker struct {
maxUnavailableZones int
}
-func newZoneAwareResultTracker(instances []IngesterDesc, maxUnavailableZones int) *zoneAwareResultTracker {
+func newZoneAwareResultTracker(instances []InstanceDesc, maxUnavailableZones int) *zoneAwareResultTracker {
t := &zoneAwareResultTracker{
waitingByZone: make(map[string]int),
failuresByZone: make(map[string]int),
@@ -68,7 +68,7 @@ func newZoneAwareResultTracker(instances []IngesterDesc, maxUnavailableZones int
return t
}
-func (t *zoneAwareResultTracker) done(instance *IngesterDesc, err error) {
+func (t *zoneAwareResultTracker) done(instance *InstanceDesc, err error) {
t.waitingByZone[instance.Zone]--
if err != nil {
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/replication_strategy.go b/vendor/github.com/cortexproject/cortex/pkg/ring/replication_strategy.go
index f28a54e618274..e572cb77a441c 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/replication_strategy.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/replication_strategy.go
@@ -11,7 +11,7 @@ type ReplicationStrategy interface {
// Filter out unhealthy instances and checks if there're enough instances
// for an operation to succeed. Returns an error if there are not enough
// instances.
- Filter(instances []IngesterDesc, op Operation, replicationFactor int, heartbeatTimeout time.Duration, zoneAwarenessEnabled bool) (healthy []IngesterDesc, maxFailures int, err error)
+ Filter(instances []InstanceDesc, op Operation, replicationFactor int, heartbeatTimeout time.Duration, zoneAwarenessEnabled bool) (healthy []InstanceDesc, maxFailures int, err error)
}
type defaultReplicationStrategy struct{}
@@ -26,7 +26,7 @@ func NewDefaultReplicationStrategy() ReplicationStrategy {
 // - Filters out unhealthy instances so that we don't even try to write to them.
// - Checks there are enough instances for an operation to succeed.
// The instances argument may be overwritten.
-func (s *defaultReplicationStrategy) Filter(instances []IngesterDesc, op Operation, replicationFactor int, heartbeatTimeout time.Duration, zoneAwarenessEnabled bool) ([]IngesterDesc, int, error) {
+func (s *defaultReplicationStrategy) Filter(instances []InstanceDesc, op Operation, replicationFactor int, heartbeatTimeout time.Duration, zoneAwarenessEnabled bool) ([]InstanceDesc, int, error) {
// We need a response from a quorum of instances, which is n/2 + 1. In the
// case of a node joining/leaving, the actual replica set might be bigger
 	// than the replication factor, so use the bigger of the two.
@@ -71,7 +71,7 @@ func NewIgnoreUnhealthyInstancesReplicationStrategy() ReplicationStrategy {
return &ignoreUnhealthyInstancesReplicationStrategy{}
}
-func (r *ignoreUnhealthyInstancesReplicationStrategy) Filter(instances []IngesterDesc, op Operation, _ int, heartbeatTimeout time.Duration, _ bool) (healthy []IngesterDesc, maxFailures int, err error) {
+func (r *ignoreUnhealthyInstancesReplicationStrategy) Filter(instances []InstanceDesc, op Operation, _ int, heartbeatTimeout time.Duration, _ bool) (healthy []InstanceDesc, maxFailures int, err error) {
now := time.Now()
// Filter out unhealthy instances.
for i := 0; i < len(instances); {
@@ -90,7 +90,7 @@ func (r *ignoreUnhealthyInstancesReplicationStrategy) Filter(instances []Ingeste
return instances, len(instances) - 1, nil
}
-func (r *Ring) IsHealthy(instance *IngesterDesc, op Operation, now time.Time) bool {
+func (r *Ring) IsHealthy(instance *InstanceDesc, op Operation, now time.Time) bool {
return instance.IsHealthy(op, r.cfg.HeartbeatTimeout, now)
}
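
Custom `ReplicationStrategy` implementations pick up the new `InstanceDesc` slice type as well. A minimal sketch of a strategy that only drops unhealthy instances, similar in spirit to the ignore-unhealthy strategy in the diff above (the name and error message are illustrative):

```go
package example

import (
	"errors"
	"time"

	"github.com/cortexproject/cortex/pkg/ring"
)

// lenientStrategy keeps every healthy instance and tolerates all but one of
// them failing. Purely illustrative.
type lenientStrategy struct{}

var _ ring.ReplicationStrategy = lenientStrategy{}

func (lenientStrategy) Filter(instances []ring.InstanceDesc, op ring.Operation, _ int, heartbeatTimeout time.Duration, _ bool) ([]ring.InstanceDesc, int, error) {
	now := time.Now()
	healthy := instances[:0]
	for _, in := range instances {
		// Keep only instances whose heartbeat is recent and whose state is
		// acceptable for the operation.
		if in.IsHealthy(op, heartbeatTimeout, now) {
			healthy = append(healthy, in)
		}
	}
	if len(healthy) == 0 {
		return nil, 0, errors.New("at least one healthy instance is required")
	}
	return healthy, len(healthy) - 1, nil
}
```
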
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.go b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.go
index ad24dc31a02e5..60e3b6e772eed 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.go
@@ -49,7 +49,7 @@ type ReadRing interface {
// Get returns n (or more) instances which form the replicas for the given key.
// bufDescs, bufHosts and bufZones are slices to be overwritten for the return value
// to avoid memory allocation; can be nil, or created with ring.MakeBuffersForGet().
- Get(key uint32, op Operation, bufDescs []IngesterDesc, bufHosts, bufZones []string) (ReplicationSet, error)
+ Get(key uint32, op Operation, bufDescs []InstanceDesc, bufHosts, bufZones []string) (ReplicationSet, error)
// GetAllHealthy returns all healthy instances in the ring, for the given operation.
// This function doesn't check if the quorum is honored, so doesn't fail if the number
@@ -304,7 +304,7 @@ func (r *Ring) loop(ctx context.Context) error {
}
// Get returns n (or more) instances which form the replicas for the given key.
-func (r *Ring) Get(key uint32, op Operation, bufDescs []IngesterDesc, bufHosts, bufZones []string) (ReplicationSet, error) {
+func (r *Ring) Get(key uint32, op Operation, bufDescs []InstanceDesc, bufHosts, bufZones []string) (ReplicationSet, error) {
r.mtx.RLock()
defer r.mtx.RUnlock()
if r.ringDesc == nil || len(r.ringTokens) == 0 {
@@ -380,7 +380,7 @@ func (r *Ring) GetAllHealthy(op Operation) (ReplicationSet, error) {
}
now := time.Now()
- instances := make([]IngesterDesc, 0, len(r.ringDesc.Ingesters))
+ instances := make([]InstanceDesc, 0, len(r.ringDesc.Ingesters))
for _, instance := range r.ringDesc.Ingesters {
if r.IsHealthy(&instance, op, now) {
instances = append(instances, instance)
@@ -403,7 +403,7 @@ func (r *Ring) GetReplicationSetForOperation(op Operation) (ReplicationSet, erro
}
// Build the initial replication set, excluding unhealthy instances.
- healthyInstances := make([]IngesterDesc, 0, len(r.ringDesc.Ingesters))
+ healthyInstances := make([]InstanceDesc, 0, len(r.ringDesc.Ingesters))
zoneFailures := make(map[string]struct{})
now := time.Now()
@@ -438,7 +438,7 @@ func (r *Ring) GetReplicationSetForOperation(op Operation) (ReplicationSet, erro
// enabled (data is replicated to RF different zones), there's no benefit in
// querying healthy instances from "failing zones". A zone is considered
 	// failed if there is a single error.
- filteredInstances := make([]IngesterDesc, 0, len(r.ringDesc.Ingesters))
+ filteredInstances := make([]InstanceDesc, 0, len(r.ringDesc.Ingesters))
for _, instance := range healthyInstances {
if _, ok := zoneFailures[instance.Zone]; !ok {
filteredInstances = append(filteredInstances, instance)
@@ -648,7 +648,7 @@ func (r *Ring) shuffleShard(identifier string, size int, lookbackPeriod time.Dur
actualZones = []string{""}
}
- shard := make(map[string]IngesterDesc, size)
+ shard := make(map[string]InstanceDesc, size)
// We need to iterate zones always in the same order to guarantee stability.
for _, zone := range actualZones {
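
`ReadRing.Get` now takes `InstanceDesc` buffers. A sketch of looking up the replicas for a hashed key, passing nil buffers (which the comment above explicitly allows); `ring.Write` is the standard write operation and the helper name is illustrative:

```go
package example

import (
	"github.com/cortexproject/cortex/pkg/ring"
)

// replicasFor returns the instances responsible for a token. Nil buffers make
// the ring allocate them itself; ring.MakeBuffersForGet() could be used on
// hot paths to avoid the allocation.
func replicasFor(r ring.ReadRing, key uint32) ([]ring.InstanceDesc, error) {
	rs, err := r.Get(key, ring.Write, nil, nil, nil)
	if err != nil {
		return nil, err
	}
	return rs.Ingesters, nil
}
```
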
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.pb.go b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.pb.go
index 30e1646acf5c8..7bfadacad7b8c 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.pb.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.pb.go
@@ -60,7 +60,7 @@ func (IngesterState) EnumDescriptor() ([]byte, []int) {
}
type Desc struct {
- Ingesters map[string]IngesterDesc `protobuf:"bytes,1,rep,name=ingesters,proto3" json:"ingesters" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ Ingesters map[string]InstanceDesc `protobuf:"bytes,1,rep,name=ingesters,proto3" json:"ingesters" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
}
func (m *Desc) Reset() { *m = Desc{} }
@@ -95,14 +95,14 @@ func (m *Desc) XXX_DiscardUnknown() {
var xxx_messageInfo_Desc proto.InternalMessageInfo
-func (m *Desc) GetIngesters() map[string]IngesterDesc {
+func (m *Desc) GetIngesters() map[string]InstanceDesc {
if m != nil {
return m.Ingesters
}
return nil
}
-type IngesterDesc struct {
+type InstanceDesc struct {
Addr string `protobuf:"bytes,1,opt,name=addr,proto3" json:"addr,omitempty"`
// Unix timestamp (with seconds precision) of the last heartbeat sent
// by this instance.
@@ -126,17 +126,17 @@ type IngesterDesc struct {
RegisteredTimestamp int64 `protobuf:"varint,8,opt,name=registered_timestamp,json=registeredTimestamp,proto3" json:"registered_timestamp,omitempty"`
}
-func (m *IngesterDesc) Reset() { *m = IngesterDesc{} }
-func (*IngesterDesc) ProtoMessage() {}
-func (*IngesterDesc) Descriptor() ([]byte, []int) {
+func (m *InstanceDesc) Reset() { *m = InstanceDesc{} }
+func (*InstanceDesc) ProtoMessage() {}
+func (*InstanceDesc) Descriptor() ([]byte, []int) {
return fileDescriptor_26381ed67e202a6e, []int{1}
}
-func (m *IngesterDesc) XXX_Unmarshal(b []byte) error {
+func (m *InstanceDesc) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
-func (m *IngesterDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+func (m *InstanceDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
- return xxx_messageInfo_IngesterDesc.Marshal(b, m, deterministic)
+ return xxx_messageInfo_InstanceDesc.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
@@ -146,54 +146,54 @@ func (m *IngesterDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
return b[:n], nil
}
}
-func (m *IngesterDesc) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IngesterDesc.Merge(m, src)
+func (m *InstanceDesc) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_InstanceDesc.Merge(m, src)
}
-func (m *IngesterDesc) XXX_Size() int {
+func (m *InstanceDesc) XXX_Size() int {
return m.Size()
}
-func (m *IngesterDesc) XXX_DiscardUnknown() {
- xxx_messageInfo_IngesterDesc.DiscardUnknown(m)
+func (m *InstanceDesc) XXX_DiscardUnknown() {
+ xxx_messageInfo_InstanceDesc.DiscardUnknown(m)
}
-var xxx_messageInfo_IngesterDesc proto.InternalMessageInfo
+var xxx_messageInfo_InstanceDesc proto.InternalMessageInfo
-func (m *IngesterDesc) GetAddr() string {
+func (m *InstanceDesc) GetAddr() string {
if m != nil {
return m.Addr
}
return ""
}
-func (m *IngesterDesc) GetTimestamp() int64 {
+func (m *InstanceDesc) GetTimestamp() int64 {
if m != nil {
return m.Timestamp
}
return 0
}
-func (m *IngesterDesc) GetState() IngesterState {
+func (m *InstanceDesc) GetState() IngesterState {
if m != nil {
return m.State
}
return ACTIVE
}
-func (m *IngesterDesc) GetTokens() []uint32 {
+func (m *InstanceDesc) GetTokens() []uint32 {
if m != nil {
return m.Tokens
}
return nil
}
-func (m *IngesterDesc) GetZone() string {
+func (m *InstanceDesc) GetZone() string {
if m != nil {
return m.Zone
}
return ""
}
-func (m *IngesterDesc) GetRegisteredTimestamp() int64 {
+func (m *InstanceDesc) GetRegisteredTimestamp() int64 {
if m != nil {
return m.RegisteredTimestamp
}
@@ -203,41 +203,41 @@ func (m *IngesterDesc) GetRegisteredTimestamp() int64 {
func init() {
proto.RegisterEnum("ring.IngesterState", IngesterState_name, IngesterState_value)
proto.RegisterType((*Desc)(nil), "ring.Desc")
- proto.RegisterMapType((map[string]IngesterDesc)(nil), "ring.Desc.IngestersEntry")
- proto.RegisterType((*IngesterDesc)(nil), "ring.IngesterDesc")
+ proto.RegisterMapType((map[string]InstanceDesc)(nil), "ring.Desc.IngestersEntry")
+ proto.RegisterType((*InstanceDesc)(nil), "ring.InstanceDesc")
}
func init() { proto.RegisterFile("ring.proto", fileDescriptor_26381ed67e202a6e) }
var fileDescriptor_26381ed67e202a6e = []byte{
- // 421 bytes of a gzipped FileDescriptorProto
+ // 427 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x92, 0x31, 0x6f, 0xd3, 0x40,
0x1c, 0xc5, 0xef, 0x1f, 0x5f, 0x5c, 0xe7, 0x1f, 0x5a, 0x59, 0x57, 0x84, 0x4c, 0x85, 0x0e, 0xab,
0x93, 0x41, 0xc2, 0x15, 0x81, 0x01, 0x21, 0x31, 0xb4, 0xd4, 0x20, 0x5b, 0x51, 0xa8, 0x4c, 0xd4,
- 0x15, 0x39, 0xcd, 0x61, 0xac, 0x12, 0xbb, 0xb2, 0x2f, 0x48, 0x65, 0xe2, 0x23, 0xf0, 0x05, 0xd8,
- 0xf9, 0x28, 0x1d, 0x33, 0xa1, 0x4e, 0x88, 0x38, 0x0b, 0x63, 0x3f, 0x02, 0xba, 0x73, 0x23, 0x93,
- 0xed, 0xfd, 0xfc, 0xde, 0xff, 0x3d, 0x0f, 0x87, 0x58, 0x66, 0x79, 0xea, 0x5f, 0x94, 0x85, 0x2c,
- 0x18, 0x55, 0x7a, 0xef, 0x49, 0x9a, 0xc9, 0x4f, 0xf3, 0x89, 0x7f, 0x56, 0xcc, 0x0e, 0xd2, 0x22,
- 0x2d, 0x0e, 0xb4, 0x39, 0x99, 0x7f, 0xd4, 0xa4, 0x41, 0xab, 0xe6, 0x68, 0xff, 0x07, 0x20, 0x3d,
- 0x16, 0xd5, 0x19, 0x7b, 0x85, 0xbd, 0x2c, 0x4f, 0x45, 0x25, 0x45, 0x59, 0x39, 0xe0, 0x1a, 0x5e,
- 0x7f, 0x70, 0xdf, 0xd7, 0xed, 0xca, 0xf6, 0xc3, 0xb5, 0x17, 0xe4, 0xb2, 0xbc, 0x3c, 0xa2, 0x57,
- 0xbf, 0x1f, 0x92, 0xb8, 0xbd, 0xd8, 0x3b, 0xc1, 0x9d, 0xcd, 0x08, 0xb3, 0xd1, 0x38, 0x17, 0x97,
- 0x0e, 0xb8, 0xe0, 0xf5, 0x62, 0x25, 0x99, 0x87, 0xdd, 0x2f, 0xc9, 0xe7, 0xb9, 0x70, 0x3a, 0x2e,
- 0x78, 0xfd, 0x01, 0x6b, 0xea, 0xd7, 0x67, 0x6a, 0x26, 0x6e, 0x02, 0x2f, 0x3b, 0x2f, 0x20, 0xa2,
- 0x56, 0xc7, 0x36, 0xf6, 0x7f, 0x01, 0xde, 0xf9, 0x3f, 0xc1, 0x18, 0xd2, 0x64, 0x3a, 0x2d, 0x6f,
- 0x7b, 0xb5, 0x66, 0x0f, 0xb0, 0x27, 0xb3, 0x99, 0xa8, 0x64, 0x32, 0xbb, 0xd0, 0xe5, 0x46, 0xdc,
- 0x7e, 0x60, 0x8f, 0xb0, 0x5b, 0xc9, 0x44, 0x0a, 0xc7, 0x70, 0xc1, 0xdb, 0x19, 0xec, 0x6e, 0xce,
- 0xbe, 0x57, 0x56, 0xdc, 0x24, 0xd8, 0x3d, 0x34, 0x65, 0x71, 0x2e, 0xf2, 0xca, 0x31, 0x5d, 0xc3,
- 0xdb, 0x8e, 0x6f, 0x49, 0x8d, 0x7e, 0x2d, 0x72, 0xe1, 0x6c, 0x35, 0xa3, 0x4a, 0xb3, 0xa7, 0x78,
- 0xb7, 0x14, 0x69, 0xa6, 0x3a, 0xc4, 0xf4, 0x43, 0xbb, 0x6f, 0xe9, 0xfd, 0xdd, 0xd6, 0x1b, 0xaf,
- 0xad, 0x88, 0x5a, 0xd4, 0xee, 0x46, 0xd4, 0xea, 0xda, 0xe6, 0xe3, 0x21, 0x6e, 0x6f, 0xfc, 0x02,
- 0x43, 0x34, 0x0f, 0x5f, 0x8f, 0xc3, 0xd3, 0xc0, 0x26, 0xac, 0x8f, 0x5b, 0xc3, 0xe0, 0xf0, 0x34,
- 0x1c, 0xbd, 0xb5, 0x41, 0xc1, 0x49, 0x30, 0x3a, 0x56, 0xd0, 0x51, 0x10, 0xbd, 0x0b, 0x47, 0x0a,
- 0x0c, 0x66, 0x21, 0x1d, 0x06, 0x6f, 0xc6, 0x36, 0x3d, 0x7a, 0xbe, 0x58, 0x72, 0x72, 0xbd, 0xe4,
- 0xe4, 0x66, 0xc9, 0xe1, 0x5b, 0xcd, 0xe1, 0x67, 0xcd, 0xe1, 0xaa, 0xe6, 0xb0, 0xa8, 0x39, 0xfc,
- 0xa9, 0x39, 0xfc, 0xad, 0x39, 0xb9, 0xa9, 0x39, 0x7c, 0x5f, 0x71, 0xb2, 0x58, 0x71, 0x72, 0xbd,
- 0xe2, 0x64, 0x62, 0xea, 0x37, 0xf0, 0xec, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x56, 0xd8, 0x87,
- 0x71, 0x46, 0x02, 0x00, 0x00,
+ 0x15, 0x39, 0xc9, 0x61, 0xac, 0x12, 0xbb, 0xb2, 0x2f, 0x48, 0x65, 0xe2, 0x23, 0xf0, 0x05, 0xd8,
+ 0xf9, 0x28, 0x1d, 0x33, 0xa1, 0x4e, 0x88, 0x38, 0x0b, 0x63, 0x3f, 0x02, 0xba, 0x73, 0x23, 0xd3,
+ 0xed, 0xfd, 0xfc, 0xde, 0xbd, 0xf7, 0x1f, 0x8c, 0x58, 0x66, 0x79, 0xea, 0x9f, 0x97, 0x85, 0x2c,
+ 0x18, 0x55, 0x7a, 0xef, 0x49, 0x9a, 0xc9, 0x4f, 0x8b, 0x89, 0x3f, 0x2d, 0xe6, 0x07, 0x69, 0x91,
+ 0x16, 0x07, 0xda, 0x9c, 0x2c, 0x3e, 0x6a, 0xd2, 0xa0, 0x55, 0xf3, 0x68, 0xff, 0x07, 0x20, 0x3d,
+ 0x16, 0xd5, 0x94, 0xbd, 0xc2, 0x5e, 0x96, 0xa7, 0xa2, 0x92, 0xa2, 0xac, 0x1c, 0x70, 0x0d, 0xaf,
+ 0x3f, 0xb8, 0xef, 0xeb, 0x76, 0x65, 0xfb, 0xe1, 0xc6, 0x0b, 0x72, 0x59, 0x5e, 0x1c, 0xd1, 0xcb,
+ 0xdf, 0x0f, 0x49, 0xdc, 0xbe, 0xd8, 0x3b, 0xc1, 0x9d, 0xdb, 0x11, 0x66, 0xa3, 0x71, 0x26, 0x2e,
+ 0x1c, 0x70, 0xc1, 0xeb, 0xc5, 0x4a, 0x32, 0x0f, 0xbb, 0x5f, 0x92, 0xcf, 0x0b, 0xe1, 0x74, 0x5c,
+ 0xf0, 0xfa, 0x03, 0xd6, 0xd4, 0x87, 0x79, 0x25, 0x93, 0x7c, 0x2a, 0xd4, 0x4c, 0xdc, 0x04, 0x5e,
+ 0x76, 0x5e, 0x40, 0x44, 0xad, 0x8e, 0x6d, 0xec, 0xff, 0x02, 0xbc, 0xf3, 0x7f, 0x82, 0x31, 0xa4,
+ 0xc9, 0x6c, 0x56, 0xde, 0xf4, 0x6a, 0xcd, 0x1e, 0x60, 0x4f, 0x66, 0x73, 0x51, 0xc9, 0x64, 0x7e,
+ 0xae, 0xcb, 0x8d, 0xb8, 0xfd, 0xc0, 0x1e, 0x61, 0xb7, 0x92, 0x89, 0x14, 0x8e, 0xe1, 0x82, 0xb7,
+ 0x33, 0xd8, 0xdd, 0xcc, 0x36, 0xd7, 0xbe, 0x57, 0x56, 0xdc, 0x24, 0xd8, 0x3d, 0x34, 0x65, 0x71,
+ 0x26, 0xf2, 0xca, 0x31, 0x5d, 0xc3, 0xdb, 0x8e, 0x6f, 0x48, 0x8d, 0x7e, 0x2d, 0x72, 0xe1, 0x6c,
+ 0x35, 0xa3, 0x4a, 0xb3, 0xa7, 0x78, 0xb7, 0x14, 0x69, 0xa6, 0x3a, 0xc4, 0xec, 0x43, 0xbb, 0x6f,
+ 0xe9, 0xfd, 0xdd, 0xd6, 0x1b, 0x6f, 0xac, 0x88, 0x5a, 0xd4, 0xee, 0x46, 0xd4, 0xea, 0xda, 0xe6,
+ 0xe3, 0x21, 0x6e, 0xdf, 0x3a, 0x81, 0x21, 0x9a, 0x87, 0xaf, 0xc7, 0xe1, 0x69, 0x60, 0x13, 0xd6,
+ 0xc7, 0xad, 0x61, 0x70, 0x78, 0x1a, 0x8e, 0xde, 0xda, 0xa0, 0xe0, 0x24, 0x18, 0x1d, 0x2b, 0xe8,
+ 0x28, 0x88, 0xde, 0x85, 0x23, 0x05, 0x06, 0xb3, 0x90, 0x0e, 0x83, 0x37, 0x63, 0x9b, 0x1e, 0x3d,
+ 0x5f, 0xae, 0x38, 0xb9, 0x5a, 0x71, 0x72, 0xbd, 0xe2, 0xf0, 0xad, 0xe6, 0xf0, 0xb3, 0xe6, 0x70,
+ 0x59, 0x73, 0x58, 0xd6, 0x1c, 0xfe, 0xd4, 0x1c, 0xfe, 0xd6, 0x9c, 0x5c, 0xd7, 0x1c, 0xbe, 0xaf,
+ 0x39, 0x59, 0xae, 0x39, 0xb9, 0x5a, 0x73, 0x32, 0x31, 0xf5, 0x3f, 0xf0, 0xec, 0x5f, 0x00, 0x00,
+ 0x00, 0xff, 0xff, 0x79, 0x5b, 0xe1, 0x8b, 0x46, 0x02, 0x00, 0x00,
}
func (x IngesterState) String() string {
@@ -278,14 +278,14 @@ func (this *Desc) Equal(that interface{}) bool {
}
return true
}
-func (this *IngesterDesc) Equal(that interface{}) bool {
+func (this *InstanceDesc) Equal(that interface{}) bool {
if that == nil {
return this == nil
}
- that1, ok := that.(*IngesterDesc)
+ that1, ok := that.(*InstanceDesc)
if !ok {
- that2, ok := that.(IngesterDesc)
+ that2, ok := that.(InstanceDesc)
if ok {
that1 = &that2
} else {
@@ -333,7 +333,7 @@ func (this *Desc) GoString() string {
keysForIngesters = append(keysForIngesters, k)
}
github_com_gogo_protobuf_sortkeys.Strings(keysForIngesters)
- mapStringForIngesters := "map[string]IngesterDesc{"
+ mapStringForIngesters := "map[string]InstanceDesc{"
for _, k := range keysForIngesters {
mapStringForIngesters += fmt.Sprintf("%#v: %#v,", k, this.Ingesters[k])
}
@@ -344,12 +344,12 @@ func (this *Desc) GoString() string {
s = append(s, "}")
return strings.Join(s, "")
}
-func (this *IngesterDesc) GoString() string {
+func (this *InstanceDesc) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 10)
- s = append(s, "&ring.IngesterDesc{")
+ s = append(s, "&ring.InstanceDesc{")
s = append(s, "Addr: "+fmt.Sprintf("%#v", this.Addr)+",\n")
s = append(s, "Timestamp: "+fmt.Sprintf("%#v", this.Timestamp)+",\n")
s = append(s, "State: "+fmt.Sprintf("%#v", this.State)+",\n")
@@ -414,7 +414,7 @@ func (m *Desc) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
-func (m *IngesterDesc) Marshal() (dAtA []byte, err error) {
+func (m *InstanceDesc) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -424,12 +424,12 @@ func (m *IngesterDesc) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *IngesterDesc) MarshalTo(dAtA []byte) (int, error) {
+func (m *InstanceDesc) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *IngesterDesc) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *InstanceDesc) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
@@ -513,7 +513,7 @@ func (m *Desc) Size() (n int) {
return n
}
-func (m *IngesterDesc) Size() (n int) {
+func (m *InstanceDesc) Size() (n int) {
if m == nil {
return 0
}
@@ -561,7 +561,7 @@ func (this *Desc) String() string {
keysForIngesters = append(keysForIngesters, k)
}
github_com_gogo_protobuf_sortkeys.Strings(keysForIngesters)
- mapStringForIngesters := "map[string]IngesterDesc{"
+ mapStringForIngesters := "map[string]InstanceDesc{"
for _, k := range keysForIngesters {
mapStringForIngesters += fmt.Sprintf("%v: %v,", k, this.Ingesters[k])
}
@@ -572,11 +572,11 @@ func (this *Desc) String() string {
}, "")
return s
}
-func (this *IngesterDesc) String() string {
+func (this *InstanceDesc) String() string {
if this == nil {
return "nil"
}
- s := strings.Join([]string{`&IngesterDesc{`,
+ s := strings.Join([]string{`&InstanceDesc{`,
`Addr:` + fmt.Sprintf("%v", this.Addr) + `,`,
`Timestamp:` + fmt.Sprintf("%v", this.Timestamp) + `,`,
`State:` + fmt.Sprintf("%v", this.State) + `,`,
@@ -654,10 +654,10 @@ func (m *Desc) Unmarshal(dAtA []byte) error {
return io.ErrUnexpectedEOF
}
if m.Ingesters == nil {
- m.Ingesters = make(map[string]IngesterDesc)
+ m.Ingesters = make(map[string]InstanceDesc)
}
var mapkey string
- mapvalue := &IngesterDesc{}
+ mapvalue := &InstanceDesc{}
for iNdEx < postIndex {
entryPreIndex := iNdEx
var wire uint64
@@ -731,7 +731,7 @@ func (m *Desc) Unmarshal(dAtA []byte) error {
if postmsgIndex > l {
return io.ErrUnexpectedEOF
}
- mapvalue = &IngesterDesc{}
+ mapvalue = &InstanceDesc{}
if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
return err
}
@@ -777,7 +777,7 @@ func (m *Desc) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *IngesterDesc) Unmarshal(dAtA []byte) error {
+func (m *InstanceDesc) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -800,10 +800,10 @@ func (m *IngesterDesc) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: IngesterDesc: wiretype end group for non-group")
+ return fmt.Errorf("proto: InstanceDesc: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: IngesterDesc: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: InstanceDesc: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.proto b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.proto
index 2adc91a806c3e..4eab6f733ccf9 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.proto
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.proto
@@ -8,11 +8,11 @@ option (gogoproto.marshaler_all) = true;
option (gogoproto.unmarshaler_all) = true;
message Desc {
- map<string,IngesterDesc> ingesters = 1 [(gogoproto.nullable) = false];
+ map<string,InstanceDesc> ingesters = 1 [(gogoproto.nullable) = false];
reserved 2;
}
-message IngesterDesc {
+message InstanceDesc {
reserved 4, 5; // old, deprecated fields
string addr = 1;
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/util.go b/vendor/github.com/cortexproject/cortex/pkg/ring/util.go
index 921900c2dc1a7..b1cf8210c75c0 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/util.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/util.go
@@ -122,8 +122,8 @@ func WaitRingStability(ctx context.Context, r *Ring, op Operation, minStability,
}
// MakeBuffersForGet returns buffers to use with Ring.Get().
-func MakeBuffersForGet() (bufDescs []IngesterDesc, bufHosts, bufZones []string) {
- bufDescs = make([]IngesterDesc, 0, GetBufferSize)
+func MakeBuffersForGet() (bufDescs []InstanceDesc, bufHosts, bufZones []string) {
+ bufDescs = make([]InstanceDesc, 0, GetBufferSize)
bufHosts = make([]string, 0, GetBufferSize)
bufZones = make([]string, 0, GetBufferSize)
return
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/lifecycle.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/lifecycle.go
index 47a5be6a07dd4..62f83fa8313b4 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ruler/lifecycle.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/lifecycle.go
@@ -4,7 +4,7 @@ import (
"github.com/cortexproject/cortex/pkg/ring"
)
-func (r *Ruler) OnRingInstanceRegister(_ *ring.BasicLifecycler, ringDesc ring.Desc, instanceExists bool, instanceID string, instanceDesc ring.IngesterDesc) (ring.IngesterState, ring.Tokens) {
+func (r *Ruler) OnRingInstanceRegister(_ *ring.BasicLifecycler, ringDesc ring.Desc, instanceExists bool, instanceID string, instanceDesc ring.InstanceDesc) (ring.IngesterState, ring.Tokens) {
// When we initialize the ruler instance in the ring we want to start from
// a clean situation, so whatever is the state we set it ACTIVE, while we keep existing
// tokens (if any).
@@ -24,5 +24,5 @@ func (r *Ruler) OnRingInstanceRegister(_ *ring.BasicLifecycler, ringDesc ring.De
func (r *Ruler) OnRingInstanceTokens(_ *ring.BasicLifecycler, _ ring.Tokens) {}
func (r *Ruler) OnRingInstanceStopping(_ *ring.BasicLifecycler) {}
-func (r *Ruler) OnRingInstanceHeartbeat(_ *ring.BasicLifecycler, _ *ring.Desc, _ *ring.IngesterDesc) {
+func (r *Ruler) OnRingInstanceHeartbeat(_ *ring.BasicLifecycler, _ *ring.Desc, _ *ring.InstanceDesc) {
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/manager.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/manager.go
index 9bee7d46da35c..b2a1a0e551822 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ruler/manager.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/manager.go
@@ -171,8 +171,7 @@ func (r *DefaultMultiTenantManager) newManager(ctx context.Context, userID strin
reg := prometheus.NewRegistry()
r.userManagerMetrics.AddUserRegistry(userID, reg)
- logger := log.With(r.logger, "user", userID)
- return r.managerFactory(ctx, userID, notifier, logger, reg), nil
+ return r.managerFactory(ctx, userID, notifier, r.logger, reg), nil
}
func (r *DefaultMultiTenantManager) getOrCreateNotifier(userID string) (*notifier.Manager, error) {
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/notifier.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/notifier.go
index 8d78ab0e31503..5f0f60925717f 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ruler/notifier.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/notifier.go
@@ -2,6 +2,7 @@ package ruler
import (
"context"
+ "flag"
"fmt"
"net/url"
"regexp"
@@ -16,8 +17,21 @@ import (
"github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/dns"
"github.com/prometheus/prometheus/notifier"
+
+ "github.com/cortexproject/cortex/pkg/util"
+ "github.com/cortexproject/cortex/pkg/util/tls"
)
+type NotifierConfig struct {
+ TLS tls.ClientConfig `yaml:",inline"`
+ BasicAuth util.BasicAuth `yaml:",inline"`
+}
+
+func (cfg *NotifierConfig) RegisterFlags(f *flag.FlagSet) {
+ cfg.TLS.RegisterFlagsWithPrefix("ruler.alertmanager-client", f)
+ cfg.BasicAuth.RegisterFlagsWithPrefix("ruler.alertmanager-client.", f)
+}
+
// rulerNotifier bundles a notifier.Manager together with an associated
// Alertmanager service discovery manager and handles the lifecycle
// of both actors.
@@ -150,13 +164,21 @@ func amConfigFromURL(rulerConfig *Config, url *url.URL, apiVersion config.Alertm
PathPrefix: url.Path,
Timeout: model.Duration(rulerConfig.NotificationTimeout),
ServiceDiscoveryConfigs: sdConfig,
+ HTTPClientConfig: config_util.HTTPClientConfig{
+ TLSConfig: config_util.TLSConfig{
+ CAFile: rulerConfig.Notifier.TLS.CAPath,
+ CertFile: rulerConfig.Notifier.TLS.CertPath,
+ KeyFile: rulerConfig.Notifier.TLS.KeyPath,
+ InsecureSkipVerify: rulerConfig.Notifier.TLS.InsecureSkipVerify,
+ ServerName: rulerConfig.Notifier.TLS.ServerName,
+ },
+ },
}
+ // Check the URL for basic authentication information first
if url.User != nil {
- amConfig.HTTPClientConfig = config_util.HTTPClientConfig{
- BasicAuth: &config_util.BasicAuth{
- Username: url.User.Username(),
- },
+ amConfig.HTTPClientConfig.BasicAuth = &config_util.BasicAuth{
+ Username: url.User.Username(),
}
if password, isSet := url.User.Password(); isSet {
@@ -164,5 +186,13 @@ func amConfigFromURL(rulerConfig *Config, url *url.URL, apiVersion config.Alertm
}
}
+ // Override URL basic authentication configs with hard coded config values if present
+ if rulerConfig.Notifier.BasicAuth.IsEnabled() {
+ amConfig.HTTPClientConfig.BasicAuth = &config_util.BasicAuth{
+ Username: rulerConfig.Notifier.BasicAuth.Username,
+ Password: config_util.Secret(rulerConfig.Notifier.BasicAuth.Password),
+ }
+ }
+
return amConfig
}
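
The hunk above threads the new ruler `alertmanager_client` settings into the Alertmanager client config: TLS options are always applied, while basic-auth credentials parsed from the URL are overridden by explicitly configured ones. A minimal standalone sketch of that precedence, using plain placeholder structs rather than the vendored config types:

```go
package main

import (
	"fmt"
	"net/url"
)

// basicAuth is a stand-in for the real client config type, used only to
// illustrate the precedence logic; it is not the vendored API.
type basicAuth struct {
	Username string
	Password string
}

func (b basicAuth) isEnabled() bool { return b.Username != "" || b.Password != "" }

// resolveBasicAuth applies the same ordering as the hunk above: credentials
// embedded in the URL are read first, then replaced by explicit config if set.
func resolveBasicAuth(u *url.URL, override basicAuth) *basicAuth {
	var out *basicAuth
	if u.User != nil {
		out = &basicAuth{Username: u.User.Username()}
		if pwd, ok := u.User.Password(); ok {
			out.Password = pwd
		}
	}
	if override.isEnabled() {
		out = &basicAuth{Username: override.Username, Password: override.Password}
	}
	return out
}

func main() {
	u, _ := url.Parse("http://user:secret@alertmanager.example:9093")
	resolved := resolveBasicAuth(u, basicAuth{Username: "cfg-user", Password: "cfg-pass"})
	fmt.Printf("%+v\n", resolved) // explicit config wins over URL credentials
}
```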
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.go
index d5ac2fe70d2bb..6b4433d3acee6 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.go
@@ -86,6 +86,8 @@ type Config struct {
NotificationQueueCapacity int `yaml:"notification_queue_capacity"`
// HTTP timeout duration when sending notifications to the Alertmanager.
NotificationTimeout time.Duration `yaml:"notification_timeout"`
+ // Client configs for interacting with the Alertmanager
+ Notifier NotifierConfig `yaml:"alertmanager_client"`
// Max time to tolerate outage for restoring "for" state of alert.
OutageTolerance time.Duration `yaml:"for_outage_tolerance"`
@@ -130,6 +132,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
cfg.ClientTLSConfig.RegisterFlagsWithPrefix("ruler.client", f)
cfg.StoreConfig.RegisterFlags(f)
cfg.Ring.RegisterFlags(f)
+ cfg.Notifier.RegisterFlags(f)
// Deprecated Flags that will be maintained to avoid user disruption
flagext.DeprecatedFlag(f, "ruler.client-timeout", "This flag has been renamed to ruler.configs.client-timeout")
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler_ring.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler_ring.go
index 3bd18e9a247db..2ea58e7cfbd61 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler_ring.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler_ring.go
@@ -10,8 +10,8 @@ import (
"github.com/cortexproject/cortex/pkg/ring"
"github.com/cortexproject/cortex/pkg/ring/kv"
- "github.com/cortexproject/cortex/pkg/util"
"github.com/cortexproject/cortex/pkg/util/flagext"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
)
const (
@@ -54,7 +54,7 @@ type RingConfig struct {
func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet) {
hostname, err := os.Hostname()
if err != nil {
- level.Error(util.Logger).Log("msg", "failed to get hostname", "err", err)
+ level.Error(util_log.Logger).Log("msg", "failed to get hostname", "err", err)
os.Exit(1)
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/objectclient/rule_store.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/objectclient/rule_store.go
index 1fb7d65586c10..344c6d8f029b6 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/objectclient/rule_store.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/objectclient/rule_store.go
@@ -14,7 +14,7 @@ import (
"github.com/cortexproject/cortex/pkg/chunk"
"github.com/cortexproject/cortex/pkg/ruler/rules"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
)
// Object Rule Storage Schema
@@ -49,7 +49,7 @@ func NewRuleStore(client chunk.ObjectClient, loadConcurrency int) *RuleStore {
func (o *RuleStore) getRuleGroup(ctx context.Context, objectKey string, rg *rules.RuleGroupDesc) (*rules.RuleGroupDesc, error) {
reader, err := o.client.GetObject(ctx, objectKey)
if err == chunk.ErrStorageObjectNotFound {
- level.Debug(util.Logger).Log("msg", "rule group does not exist", "name", objectKey)
+ level.Debug(util_log.Logger).Log("msg", "rule group does not exist", "name", objectKey)
return nil, rules.ErrGroupNotFound
}
@@ -139,10 +139,10 @@ func (o *RuleStore) LoadRuleGroups(ctx context.Context, groupsToLoad map[string]
key := generateRuleObjectKey(user, namespace, group)
- level.Debug(util.Logger).Log("msg", "loading rule group", "key", key, "user", user)
+ level.Debug(util_log.Logger).Log("msg", "loading rule group", "key", key, "user", user)
gr, err := o.getRuleGroup(gCtx, key, gr) // reuse group pointer from the map.
if err != nil {
- level.Error(util.Logger).Log("msg", "failed to get rule group", "key", key, "user", user)
+ level.Error(util_log.Logger).Log("msg", "failed to get rule group", "key", key, "user", user)
return err
}
@@ -227,10 +227,10 @@ func (o *RuleStore) DeleteNamespace(ctx context.Context, userID, namespace strin
}
for _, obj := range ruleGroupObjects {
- level.Debug(util.Logger).Log("msg", "deleting rule group", "namespace", namespace, "key", obj.Key)
+ level.Debug(util_log.Logger).Log("msg", "deleting rule group", "namespace", namespace, "key", obj.Key)
err = o.client.DeleteObject(ctx, obj.Key)
if err != nil {
- level.Error(util.Logger).Log("msg", "unable to delete rule group from namespace", "err", err, "namespace", namespace, "key", obj.Key)
+ level.Error(util_log.Logger).Log("msg", "unable to delete rule group from namespace", "err", err, "namespace", namespace, "key", obj.Key)
return err
}
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/s3/bucket_client.go b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/s3/bucket_client.go
index 51dab86036f7e..2f17bbe737eec 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/s3/bucket_client.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/s3/bucket_client.go
@@ -28,6 +28,11 @@ func newS3Config(cfg Config) s3.Config {
IdleConnTimeout: model.Duration(cfg.HTTP.IdleConnTimeout),
ResponseHeaderTimeout: model.Duration(cfg.HTTP.ResponseHeaderTimeout),
InsecureSkipVerify: cfg.HTTP.InsecureSkipVerify,
+ TLSHandshakeTimeout: model.Duration(cfg.HTTP.TLSHandshakeTimeout),
+ ExpectContinueTimeout: model.Duration(cfg.HTTP.ExpectContinueTimeout),
+ MaxIdleConns: cfg.HTTP.MaxIdleConns,
+ MaxIdleConnsPerHost: cfg.HTTP.MaxIdleConnsPerHost,
+ MaxConnsPerHost: cfg.HTTP.MaxConnsPerHost,
Transport: cfg.HTTP.Transport,
},
// Enforce signature version 2 if CLI flag is set
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/s3/config.go b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/s3/config.go
index 96db7e1f0c125..17d2f77f7da3f 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/s3/config.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/s3/config.go
@@ -27,6 +27,11 @@ type HTTPConfig struct {
IdleConnTimeout time.Duration `yaml:"idle_conn_timeout"`
ResponseHeaderTimeout time.Duration `yaml:"response_header_timeout"`
InsecureSkipVerify bool `yaml:"insecure_skip_verify"`
+ TLSHandshakeTimeout time.Duration `yaml:"tls_handshake_timeout"`
+ ExpectContinueTimeout time.Duration `yaml:"expect_continue_timeout"`
+ MaxIdleConns int `yaml:"max_idle_connections"`
+ MaxIdleConnsPerHost int `yaml:"max_idle_connections_per_host"`
+ MaxConnsPerHost int `yaml:"max_connections_per_host"`
// Allow upstream callers to inject a round tripper
Transport http.RoundTripper `yaml:"-"`
@@ -37,6 +42,11 @@ func (cfg *HTTPConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
f.DurationVar(&cfg.IdleConnTimeout, prefix+"s3.http.idle-conn-timeout", 90*time.Second, "The time an idle connection will remain idle before closing.")
f.DurationVar(&cfg.ResponseHeaderTimeout, prefix+"s3.http.response-header-timeout", 2*time.Minute, "The amount of time the client will wait for a servers response headers.")
f.BoolVar(&cfg.InsecureSkipVerify, prefix+"s3.http.insecure-skip-verify", false, "If the client connects to S3 via HTTPS and this option is enabled, the client will accept any certificate and hostname.")
+ f.DurationVar(&cfg.TLSHandshakeTimeout, prefix+"s3.tls-handshake-timeout", 10*time.Second, "Maximum time to wait for a TLS handshake. 0 means no limit.")
+ f.DurationVar(&cfg.ExpectContinueTimeout, prefix+"s3.expect-continue-timeout", 1*time.Second, "The time to wait for a server's first response headers after fully writing the request headers if the request has an Expect header. 0 to send the request body immediately.")
+ f.IntVar(&cfg.MaxIdleConns, prefix+"s3.max-idle-connections", 100, "Maximum number of idle (keep-alive) connections across all hosts. 0 means no limit.")
+ f.IntVar(&cfg.MaxIdleConnsPerHost, prefix+"s3.max-idle-connections-per-host", 100, "Maximum number of idle (keep-alive) connections to keep per-host. If 0, a built-in default value is used.")
+ f.IntVar(&cfg.MaxConnsPerHost, prefix+"s3.max-connections-per-host", 0, "Maximum number of connections per host. 0 means no limit.")
}
// Config holds the config options for an S3 backend
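
The extra `HTTPConfig` fields above are the usual `net/http.Transport` tuning knobs. A rough sketch of how such settings line up with a transport; the `httpSettings` struct is a simplified stand-in for the vendored config, not the exact plumbing performed downstream:

```go
package main

import (
	"crypto/tls"
	"net/http"
	"time"
)

// httpSettings is an illustrative subset of the S3 HTTP options added above.
type httpSettings struct {
	IdleConnTimeout       time.Duration
	ResponseHeaderTimeout time.Duration
	InsecureSkipVerify    bool
	TLSHandshakeTimeout   time.Duration
	ExpectContinueTimeout time.Duration
	MaxIdleConns          int
	MaxIdleConnsPerHost   int
	MaxConnsPerHost       int
}

// newTransport maps each option onto the corresponding net/http.Transport field.
func newTransport(s httpSettings) *http.Transport {
	return &http.Transport{
		IdleConnTimeout:       s.IdleConnTimeout,
		ResponseHeaderTimeout: s.ResponseHeaderTimeout,
		TLSClientConfig:       &tls.Config{InsecureSkipVerify: s.InsecureSkipVerify},
		TLSHandshakeTimeout:   s.TLSHandshakeTimeout,
		ExpectContinueTimeout: s.ExpectContinueTimeout,
		MaxIdleConns:          s.MaxIdleConns,
		MaxIdleConnsPerHost:   s.MaxIdleConnsPerHost,
		MaxConnsPerHost:       s.MaxConnsPerHost,
	}
}

func main() {
	_ = newTransport(httpSettings{
		IdleConnTimeout:       90 * time.Second,
		ResponseHeaderTimeout: 2 * time.Minute,
		TLSHandshakeTimeout:   10 * time.Second,
		ExpectContinueTimeout: time.Second,
		MaxIdleConns:          100,
		MaxIdleConnsPerHost:   100,
	})
}
```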
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/config.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/config.go
index 0724fd6d160f1..11c635cd7d140 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/config.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/config.go
@@ -227,10 +227,9 @@ type BucketStoreConfig struct {
IgnoreDeletionMarksDelay time.Duration `yaml:"ignore_deletion_mark_delay"`
BucketIndex BucketIndexConfig `yaml:"bucket_index"`
- // Controls whether index-header lazy loading is enabled. This config option is hidden
- // while it is marked as experimental.
- IndexHeaderLazyLoadingEnabled bool `yaml:"index_header_lazy_loading_enabled" doc:"hidden"`
- IndexHeaderLazyLoadingIdleTimeout time.Duration `yaml:"index_header_lazy_loading_idle_timeout" doc:"hidden"`
+ // Controls whether index-header lazy loading is enabled.
+ IndexHeaderLazyLoadingEnabled bool `yaml:"index_header_lazy_loading_enabled"`
+ IndexHeaderLazyLoadingIdleTimeout time.Duration `yaml:"index_header_lazy_loading_idle_timeout"`
// Controls what is the ratio of postings offsets store will hold in memory.
// Larger value will keep less offsets, which will increase CPU cycles needed for query touching those postings.
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/tenant_deletion_mark.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/tenant_deletion_mark.go
index a971aee7a55ad..5253b1f662853 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/tenant_deletion_mark.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/tenant_deletion_mark.go
@@ -11,7 +11,7 @@ import (
"github.com/pkg/errors"
"github.com/thanos-io/thanos/pkg/objstore"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
)
// Relative to user-specific prefix.
@@ -65,7 +65,7 @@ func ReadTenantDeletionMark(ctx context.Context, bkt objstore.BucketReader, user
// Close reader before dealing with decode error.
if closeErr := r.Close(); closeErr != nil {
- level.Warn(util.Logger).Log("msg", "failed to close bucket reader", "err", closeErr)
+ level.Warn(util_log.Logger).Log("msg", "failed to close bucket reader", "err", closeErr)
}
if err != nil {
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway.go b/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway.go
index 182f5cefb69b7..db7716a5b5919 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway.go
@@ -330,7 +330,7 @@ func (g *StoreGateway) LabelValues(ctx context.Context, req *storepb.LabelValues
return g.stores.LabelValues(ctx, req)
}
-func (g *StoreGateway) OnRingInstanceRegister(_ *ring.BasicLifecycler, ringDesc ring.Desc, instanceExists bool, instanceID string, instanceDesc ring.IngesterDesc) (ring.IngesterState, ring.Tokens) {
+func (g *StoreGateway) OnRingInstanceRegister(_ *ring.BasicLifecycler, ringDesc ring.Desc, instanceExists bool, instanceID string, instanceDesc ring.InstanceDesc) (ring.IngesterState, ring.Tokens) {
// When we initialize the store-gateway instance in the ring we want to start from
// a clean situation, so whatever is the state we set it JOINING, while we keep existing
// tokens (if any) or the ones loaded from file.
@@ -350,7 +350,7 @@ func (g *StoreGateway) OnRingInstanceRegister(_ *ring.BasicLifecycler, ringDesc
func (g *StoreGateway) OnRingInstanceTokens(_ *ring.BasicLifecycler, _ ring.Tokens) {}
func (g *StoreGateway) OnRingInstanceStopping(_ *ring.BasicLifecycler) {}
-func (g *StoreGateway) OnRingInstanceHeartbeat(_ *ring.BasicLifecycler, _ *ring.Desc, _ *ring.IngesterDesc) {
+func (g *StoreGateway) OnRingInstanceHeartbeat(_ *ring.BasicLifecycler, _ *ring.Desc, _ *ring.InstanceDesc) {
}
func createBucketClient(cfg cortex_tsdb.BlocksStorageConfig, logger log.Logger, reg prometheus.Registerer) (objstore.Bucket, error) {
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway_http.go b/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway_http.go
index 01d466b4558a5..22ce3ebea643c 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway_http.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway_http.go
@@ -6,7 +6,7 @@ import (
"github.com/go-kit/kit/log/level"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/cortexproject/cortex/pkg/util/services"
)
@@ -32,7 +32,7 @@ func writeMessage(w http.ResponseWriter, message string) {
}{Message: message})
if err != nil {
- level.Error(util.Logger).Log("msg", "unable to serve store gateway ring page", "err", err)
+ level.Error(util_log.Logger).Log("msg", "unable to serve store gateway ring page", "err", err)
}
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway_ring.go b/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway_ring.go
index e977c84ca7630..8f7f30bc87d72 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway_ring.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway_ring.go
@@ -10,8 +10,8 @@ import (
"github.com/cortexproject/cortex/pkg/ring"
"github.com/cortexproject/cortex/pkg/ring/kv"
- "github.com/cortexproject/cortex/pkg/util"
"github.com/cortexproject/cortex/pkg/util/flagext"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
)
const (
@@ -72,7 +72,7 @@ type RingConfig struct {
func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet) {
hostname, err := os.Hostname()
if err != nil {
- level.Error(util.Logger).Log("msg", "failed to get hostname", "err", err)
+ level.Error(util_log.Logger).Log("msg", "failed to get hostname", "err", err)
os.Exit(1)
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/http.go b/vendor/github.com/cortexproject/cortex/pkg/util/http.go
index f02da30b8d1d9..f06363e537f0d 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/http.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/http.go
@@ -4,6 +4,7 @@ import (
"bytes"
"context"
"encoding/json"
+ "flag"
"fmt"
"html/template"
"io"
@@ -19,6 +20,22 @@ import (
const messageSizeLargerErrFmt = "received message larger than max (%d vs %d)"
+// BasicAuth configures basic authentication for HTTP clients.
+type BasicAuth struct {
+ Username string `yaml:"basic_auth_username"`
+ Password string `yaml:"basic_auth_password"`
+}
+
+func (b *BasicAuth) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
+ f.StringVar(&b.Username, prefix+"basic-auth-username", "", "HTTP Basic authentication username. It overrides the username set in the URL (if any).")
+ f.StringVar(&b.Password, prefix+"basic-auth-password", "", "HTTP Basic authentication password. It overrides the password set in the URL (if any).")
+}
+
+// IsEnabled returns false if basic authentication isn't enabled.
+func (b BasicAuth) IsEnabled() bool {
+ return b.Username != "" || b.Password != ""
+}
+
// WriteJSONResponse writes some JSON as a HTTP response.
func WriteJSONResponse(w http.ResponseWriter, v interface{}) {
w.Header().Set("Content-Type", "application/json")
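
The `BasicAuth` helper added above registers its two flags under a caller-supplied prefix and treats either non-empty field as "enabled". A small usage sketch with the standard `flag` package, using a local copy of the type so it runs on its own; the prefix matches the one wired up in the ruler notifier:

```go
package main

import (
	"flag"
	"fmt"
)

// auth is a local copy of the helper's shape, kept here only for illustration.
type auth struct {
	Username string
	Password string
}

func (a *auth) registerFlagsWithPrefix(prefix string, f *flag.FlagSet) {
	f.StringVar(&a.Username, prefix+"basic-auth-username", "", "HTTP Basic authentication username.")
	f.StringVar(&a.Password, prefix+"basic-auth-password", "", "HTTP Basic authentication password.")
}

func (a auth) isEnabled() bool { return a.Username != "" || a.Password != "" }

func main() {
	fs := flag.NewFlagSet("example", flag.ContinueOnError)
	var a auth
	a.registerFlagsWithPrefix("ruler.alertmanager-client.", fs)

	_ = fs.Parse([]string{"-ruler.alertmanager-client.basic-auth-username=am-user"})
	fmt.Println(a.isEnabled()) // true: username was provided on the command line
}
```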
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/log.go b/vendor/github.com/cortexproject/cortex/pkg/util/log.go
deleted file mode 100644
index df0d464d45700..0000000000000
--- a/vendor/github.com/cortexproject/cortex/pkg/util/log.go
+++ /dev/null
@@ -1,96 +0,0 @@
-package util
-
-import (
- "os"
-
- "github.com/go-kit/kit/log"
- "github.com/go-kit/kit/log/level"
- "github.com/prometheus/client_golang/prometheus"
- "github.com/weaveworks/common/logging"
- "github.com/weaveworks/common/server"
-
- logutil "github.com/cortexproject/cortex/pkg/util/log"
-)
-
-var (
- // Logger is a shared go-kit logger.
- // TODO: Change all components to take a non-global logger via their constructors.
- // Deprecated and moved to `pkg/util/log`. Prefer accepting a non-global logger as an argument.
- Logger = logutil.Logger
-
- logMessages = prometheus.NewCounterVec(prometheus.CounterOpts{
- Name: "log_messages_total",
- Help: "Total number of log messages.",
- }, []string{"level"})
-
- supportedLevels = []level.Value{
- level.DebugValue(),
- level.InfoValue(),
- level.WarnValue(),
- level.ErrorValue(),
- }
-)
-
-func init() {
- prometheus.MustRegister(logMessages)
-}
-
-// InitLogger initialises the global gokit logger (util.Logger) and overrides the
-// default logger for the server.
-func InitLogger(cfg *server.Config) {
- l, err := NewPrometheusLogger(cfg.LogLevel, cfg.LogFormat)
- if err != nil {
- panic(err)
- }
-
- // when use util.Logger, skip 3 stack frames.
- Logger = log.With(l, "caller", log.Caller(3))
-
- // cfg.Log wraps log function, skip 4 stack frames to get caller information.
- // this works in go 1.12, but doesn't work in versions earlier.
- // it will always shows the wrapper function generated by compiler
- // marked <autogenerated> in old versions.
- cfg.Log = logging.GoKit(log.With(l, "caller", log.Caller(4)))
-}
-
-// PrometheusLogger exposes Prometheus counters for each of go-kit's log levels.
-type PrometheusLogger struct {
- logger log.Logger
-}
-
-// NewPrometheusLogger creates a new instance of PrometheusLogger which exposes
-// Prometheus counters for various log levels.
-func NewPrometheusLogger(l logging.Level, format logging.Format) (log.Logger, error) {
- logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
- if format.String() == "json" {
- logger = log.NewJSONLogger(log.NewSyncWriter(os.Stderr))
- }
- logger = level.NewFilter(logger, l.Gokit)
-
- // Initialise counters for all supported levels:
- for _, level := range supportedLevels {
- logMessages.WithLabelValues(level.String())
- }
-
- logger = &PrometheusLogger{
- logger: logger,
- }
-
- // return a Logger without caller information, shouldn't use directly
- logger = log.With(logger, "ts", log.DefaultTimestampUTC)
- return logger, nil
-}
-
-// Log increments the appropriate Prometheus counter depending on the log level.
-func (pl *PrometheusLogger) Log(kv ...interface{}) error {
- pl.logger.Log(kv...)
- l := "unknown"
- for i := 1; i < len(kv); i += 2 {
- if v, ok := kv[i].(level.Value); ok {
- l = v.String()
- break
- }
- }
- logMessages.WithLabelValues(l).Inc()
- return nil
-}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/log/log.go b/vendor/github.com/cortexproject/cortex/pkg/util/log/log.go
index 2f146db276c84..92ea3f697d18f 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/log/log.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/log/log.go
@@ -7,6 +7,9 @@ import (
"github.com/go-kit/kit/log"
kitlog "github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/weaveworks/common/logging"
+ "github.com/weaveworks/common/server"
)
var (
@@ -14,8 +17,84 @@ var (
// TODO: Change all components to take a non-global logger via their constructors.
// Prefer accepting a non-global logger as an argument.
Logger = kitlog.NewNopLogger()
+
+ logMessages = prometheus.NewCounterVec(prometheus.CounterOpts{
+ Name: "log_messages_total",
+ Help: "Total number of log messages.",
+ }, []string{"level"})
+
+ supportedLevels = []level.Value{
+ level.DebugValue(),
+ level.InfoValue(),
+ level.WarnValue(),
+ level.ErrorValue(),
+ }
)
+func init() {
+ prometheus.MustRegister(logMessages)
+}
+
+// InitLogger initialises the global gokit logger (util_log.Logger) and overrides the
+// default logger for the server.
+func InitLogger(cfg *server.Config) {
+ l, err := NewPrometheusLogger(cfg.LogLevel, cfg.LogFormat)
+ if err != nil {
+ panic(err)
+ }
+
+ // when use util_log.Logger, skip 3 stack frames.
+ Logger = log.With(l, "caller", log.Caller(3))
+
+ // cfg.Log wraps log function, skip 4 stack frames to get caller information.
+ // this works in go 1.12, but doesn't work in versions earlier.
+ // it will always shows the wrapper function generated by compiler
+ // marked <autogenerated> in old versions.
+ cfg.Log = logging.GoKit(log.With(l, "caller", log.Caller(4)))
+}
+
+// PrometheusLogger exposes Prometheus counters for each of go-kit's log levels.
+type PrometheusLogger struct {
+ logger log.Logger
+}
+
+// NewPrometheusLogger creates a new instance of PrometheusLogger which exposes
+// Prometheus counters for various log levels.
+func NewPrometheusLogger(l logging.Level, format logging.Format) (log.Logger, error) {
+ logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
+ if format.String() == "json" {
+ logger = log.NewJSONLogger(log.NewSyncWriter(os.Stderr))
+ }
+ logger = level.NewFilter(logger, l.Gokit)
+
+ // Initialise counters for all supported levels:
+ for _, level := range supportedLevels {
+ logMessages.WithLabelValues(level.String())
+ }
+
+ logger = &PrometheusLogger{
+ logger: logger,
+ }
+
+ // return a Logger without caller information, shouldn't use directly
+ logger = log.With(logger, "ts", log.DefaultTimestampUTC)
+ return logger, nil
+}
+
+// Log increments the appropriate Prometheus counter depending on the log level.
+func (pl *PrometheusLogger) Log(kv ...interface{}) error {
+ pl.logger.Log(kv...)
+ l := "unknown"
+ for i := 1; i < len(kv); i += 2 {
+ if v, ok := kv[i].(level.Value); ok {
+ l = v.String()
+ break
+ }
+ }
+ logMessages.WithLabelValues(l).Inc()
+ return nil
+}
+
// CheckFatal prints an error and exits with error code 1 if err is non-nil
func CheckFatal(location string, err error) {
if err != nil {
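
The relocated `PrometheusLogger` decides which per-level counter to increment by scanning the variadic key/value pairs for a go-kit `level.Value`. A reduced sketch of that extraction, tallying into a plain map instead of Prometheus counters so it stays self-contained:

```go
package main

import (
	"fmt"
	"os"

	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/log/level"
)

// countingLogger wraps a go-kit logger and tallies messages per level,
// mirroring how the vendored logger picks its counter label.
type countingLogger struct {
	next   log.Logger
	counts map[string]int
}

func (c *countingLogger) Log(kv ...interface{}) error {
	l := "unknown"
	for i := 1; i < len(kv); i += 2 {
		if v, ok := kv[i].(level.Value); ok {
			l = v.String()
			break
		}
	}
	c.counts[l]++
	return c.next.Log(kv...)
}

func main() {
	base := log.NewLogfmtLogger(os.Stderr)
	counter := &countingLogger{next: base, counts: map[string]int{}}

	level.Info(counter).Log("msg", "hello")
	level.Warn(counter).Log("msg", "careful")

	fmt.Println(counter.counts) // map[info:1 warn:1]
}
```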
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/metrics_helper.go b/vendor/github.com/cortexproject/cortex/pkg/util/metrics_helper.go
index e90b21dd18e7b..be413ee236ddf 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/metrics_helper.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/metrics_helper.go
@@ -11,6 +11,8 @@ import (
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/prometheus/pkg/labels"
+
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
)
// Data for single value (counter/gauge) with labels.
@@ -583,7 +585,7 @@ func (r *UserRegistries) RemoveUserRegistry(user string, hard bool) {
func (r *UserRegistries) softRemoveUserRegistry(ur *UserRegistry) bool {
last, err := ur.reg.Gather()
if err != nil {
- level.Warn(Logger).Log("msg", "failed to gather metrics from registry", "user", ur.user, "err", err)
+ level.Warn(util_log.Logger).Log("msg", "failed to gather metrics from registry", "user", ur.user, "err", err)
return false
}
@@ -605,7 +607,7 @@ func (r *UserRegistries) softRemoveUserRegistry(ur *UserRegistry) bool {
ur.lastGather, err = NewMetricFamilyMap(last)
if err != nil {
- level.Warn(Logger).Log("msg", "failed to gather metrics from registry", "user", ur.user, "err", err)
+ level.Warn(util_log.Logger).Log("msg", "failed to gather metrics from registry", "user", ur.user, "err", err)
return false
}
@@ -656,7 +658,7 @@ func (r *UserRegistries) BuildMetricFamiliesPerUser() MetricFamiliesPerUser {
}
if err != nil {
- level.Warn(Logger).Log("msg", "failed to gather metrics from registry", "user", entry.user, "err", err)
+ level.Warn(util_log.Logger).Log("msg", "failed to gather metrics from registry", "user", entry.user, "err", err)
continue
}
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/module_service.go b/vendor/github.com/cortexproject/cortex/pkg/util/module_service.go
index be12157ac2661..0d4fb43d1f2b4 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/module_service.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/module_service.go
@@ -7,6 +7,7 @@ import (
"github.com/go-kit/kit/log/level"
"github.com/pkg/errors"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/cortexproject/cortex/pkg/util/services"
)
@@ -45,7 +46,7 @@ func (w *moduleService) start(serviceContext context.Context) error {
continue
}
- level.Debug(Logger).Log("msg", "module waiting for initialization", "module", w.name, "waiting_for", m)
+ level.Debug(util_log.Logger).Log("msg", "module waiting for initialization", "module", w.name, "waiting_for", m)
err := s.AwaitRunning(serviceContext)
if err != nil {
@@ -55,7 +56,7 @@ func (w *moduleService) start(serviceContext context.Context) error {
// we don't want to let this service to stop until all dependant services are stopped,
// so we use independent context here
- level.Info(Logger).Log("msg", "initialising", "module", w.name)
+ level.Info(util_log.Logger).Log("msg", "initialising", "module", w.name)
err := w.service.StartAsync(context.Background())
if err != nil {
return errors.Wrapf(err, "error starting module: %s", w.name)
@@ -77,7 +78,7 @@ func (w *moduleService) stop(_ error) error {
// Only wait for other modules, if underlying service is still running.
w.waitForModulesToStop()
- level.Debug(Logger).Log("msg", "stopping", "module", w.name)
+ level.Debug(util_log.Logger).Log("msg", "stopping", "module", w.name)
err = services.StopAndAwaitTerminated(context.Background(), w.service)
} else {
@@ -85,9 +86,9 @@ func (w *moduleService) stop(_ error) error {
}
if err != nil && err != ErrStopProcess {
- level.Warn(Logger).Log("msg", "module failed with error", "module", w.name, "err", err)
+ level.Warn(util_log.Logger).Log("msg", "module failed with error", "module", w.name, "err", err)
} else {
- level.Info(Logger).Log("msg", "module stopped", "module", w.name)
+ level.Info(util_log.Logger).Log("msg", "module stopped", "module", w.name)
}
return err
}
@@ -100,7 +101,7 @@ func (w *moduleService) waitForModulesToStop() {
continue
}
- level.Debug(Logger).Log("msg", "module waiting for", "module", w.name, "waiting_for", n)
+ level.Debug(util_log.Logger).Log("msg", "module waiting for", "module", w.name, "waiting_for", n)
// Passed context isn't canceled, so we can only get error here, if service
// fails. But we don't care *how* service stops, as long as it is done.
_ = s.AwaitTerminated(context.Background())
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/net.go b/vendor/github.com/cortexproject/cortex/pkg/util/net.go
index e0fa12e6ffaad..f4cd184870fc9 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/net.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/net.go
@@ -5,6 +5,8 @@ import (
"net"
"github.com/go-kit/kit/log/level"
+
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
)
// GetFirstAddressOf returns the first IPv4 address of the supplied interface names.
@@ -12,17 +14,17 @@ func GetFirstAddressOf(names []string) (string, error) {
for _, name := range names {
inf, err := net.InterfaceByName(name)
if err != nil {
- level.Warn(Logger).Log("msg", "error getting interface", "inf", name, "err", err)
+ level.Warn(util_log.Logger).Log("msg", "error getting interface", "inf", name, "err", err)
continue
}
addrs, err := inf.Addrs()
if err != nil {
- level.Warn(Logger).Log("msg", "error getting addresses for interface", "inf", name, "err", err)
+ level.Warn(util_log.Logger).Log("msg", "error getting addresses for interface", "inf", name, "err", err)
continue
}
if len(addrs) <= 0 {
- level.Warn(Logger).Log("msg", "no addresses found for interface", "inf", name, "err", err)
+ level.Warn(util_log.Logger).Log("msg", "no addresses found for interface", "inf", name, "err", err)
continue
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/runtimeconfig/manager.go b/vendor/github.com/cortexproject/cortex/pkg/util/runtimeconfig/manager.go
index 71df1e30e0d28..6447508cde144 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/runtimeconfig/manager.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/runtimeconfig/manager.go
@@ -16,7 +16,7 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
"github.com/cortexproject/cortex/pkg/util/services"
)
@@ -118,7 +118,7 @@ func (om *Manager) CloseListenerChannel(listener <-chan interface{}) {
func (om *Manager) loop(ctx context.Context) error {
if om.cfg.LoadPath == "" {
- level.Info(util.Logger).Log("msg", "runtime config disabled: file not specified")
+ level.Info(util_log.Logger).Log("msg", "runtime config disabled: file not specified")
<-ctx.Done()
return nil
}
@@ -132,7 +132,7 @@ func (om *Manager) loop(ctx context.Context) error {
err := om.loadConfig()
if err != nil {
// Log but don't stop on error - we don't want to halt all ingesters because of a typo
- level.Error(util.Logger).Log("msg", "failed to load config", "err", err)
+ level.Error(util_log.Logger).Log("msg", "failed to load config", "err", err)
}
case <-ctx.Done():
return nil
diff --git a/vendor/github.com/cortexproject/cortex/tools/querytee/instrumentation.go b/vendor/github.com/cortexproject/cortex/tools/querytee/instrumentation.go
index 8b6267786a779..1410ea748ccb2 100644
--- a/vendor/github.com/cortexproject/cortex/tools/querytee/instrumentation.go
+++ b/vendor/github.com/cortexproject/cortex/tools/querytee/instrumentation.go
@@ -10,7 +10,7 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
)
type InstrumentationServer struct {
@@ -44,7 +44,7 @@ func (s *InstrumentationServer) Start() error {
go func() {
if err := s.srv.Serve(listener); err != nil {
- level.Error(util.Logger).Log("msg", "metrics server terminated", "err", err)
+ level.Error(util_log.Logger).Log("msg", "metrics server terminated", "err", err)
}
}()
diff --git a/vendor/github.com/cortexproject/cortex/tools/querytee/proxy_endpoint.go b/vendor/github.com/cortexproject/cortex/tools/querytee/proxy_endpoint.go
index cde006481f7d8..b9628b4da4f9c 100644
--- a/vendor/github.com/cortexproject/cortex/tools/querytee/proxy_endpoint.go
+++ b/vendor/github.com/cortexproject/cortex/tools/querytee/proxy_endpoint.go
@@ -10,7 +10,7 @@ import (
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
)
type ResponsesComparator interface {
@@ -127,7 +127,7 @@ func (p *ProxyEndpoint) executeBackendRequests(r *http.Request, resCh chan *back
result := comparisonSuccess
err := p.compareResponses(expectedResponse, actualResponse)
if err != nil {
- level.Error(util.Logger).Log("msg", "response comparison failed", "route-name", p.routeName,
+ level.Error(util_log.Logger).Log("msg", "response comparison failed", "route-name", p.routeName,
"query", r.URL.RawQuery, "err", err)
result = comparisonFailed
}
diff --git a/vendor/github.com/cortexproject/cortex/tools/querytee/response_comparator.go b/vendor/github.com/cortexproject/cortex/tools/querytee/response_comparator.go
index 4da61cfcd53ae..913ae57967e62 100644
--- a/vendor/github.com/cortexproject/cortex/tools/querytee/response_comparator.go
+++ b/vendor/github.com/cortexproject/cortex/tools/querytee/response_comparator.go
@@ -9,7 +9,7 @@ import (
"github.com/pkg/errors"
"github.com/prometheus/common/model"
- "github.com/cortexproject/cortex/pkg/util"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
)
// SamplesComparatorFunc helps with comparing different types of samples coming from /api/v1/query and /api/v1/query_range routes.
@@ -109,7 +109,7 @@ func compareMatrix(expectedRaw, actualRaw json.RawMessage, tolerance float64) er
err := fmt.Errorf("expected %d samples for metric %s but got %d", expectedMetricLen,
expectedMetric.Metric, actualMetricLen)
if expectedMetricLen > 0 && actualMetricLen > 0 {
- level.Error(util.Logger).Log("msg", err.Error(), "oldest-expected-ts", expectedMetric.Values[0].Timestamp,
+ level.Error(util_log.Logger).Log("msg", err.Error(), "oldest-expected-ts", expectedMetric.Values[0].Timestamp,
"newest-expected-ts", expectedMetric.Values[expectedMetricLen-1].Timestamp,
"oldest-actual-ts", actualMetric.Values[0].Timestamp, "newest-actual-ts", actualMetric.Values[actualMetricLen-1].Timestamp)
}
diff --git a/vendor/github.com/golang/snappy/decode_arm64.s b/vendor/github.com/golang/snappy/decode_arm64.s
index bfafa0ccfce89..7a3ead17eacfe 100644
--- a/vendor/github.com/golang/snappy/decode_arm64.s
+++ b/vendor/github.com/golang/snappy/decode_arm64.s
@@ -70,7 +70,7 @@ loop:
// x := uint32(src[s] >> 2)
// switch
MOVW $60, R1
- ADD R4>>2, ZR, R4
+ LSRW $2, R4, R4
CMPW R4, R1
BLS tagLit60Plus
@@ -111,13 +111,12 @@ doLit:
// is contiguous in memory and so it needs to leave enough source bytes to
// read the next tag without refilling buffers, but Go's Decode assumes
// contiguousness (the src argument is a []byte).
- MOVD $16, R1
- CMP R1, R4
- BGT callMemmove
- CMP R1, R2
- BLT callMemmove
- CMP R1, R3
- BLT callMemmove
+ CMP $16, R4
+ BGT callMemmove
+ CMP $16, R2
+ BLT callMemmove
+ CMP $16, R3
+ BLT callMemmove
// !!! Implement the copy from src to dst as a 16-byte load and store.
// (Decode's documentation says that dst and src must not overlap.)
@@ -130,9 +129,8 @@ doLit:
// Note that on arm64, it is legal and cheap to issue unaligned 8-byte or
// 16-byte loads and stores. This technique probably wouldn't be as
// effective on architectures that are fussier about alignment.
-
- VLD1 0(R6), [V0.B16]
- VST1 [V0.B16], 0(R7)
+ LDP 0(R6), (R14, R15)
+ STP (R14, R15), 0(R7)
// d += length
// s += length
@@ -210,8 +208,7 @@ tagLit61:
B doLit
tagLit62Plus:
- MOVW $62, R1
- CMPW R1, R4
+ CMPW $62, R4
BHI tagLit63
// case x == 62:
@@ -273,10 +270,9 @@ tagCopy:
// We have a copy tag. We assume that:
// - R3 == src[s] & 0x03
// - R4 == src[s]
- MOVD $2, R1
- CMP R1, R3
- BEQ tagCopy2
- BGT tagCopy4
+ CMP $2, R3
+ BEQ tagCopy2
+ BGT tagCopy4
// case tagCopy1:
// s += 2
@@ -346,13 +342,11 @@ doCopy:
// }
// copy 16 bytes
// d += length
- MOVD $16, R1
- MOVD $8, R0
- CMP R1, R4
+ CMP $16, R4
BGT slowForwardCopy
- CMP R0, R5
+ CMP $8, R5
BLT slowForwardCopy
- CMP R1, R14
+ CMP $16, R14
BLT slowForwardCopy
MOVD 0(R15), R2
MOVD R2, 0(R7)
@@ -426,8 +420,7 @@ makeOffsetAtLeast8:
// // The two previous lines together means that d-offset, and therefore
// // R15, is unchanged.
// }
- MOVD $8, R1
- CMP R1, R5
+ CMP $8, R5
BGE fixUpSlowForwardCopy
MOVD (R15), R3
MOVD R3, (R7)
@@ -477,9 +470,7 @@ verySlowForwardCopy:
ADD $1, R15, R15
ADD $1, R7, R7
SUB $1, R4, R4
- MOVD $0, R1
- CMP R1, R4
- BNE verySlowForwardCopy
+ CBNZ R4, verySlowForwardCopy
B loop
// The code above handles copy tags.
diff --git a/vendor/github.com/golang/snappy/encode_arm64.s b/vendor/github.com/golang/snappy/encode_arm64.s
index 1f565ee75f2c6..bf83667d711f7 100644
--- a/vendor/github.com/golang/snappy/encode_arm64.s
+++ b/vendor/github.com/golang/snappy/encode_arm64.s
@@ -35,11 +35,9 @@ TEXT ·emitLiteral(SB), NOSPLIT, $32-56
MOVW R3, R4
SUBW $1, R4, R4
- MOVW $60, R2
- CMPW R2, R4
+ CMPW $60, R4
BLT oneByte
- MOVW $256, R2
- CMPW R2, R4
+ CMPW $256, R4
BLT twoBytes
threeBytes:
@@ -98,8 +96,7 @@ TEXT ·emitCopy(SB), NOSPLIT, $0-48
loop0:
// for length >= 68 { etc }
- MOVW $68, R2
- CMPW R2, R3
+ CMPW $68, R3
BLT step1
// Emit a length 64 copy, encoded as 3 bytes.
@@ -112,9 +109,8 @@ loop0:
step1:
// if length > 64 { etc }
- MOVD $64, R2
- CMP R2, R3
- BLE step2
+ CMP $64, R3
+ BLE step2
// Emit a length 60 copy, encoded as 3 bytes.
MOVD $0xee, R2
@@ -125,11 +121,9 @@ step1:
step2:
// if length >= 12 || offset >= 2048 { goto step3 }
- MOVD $12, R2
- CMP R2, R3
+ CMP $12, R3
BGE step3
- MOVW $2048, R2
- CMPW R2, R11
+ CMPW $2048, R11
BGE step3
// Emit the remaining copy, encoded as 2 bytes.
@@ -295,27 +289,24 @@ varTable:
// var table [maxTableSize]uint16
//
// In the asm code, unlike the Go code, we can zero-initialize only the
- // first tableSize elements. Each uint16 element is 2 bytes and each VST1
- // writes 64 bytes, so we can do only tableSize/32 writes instead of the
- // 2048 writes that would zero-initialize all of table's 32768 bytes.
- // This clear could overrun the first tableSize elements, but it won't
- // overrun the allocated stack size.
+ // first tableSize elements. Each uint16 element is 2 bytes and each
+ // iterations writes 64 bytes, so we can do only tableSize/32 writes
+ // instead of the 2048 writes that would zero-initialize all of table's
+ // 32768 bytes. This clear could overrun the first tableSize elements, but
+ // it won't overrun the allocated stack size.
ADD $128, RSP, R17
MOVD R17, R4
// !!! R6 = &src[tableSize]
ADD R6<<1, R17, R6
- // zero the SIMD registers
- VEOR V0.B16, V0.B16, V0.B16
- VEOR V1.B16, V1.B16, V1.B16
- VEOR V2.B16, V2.B16, V2.B16
- VEOR V3.B16, V3.B16, V3.B16
-
memclr:
- VST1.P [V0.B16, V1.B16, V2.B16, V3.B16], 64(R4)
- CMP R4, R6
- BHI memclr
+ STP.P (ZR, ZR), 64(R4)
+ STP (ZR, ZR), -48(R4)
+ STP (ZR, ZR), -32(R4)
+ STP (ZR, ZR), -16(R4)
+ CMP R4, R6
+ BHI memclr
// !!! R6 = &src[0]
MOVD R7, R6
@@ -404,8 +395,7 @@ fourByteMatch:
// on inputMargin in encode.go.
MOVD R7, R3
SUB R10, R3, R3
- MOVD $16, R2
- CMP R2, R3
+ CMP $16, R3
BLE emitLiteralFastPath
// ----------------------------------------
@@ -454,18 +444,21 @@ inlineEmitLiteralMemmove:
MOVD R3, 24(RSP)
// Finish the "d +=" part of "d += emitLiteral(etc)".
- ADD R3, R8, R8
- MOVD R7, 80(RSP)
- MOVD R8, 88(RSP)
- MOVD R15, 120(RSP)
- CALL runtime·memmove(SB)
- MOVD 64(RSP), R5
- MOVD 72(RSP), R6
- MOVD 80(RSP), R7
- MOVD 88(RSP), R8
- MOVD 96(RSP), R9
- MOVD 120(RSP), R15
- B inner1
+ ADD R3, R8, R8
+ MOVD R7, 80(RSP)
+ MOVD R8, 88(RSP)
+ MOVD R15, 120(RSP)
+ CALL runtime·memmove(SB)
+ MOVD 64(RSP), R5
+ MOVD 72(RSP), R6
+ MOVD 80(RSP), R7
+ MOVD 88(RSP), R8
+ MOVD 96(RSP), R9
+ MOVD 120(RSP), R15
+ ADD $128, RSP, R17
+ MOVW $0xa7bd, R16
+ MOVKW $(0x1e35<<16), R16
+ B inner1
inlineEmitLiteralEnd:
// End inline of the emitLiteral call.
@@ -489,9 +482,9 @@ emitLiteralFastPath:
// Note that on arm64, it is legal and cheap to issue unaligned 8-byte or
// 16-byte loads and stores. This technique probably wouldn't be as
// effective on architectures that are fussier about alignment.
- VLD1 0(R10), [V0.B16]
- VST1 [V0.B16], 0(R8)
- ADD R3, R8, R8
+ LDP 0(R10), (R0, R1)
+ STP (R0, R1), 0(R8)
+ ADD R3, R8, R8
inner1:
// for { etc }
diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go b/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go
index 3a05e97eb34b9..fd0abe3b48921 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go
+++ b/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go
@@ -35,6 +35,8 @@ import (
"github.com/thanos-io/thanos/pkg/runutil"
)
+const FetcherConcurrency = 32
+
type fetcherMetrics struct {
syncs prometheus.Counter
syncFailures prometheus.Counter
@@ -301,6 +303,7 @@ func (f *BaseFetcher) fetchMetadata(ctx context.Context) (interface{}, error) {
ch = make(chan ulid.ULID, f.concurrency)
mtx sync.Mutex
)
+ level.Debug(f.logger).Log("msg", "fetching meta data", "concurrency", f.concurrency)
for i := 0; i < f.concurrency; i++ {
eg.Go(func() error {
for id := range ch {
diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/lazy_binary_reader.go b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/lazy_binary_reader.go
index e9b9dc20bdc6b..d4b9dee03b679 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/lazy_binary_reader.go
+++ b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/lazy_binary_reader.go
@@ -23,6 +23,11 @@ import (
"github.com/thanos-io/thanos/pkg/objstore"
)
+var (
+ errNotIdle = errors.New("the reader is not idle")
+ errUnloadedWhileLoading = errors.New("the index-header has been concurrently unloaded")
+)
+
// LazyBinaryReaderMetrics holds metrics tracked by LazyBinaryReader.
type LazyBinaryReaderMetrics struct {
loadCount prometheus.Counter
@@ -133,7 +138,8 @@ func (r *LazyBinaryReader) Close() error {
defer r.onClosed(r)
}
- return r.unload()
+ // Unload without checking if idle.
+ return r.unloadIfIdleSince(0)
}
// IndexVersion implements Reader.
@@ -203,7 +209,7 @@ func (r *LazyBinaryReader) LabelNames() ([]string, error) {
// load ensures the underlying binary index-header reader has been successfully loaded. Returns
// an error on failure. This function MUST be called with the read lock already acquired.
-func (r *LazyBinaryReader) load() error {
+func (r *LazyBinaryReader) load() (returnErr error) {
// Nothing to do if we already tried loading it.
if r.reader != nil {
return nil
@@ -216,8 +222,16 @@ func (r *LazyBinaryReader) load() error {
// the read lock once done.
r.readerMx.RUnlock()
r.readerMx.Lock()
- defer r.readerMx.RLock()
- defer r.readerMx.Unlock()
+ defer func() {
+ r.readerMx.Unlock()
+ r.readerMx.RLock()
+
+ // Between the write unlock and the subsequent read lock, the unload() may have run,
+ // so we make sure to catch this edge case.
+ if returnErr == nil && r.reader == nil {
+ returnErr = errUnloadedWhileLoading
+ }
+ }()
// Ensure none else tried to load it in the meanwhile.
if r.reader != nil {
@@ -245,19 +259,22 @@ func (r *LazyBinaryReader) load() error {
return nil
}
-// unload closes underlying BinaryReader. Calling this function on a already unloaded reader is a no-op.
-func (r *LazyBinaryReader) unload() error {
- // Always update the used timestamp so that the pool will not call unload() again until the next
- // idle timeout is hit.
- r.usedAt.Store(time.Now().UnixNano())
-
+// unloadIfIdleSince closes underlying BinaryReader if the reader is idle since given time (as unix nano). If idleSince is 0,
+// the check on the last usage is skipped. Calling this function on a already unloaded reader is a no-op.
+func (r *LazyBinaryReader) unloadIfIdleSince(ts int64) error {
r.readerMx.Lock()
defer r.readerMx.Unlock()
+ // Nothing to do if already unloaded.
if r.reader == nil {
return nil
}
+	// Do not unload if the reader is not idle.
+ if ts > 0 && r.usedAt.Load() > ts {
+ return errNotIdle
+ }
+
r.metrics.unloadCount.Inc()
if err := r.reader.Close(); err != nil {
r.metrics.unloadFailedCount.Inc()
@@ -268,6 +285,16 @@ func (r *LazyBinaryReader) unload() error {
return nil
}
-func (r *LazyBinaryReader) lastUsedAt() int64 {
- return r.usedAt.Load()
+// isIdleSince returns true if the reader has been idle since the given time (as unix nanos).
+func (r *LazyBinaryReader) isIdleSince(ts int64) bool {
+ if r.usedAt.Load() > ts {
+ return false
+ }
+
+ // A reader can be considered idle only if it's loaded.
+ r.readerMx.RLock()
+ loaded := r.reader != nil
+ r.readerMx.RUnlock()
+
+ return loaded
}
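
For reference, the idle-unload pattern introduced in this hunk — an atomically tracked last-used timestamp checked under a lock before closing — can be sketched in isolation. The following toy is a reduction under stated assumptions (a boolean `loaded` flag stands in for the real binary reader), not the vendored Thanos code:

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

// lazyReader is a toy stand-in for an index-header reader that can be
// loaded on demand and unloaded again once it has been idle long enough.
type lazyReader struct {
	mx     sync.RWMutex
	loaded bool
	usedAt atomic.Int64 // unix nanos of last use
}

// use marks the reader as used and (lazily) loads it.
func (r *lazyReader) use() {
	r.usedAt.Store(time.Now().UnixNano())
	r.mx.Lock()
	r.loaded = true
	r.mx.Unlock()
}

// unloadIfIdleSince unloads the reader only if it has not been used since ts
// (unix nanos). ts == 0 skips the idle check, mirroring an unconditional close.
func (r *lazyReader) unloadIfIdleSince(ts int64) error {
	r.mx.Lock()
	defer r.mx.Unlock()
	if !r.loaded {
		return nil
	}
	if ts > 0 && r.usedAt.Load() > ts {
		return fmt.Errorf("reader is not idle")
	}
	r.loaded = false
	return nil
}

func main() {
	r := &lazyReader{}
	r.use()
	cutoff := time.Now().Add(-time.Minute).UnixNano()
	fmt.Println(r.unloadIfIdleSince(cutoff)) // used after cutoff -> error
	fmt.Println(r.unloadIfIdleSince(0))      // unconditional -> nil
}
```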
diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/reader_pool.go b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/reader_pool.go
index 660ae4853a394..93f1fd88b371a 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/reader_pool.go
+++ b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/reader_pool.go
@@ -11,6 +11,7 @@ import (
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/oklog/ulid"
+ "github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/thanos-io/thanos/pkg/objstore"
@@ -98,29 +99,22 @@ func (p *ReaderPool) Close() {
}
func (p *ReaderPool) closeIdleReaders() {
- for _, r := range p.getIdleReaders() {
- // Closing an already closed reader is a no-op, so we close it and just update
- // the last timestamp on success. If it will be still be idle the next time this
- // function is called, we'll try to close it again and will just be a no-op.
- //
- // Due to concurrency, the current implementation may close a reader which was
- // use between when the list of idle readers has been computed and now. This is
- // an edge case we're willing to accept, to not further complicate the logic.
- if err := r.unload(); err != nil {
+ idleTimeoutAgo := time.Now().Add(-p.lazyReaderIdleTimeout).UnixNano()
+
+ for _, r := range p.getIdleReadersSince(idleTimeoutAgo) {
+ if err := r.unloadIfIdleSince(idleTimeoutAgo); err != nil && !errors.Is(err, errNotIdle) {
level.Warn(p.logger).Log("msg", "failed to close idle index-header reader", "err", err)
}
}
}
-func (p *ReaderPool) getIdleReaders() []*LazyBinaryReader {
+func (p *ReaderPool) getIdleReadersSince(ts int64) []*LazyBinaryReader {
p.lazyReadersMx.Lock()
defer p.lazyReadersMx.Unlock()
var idle []*LazyBinaryReader
- threshold := time.Now().Add(-p.lazyReaderIdleTimeout).UnixNano()
-
for r := range p.lazyReaders {
- if r.lastUsedAt() < threshold {
+ if r.isIdleSince(ts) {
idle = append(idle, r)
}
}
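
The pool side now computes the idle cutoff once and hands the same value to both the selection and the unload call, so a reader touched in between is skipped instead of closed. A self-contained toy version of that cleanup, with a hypothetical `reader` type in place of LazyBinaryReader:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// reader is a toy item tracked by the pool; lastUsed and open are protected by mu.
type reader struct {
	mu       sync.Mutex
	lastUsed int64
	open     bool
}

// unloadIfIdleSince closes the reader only if it was last used at or before ts.
func (r *reader) unloadIfIdleSince(ts int64) bool {
	r.mu.Lock()
	defer r.mu.Unlock()
	if !r.open || r.lastUsed > ts {
		return false // already closed, or used after the cutoff
	}
	r.open = false
	return true
}

// closeIdle computes the cutoff once so that selection and unload agree
// on what "idle" means, mirroring the change in the vendored pool.
func closeIdle(readers []*reader, idleTimeout time.Duration) int {
	cutoff := time.Now().Add(-idleTimeout).UnixNano()
	closed := 0
	for _, r := range readers {
		if r.unloadIfIdleSince(cutoff) {
			closed++
		}
	}
	return closed
}

func main() {
	old := &reader{open: true, lastUsed: time.Now().Add(-time.Hour).UnixNano()}
	fresh := &reader{open: true, lastUsed: time.Now().UnixNano()}
	fmt.Println(closeIdle([]*reader{old, fresh}, time.Minute)) // prints 1
}
```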
diff --git a/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go b/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go
index 1046057039c2e..e27478deda862 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go
+++ b/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go
@@ -450,7 +450,7 @@ func (cg *Group) Resolution() int64 {
// Planner returns blocks to compact.
type Planner interface {
- // Plan returns a block directories of blocks that should be compacted into single one.
+	// Plan returns a list of blocks that should be compacted into a single one.
// The blocks can be overlapping. The provided metadata has to be ordered by minTime.
Plan(ctx context.Context, metasByMinTime []*metadata.Meta) ([]*metadata.Meta, error)
}
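
As a usage note on the Planner contract documented above, any type with a matching Plan method satisfies it. A minimal illustrative implementation that never schedules a compaction (not the real Thanos planner) might look like:

```go
package main

import (
	"context"
	"fmt"

	"github.com/thanos-io/thanos/pkg/block/metadata"
)

// noopPlanner satisfies the Planner contract by never proposing a
// compaction group. Purely illustrative.
type noopPlanner struct{}

func (noopPlanner) Plan(_ context.Context, _ []*metadata.Meta) ([]*metadata.Meta, error) {
	return nil, nil
}

func main() {
	p := noopPlanner{}
	toCompact, err := p.Plan(context.Background(), nil)
	fmt.Println(len(toCompact), err) // 0 <nil>
}
```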
diff --git a/vendor/github.com/thanos-io/thanos/pkg/errutil/multierror.go.go b/vendor/github.com/thanos-io/thanos/pkg/errutil/multierror.go
similarity index 100%
rename from vendor/github.com/thanos-io/thanos/pkg/errutil/multierror.go.go
rename to vendor/github.com/thanos-io/thanos/pkg/errutil/multierror.go
diff --git a/vendor/github.com/thanos-io/thanos/pkg/objstore/s3/s3.go b/vendor/github.com/thanos-io/thanos/pkg/objstore/s3/s3.go
index eb679679805b3..edbe49d424941 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/objstore/s3/s3.go
+++ b/vendor/github.com/thanos-io/thanos/pkg/objstore/s3/s3.go
@@ -51,6 +51,11 @@ var DefaultConfig = Config{
HTTPConfig: HTTPConfig{
IdleConnTimeout: model.Duration(90 * time.Second),
ResponseHeaderTimeout: model.Duration(2 * time.Minute),
+ TLSHandshakeTimeout: model.Duration(10 * time.Second),
+ ExpectContinueTimeout: model.Duration(1 * time.Second),
+ MaxIdleConns: 100,
+ MaxIdleConnsPerHost: 100,
+ MaxConnsPerHost: 0,
},
// Minimum file size after which an HTTP multipart request should be used to upload objects to storage.
// Set to 128 MiB as in the minio client.
@@ -94,6 +99,12 @@ type HTTPConfig struct {
ResponseHeaderTimeout model.Duration `yaml:"response_header_timeout"`
InsecureSkipVerify bool `yaml:"insecure_skip_verify"`
+ TLSHandshakeTimeout model.Duration `yaml:"tls_handshake_timeout"`
+ ExpectContinueTimeout model.Duration `yaml:"expect_continue_timeout"`
+ MaxIdleConns int `yaml:"max_idle_conns"`
+ MaxIdleConnsPerHost int `yaml:"max_idle_conns_per_host"`
+ MaxConnsPerHost int `yaml:"max_conns_per_host"`
+
// Allow upstream callers to inject a round tripper
Transport http.RoundTripper `yaml:"-"`
}
@@ -111,11 +122,12 @@ func DefaultTransport(config Config) *http.Transport {
DualStack: true,
}).DialContext,
- MaxIdleConns: 100,
- MaxIdleConnsPerHost: 100,
+ MaxIdleConns: config.HTTPConfig.MaxIdleConns,
+ MaxIdleConnsPerHost: config.HTTPConfig.MaxIdleConnsPerHost,
IdleConnTimeout: time.Duration(config.HTTPConfig.IdleConnTimeout),
- TLSHandshakeTimeout: 10 * time.Second,
- ExpectContinueTimeout: 1 * time.Second,
+ MaxConnsPerHost: config.HTTPConfig.MaxConnsPerHost,
+ TLSHandshakeTimeout: time.Duration(config.HTTPConfig.TLSHandshakeTimeout),
+ ExpectContinueTimeout: time.Duration(config.HTTPConfig.ExpectContinueTimeout),
// A custom ResponseHeaderTimeout was introduced
// to cover cases where the tcp connection works but
// the server never answers. Defaults to 2 minutes.
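
The new HTTPConfig fields map directly onto knobs of net/http's Transport. A standalone sketch of wiring a similar config struct into a transport follows; the field names here are illustrative, not the exact vendored YAML struct:

```go
package main

import (
	"fmt"
	"net"
	"net/http"
	"time"
)

// httpConfig mirrors the shape of the new tunables.
type httpConfig struct {
	IdleConnTimeout       time.Duration
	ResponseHeaderTimeout time.Duration
	TLSHandshakeTimeout   time.Duration
	ExpectContinueTimeout time.Duration
	MaxIdleConns          int
	MaxIdleConnsPerHost   int
	MaxConnsPerHost       int
}

// newTransport builds an http.Transport from the config instead of hard-coding values.
func newTransport(cfg httpConfig) *http.Transport {
	return &http.Transport{
		Proxy: http.ProxyFromEnvironment,
		DialContext: (&net.Dialer{
			Timeout:   30 * time.Second,
			KeepAlive: 30 * time.Second,
		}).DialContext,
		MaxIdleConns:          cfg.MaxIdleConns,
		MaxIdleConnsPerHost:   cfg.MaxIdleConnsPerHost,
		MaxConnsPerHost:       cfg.MaxConnsPerHost,
		IdleConnTimeout:       cfg.IdleConnTimeout,
		TLSHandshakeTimeout:   cfg.TLSHandshakeTimeout,
		ExpectContinueTimeout: cfg.ExpectContinueTimeout,
		ResponseHeaderTimeout: cfg.ResponseHeaderTimeout,
	}
}

func main() {
	t := newTransport(httpConfig{
		IdleConnTimeout:       90 * time.Second,
		ResponseHeaderTimeout: 2 * time.Minute,
		TLSHandshakeTimeout:   10 * time.Second,
		ExpectContinueTimeout: time.Second,
		MaxIdleConns:          100,
		MaxIdleConnsPerHost:   100,
	})
	fmt.Println(t.MaxIdleConns, t.IdleConnTimeout)
}
```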
diff --git a/vendor/github.com/thanos-io/thanos/pkg/objstore/swift/swift.go b/vendor/github.com/thanos-io/thanos/pkg/objstore/swift/swift.go
index 9372382923e0d..fbc832ed7bc9d 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/objstore/swift/swift.go
+++ b/vendor/github.com/thanos-io/thanos/pkg/objstore/swift/swift.go
@@ -39,9 +39,6 @@ var DefaultConfig = Config{
Timeout: model.Duration(5 * time.Minute),
}
-// TODO(FUSAKLA): Added to avoid breaking dependency of Cortex which uses the original struct name SwiftConfig.
-type SwiftConfig = Config
-
type Config struct {
AuthVersion int `yaml:"auth_version"`
AuthUrl string `yaml:"auth_url"`
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 34534e5f15ccd..58dbe93607bcb 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -164,7 +164,7 @@ github.com/coreos/go-systemd/journal
github.com/coreos/go-systemd/sdjournal
# github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f
github.com/coreos/pkg/capnslog
-# github.com/cortexproject/cortex v1.6.1-0.20210129172402-0976147451ee
+# github.com/cortexproject/cortex v1.6.1-0.20210204145131-7dac81171c66
## explicit
github.com/cortexproject/cortex/pkg/alertmanager
github.com/cortexproject/cortex/pkg/alertmanager/alerts
@@ -455,7 +455,7 @@ github.com/golang/protobuf/ptypes/duration
github.com/golang/protobuf/ptypes/empty
github.com/golang/protobuf/ptypes/timestamp
github.com/golang/protobuf/ptypes/wrappers
-# github.com/golang/snappy v0.0.2
+# github.com/golang/snappy v0.0.3-0.20201103224600-674baa8c7fc3
## explicit
github.com/golang/snappy
# github.com/google/btree v1.0.0
@@ -856,7 +856,7 @@ github.com/stretchr/objx
github.com/stretchr/testify/assert
github.com/stretchr/testify/mock
github.com/stretchr/testify/require
-# github.com/thanos-io/thanos v0.13.1-0.20210108102609-f85e4003ba51
+# github.com/thanos-io/thanos v0.13.1-0.20210204123931-82545cdd16fe
github.com/thanos-io/thanos/pkg/block
github.com/thanos-io/thanos/pkg/block/indexheader
github.com/thanos-io/thanos/pkg/block/metadata
| chore | update cortex to latest and fix refs (#3295) |
| 0076bbdb4255c4eda7c70cabd8a0ce079e9686b3 | 2024-07-05 16:38:30 | Cyril Tovena | chore: Refactor storage interface for rf1 (#13415) | false |
diff --git a/pkg/ingester-rf1/flush.go b/pkg/ingester-rf1/flush.go
index d46619575eeae..37b24f6f1abff 100644
--- a/pkg/ingester-rf1/flush.go
+++ b/pkg/ingester-rf1/flush.go
@@ -1,6 +1,7 @@
package ingesterrf1
import (
+ "crypto/rand"
"fmt"
"net/http"
"time"
@@ -9,14 +10,14 @@ import (
"github.com/go-kit/log/level"
"github.com/grafana/dskit/backoff"
"github.com/grafana/dskit/ring"
- "github.com/prometheus/client_golang/prometheus"
+ "github.com/grafana/dskit/runutil"
+ "github.com/oklog/ulid"
"github.com/prometheus/common/model"
"golang.org/x/net/context"
- "github.com/grafana/loki/v3/pkg/chunkenc"
- "github.com/grafana/loki/v3/pkg/storage/chunk"
"github.com/grafana/loki/v3/pkg/storage/wal"
"github.com/grafana/loki/v3/pkg/util"
+ util_log "github.com/grafana/loki/v3/pkg/util/log"
)
const (
@@ -140,7 +141,12 @@ func (i *Ingester) flushOp(l log.Logger, flushCtx *flushCtx) error {
// If the flush isn't successful, the operation for this userID is requeued allowing this and all other unflushed
// segments to have another opportunity to be flushed.
func (i *Ingester) flushSegment(ctx context.Context, ch *wal.SegmentWriter) error {
- if err := i.store.PutWal(ctx, ch); err != nil {
+ reader := ch.Reader()
+ defer runutil.CloseWithLogOnErr(util_log.Logger, reader, "flushSegment")
+
+ newUlid := ulid.MustNew(ulid.Timestamp(time.Now()), rand.Reader)
+
+ if err := i.store.PutObject(ctx, fmt.Sprintf("loki-v2/wal/anon/"+newUlid.String()), reader); err != nil {
i.metrics.chunksFlushFailures.Inc()
return fmt.Errorf("store put chunk: %w", err)
}
@@ -148,39 +154,3 @@ func (i *Ingester) flushSegment(ctx context.Context, ch *wal.SegmentWriter) erro
// TODO: report some flush metrics
return nil
}
-
-// reportFlushedChunkStatistics calculate overall statistics of flushed chunks without compromising the flush process.
-func (i *Ingester) reportFlushedChunkStatistics(ch *chunk.Chunk, desc *chunkDesc, sizePerTenant prometheus.Counter, countPerTenant prometheus.Counter, reason string) {
- byt, err := ch.Encoded()
- if err != nil {
- level.Error(i.logger).Log("msg", "failed to encode flushed wire chunk", "err", err)
- return
- }
-
- i.metrics.chunksFlushedPerReason.WithLabelValues(reason).Add(1)
-
- compressedSize := float64(len(byt))
- uncompressedSize, ok := chunkenc.UncompressedSize(ch.Data)
-
- if ok && compressedSize > 0 {
- i.metrics.chunkCompressionRatio.Observe(float64(uncompressedSize) / compressedSize)
- }
-
- utilization := ch.Data.Utilization()
- i.metrics.chunkUtilization.Observe(utilization)
- numEntries := desc.chunk.Size()
- i.metrics.chunkEntries.Observe(float64(numEntries))
- i.metrics.chunkSize.Observe(compressedSize)
- sizePerTenant.Add(compressedSize)
- countPerTenant.Inc()
-
- boundsFrom, boundsTo := desc.chunk.Bounds()
- i.metrics.chunkAge.Observe(time.Since(boundsFrom).Seconds())
- i.metrics.chunkLifespan.Observe(boundsTo.Sub(boundsFrom).Hours())
-
- i.metrics.flushedChunksBytesStats.Record(compressedSize)
- i.metrics.flushedChunksLinesStats.Record(float64(numEntries))
- i.metrics.flushedChunksUtilizationStats.Record(utilization)
- i.metrics.flushedChunksAgeStats.Record(time.Since(boundsFrom).Seconds())
- i.metrics.flushedChunksLifespanStats.Record(boundsTo.Sub(boundsFrom).Seconds())
-}
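
The new flush path reduces to: take a reader over the serialized segment, mint a ULID from the current timestamp, and PUT the bytes under a time-sortable key. A reduced sketch of that sequence, with an in-memory stub standing in for the real object store:

```go
package main

import (
	"bytes"
	"context"
	"crypto/rand"
	"fmt"
	"io"
	"time"

	"github.com/oklog/ulid"
)

// objectStore is a stub for the storage dependency; only PutObject matters
// for the flush path sketched here.
type objectStore interface {
	PutObject(ctx context.Context, key string, object io.Reader) error
}

// memStore records keys and drains bodies, acting as a test double.
type memStore struct{ keys []string }

func (m *memStore) PutObject(_ context.Context, key string, object io.Reader) error {
	if _, err := io.Copy(io.Discard, object); err != nil {
		return err
	}
	m.keys = append(m.keys, key)
	return nil
}

// flushSegment mirrors the shape of the new flush: ULIDs embed the timestamp,
// so keys under the prefix sort roughly by flush time.
func flushSegment(ctx context.Context, store objectStore, segment io.Reader) error {
	id := ulid.MustNew(ulid.Timestamp(time.Now()), rand.Reader)
	key := "loki-v2/wal/anon/" + id.String()
	if err := store.PutObject(ctx, key, segment); err != nil {
		return fmt.Errorf("store put segment: %w", err)
	}
	return nil
}

func main() {
	store := &memStore{}
	_ = flushSegment(context.Background(), store, bytes.NewReader([]byte("segment bytes")))
	fmt.Println(store.keys)
}
```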
diff --git a/pkg/ingester-rf1/ingester.go b/pkg/ingester-rf1/ingester.go
index fef8418945f3c..d87159952e04b 100644
--- a/pkg/ingester-rf1/ingester.go
+++ b/pkg/ingester-rf1/ingester.go
@@ -4,6 +4,7 @@ import (
"context"
"flag"
"fmt"
+ "io"
"math/rand"
"net/http"
"os"
@@ -16,9 +17,10 @@ import (
"github.com/opentracing/opentracing-go"
"github.com/grafana/loki/v3/pkg/ingester-rf1/clientpool"
+ "github.com/grafana/loki/v3/pkg/ingester-rf1/objstore"
"github.com/grafana/loki/v3/pkg/ingester/index"
"github.com/grafana/loki/v3/pkg/loghttp/push"
- lokilog "github.com/grafana/loki/v3/pkg/logql/log"
+ "github.com/grafana/loki/v3/pkg/storage"
"github.com/grafana/loki/v3/pkg/storage/types"
"github.com/grafana/loki/v3/pkg/storage/wal"
util_log "github.com/grafana/loki/v3/pkg/util/log"
@@ -35,20 +37,13 @@ import (
"github.com/prometheus/client_golang/prometheus"
"google.golang.org/grpc/health/grpc_health_v1"
- server_util "github.com/grafana/loki/v3/pkg/util/server"
-
"github.com/grafana/loki/v3/pkg/analytics"
"github.com/grafana/loki/v3/pkg/chunkenc"
"github.com/grafana/loki/v3/pkg/distributor/writefailures"
"github.com/grafana/loki/v3/pkg/ingester/client"
"github.com/grafana/loki/v3/pkg/logproto"
- "github.com/grafana/loki/v3/pkg/logql/syntax"
"github.com/grafana/loki/v3/pkg/runtime"
- "github.com/grafana/loki/v3/pkg/storage"
- "github.com/grafana/loki/v3/pkg/storage/chunk"
"github.com/grafana/loki/v3/pkg/storage/config"
- "github.com/grafana/loki/v3/pkg/storage/stores"
- indexstore "github.com/grafana/loki/v3/pkg/storage/stores/index"
"github.com/grafana/loki/v3/pkg/util"
)
@@ -121,7 +116,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
f.IntVar(&cfg.FlushOpBackoff.MaxRetries, "ingester-rf1.flush-op-backoff-retries", 10, "Maximum retries for failed flushes.")
f.DurationVar(&cfg.FlushOpTimeout, "ingester-rf1.flush-op-timeout", 10*time.Minute, "The timeout for an individual flush. Will be retried up to `flush-op-backoff-retries` times.")
f.DurationVar(&cfg.RetainPeriod, "ingester-rf1.chunks-retain-period", 0, "How long chunks should be retained in-memory after they've been flushed.")
- //f.DurationVar(&cfg.MaxChunkIdle, "ingester-rf1.chunks-idle-period", 30*time.Minute, "How long chunks should sit in-memory with no updates before being flushed if they don't hit the max block size. This means that half-empty chunks will still be flushed after a certain period as long as they receive no further activity.")
+ // f.DurationVar(&cfg.MaxChunkIdle, "ingester-rf1.chunks-idle-period", 30*time.Minute, "How long chunks should sit in-memory with no updates before being flushed if they don't hit the max block size. This means that half-empty chunks will still be flushed after a certain period as long as they receive no further activity.")
f.IntVar(&cfg.BlockSize, "ingester-rf1.chunks-block-size", 256*1024, "The targeted _uncompressed_ size in bytes of a chunk block When this threshold is exceeded the head block will be cut and compressed inside the chunk.")
f.IntVar(&cfg.TargetChunkSize, "ingester-rf1.chunk-target-size", 1572864, "A target _compressed_ size in bytes for chunks. This is a desired size not an exact size, chunks may be slightly bigger or significantly smaller if they get flushed for other reasons (e.g. chunk_idle_period). A value of 0 creates chunks with a fixed 10 blocks, a non zero value will create chunks with a variable number of blocks to meet the target size.") // 1.5 MB
f.StringVar(&cfg.ChunkEncoding, "ingester-rf1.chunk-encoding", chunkenc.EncGZIP.String(), fmt.Sprintf("The algorithm to use for compressing chunk. (%s)", chunkenc.SupportedEncoding()))
@@ -159,13 +154,10 @@ type Wrapper interface {
Wrap(wrapped Interface) Interface
}
-// Store is the store interface we need on the ingester.
-type Store interface {
- stores.ChunkWriter
- stores.ChunkFetcher
- storage.SelectStore
- storage.SchemaConfigProvider
- indexstore.StatsReader
+// Storage is the store interface we need on the ingester.
+type Storage interface {
+ PutObject(ctx context.Context, objectKey string, object io.Reader) error
+ Stop()
}
// Interface is an interface for the Ingester
@@ -174,8 +166,6 @@ type Interface interface {
http.Handler
logproto.PusherServer
- //logproto.QuerierServer
- //logproto.StreamDataServer
CheckReady(ctx context.Context) error
FlushHandler(w http.ResponseWriter, _ *http.Request)
@@ -218,7 +208,7 @@ type Ingester struct {
lifecycler *ring.Lifecycler
lifecyclerWatcher *services.FailureWatcher
- store Store
+ store Storage
periodicConfigs []config.PeriodConfig
loopDone sync.WaitGroup
@@ -240,14 +230,10 @@ type Ingester struct {
terminateOnShutdown bool
// Only used by WAL & flusher to coordinate backpressure during replay.
- //replayController *replayController
+ // replayController *replayController
metrics *ingesterMetrics
- chunkFilter chunk.RequestChunkFilterer
- extractorWrapper lokilog.SampleExtractorWrapper
- pipelineWrapper lokilog.PipelineWrapper
-
streamRateCalculator *StreamRateCalculator
writeLogManager *writefailures.Manager
@@ -256,11 +242,25 @@ type Ingester struct {
// recalculateOwnedStreams periodically checks the ring for changes and recalculates owned streams for each instance.
readRing ring.ReadRing
- //recalculateOwnedStreams *recalculateOwnedStreams
+ // recalculateOwnedStreams *recalculateOwnedStreams
}
// New makes a new Ingester.
-func New(cfg Config, clientConfig client.Config, store Store, limits Limits, configs *runtime.TenantConfigs, registerer prometheus.Registerer, writeFailuresCfg writefailures.Cfg, metricsNamespace string, logger log.Logger, customStreamsTracker push.UsageTracker, readRing ring.ReadRing) (*Ingester, error) {
+func New(cfg Config, clientConfig client.Config,
+ periodConfigs []config.PeriodConfig,
+ storageConfig storage.Config,
+ clientMetrics storage.ClientMetrics,
+ limits Limits, configs *runtime.TenantConfigs,
+ registerer prometheus.Registerer,
+ writeFailuresCfg writefailures.Cfg,
+ metricsNamespace string,
+ logger log.Logger,
+ customStreamsTracker push.UsageTracker, readRing ring.ReadRing,
+) (*Ingester, error) {
+ storage, err := objstore.New(periodConfigs, storageConfig, clientMetrics)
+ if err != nil {
+ return nil, err
+ }
if cfg.ingesterClientFactory == nil {
cfg.ingesterClientFactory = client.New
}
@@ -279,13 +279,13 @@ func New(cfg Config, clientConfig client.Config, store Store, limits Limits, con
clientConfig: clientConfig,
tenantConfigs: configs,
instances: map[string]*instance{},
- store: store,
- periodicConfigs: store.GetSchemaConfigs(),
+ store: storage,
+ periodicConfigs: periodConfigs,
loopQuit: make(chan struct{}),
flushQueues: make([]*util.PriorityQueue, cfg.ConcurrentFlushes),
tailersQuit: make(chan struct{}),
metrics: metrics,
- //flushOnShutdownSwitch: &OnceSwitch{},
+ // flushOnShutdownSwitch: &OnceSwitch{},
terminateOnShutdown: false,
streamRateCalculator: NewStreamRateCalculator(),
writeLogManager: writefailures.NewManager(logger, registerer, writeFailuresCfg, configs, "ingester_rf1"),
@@ -298,7 +298,6 @@ func New(cfg Config, clientConfig client.Config, store Store, limits Limits, con
segmentWriter: segmentWriter,
},
}
- //i.replayController = newReplayController(metrics, cfg.WAL, &replayFlusher{i})
// TODO: change flush on shutdown
i.lifecycler, err = ring.NewLifecycler(cfg.LifecyclerConfig, i, "ingester-rf1", "ingester-rf1-ring", true, logger, prometheus.WrapRegistererWithPrefix(metricsNamespace+"_", registerer))
@@ -334,18 +333,6 @@ func New(cfg Config, clientConfig client.Config, store Store, limits Limits, con
return i, nil
}
-func (i *Ingester) SetChunkFilterer(chunkFilter chunk.RequestChunkFilterer) {
- i.chunkFilter = chunkFilter
-}
-
-func (i *Ingester) SetExtractorWrapper(wrapper lokilog.SampleExtractorWrapper) {
- i.extractorWrapper = wrapper
-}
-
-func (i *Ingester) SetPipelineWrapper(wrapper lokilog.PipelineWrapper) {
- i.pipelineWrapper = wrapper
-}
-
// setupAutoForget looks for ring status if `AutoForgetUnhealthy` is enabled
// when enabled, unhealthy ingesters that reach `ring.kvstore.heartbeat_timeout` are removed from the ring every `HeartbeatPeriod`
func (i *Ingester) setupAutoForget() {
@@ -490,7 +477,7 @@ func (i *Ingester) running(ctx context.Context) error {
func (i *Ingester) stopping(_ error) error {
i.stopIncomingRequests()
var errs util.MultiError
- //errs.Add(i.wal.Stop())
+ // errs.Add(i.wal.Stop())
//if i.flushOnShutdownSwitch.Get() {
// i.lifecycler.SetFlushOnShutdown(true)
@@ -502,7 +489,7 @@ func (i *Ingester) stopping(_ error) error {
}
i.flushQueuesDone.Wait()
- //i.streamRateCalculator.Stop()
+ // i.streamRateCalculator.Stop()
// In case the flag to terminate on shutdown is set or this instance is marked to release its resources,
// we need to mark the ingester service as "failed", so Loki will shut down entirely.
@@ -511,6 +498,7 @@ func (i *Ingester) stopping(_ error) error {
i.removeShutdownMarkerFile()
return modules.ErrStopProcess
}
+ i.store.Stop()
return errs.Err()
}
@@ -581,7 +569,7 @@ func (i *Ingester) loop() {
func (i *Ingester) doFlushTick() {
i.flushCtx.lock.Lock()
- //i.logger.Log("msg", "starting periodic flush")
+ // i.logger.Log("msg", "starting periodic flush")
// Stop new chunks being written while we swap destinations - we'll never unlock as this flushctx can no longer be used.
currentFlushCtx := i.flushCtx
@@ -708,7 +696,7 @@ func createShutdownMarker(p string) error {
return err
}
- dir, err := os.OpenFile(path.Dir(p), os.O_RDONLY, 0777)
+ dir, err := os.OpenFile(path.Dir(p), os.O_RDONLY, 0o777)
if err != nil {
return err
}
@@ -725,7 +713,7 @@ func removeShutdownMarker(p string) error {
return err
}
- dir, err := os.OpenFile(path.Dir(p), os.O_RDONLY, 0777)
+ dir, err := os.OpenFile(path.Dir(p), os.O_RDONLY, 0o777)
if err != nil {
return err
}
@@ -811,7 +799,7 @@ func (i *Ingester) Push(ctx context.Context, req *logproto.PushRequest) (*logpro
// Fetch a flush context and try to acquire the RLock
// The only time the Write Lock is held is when this context is no longer usable and a new one is being created.
// In this case, we need to re-read i.flushCtx in order to fetch the new one as soon as it's available.
- //The newCtxAvailable chan is closed as soon as the new one is available to avoid a busy loop.
+ // The newCtxAvailable chan is closed as soon as the new one is available to avoid a busy loop.
currentFlushCtx := i.flushCtx
for !currentFlushCtx.lock.TryRLock() {
select {
@@ -863,7 +851,7 @@ func (i *Ingester) GetOrCreateInstance(instanceID string) (*instance, error) { /
inst, ok = i.instances[instanceID]
if !ok {
var err error
- inst, err = newInstance(&i.cfg, i.periodicConfigs, instanceID, i.limiter, i.tenantConfigs, i.metrics, i.chunkFilter, i.pipelineWrapper, i.extractorWrapper, i.streamRateCalculator, i.writeLogManager, i.customStreamsTracker)
+ inst, err = newInstance(&i.cfg, i.periodicConfigs, instanceID, i.limiter, i.tenantConfigs, i.metrics, i.streamRateCalculator, i.writeLogManager, i.customStreamsTracker)
if err != nil {
return nil, err
}
@@ -894,62 +882,6 @@ func (i *Ingester) asyncStoreMaxLookBack() time.Duration {
return maxLookBack
}
-// GetChunkIDs is meant to be used only when using an async store like boltdb-shipper or tsdb.
-func (i *Ingester) GetChunkIDs(ctx context.Context, req *logproto.GetChunkIDsRequest) (*logproto.GetChunkIDsResponse, error) {
- gcr, err := i.getChunkIDs(ctx, req)
- err = server_util.ClientGrpcStatusAndError(err)
- return gcr, err
-}
-
-// GetChunkIDs is meant to be used only when using an async store like boltdb-shipper or tsdb.
-func (i *Ingester) getChunkIDs(ctx context.Context, req *logproto.GetChunkIDsRequest) (*logproto.GetChunkIDsResponse, error) {
- orgID, err := tenant.TenantID(ctx)
- if err != nil {
- return nil, err
- }
-
- // Set profiling tags
- defer pprof.SetGoroutineLabels(ctx)
- ctx = pprof.WithLabels(ctx, pprof.Labels("path", "read", "type", "chunkIDs", "tenant", orgID))
- pprof.SetGoroutineLabels(ctx)
-
- asyncStoreMaxLookBack := i.asyncStoreMaxLookBack()
- if asyncStoreMaxLookBack == 0 {
- return &logproto.GetChunkIDsResponse{}, nil
- }
-
- reqStart := req.Start
- reqStart = adjustQueryStartTime(asyncStoreMaxLookBack, reqStart, time.Now())
-
- // parse the request
- start, end := util.RoundToMilliseconds(reqStart, req.End)
- matchers, err := syntax.ParseMatchers(req.Matchers, true)
- if err != nil {
- return nil, err
- }
-
- // get chunk references
- chunksGroups, _, err := i.store.GetChunks(ctx, orgID, start, end, chunk.NewPredicate(matchers, nil), nil)
- if err != nil {
- return nil, err
- }
-
- // todo (Callum) ingester should maybe store the whole schema config?
- s := config.SchemaConfig{
- Configs: i.periodicConfigs,
- }
-
- // build the response
- resp := logproto.GetChunkIDsResponse{ChunkIDs: []string{}}
- for _, chunks := range chunksGroups {
- for _, chk := range chunks {
- resp.ChunkIDs = append(resp.ChunkIDs, s.ExternalKey(chk.ChunkRef))
- }
- }
-
- return &resp, nil
-}
-
// Watch implements grpc_health_v1.HealthCheck.
func (*Ingester) Watch(*grpc_health_v1.HealthCheckRequest, grpc_health_v1.Health_WatchServer) error {
return nil
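
Narrowing the ingester's store dependency to the two-method Storage interface above makes it straightforward to fake in tests. A minimal in-memory implementation, purely illustrative:

```go
package main

import (
	"context"
	"fmt"
	"io"
	"strings"
	"sync"
)

// Storage matches the narrowed ingester dependency: write an object, stop.
type Storage interface {
	PutObject(ctx context.Context, objectKey string, object io.Reader) error
	Stop()
}

// fakeStorage is a test double capturing object bodies in memory.
type fakeStorage struct {
	mu      sync.Mutex
	objects map[string][]byte
	stopped bool
}

func newFakeStorage() *fakeStorage {
	return &fakeStorage{objects: map[string][]byte{}}
}

func (f *fakeStorage) PutObject(_ context.Context, objectKey string, object io.Reader) error {
	body, err := io.ReadAll(object)
	if err != nil {
		return err
	}
	f.mu.Lock()
	defer f.mu.Unlock()
	f.objects[objectKey] = body
	return nil
}

func (f *fakeStorage) Stop() {
	f.mu.Lock()
	defer f.mu.Unlock()
	f.stopped = true
}

var _ Storage = (*fakeStorage)(nil)

func main() {
	var s Storage = newFakeStorage()
	_ = s.PutObject(context.Background(), "wal/0001", strings.NewReader("payload"))
	s.Stop()
	fmt.Println("ok")
}
```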
diff --git a/pkg/ingester-rf1/instance.go b/pkg/ingester-rf1/instance.go
index e5c54549713af..16a6758f4ec83 100644
--- a/pkg/ingester-rf1/instance.go
+++ b/pkg/ingester-rf1/instance.go
@@ -20,10 +20,8 @@ import (
"github.com/grafana/loki/v3/pkg/ingester/index"
"github.com/grafana/loki/v3/pkg/loghttp/push"
"github.com/grafana/loki/v3/pkg/logproto"
- "github.com/grafana/loki/v3/pkg/logql/log"
"github.com/grafana/loki/v3/pkg/logql/syntax"
"github.com/grafana/loki/v3/pkg/runtime"
- "github.com/grafana/loki/v3/pkg/storage/chunk"
"github.com/grafana/loki/v3/pkg/storage/config"
"github.com/grafana/loki/v3/pkg/util/constants"
util_log "github.com/grafana/loki/v3/pkg/util/log"
@@ -69,7 +67,7 @@ type instance struct {
streamsCreatedTotal prometheus.Counter
streamsRemovedTotal prometheus.Counter
- //tailers map[uint32]*tailer
+ // tailers map[uint32]*tailer
tailerMtx sync.RWMutex
limiter *Limiter
@@ -80,9 +78,6 @@ type instance struct {
metrics *ingesterMetrics
- chunkFilter chunk.RequestChunkFilterer
- pipelineWrapper log.PipelineWrapper
- extractorWrapper log.SampleExtractorWrapper
streamRateCalculator *StreamRateCalculator
writeFailures *writefailures.Manager
@@ -123,9 +118,6 @@ func newInstance(
limiter *Limiter,
configs *runtime.TenantConfigs,
metrics *ingesterMetrics,
- chunkFilter chunk.RequestChunkFilterer,
- pipelineWrapper log.PipelineWrapper,
- extractorWrapper log.SampleExtractorWrapper,
streamRateCalculator *StreamRateCalculator,
writeFailures *writefailures.Manager,
customStreamsTracker push.UsageTracker,
@@ -154,9 +146,6 @@ func newInstance(
ownedStreamsSvc: ownedStreamsSvc,
configs: configs,
metrics: metrics,
- chunkFilter: chunkFilter,
- pipelineWrapper: pipelineWrapper,
- extractorWrapper: extractorWrapper,
streamRateCalculator: streamRateCalculator,
@@ -286,7 +275,7 @@ func (i *instance) onStreamCreated(s *stream) {
memoryStreams.WithLabelValues(i.instanceID).Inc()
memoryStreamsLabelsBytes.Add(float64(len(s.labels.String())))
i.streamsCreatedTotal.Inc()
- //i.addTailersToNewStream(s)
+ // i.addTailersToNewStream(s)
streamsCountStats.Add(1)
i.ownedStreamsSvc.incOwnedStreamCount()
if i.configs.LogStreamCreation(i.instanceID) {
diff --git a/pkg/ingester-rf1/objstore/storage.go b/pkg/ingester-rf1/objstore/storage.go
new file mode 100644
index 0000000000000..8ec7cf6970838
--- /dev/null
+++ b/pkg/ingester-rf1/objstore/storage.go
@@ -0,0 +1,132 @@
+package objstore
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "sort"
+
+ "github.com/prometheus/common/model"
+
+ "github.com/grafana/loki/v3/pkg/storage"
+ "github.com/grafana/loki/v3/pkg/storage/chunk/client"
+ "github.com/grafana/loki/v3/pkg/storage/config"
+)
+
+type Multi struct {
+ stores []*storeEntry
+ storageConfig storage.Config
+}
+
+type storeEntry struct {
+ start model.Time
+ cfg config.PeriodConfig
+ objectClient client.ObjectClient
+}
+
+var _ client.ObjectClient = (*Multi)(nil)
+
+func New(
+ periodicConfigs []config.PeriodConfig,
+ storageConfig storage.Config,
+ clientMetrics storage.ClientMetrics,
+) (*Multi, error) {
+ store := &Multi{
+ storageConfig: storageConfig,
+ }
+ // sort by From time
+ sort.Slice(periodicConfigs, func(i, j int) bool {
+		return periodicConfigs[i].From.Time.Before(periodicConfigs[j].From.Time)
+ })
+ for _, periodicConfig := range periodicConfigs {
+ objectClient, err := storage.NewObjectClient(periodicConfig.ObjectType, storageConfig, clientMetrics)
+ if err != nil {
+			return nil, fmt.Errorf("creating object client for period %s: %w", periodicConfig.From, err)
+ }
+ store.stores = append(store.stores, &storeEntry{
+ start: periodicConfig.From.Time,
+ cfg: periodicConfig,
+ objectClient: objectClient,
+ })
+ }
+ return store, nil
+}
+
+func (m *Multi) GetStoreFor(ts model.Time) (client.ObjectClient, error) {
+	// find the schema with the lowest start _after_ ts
+ j := sort.Search(len(m.stores), func(j int) bool {
+ return m.stores[j].start > ts
+ })
+
+	// reduce it by 1 because we want a schema with start <= ts
+ j--
+
+ if 0 <= j && j < len(m.stores) {
+ return m.stores[j].objectClient, nil
+ }
+
+ // should in theory never happen
+ return nil, fmt.Errorf("no store found for timestamp %s", ts)
+}
+
+func (m *Multi) ObjectExists(ctx context.Context, objectKey string) (bool, error) {
+ s, err := m.GetStoreFor(model.Now())
+ if err != nil {
+ return false, err
+ }
+ return s.ObjectExists(ctx, objectKey)
+}
+
+func (m *Multi) PutObject(ctx context.Context, objectKey string, object io.Reader) error {
+ s, err := m.GetStoreFor(model.Now())
+ if err != nil {
+ return err
+ }
+ return s.PutObject(ctx, objectKey, object)
+}
+
+func (m *Multi) GetObject(ctx context.Context, objectKey string) (io.ReadCloser, int64, error) {
+ s, err := m.GetStoreFor(model.Now())
+ if err != nil {
+ return nil, 0, err
+ }
+ return s.GetObject(ctx, objectKey)
+}
+
+func (m *Multi) List(ctx context.Context, prefix string, delimiter string) ([]client.StorageObject, []client.StorageCommonPrefix, error) {
+ s, err := m.GetStoreFor(model.Now())
+ if err != nil {
+ return nil, nil, err
+ }
+ return s.List(ctx, prefix, delimiter)
+}
+
+func (m *Multi) DeleteObject(ctx context.Context, objectKey string) error {
+ s, err := m.GetStoreFor(model.Now())
+ if err != nil {
+ return err
+ }
+ return s.DeleteObject(ctx, objectKey)
+}
+
+func (m *Multi) IsObjectNotFoundErr(err error) bool {
+ s, _ := m.GetStoreFor(model.Now())
+ if s == nil {
+ return false
+ }
+ return s.IsObjectNotFoundErr(err)
+}
+
+func (m *Multi) IsRetryableErr(err error) bool {
+ s, _ := m.GetStoreFor(model.Now())
+ if s == nil {
+ return false
+ }
+ return s.IsRetryableErr(err)
+}
+
+func (m *Multi) Stop() {
+ for _, s := range m.stores {
+ s.objectClient.Stop()
+ }
+}
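
GetStoreFor in the new file is the usual "latest period whose start is <= ts" lookup: binary-search for the first start strictly after ts, then step back one index. The same pattern in isolation, with an illustrative period type:

```go
package main

import (
	"fmt"
	"sort"
	"time"
)

// period pairs a start time with some per-period handle (a store, a config, ...).
type period struct {
	start time.Time
	name  string
}

// storeFor returns the handle of the latest period whose start is <= ts.
// periods must be sorted ascending by start.
func storeFor(periods []period, ts time.Time) (string, error) {
	// First index whose start is strictly after ts...
	j := sort.Search(len(periods), func(i int) bool {
		return periods[i].start.After(ts)
	})
	// ...then step back one to get start <= ts.
	j--
	if j < 0 {
		return "", fmt.Errorf("no period covers %s", ts)
	}
	return periods[j].name, nil
}

func main() {
	periods := []period{
		{start: time.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC), name: "period-2023"},
		{start: time.Date(2024, 6, 1, 0, 0, 0, 0, time.UTC), name: "period-2024"},
	}
	name, _ := storeFor(periods, time.Date(2024, 7, 5, 0, 0, 0, 0, time.UTC))
	fmt.Println(name) // period-2024
}
```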
diff --git a/pkg/ingester-rf1/stream.go b/pkg/ingester-rf1/stream.go
index 932c6244bbf20..8bd7bdd0e329d 100644
--- a/pkg/ingester-rf1/stream.go
+++ b/pkg/ingester-rf1/stream.go
@@ -52,8 +52,8 @@ type stream struct {
metrics *ingesterMetrics
- //tailers map[uint32]*tailer
- //tailerMtx sync.RWMutex
+ // tailers map[uint32]*tailer
+ // tailerMtx sync.RWMutex
// entryCt is a counter which is incremented on each accepted entry.
// This allows us to discard WAL entries during replays which were
@@ -63,7 +63,7 @@ type stream struct {
entryCt int64
unorderedWrites bool
- //streamRateCalculator *StreamRateCalculator
+ // streamRateCalculator *StreamRateCalculator
writeFailures *writefailures.Manager
@@ -95,11 +95,11 @@ func newStream(
fp model.Fingerprint,
labels labels.Labels,
unorderedWrites bool,
- //streamRateCalculator *StreamRateCalculator,
+ // streamRateCalculator *StreamRateCalculator,
metrics *ingesterMetrics,
writeFailures *writefailures.Manager,
) *stream {
- //hashNoShard, _ := labels.HashWithoutLabels(make([]byte, 0, 1024), ShardLbName)
+ // hashNoShard, _ := labels.HashWithoutLabels(make([]byte, 0, 1024), ShardLbName)
return &stream{
limiter: NewStreamRateLimiter(limits, tenant, 10*time.Second),
cfg: cfg,
@@ -107,11 +107,11 @@ func newStream(
labels: labels,
labelsString: labels.String(),
labelHash: labels.Hash(),
- //labelHashNoShard: hashNoShard,
- //tailers: map[uint32]*tailer{},
+ // labelHashNoShard: hashNoShard,
+ // tailers: map[uint32]*tailer{},
metrics: metrics,
tenant: tenant,
- //streamRateCalculator: streamRateCalculator,
+ // streamRateCalculator: streamRateCalculator,
unorderedWrites: unorderedWrites,
writeFailures: writeFailures,
@@ -137,13 +137,12 @@ func (s *stream) Push(
usageTracker push.UsageTracker,
flushCtx *flushCtx,
) (int, error) {
-
toStore, invalid := s.validateEntries(ctx, entries, rateLimitWholeStream, usageTracker)
if rateLimitWholeStream && hasRateLimitErr(invalid) {
return 0, errorForFailedEntries(s, invalid, len(entries))
}
- bytesAdded, _ := s.storeEntries(ctx, toStore, usageTracker, flushCtx)
+ bytesAdded := s.storeEntries(ctx, toStore, usageTracker, flushCtx)
return bytesAdded, errorForFailedEntries(s, invalid, len(entries))
}
@@ -196,7 +195,7 @@ func hasRateLimitErr(errs []entryWithError) bool {
return ok
}
-func (s *stream) storeEntries(ctx context.Context, entries []logproto.Entry, usageTracker push.UsageTracker, flushCtx *flushCtx) (int, []*logproto.Entry) {
+func (s *stream) storeEntries(ctx context.Context, entries []*logproto.Entry, usageTracker push.UsageTracker, flushCtx *flushCtx) int {
if sp := opentracing.SpanFromContext(ctx); sp != nil {
sp.LogKV("event", "stream started to store entries", "labels", s.labelsString)
defer sp.LogKV("event", "stream finished to store entries")
@@ -204,7 +203,6 @@ func (s *stream) storeEntries(ctx context.Context, entries []logproto.Entry, usa
var bytesAdded, outOfOrderSamples, outOfOrderBytes int
- storedEntries := make([]*logproto.Entry, 0, len(entries))
for i := 0; i < len(entries); i++ {
s.entryCt++
s.lastLine.ts = entries[i].Timestamp
@@ -214,15 +212,13 @@ func (s *stream) storeEntries(ctx context.Context, entries []logproto.Entry, usa
}
bytesAdded += len(entries[i].Line)
- storedEntries = append(storedEntries, &entries[i])
}
- flushCtx.segmentWriter.Append(s.tenant, s.labels.String(), s.labels, storedEntries)
+ flushCtx.segmentWriter.Append(s.tenant, s.labels.String(), s.labels, entries)
s.reportMetrics(ctx, outOfOrderSamples, outOfOrderBytes, 0, 0, usageTracker)
- return bytesAdded, storedEntries
+ return bytesAdded
}
-func (s *stream) validateEntries(ctx context.Context, entries []logproto.Entry, rateLimitWholeStream bool, usageTracker push.UsageTracker) ([]logproto.Entry, []entryWithError) {
-
+func (s *stream) validateEntries(ctx context.Context, entries []logproto.Entry, rateLimitWholeStream bool, usageTracker push.UsageTracker) ([]*logproto.Entry, []entryWithError) {
var (
outOfOrderSamples, outOfOrderBytes int
rateLimitedSamples, rateLimitedBytes int
@@ -231,7 +227,7 @@ func (s *stream) validateEntries(ctx context.Context, entries []logproto.Entry,
limit = s.limiter.lim.Limit()
lastLine = s.lastLine
highestTs = s.highestTs
- toStore = make([]logproto.Entry, 0, len(entries))
+ toStore = make([]*logproto.Entry, 0, len(entries))
)
for i := range entries {
@@ -277,7 +273,7 @@ func (s *stream) validateEntries(ctx context.Context, entries []logproto.Entry,
highestTs = entries[i].Timestamp
}
- toStore = append(toStore, entries[i])
+ toStore = append(toStore, &entries[i])
}
// Each successful call to 'AllowN' advances the limiter. With all-or-nothing
@@ -289,12 +285,12 @@ func (s *stream) validateEntries(ctx context.Context, entries []logproto.Entry,
rateLimitedSamples = len(toStore)
failedEntriesWithError = make([]entryWithError, 0, len(toStore))
for i := 0; i < len(toStore); i++ {
- failedEntriesWithError = append(failedEntriesWithError, entryWithError{&toStore[i], &validation.ErrStreamRateLimit{RateLimit: flagext.ByteSize(limit), Labels: s.labelsString, Bytes: flagext.ByteSize(len(toStore[i].Line))}})
+ failedEntriesWithError = append(failedEntriesWithError, entryWithError{toStore[i], &validation.ErrStreamRateLimit{RateLimit: flagext.ByteSize(limit), Labels: s.labelsString, Bytes: flagext.ByteSize(len(toStore[i].Line))}})
rateLimitedBytes += len(toStore[i].Line)
}
}
- //s.streamRateCalculator.Record(s.tenant, s.labelHash, s.labelHashNoShard, totalBytes)
+ // s.streamRateCalculator.Record(s.tenant, s.labelHash, s.labelHashNoShard, totalBytes)
s.reportMetrics(ctx, outOfOrderSamples, outOfOrderBytes, rateLimitedSamples, rateLimitedBytes, usageTracker)
return toStore, failedEntriesWithError
}
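
The switch to []*logproto.Entry relies on taking the address of the slice element (&entries[i]) rather than of a range variable; the former aliases the backing array, while the latter points at a per-iteration copy (and, before Go 1.22, at a single shared loop variable). A tiny self-contained illustration of the difference:

```go
package main

import "fmt"

type entry struct{ line string }

func main() {
	entries := []entry{{"a"}, {"b"}, {"c"}}

	// Pointers into the backing array: mutations through them are visible
	// in the original slice, and no copies are made.
	byIndex := make([]*entry, 0, len(entries))
	for i := range entries {
		byIndex = append(byIndex, &entries[i])
	}

	// Pointers to the range variable: each points at a copy of the element
	// (and before Go 1.22, at one shared loop variable without the shadow below).
	byValue := make([]*entry, 0, len(entries))
	for _, e := range entries {
		e := e // unnecessary from Go 1.22 on
		byValue = append(byValue, &e)
	}

	byIndex[0].line = "mutated"
	byValue[1].line = "also mutated"
	fmt.Println(entries[0].line) // "mutated": byIndex aliases the slice
	fmt.Println(entries[1].line) // "b": byValue only changed a copy
}
```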
diff --git a/pkg/ingester/flush_test.go b/pkg/ingester/flush_test.go
index a33b9d8ba4ee0..69462a3d352a5 100644
--- a/pkg/ingester/flush_test.go
+++ b/pkg/ingester/flush_test.go
@@ -37,7 +37,6 @@ import (
"github.com/grafana/loki/v3/pkg/storage/config"
"github.com/grafana/loki/v3/pkg/storage/stores/index/stats"
"github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb/sharding"
- walsegment "github.com/grafana/loki/v3/pkg/storage/wal"
"github.com/grafana/loki/v3/pkg/util/constants"
"github.com/grafana/loki/v3/pkg/validation"
)
@@ -433,10 +432,6 @@ func defaultIngesterTestConfig(t testing.TB) Config {
return cfg
}
-func (s *testStore) PutWal(_ context.Context, _ *walsegment.SegmentWriter) error {
- return nil
-}
-
func (s *testStore) Put(ctx context.Context, chunks []chunk.Chunk) error {
s.mtx.Lock()
defer s.mtx.Unlock()
diff --git a/pkg/ingester/ingester_test.go b/pkg/ingester/ingester_test.go
index d9f924352d0f6..871a3082e0d3f 100644
--- a/pkg/ingester/ingester_test.go
+++ b/pkg/ingester/ingester_test.go
@@ -48,7 +48,6 @@ import (
"github.com/grafana/loki/v3/pkg/storage/stores/index/seriesvolume"
"github.com/grafana/loki/v3/pkg/storage/stores/index/stats"
"github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb/sharding"
- "github.com/grafana/loki/v3/pkg/storage/wal"
"github.com/grafana/loki/v3/pkg/util/constants"
"github.com/grafana/loki/v3/pkg/validation"
)
@@ -436,10 +435,6 @@ type mockStore struct {
chunks map[string][]chunk.Chunk
}
-func (s *mockStore) PutWal(_ context.Context, _ *wal.SegmentWriter) error {
- return nil
-}
-
func (s *mockStore) Put(ctx context.Context, chunks []chunk.Chunk) error {
s.mtx.Lock()
defer s.mtx.Unlock()
diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go
index 9734d29a7e031..f7fb901e0cb96 100644
--- a/pkg/loki/modules.go
+++ b/pkg/loki/modules.go
@@ -644,7 +644,7 @@ func (t *Loki) initIngesterRF1() (_ services.Service, err error) {
level.Warn(util_log.Logger).Log("msg", "The config setting shutdown marker path is not set. The /ingester/prepare_shutdown endpoint won't work")
}
- t.IngesterRF1, err = ingester_rf1.New(t.Cfg.IngesterRF1, t.Cfg.IngesterRF1Client, t.Store, t.Overrides, t.tenantConfigs, prometheus.DefaultRegisterer, t.Cfg.Distributor.WriteFailuresLogging, t.Cfg.MetricsNamespace, logger, t.UsageTracker, t.ring)
+ t.IngesterRF1, err = ingester_rf1.New(t.Cfg.IngesterRF1, t.Cfg.IngesterRF1Client, t.Cfg.SchemaConfig.Configs, t.Cfg.StorageConfig, t.ClientMetrics, t.Overrides, t.tenantConfigs, prometheus.DefaultRegisterer, t.Cfg.Distributor.WriteFailuresLogging, t.Cfg.MetricsNamespace, logger, t.UsageTracker, t.ring)
if err != nil {
fmt.Println("Error initializing ingester rf1", err)
return
diff --git a/pkg/querier/querier_mock_test.go b/pkg/querier/querier_mock_test.go
index 60d26fee28d2a..20c3b9f1b77c2 100644
--- a/pkg/querier/querier_mock_test.go
+++ b/pkg/querier/querier_mock_test.go
@@ -8,7 +8,6 @@ import (
"time"
"github.com/grafana/loki/v3/pkg/logql/log"
- "github.com/grafana/loki/v3/pkg/storage/wal"
"github.com/grafana/loki/v3/pkg/loghttp"
@@ -340,9 +339,6 @@ func (s *storeMock) GetChunks(ctx context.Context, userID string, from, through
return args.Get(0).([][]chunk.Chunk), args.Get(0).([]*fetcher.Fetcher), args.Error(2)
}
-func (s *storeMock) PutWal(_ context.Context, _ *wal.SegmentWriter) error {
- return errors.New("storeMock.PutWal() has not been mocked")
-}
func (s *storeMock) Put(_ context.Context, _ []chunk.Chunk) error {
return errors.New("storeMock.Put() has not been mocked")
}
diff --git a/pkg/storage/chunk/client/alibaba/oss_object_client.go b/pkg/storage/chunk/client/alibaba/oss_object_client.go
index 3e7674467ae30..d8446f6db9055 100644
--- a/pkg/storage/chunk/client/alibaba/oss_object_client.go
+++ b/pkg/storage/chunk/client/alibaba/oss_object_client.go
@@ -106,18 +106,16 @@ func (s *OssObjectClient) GetObject(ctx context.Context, objectKey string) (io.R
return nil, 0, err
}
return resp.Response.Body, int64(size), err
-
}
// PutObject puts the specified bytes into the configured OSS bucket at the provided key
-func (s *OssObjectClient) PutObject(ctx context.Context, objectKey string, object io.ReadSeeker) error {
+func (s *OssObjectClient) PutObject(ctx context.Context, objectKey string, object io.Reader) error {
return instrument.CollectedRequest(ctx, "OSS.PutObject", ossRequestDuration, instrument.ErrorCode, func(ctx context.Context) error {
if err := s.defaultBucket.PutObject(objectKey, object); err != nil {
return errors.Wrap(err, "failed to put oss object")
}
return nil
})
-
}
// List implements chunk.ObjectClient.
diff --git a/pkg/storage/chunk/client/aws/dynamodb_storage_client.go b/pkg/storage/chunk/client/aws/dynamodb_storage_client.go
index b70c4269ede23..87fd24e127db0 100644
--- a/pkg/storage/chunk/client/aws/dynamodb_storage_client.go
+++ b/pkg/storage/chunk/client/aws/dynamodb_storage_client.go
@@ -33,7 +33,6 @@ import (
client_util "github.com/grafana/loki/v3/pkg/storage/chunk/client/util"
"github.com/grafana/loki/v3/pkg/storage/config"
"github.com/grafana/loki/v3/pkg/storage/stores/series/index"
- "github.com/grafana/loki/v3/pkg/storage/wal"
"github.com/grafana/loki/v3/pkg/util"
"github.com/grafana/loki/v3/pkg/util/log"
"github.com/grafana/loki/v3/pkg/util/math"
@@ -119,10 +118,6 @@ type dynamoDBStorageClient struct {
metrics *dynamoDBMetrics
}
-func (a dynamoDBStorageClient) PutWal(_ context.Context, _ *wal.SegmentWriter) error {
- return errors.New("not implemented")
-}
-
// NewDynamoDBIndexClient makes a new DynamoDB-backed IndexClient.
func NewDynamoDBIndexClient(cfg DynamoDBConfig, schemaCfg config.SchemaConfig, reg prometheus.Registerer) (index.Client, error) {
return newDynamoDBStorageClient(cfg, schemaCfg, reg)
diff --git a/pkg/storage/chunk/client/aws/s3_storage_client.go b/pkg/storage/chunk/client/aws/s3_storage_client.go
index bae0fce22df7f..c2a50dd16ff6a 100644
--- a/pkg/storage/chunk/client/aws/s3_storage_client.go
+++ b/pkg/storage/chunk/client/aws/s3_storage_client.go
@@ -30,6 +30,7 @@ import (
bucket_s3 "github.com/grafana/loki/v3/pkg/storage/bucket/s3"
"github.com/grafana/loki/v3/pkg/storage/chunk/client"
"github.com/grafana/loki/v3/pkg/storage/chunk/client/hedging"
+ clientutil "github.com/grafana/loki/v3/pkg/storage/chunk/client/util"
storageawscommon "github.com/grafana/loki/v3/pkg/storage/common/aws"
"github.com/grafana/loki/v3/pkg/util"
"github.com/grafana/loki/v3/pkg/util/constants"
@@ -309,7 +310,6 @@ func (a *S3ObjectClient) ObjectExists(ctx context.Context, objectKey string) (bo
_, err := a.S3.HeadObject(headObjectInput)
return err
})
-
if err != nil {
return false, err
}
@@ -381,10 +381,14 @@ func (a *S3ObjectClient) GetObject(ctx context.Context, objectKey string) (io.Re
}
// PutObject into the store
-func (a *S3ObjectClient) PutObject(ctx context.Context, objectKey string, object io.ReadSeeker) error {
+func (a *S3ObjectClient) PutObject(ctx context.Context, objectKey string, object io.Reader) error {
return loki_instrument.TimeRequest(ctx, "S3.PutObject", s3RequestDuration, instrument.ErrorCode, func(ctx context.Context) error {
+ readSeeker, err := clientutil.ReadSeeker(object)
+ if err != nil {
+ return err
+ }
putObjectInput := &s3.PutObjectInput{
- Body: object,
+ Body: readSeeker,
Bucket: aws.String(a.bucketFromKey(objectKey)),
Key: aws.String(objectKey),
StorageClass: aws.String(a.cfg.StorageClass),
@@ -396,7 +400,7 @@ func (a *S3ObjectClient) PutObject(ctx context.Context, objectKey string, object
putObjectInput.SSEKMSEncryptionContext = a.sseConfig.KMSEncryptionContext
}
- _, err := a.S3.PutObjectWithContext(ctx, putObjectInput)
+ _, err = a.S3.PutObjectWithContext(ctx, putObjectInput)
return err
})
}
@@ -405,7 +409,7 @@ func (a *S3ObjectClient) PutObject(ctx context.Context, objectKey string, object
func (a *S3ObjectClient) List(ctx context.Context, prefix, delimiter string) ([]client.StorageObject, []client.StorageCommonPrefix, error) {
var storageObjects []client.StorageObject
var commonPrefixes []client.StorageCommonPrefix
- var commonPrefixesSet = make(map[string]bool)
+ commonPrefixesSet := make(map[string]bool)
for i := range a.bucketNames {
err := loki_instrument.TimeRequest(ctx, "S3.List", s3RequestDuration, instrument.ErrorCode, func(ctx context.Context) error {
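
Backends that still require seekable input (S3 here, COS further down) adapt the widened io.Reader through a helper; presumably it passes the value through when it already seeks and buffers it otherwise. A standalone sketch of such an adapter — the real clientutil.ReadSeeker may differ:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// asReadSeeker returns r unchanged when it already supports seeking and
// otherwise buffers the remaining bytes in memory. This mirrors the shape
// of the helper used above, not its exact implementation.
func asReadSeeker(r io.Reader) (io.ReadSeeker, error) {
	if rs, ok := r.(io.ReadSeeker); ok {
		return rs, nil
	}
	data, err := io.ReadAll(r)
	if err != nil {
		return nil, err
	}
	return bytes.NewReader(data), nil
}

func main() {
	// Already seekable: returned as-is, nothing is buffered.
	rs1, _ := asReadSeeker(strings.NewReader("seekable"))
	// Not seekable: buffered into a bytes.Reader.
	rs2, _ := asReadSeeker(io.MultiReader(strings.NewReader("not "), strings.NewReader("seekable")))

	b1, _ := io.ReadAll(rs1)
	b2, _ := io.ReadAll(rs2)
	fmt.Println(string(b1), "|", string(b2))
}
```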
diff --git a/pkg/storage/chunk/client/azure/blob_storage_client.go b/pkg/storage/chunk/client/azure/blob_storage_client.go
index 7c5f5bb496ca0..f1e93b993d56f 100644
--- a/pkg/storage/chunk/client/azure/blob_storage_client.go
+++ b/pkg/storage/chunk/client/azure/blob_storage_client.go
@@ -229,7 +229,6 @@ func (b *BlobStorage) ObjectExists(ctx context.Context, objectKey string) (bool,
_, err = blockBlobURL.GetProperties(ctx, azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{})
return err
})
-
if err != nil {
return false, err
}
@@ -278,7 +277,7 @@ func (b *BlobStorage) getObject(ctx context.Context, objectKey string) (rc io.Re
return downloadResponse.Body(azblob.RetryReaderOptions{MaxRetryRequests: b.cfg.MaxRetries}), downloadResponse.ContentLength(), nil
}
-func (b *BlobStorage) PutObject(ctx context.Context, objectKey string, object io.ReadSeeker) error {
+func (b *BlobStorage) PutObject(ctx context.Context, objectKey string, object io.Reader) error {
return loki_instrument.TimeRequest(ctx, "azure.PutObject", instrument.NewHistogramCollector(b.metrics.requestDuration), instrument.ErrorCode, func(ctx context.Context) error {
blockBlobURL, err := b.getBlobURL(objectKey, false)
if err != nil {
diff --git a/pkg/storage/chunk/client/baidubce/bos_storage_client.go b/pkg/storage/chunk/client/baidubce/bos_storage_client.go
index 30a9e97f4955f..b9abd8c90dbf7 100644
--- a/pkg/storage/chunk/client/baidubce/bos_storage_client.go
+++ b/pkg/storage/chunk/client/baidubce/bos_storage_client.go
@@ -79,7 +79,7 @@ func NewBOSObjectStorage(cfg *BOSStorageConfig) (*BOSObjectStorage, error) {
}, nil
}
-func (b *BOSObjectStorage) PutObject(ctx context.Context, objectKey string, object io.ReadSeeker) error {
+func (b *BOSObjectStorage) PutObject(ctx context.Context, objectKey string, object io.Reader) error {
return instrument.CollectedRequest(ctx, "BOS.PutObject", bosRequestDuration, instrument.ErrorCode, func(ctx context.Context) error {
body, err := bce.NewBodyFromSizedReader(object, -1)
if err != nil {
diff --git a/pkg/storage/chunk/client/cassandra/storage_client.go b/pkg/storage/chunk/client/cassandra/storage_client.go
index 732491de2df8a..70551591f63f5 100644
--- a/pkg/storage/chunk/client/cassandra/storage_client.go
+++ b/pkg/storage/chunk/client/cassandra/storage_client.go
@@ -23,7 +23,6 @@ import (
"github.com/grafana/loki/v3/pkg/storage/chunk/client/util"
"github.com/grafana/loki/v3/pkg/storage/config"
"github.com/grafana/loki/v3/pkg/storage/stores/series/index"
- "github.com/grafana/loki/v3/pkg/storage/wal"
util_log "github.com/grafana/loki/v3/pkg/util/log"
)
@@ -544,6 +543,7 @@ func NewObjectClient(cfg Config, schemaCfg config.SchemaConfig, registerer prome
}
return client, nil
}
+
func (s *ObjectClient) reconnectWriteSession() error {
s.writeMtx.Lock()
defer s.writeMtx.Unlock()
@@ -568,10 +568,6 @@ func (s *ObjectClient) reconnectReadSession() error {
return nil
}
-func (s *ObjectClient) PutWal(_ context.Context, _ *wal.SegmentWriter) error {
- return errors.New("not implemented")
-}
-
// PutChunks implements chunk.ObjectClient.
func (s *ObjectClient) PutChunks(ctx context.Context, chunks []chunk.Chunk) error {
err := s.putChunks(ctx, chunks)
diff --git a/pkg/storage/chunk/client/client.go b/pkg/storage/chunk/client/client.go
index 800086c6616be..36b65d40b6c2e 100644
--- a/pkg/storage/chunk/client/client.go
+++ b/pkg/storage/chunk/client/client.go
@@ -6,7 +6,6 @@ import (
"github.com/grafana/loki/v3/pkg/storage/chunk"
"github.com/grafana/loki/v3/pkg/storage/stores/series/index"
- "github.com/grafana/loki/v3/pkg/storage/wal"
)
var (
@@ -19,7 +18,6 @@ var (
// Client is for storing and retrieving chunks.
type Client interface {
Stop()
- PutWal(ctx context.Context, writer *wal.SegmentWriter) error
PutChunks(ctx context.Context, chunks []chunk.Chunk) error
GetChunks(ctx context.Context, chunks []chunk.Chunk) ([]chunk.Chunk, error)
DeleteChunk(ctx context.Context, userID, chunkID string) error
diff --git a/pkg/storage/chunk/client/congestion/controller.go b/pkg/storage/chunk/client/congestion/controller.go
index e7f29fab47638..6a60a2ce7aeaa 100644
--- a/pkg/storage/chunk/client/congestion/controller.go
+++ b/pkg/storage/chunk/client/congestion/controller.go
@@ -83,7 +83,7 @@ func (a *AIMDController) withLogger(logger log.Logger) Controller {
return a
}
-func (a *AIMDController) PutObject(ctx context.Context, objectKey string, object io.ReadSeeker) error {
+func (a *AIMDController) PutObject(ctx context.Context, objectKey string, object io.Reader) error {
return a.inner.PutObject(ctx, objectKey, object)
}
@@ -208,11 +208,12 @@ func NewNoopController(Config) *NoopController {
return &NoopController{}
}
-func (n *NoopController) ObjectExists(context.Context, string) (bool, error) { return true, nil }
-func (n *NoopController) PutObject(context.Context, string, io.ReadSeeker) error { return nil }
+func (n *NoopController) ObjectExists(context.Context, string) (bool, error) { return true, nil }
+func (n *NoopController) PutObject(context.Context, string, io.Reader) error { return nil }
func (n *NoopController) GetObject(context.Context, string) (io.ReadCloser, int64, error) {
return nil, 0, nil
}
+
func (n *NoopController) List(context.Context, string, string) ([]client.StorageObject, []client.StorageCommonPrefix, error) {
return nil, nil, nil
}
@@ -226,14 +227,17 @@ func (n *NoopController) withLogger(logger log.Logger) Controller {
n.logger = logger
return n
}
+
func (n *NoopController) withRetrier(r Retrier) Controller {
n.retrier = r
return n
}
+
func (n *NoopController) withHedger(h Hedger) Controller {
n.hedger = h
return n
}
+
func (n *NoopController) withMetrics(m *Metrics) Controller {
n.metrics = m
return n
diff --git a/pkg/storage/chunk/client/congestion/controller_test.go b/pkg/storage/chunk/client/congestion/controller_test.go
index 49edfe563ae99..23fa8a4196763 100644
--- a/pkg/storage/chunk/client/congestion/controller_test.go
+++ b/pkg/storage/chunk/client/congestion/controller_test.go
@@ -247,7 +247,7 @@ type mockObjectClient struct {
nonRetryableErrs bool
}
-func (m *mockObjectClient) PutObject(context.Context, string, io.ReadSeeker) error {
+func (m *mockObjectClient) PutObject(context.Context, string, io.Reader) error {
panic("not implemented")
}
diff --git a/pkg/storage/chunk/client/gcp/bigtable_object_client.go b/pkg/storage/chunk/client/gcp/bigtable_object_client.go
index 992e4bff926e0..d878bc19bccf0 100644
--- a/pkg/storage/chunk/client/gcp/bigtable_object_client.go
+++ b/pkg/storage/chunk/client/gcp/bigtable_object_client.go
@@ -12,7 +12,6 @@ import (
"github.com/grafana/loki/v3/pkg/storage/chunk"
"github.com/grafana/loki/v3/pkg/storage/chunk/client"
"github.com/grafana/loki/v3/pkg/storage/config"
- "github.com/grafana/loki/v3/pkg/storage/wal"
"github.com/grafana/loki/v3/pkg/util/math"
)
@@ -84,10 +83,6 @@ func (s *bigtableObjectClient) PutChunks(ctx context.Context, chunks []chunk.Chu
return nil
}
-func (s *bigtableObjectClient) PutWal(_ context.Context, _ *wal.SegmentWriter) error {
- return errors.New("not implemented")
-}
-
func (s *bigtableObjectClient) GetChunks(ctx context.Context, input []chunk.Chunk) ([]chunk.Chunk, error) {
sp, ctx := ot.StartSpanFromContext(ctx, "GetChunks")
defer sp.Finish()
diff --git a/pkg/storage/chunk/client/gcp/gcs_object_client.go b/pkg/storage/chunk/client/gcp/gcs_object_client.go
index 2f724e159ae2b..57d26b334ece6 100644
--- a/pkg/storage/chunk/client/gcp/gcs_object_client.go
+++ b/pkg/storage/chunk/client/gcp/gcs_object_client.go
@@ -161,7 +161,7 @@ func (s *GCSObjectClient) getObject(ctx context.Context, objectKey string) (rc i
}
// PutObject puts the specified bytes into the configured GCS bucket at the provided key
-func (s *GCSObjectClient) PutObject(ctx context.Context, objectKey string, object io.ReadSeeker) error {
+func (s *GCSObjectClient) PutObject(ctx context.Context, objectKey string, object io.Reader) error {
writer := s.defaultBucket.Object(objectKey).NewWriter(ctx)
// Default GCSChunkSize is 8M and for each call, 8M is allocated xD
// By setting it to 0, we just upload the object in a single a request
diff --git a/pkg/storage/chunk/client/grpc/storage_client.go b/pkg/storage/chunk/client/grpc/storage_client.go
index 8c1284ba1de49..42ee00507e412 100644
--- a/pkg/storage/chunk/client/grpc/storage_client.go
+++ b/pkg/storage/chunk/client/grpc/storage_client.go
@@ -9,7 +9,6 @@ import (
"github.com/grafana/loki/v3/pkg/storage/chunk"
"github.com/grafana/loki/v3/pkg/storage/config"
- "github.com/grafana/loki/v3/pkg/storage/wal"
)
type StorageClient struct {
@@ -67,10 +66,6 @@ func (s *StorageClient) PutChunks(ctx context.Context, chunks []chunk.Chunk) err
return nil
}
-func (s *StorageClient) PutWal(_ context.Context, _ *wal.SegmentWriter) error {
- return errors.New("not implemented")
-}
-
func (s *StorageClient) DeleteChunk(ctx context.Context, _, chunkID string) error {
chunkInfo := &ChunkID{ChunkID: chunkID}
_, err := s.client.DeleteChunks(ctx, chunkInfo)
diff --git a/pkg/storage/chunk/client/ibmcloud/cos_object_client.go b/pkg/storage/chunk/client/ibmcloud/cos_object_client.go
index c9d534ae4163f..6126df1b23323 100644
--- a/pkg/storage/chunk/client/ibmcloud/cos_object_client.go
+++ b/pkg/storage/chunk/client/ibmcloud/cos_object_client.go
@@ -29,6 +29,7 @@ import (
"github.com/grafana/loki/v3/pkg/storage/chunk/client"
"github.com/grafana/loki/v3/pkg/storage/chunk/client/hedging"
+ "github.com/grafana/loki/v3/pkg/storage/chunk/client/util"
"github.com/grafana/loki/v3/pkg/util/constants"
"github.com/grafana/loki/v3/pkg/util/log"
)
@@ -327,7 +328,6 @@ func (c *COSObjectClient) ObjectExists(ctx context.Context, objectKey string) (b
})
return requestErr
})
-
if err != nil {
return false, err
}
@@ -337,7 +337,6 @@ func (c *COSObjectClient) ObjectExists(ctx context.Context, objectKey string) (b
// GetObject returns a reader and the size for the specified object key from the configured S3 bucket.
func (c *COSObjectClient) GetObject(ctx context.Context, objectKey string) (io.ReadCloser, int64, error) {
-
var resp *cos.GetObjectOutput
// Map the key into a bucket
@@ -370,15 +369,19 @@ func (c *COSObjectClient) GetObject(ctx context.Context, objectKey string) (io.R
}
// PutObject into the store
-func (c *COSObjectClient) PutObject(ctx context.Context, objectKey string, object io.ReadSeeker) error {
+func (c *COSObjectClient) PutObject(ctx context.Context, objectKey string, object io.Reader) error {
return instrument.CollectedRequest(ctx, "COS.PutObject", cosRequestDuration, instrument.ErrorCode, func(ctx context.Context) error {
+ readSeeker, err := util.ReadSeeker(object)
+ if err != nil {
+ return err
+ }
putObjectInput := &cos.PutObjectInput{
- Body: object,
+ Body: readSeeker,
Bucket: ibm.String(c.bucketFromKey(objectKey)),
Key: ibm.String(objectKey),
}
- _, err := c.cos.PutObjectWithContext(ctx, putObjectInput)
+ _, err = c.cos.PutObjectWithContext(ctx, putObjectInput)
return err
})
}
diff --git a/pkg/storage/chunk/client/local/fs_object_client.go b/pkg/storage/chunk/client/local/fs_object_client.go
index 41e911cb28c03..bde92c83025de 100644
--- a/pkg/storage/chunk/client/local/fs_object_client.go
+++ b/pkg/storage/chunk/client/local/fs_object_client.go
@@ -89,7 +89,7 @@ func (f *FSObjectClient) GetObject(_ context.Context, objectKey string) (io.Read
}
// PutObject into the store
-func (f *FSObjectClient) PutObject(_ context.Context, objectKey string, object io.ReadSeeker) error {
+func (f *FSObjectClient) PutObject(_ context.Context, objectKey string, object io.Reader) error {
fullPath := filepath.Join(f.cfg.Directory, filepath.FromSlash(objectKey))
err := util.EnsureDirectory(filepath.Dir(fullPath))
if err != nil {
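
For the filesystem backend the wider io.Reader is the natural fit, since a put is essentially an io.Copy into a file. A reduced standalone version of such a put — not the vendored client, which has its own directory handling and error paths:

```go
package main

import (
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"
)

// putObject writes the reader's contents to dir/objectKey, creating parent
// directories as needed. Reduced sketch only; no fsync or atomic rename.
func putObject(dir, objectKey string, object io.Reader) error {
	fullPath := filepath.Join(dir, filepath.FromSlash(objectKey))
	if err := os.MkdirAll(filepath.Dir(fullPath), 0o700); err != nil {
		return err
	}
	f, err := os.Create(fullPath)
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = io.Copy(f, object)
	return err
}

func main() {
	dir, _ := os.MkdirTemp("", "fs-object-client")
	defer os.RemoveAll(dir)
	if err := putObject(dir, "loki-v2/wal/anon/0001", strings.NewReader("payload")); err != nil {
		fmt.Println("put failed:", err)
		return
	}
	fmt.Println("wrote object under", dir)
}
```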
diff --git a/pkg/storage/chunk/client/metrics.go b/pkg/storage/chunk/client/metrics.go
index 5e1ba5b41869b..76ca20a1bac5f 100644
--- a/pkg/storage/chunk/client/metrics.go
+++ b/pkg/storage/chunk/client/metrics.go
@@ -7,7 +7,6 @@ import (
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/grafana/loki/v3/pkg/storage/chunk"
- "github.com/grafana/loki/v3/pkg/storage/wal"
"github.com/grafana/loki/v3/pkg/util/constants"
)
@@ -61,10 +60,6 @@ func (c MetricsChunkClient) Stop() {
c.Client.Stop()
}
-func (c MetricsChunkClient) PutWal(ctx context.Context, writer *wal.SegmentWriter) error {
- return c.Client.PutWal(ctx, writer)
-}
-
func (c MetricsChunkClient) PutChunks(ctx context.Context, chunks []chunk.Chunk) error {
if err := c.Client.PutChunks(ctx, chunks); err != nil {
return err
diff --git a/pkg/storage/chunk/client/object_client.go b/pkg/storage/chunk/client/object_client.go
index 7632647bd9102..460c9566f6e76 100644
--- a/pkg/storage/chunk/client/object_client.go
+++ b/pkg/storage/chunk/client/object_client.go
@@ -3,26 +3,23 @@ package client
import (
"bytes"
"context"
- "crypto/rand"
"encoding/base64"
"io"
"strings"
"time"
- "github.com/oklog/ulid"
"github.com/pkg/errors"
"github.com/grafana/loki/v3/pkg/storage/chunk"
"github.com/grafana/loki/v3/pkg/storage/chunk/client/util"
"github.com/grafana/loki/v3/pkg/storage/config"
- "github.com/grafana/loki/v3/pkg/storage/wal"
)
// ObjectClient is used to store arbitrary data in Object Store (S3/GCS/Azure/...)
type ObjectClient interface {
ObjectExists(ctx context.Context, objectKey string) (bool, error)
- PutObject(ctx context.Context, objectKey string, object io.ReadSeeker) error
+ PutObject(ctx context.Context, objectKey string, object io.Reader) error
// NOTE: The consumer of GetObject should always call the Close method when it is done reading which otherwise could cause a resource leak.
GetObject(ctx context.Context, objectKey string) (io.ReadCloser, int64, error)
@@ -108,19 +105,6 @@ func (o *client) Stop() {
o.store.Stop()
}
-func (o *client) PutWal(ctx context.Context, segment *wal.SegmentWriter) error {
- reader, err := segment.ToReader()
- if err != nil {
- return err
- }
- defer func(reader io.ReadSeekCloser) {
- _ = reader.Close()
- }(reader)
-
- newUlid := ulid.MustNew(ulid.Timestamp(time.Now()), rand.Reader)
- return o.store.PutObject(ctx, "loki-v2/wal/anon/"+newUlid.String(), reader)
-}
-
// PutChunks stores the provided chunks in the configured backend. If multiple errors are
// returned, the last one sequentially will be propagated up.
func (o *client) PutChunks(ctx context.Context, chunks []chunk.Chunk) error {
diff --git a/pkg/storage/chunk/client/openstack/swift_object_client.go b/pkg/storage/chunk/client/openstack/swift_object_client.go
index 96b836b0a909f..50ac21c12a753 100644
--- a/pkg/storage/chunk/client/openstack/swift_object_client.go
+++ b/pkg/storage/chunk/client/openstack/swift_object_client.go
@@ -145,7 +145,7 @@ func (s *SwiftObjectClient) GetObject(_ context.Context, objectKey string) (io.R
}
// PutObject puts the specified bytes into the configured Swift container at the provided key
-func (s *SwiftObjectClient) PutObject(_ context.Context, objectKey string, object io.ReadSeeker) error {
+func (s *SwiftObjectClient) PutObject(_ context.Context, objectKey string, object io.Reader) error {
_, err := s.conn.ObjectPut(s.cfg.ContainerName, objectKey, object, false, "", "", nil)
return err
}
diff --git a/pkg/storage/chunk/client/prefixed_object_client.go b/pkg/storage/chunk/client/prefixed_object_client.go
index aa792b21b9a76..1f887a64e2718 100644
--- a/pkg/storage/chunk/client/prefixed_object_client.go
+++ b/pkg/storage/chunk/client/prefixed_object_client.go
@@ -15,7 +15,7 @@ func NewPrefixedObjectClient(downstreamClient ObjectClient, prefix string) Objec
return PrefixedObjectClient{downstreamClient: downstreamClient, prefix: prefix}
}
-func (p PrefixedObjectClient) PutObject(ctx context.Context, objectKey string, object io.ReadSeeker) error {
+func (p PrefixedObjectClient) PutObject(ctx context.Context, objectKey string, object io.Reader) error {
return p.downstreamClient.PutObject(ctx, p.prefix+objectKey, object)
}
diff --git a/pkg/storage/chunk/client/testutils/inmemory_storage_client.go b/pkg/storage/chunk/client/testutils/inmemory_storage_client.go
index 5f2a95da76fdf..2056703522d11 100644
--- a/pkg/storage/chunk/client/testutils/inmemory_storage_client.go
+++ b/pkg/storage/chunk/client/testutils/inmemory_storage_client.go
@@ -426,7 +426,7 @@ func (m *InMemoryObjectClient) GetObject(_ context.Context, objectKey string) (i
}
// PutObject implements client.ObjectClient.
-func (m *InMemoryObjectClient) PutObject(_ context.Context, objectKey string, object io.ReadSeeker) error {
+func (m *InMemoryObjectClient) PutObject(_ context.Context, objectKey string, object io.Reader) error {
buf, err := io.ReadAll(object)
if err != nil {
return err
diff --git a/pkg/storage/chunk/client/util/reader.go b/pkg/storage/chunk/client/util/reader.go
new file mode 100644
index 0000000000000..2459b1e9ea43a
--- /dev/null
+++ b/pkg/storage/chunk/client/util/reader.go
@@ -0,0 +1,17 @@
+package util
+
+import (
+ "bytes"
+ "io"
+)
+
+func ReadSeeker(r io.Reader) (io.ReadSeeker, error) {
+ if rs, ok := r.(io.ReadSeeker); ok {
+ return rs, nil
+ }
+ data, err := io.ReadAll(r)
+ if err != nil {
+ return nil, err
+ }
+ return bytes.NewReader(data), nil
+}
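The new util.ReadSeeker helper is what lets PutObject relax its signature to io.Reader while backends that still need a seekable body (the COS client above, for example) keep working. A self-contained sketch of the same adapter pattern, with illustrative names only:

// Minimal, self-contained sketch of the adapter pattern added in util/reader.go:
// hand back the reader unchanged if it already seeks, otherwise buffer it once.
package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// readSeeker mirrors the new util.ReadSeeker helper shown in the diff above.
func readSeeker(r io.Reader) (io.ReadSeeker, error) {
	if rs, ok := r.(io.ReadSeeker); ok {
		return rs, nil // already seekable, no copy needed
	}
	data, err := io.ReadAll(r) // buffer once so the body can be re-read
	if err != nil {
		return nil, err
	}
	return bytes.NewReader(data), nil
}

func main() {
	// strings.Reader already implements io.Seeker; a pipe or network body
	// would fall into the buffering branch instead.
	rs, err := readSeeker(strings.NewReader("payload"))
	if err != nil {
		panic(err)
	}
	_, _ = rs.Seek(0, io.SeekStart)
	b, _ := io.ReadAll(rs)
	fmt.Println(string(b)) // payload
}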
diff --git a/pkg/storage/store.go b/pkg/storage/store.go
index 2f5d88941f36a..db4a0a498e17d 100644
--- a/pkg/storage/store.go
+++ b/pkg/storage/store.go
@@ -7,7 +7,6 @@ import (
"time"
"github.com/grafana/loki/v3/pkg/storage/types"
- "github.com/grafana/loki/v3/pkg/storage/wal"
"github.com/grafana/loki/v3/pkg/util/httpreq"
lokilog "github.com/grafana/loki/v3/pkg/logql/log"
@@ -609,7 +608,3 @@ func (f failingChunkWriter) Put(_ context.Context, _ []chunk.Chunk) error {
func (f failingChunkWriter) PutOne(_ context.Context, _, _ model.Time, _ chunk.Chunk) error {
return errWritingChunkUnsupported
}
-
-func (f failingChunkWriter) PutWal(_ context.Context, _ *wal.SegmentWriter) error {
- return errWritingChunkUnsupported
-}
diff --git a/pkg/storage/stores/composite_store.go b/pkg/storage/stores/composite_store.go
index 484d8574f3cb3..182a686c889b8 100644
--- a/pkg/storage/stores/composite_store.go
+++ b/pkg/storage/stores/composite_store.go
@@ -15,16 +15,10 @@ import (
"github.com/grafana/loki/v3/pkg/storage/stores/index/stats"
tsdb_index "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb/index"
"github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb/sharding"
- "github.com/grafana/loki/v3/pkg/storage/wal"
"github.com/grafana/loki/v3/pkg/util"
)
-type WalSegmentWriter interface {
- PutWal(ctx context.Context, writer *wal.SegmentWriter) error
-}
-
type ChunkWriter interface {
- WalSegmentWriter
Put(ctx context.Context, chunks []chunk.Chunk) error
PutOne(ctx context.Context, from, through model.Time, chunk chunk.Chunk) error
}
@@ -51,7 +45,6 @@ type Store interface {
ChunkWriter
ChunkFetcher
ChunkFetcherProvider
- WalSegmentWriter
Stop()
}
@@ -95,12 +88,6 @@ func (c *CompositeStore) Stores() []Store {
return stores
}
-func (c CompositeStore) PutWal(ctx context.Context, writer *wal.SegmentWriter) error {
- // TODO: Understand how to use the forStores method to correctly pick a store for this
- err := c.stores[0].PutWal(ctx, writer)
- return err
-}
-
func (c CompositeStore) Put(ctx context.Context, chunks []chunk.Chunk) error {
for _, chunk := range chunks {
err := c.forStores(ctx, chunk.From, chunk.Through, func(innerCtx context.Context, from, through model.Time, store Store) error {
@@ -210,10 +197,8 @@ func (c CompositeStore) Stats(ctx context.Context, userID string, from, through
xs = append(xs, x)
return err
})
-
if err != nil {
return nil, err
-
}
res := stats.MergeStats(xs...)
return &res, err
@@ -226,7 +211,6 @@ func (c CompositeStore) Volume(ctx context.Context, userID string, from, through
volumes = append(volumes, volume)
return err
})
-
if err != nil {
return nil, err
}
@@ -254,7 +238,6 @@ func (c CompositeStore) GetShards(
groups = append(groups, shards)
return nil
})
-
if err != nil {
return nil, err
}
diff --git a/pkg/storage/stores/composite_store_test.go b/pkg/storage/stores/composite_store_test.go
index 064e19ca8bbf9..90062add1552d 100644
--- a/pkg/storage/stores/composite_store_test.go
+++ b/pkg/storage/stores/composite_store_test.go
@@ -9,7 +9,6 @@ import (
"github.com/pkg/errors"
"github.com/grafana/loki/v3/pkg/logproto"
- "github.com/grafana/loki/v3/pkg/storage/wal"
"github.com/grafana/dskit/test"
"github.com/prometheus/common/model"
@@ -24,10 +23,6 @@ import (
type mockStore int
-func (m mockStore) PutWal(_ context.Context, _ *wal.SegmentWriter) error {
- return nil
-}
-
func (m mockStore) Put(_ context.Context, _ []chunk.Chunk) error {
return nil
}
@@ -357,7 +352,6 @@ func TestVolume(t *testing.T) {
require.Error(t, err, "something bad")
require.Nil(t, volumes)
})
-
}
func TestFilterForTimeRange(t *testing.T) {
diff --git a/pkg/storage/stores/series_store_write.go b/pkg/storage/stores/series_store_write.go
index 2b134472eb2ea..a36ae4510b8e3 100644
--- a/pkg/storage/stores/series_store_write.go
+++ b/pkg/storage/stores/series_store_write.go
@@ -13,7 +13,6 @@ import (
"github.com/grafana/loki/v3/pkg/storage/chunk/fetcher"
"github.com/grafana/loki/v3/pkg/storage/config"
"github.com/grafana/loki/v3/pkg/storage/stores/index"
- "github.com/grafana/loki/v3/pkg/storage/wal"
"github.com/grafana/loki/v3/pkg/util/constants"
"github.com/grafana/loki/v3/pkg/util/spanlogger"
)
@@ -66,10 +65,6 @@ func (c *Writer) Put(ctx context.Context, chunks []chunk.Chunk) error {
return nil
}
-func (c *Writer) PutWal(ctx context.Context, segment *wal.SegmentWriter) error {
- return c.fetcher.Client().PutWal(ctx, segment)
-}
-
// PutOne implements Store
func (c *Writer) PutOne(ctx context.Context, from, through model.Time, chk chunk.Chunk) error {
sp, ctx := opentracing.StartSpanFromContext(ctx, "SeriesStore.PutOne")
diff --git a/pkg/storage/stores/series_store_write_test.go b/pkg/storage/stores/series_store_write_test.go
index 882fbaa00908b..f58ec1a730c52 100644
--- a/pkg/storage/stores/series_store_write_test.go
+++ b/pkg/storage/stores/series_store_write_test.go
@@ -13,7 +13,6 @@ import (
"github.com/grafana/loki/v3/pkg/storage/chunk"
"github.com/grafana/loki/v3/pkg/storage/chunk/fetcher"
"github.com/grafana/loki/v3/pkg/storage/config"
- "github.com/grafana/loki/v3/pkg/storage/wal"
)
type mockCache struct {
@@ -56,10 +55,6 @@ type mockChunksClient struct {
called int
}
-func (m *mockChunksClient) PutWal(_ context.Context, _ *wal.SegmentWriter) error {
- return nil
-}
-
func (m *mockChunksClient) PutChunks(_ context.Context, _ []chunk.Chunk) error {
m.called++
return nil
@@ -67,15 +62,19 @@ func (m *mockChunksClient) PutChunks(_ context.Context, _ []chunk.Chunk) error {
func (m *mockChunksClient) Stop() {
}
+
func (m *mockChunksClient) GetChunks(_ context.Context, _ []chunk.Chunk) ([]chunk.Chunk, error) {
panic("GetChunks not implemented")
}
+
func (m *mockChunksClient) DeleteChunk(_ context.Context, _, _ string) error {
panic("DeleteChunk not implemented")
}
+
func (m *mockChunksClient) IsChunkNotFoundErr(_ error) bool {
panic("IsChunkNotFoundErr not implemented")
}
+
func (m *mockChunksClient) IsRetryableErr(_ error) bool {
panic("IsRetryableErr not implemented")
}
diff --git a/pkg/storage/util_test.go b/pkg/storage/util_test.go
index b359132408902..5ef02e74b1caf 100644
--- a/pkg/storage/util_test.go
+++ b/pkg/storage/util_test.go
@@ -26,7 +26,6 @@ import (
"github.com/grafana/loki/v3/pkg/storage/stores"
index_stats "github.com/grafana/loki/v3/pkg/storage/stores/index/stats"
"github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb/sharding"
- "github.com/grafana/loki/v3/pkg/storage/wal"
loki_util "github.com/grafana/loki/v3/pkg/util"
"github.com/grafana/loki/v3/pkg/util/constants"
util_log "github.com/grafana/loki/v3/pkg/util/log"
@@ -186,8 +185,7 @@ func newMockChunkStore(chunkFormat byte, headfmt chunkenc.HeadBlockFmt, streams
return &mockChunkStore{schemas: config.SchemaConfig{}, chunks: chunks, client: &mockChunkStoreClient{chunks: chunks, scfg: config.SchemaConfig{}}}
}
-func (m *mockChunkStore) PutWal(_ context.Context, _ *wal.SegmentWriter) error { return nil }
-func (m *mockChunkStore) Put(_ context.Context, _ []chunk.Chunk) error { return nil }
+func (m *mockChunkStore) Put(_ context.Context, _ []chunk.Chunk) error { return nil }
func (m *mockChunkStore) PutOne(_ context.Context, _, _ model.Time, _ chunk.Chunk) error {
return nil
}
@@ -294,7 +292,6 @@ func (m mockChunkStoreClient) Stop() {
panic("implement me")
}
-func (m mockChunkStoreClient) PutWal(_ context.Context, _ *wal.SegmentWriter) error { return nil }
func (m mockChunkStoreClient) PutChunks(_ context.Context, _ []chunk.Chunk) error {
return nil
}
diff --git a/pkg/storage/wal/segment.go b/pkg/storage/wal/segment.go
index fa8e42cc94a5d..e9fb2d51ea15b 100644
--- a/pkg/storage/wal/segment.go
+++ b/pkg/storage/wal/segment.go
@@ -20,7 +20,6 @@ import (
"github.com/grafana/loki/v3/pkg/storage/wal/chunks"
"github.com/grafana/loki/v3/pkg/storage/wal/index"
"github.com/grafana/loki/v3/pkg/util/encoding"
- "github.com/grafana/loki/v3/pkg/util/pool"
)
// LOKW is the magic number for the Loki WAL format.
@@ -35,10 +34,7 @@ var (
}
},
}
-
- // 512kb - 20 mb
- encodedWalSegmentBufferPool = pool.NewBuffer(512*1024, 20*1024*1024, 2)
- tenantLabel = "__loki_tenant__"
+ tenantLabel = "__loki_tenant__"
)
func init() {
@@ -276,45 +272,30 @@ func (b *SegmentWriter) Reset() {
b.inputSize.Store(0)
}
-func (b *SegmentWriter) ToReader() (io.ReadSeekCloser, error) {
- // snappy compression rate is ~5x , but we can not predict it, so we need to allocate bigger buffer to avoid allocations
- buffer := encodedWalSegmentBufferPool.Get(int(b.inputSize.Load() / 3))
- _, err := b.WriteTo(buffer)
- if err != nil {
- return nil, fmt.Errorf("failed to write segment to create a reader: %w", err)
- }
- return NewEncodedSegmentReader(buffer), nil
-}
-
-var (
- _ io.ReadSeekCloser = &EncodedSegmentReader{}
-)
-
type EncodedSegmentReader struct {
- delegate io.ReadSeeker
- encodedContent *bytes.Buffer
+ *io.PipeReader
+ *io.PipeWriter
}
-func NewEncodedSegmentReader(encodedContent *bytes.Buffer) *EncodedSegmentReader {
- return &EncodedSegmentReader{
- encodedContent: encodedContent,
- delegate: bytes.NewReader(encodedContent.Bytes()),
+func (e *EncodedSegmentReader) Close() error {
+ err := e.PipeWriter.Close()
+ if err != nil {
+ return err
}
+ err = e.PipeReader.Close()
+ if err != nil {
+ return err
+ }
+ return nil
}
-func (e *EncodedSegmentReader) Read(p []byte) (n int, err error) {
- return e.delegate.Read(p)
-}
-
-func (e *EncodedSegmentReader) Seek(offset int64, whence int) (int64, error) {
- return e.delegate.Seek(offset, whence)
-}
-
-func (e *EncodedSegmentReader) Close() error {
- encodedWalSegmentBufferPool.Put(e.encodedContent)
- e.encodedContent = nil
- e.delegate = nil
- return nil
+func (b *SegmentWriter) Reader() io.ReadCloser {
+ pr, pw := io.Pipe()
+ go func() {
+ _, err := b.WriteTo(pw)
+ pw.CloseWithError(err)
+ }()
+ return &EncodedSegmentReader{PipeReader: pr, PipeWriter: pw}
}
// InputSize returns the total size of the input data written to the writer.
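Replacing ToReader with the pipe-backed Reader() swaps the pooled in-memory buffer for streaming: WriteTo runs in a goroutine while the consumer reads bytes as they are produced. A stripped-down sketch of the pattern follows (the real EncodedSegmentReader also closes the writer half on Close):

// Illustrative sketch of the io.Pipe streaming pattern used by SegmentWriter.Reader.
// Any io.WriterTo works as the source; names here are invented for the example.
package main

import (
	"bytes"
	"fmt"
	"io"
)

func streamReader(src io.WriterTo) io.ReadCloser {
	pr, pw := io.Pipe()
	go func() {
		_, err := src.WriteTo(pw)
		// A nil err closes the pipe cleanly (the reader sees io.EOF); a non-nil
		// err is surfaced to the reading side on its next Read call.
		pw.CloseWithError(err)
	}()
	return pr
}

func main() {
	r := streamReader(bytes.NewBufferString("segment bytes"))
	defer r.Close()
	out, _ := io.ReadAll(r)
	fmt.Println(string(out)) // segment bytes
}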
diff --git a/pkg/storage/wal/segment_test.go b/pkg/storage/wal/segment_test.go
index 0e14028bd0531..beddd3d09f702 100644
--- a/pkg/storage/wal/segment_test.go
+++ b/pkg/storage/wal/segment_test.go
@@ -4,6 +4,7 @@ import (
"bytes"
"context"
"fmt"
+ "io"
"sort"
"sync"
"testing"
@@ -510,16 +511,15 @@ func BenchmarkWrites(b *testing.B) {
}
})
- bytesBuf := make([]byte, inputSize)
+ bytesBuf := make([]byte, encodedLength)
b.Run("Reader", func(b *testing.B) {
b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
var err error
- reader, err := writer.ToReader()
- require.NoError(b, err)
+ reader := writer.Reader()
- n, err := reader.Read(bytesBuf)
+ n, err := io.ReadFull(reader, bytesBuf)
require.NoError(b, err)
require.EqualValues(b, encodedLength, n)
require.NoError(b, reader.Close())
|
chore
|
Refactor storage interface for rf1 (#13415)
|
102e3ea07fcb053aa446c4d94215cdb899e5b1d5
|
2024-03-06 23:26:52
|
Owen Diehl
|
chore(blooms): use the right iter reference in blockLoadingIter.loadNext (#12146)
| false
|
diff --git a/pkg/bloomcompactor/batch.go b/pkg/bloomcompactor/batch.go
index 660f5642b4648..f5d4426e9ff9d 100644
--- a/pkg/bloomcompactor/batch.go
+++ b/pkg/bloomcompactor/batch.go
@@ -251,7 +251,7 @@ func (i *blockLoadingIter) loadNext() bool {
iters := make([]v1.PeekingIterator[*v1.SeriesWithBloom], 0, len(blockRefs))
for filtered.Next() {
- bq := loader.At()
+ bq := filtered.At()
i.loaded[bq] = struct{}{}
iter, err := bq.SeriesIter()
if err != nil {
|
chore
|
use the right iter reference in blockLoadingIter.loadNext (#12146)
|
d165161eddd8eeb0ef0eefb1b97296390b3c54f9
|
2024-11-18 23:40:34
|
renovate[bot]
|
chore(deps): update actions/setup-go action to v5 (#14971)
| false
|
diff --git a/.github/workflows/operator-bundle.yaml b/.github/workflows/operator-bundle.yaml
index 39630e45b8f8d..e99fc0865612f 100644
--- a/.github/workflows/operator-bundle.yaml
+++ b/.github/workflows/operator-bundle.yaml
@@ -19,7 +19,7 @@ jobs:
go: ['1.22']
steps:
- name: Set up Go 1.x
- uses: actions/setup-go@v4
+ uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go }}
id: go
diff --git a/.github/workflows/operator-scorecard.yaml b/.github/workflows/operator-scorecard.yaml
index 1a067a0ea1408..4fd8fe5852d39 100644
--- a/.github/workflows/operator-scorecard.yaml
+++ b/.github/workflows/operator-scorecard.yaml
@@ -19,7 +19,7 @@ jobs:
go: ['1.22']
steps:
- name: Set up Go 1.x
- uses: actions/setup-go@v4
+ uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go }}
id: go
diff --git a/.github/workflows/operator.yaml b/.github/workflows/operator.yaml
index 639746aeb5ac6..e751dd0fd7f6c 100644
--- a/.github/workflows/operator.yaml
+++ b/.github/workflows/operator.yaml
@@ -21,7 +21,7 @@ jobs:
- name: Install make
run: sudo apt-get install make
- name: Set up Go 1.x
- uses: actions/setup-go@v4
+ uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go }}
id: go
@@ -43,7 +43,7 @@ jobs:
- name: Install make
run: sudo apt-get install make
- name: Set up Go 1.x
- uses: actions/setup-go@v4
+ uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go }}
id: go
@@ -69,7 +69,7 @@ jobs:
- name: Install make
run: sudo apt-get install make
- name: Set up Go 1.x
- uses: actions/setup-go@v4
+ uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go }}
id: go
@@ -90,7 +90,7 @@ jobs:
- name: Install make
run: sudo apt-get install make
- name: Set up Go 1.x
- uses: actions/setup-go@v4
+ uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go }}
id: go
@@ -111,7 +111,7 @@ jobs:
- name: Install make
run: sudo apt-get install make
- name: Set up Go 1.x
- uses: actions/setup-go@v4
+ uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go }}
id: go
diff --git a/.github/workflows/promtail-windows-test.yml b/.github/workflows/promtail-windows-test.yml
index 90ccf72ad1bed..0aa131d769abb 100644
--- a/.github/workflows/promtail-windows-test.yml
+++ b/.github/workflows/promtail-windows-test.yml
@@ -14,7 +14,7 @@ jobs:
steps:
- uses: actions/checkout@v4
- name: Setup Go ${{ matrix.go-version }}
- uses: actions/setup-go@v4
+ uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
# You can test your matrix by printing the current Go version
|
chore
|
update actions/setup-go action to v5 (#14971)
|
6f491233cae226d54d190521d2b935249d88ad05
|
2024-09-03 17:59:06
|
renovate[bot]
|
fix(deps): update aws-sdk-go-v2 monorepo (#13986)
| false
|
diff --git a/tools/lambda-promtail/go.mod b/tools/lambda-promtail/go.mod
index fb35609574b72..9679448043513 100644
--- a/tools/lambda-promtail/go.mod
+++ b/tools/lambda-promtail/go.mod
@@ -5,8 +5,8 @@ go 1.22
require (
github.com/aws/aws-lambda-go v1.47.0
github.com/aws/aws-sdk-go-v2 v1.30.4
- github.com/aws/aws-sdk-go-v2/config v1.27.28
- github.com/aws/aws-sdk-go-v2/service/s3 v1.59.0
+ github.com/aws/aws-sdk-go-v2/config v1.27.31
+ github.com/aws/aws-sdk-go-v2/service/s3 v1.61.0
github.com/go-kit/log v0.2.1
github.com/gogo/protobuf v1.3.2
github.com/golang/snappy v0.0.4
@@ -24,7 +24,7 @@ require (
github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 // indirect
github.com/armon/go-metrics v0.4.1 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4 // indirect
- github.com/aws/aws-sdk-go-v2/credentials v1.17.28 // indirect
+ github.com/aws/aws-sdk-go-v2/credentials v1.17.30 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16 // indirect
@@ -36,7 +36,7 @@ require (
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.16 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.22.5 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.5 // indirect
- github.com/aws/aws-sdk-go-v2/service/sts v1.30.4 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sts v1.30.5 // indirect
github.com/aws/smithy-go v1.20.4 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/buger/jsonparser v1.1.1 // indirect
diff --git a/tools/lambda-promtail/go.sum b/tools/lambda-promtail/go.sum
index 2627682cc9454..17803d1c55389 100644
--- a/tools/lambda-promtail/go.sum
+++ b/tools/lambda-promtail/go.sum
@@ -52,10 +52,10 @@ github.com/aws/aws-sdk-go-v2 v1.30.4 h1:frhcagrVNrzmT95RJImMHgabt99vkXGslubDaDag
github.com/aws/aws-sdk-go-v2 v1.30.4/go.mod h1:CT+ZPWXbYrci8chcARI3OmI/qgd+f6WtuLOoaIA8PR0=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4 h1:70PVAiL15/aBMh5LThwgXdSQorVr91L127ttckI9QQU=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4/go.mod h1:/MQxMqci8tlqDH+pjmoLu1i0tbWCUP1hhyMRuFxpQCw=
-github.com/aws/aws-sdk-go-v2/config v1.27.28 h1:OTxWGW/91C61QlneCtnD62NLb4W616/NM1jA8LhJqbg=
-github.com/aws/aws-sdk-go-v2/config v1.27.28/go.mod h1:uzVRVtJSU5EFv6Fu82AoVFKozJi2ZCY6WRCXj06rbvs=
-github.com/aws/aws-sdk-go-v2/credentials v1.17.28 h1:m8+AHY/ND8CMHJnPoH7PJIRakWGa4gbfbxuY9TGTUXM=
-github.com/aws/aws-sdk-go-v2/credentials v1.17.28/go.mod h1:6TF7dSc78ehD1SL6KpRIPKMA1GyyWflIkjqg+qmf4+c=
+github.com/aws/aws-sdk-go-v2/config v1.27.31 h1:kxBoRsjhT3pq0cKthgj6RU6bXTm/2SgdoUMyrVw0rAI=
+github.com/aws/aws-sdk-go-v2/config v1.27.31/go.mod h1:z04nZdSWFPaDwK3DdJOG2r+scLQzMYuJeW0CujEm9FM=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.30 h1:aau/oYFtibVovr2rDt8FHlU17BTicFEMAi29V1U+L5Q=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.30/go.mod h1:BPJ/yXV92ZVq6G8uYvbU0gSl8q94UB63nMT5ctNO38g=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12 h1:yjwoSyDZF8Jth+mUk5lSPJCkMC0lMy6FaCD51jm6ayE=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12/go.mod h1:fuR57fAgMk7ot3WcNQfb6rSEn+SUffl7ri+aa8uKysI=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16 h1:TNyt/+X43KJ9IJJMjKfa3bNTiZbUP7DeCxfbTROESwY=
@@ -74,14 +74,14 @@ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.18 h1:tJ5RnkHC
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.18/go.mod h1:++NHzT+nAF7ZPrHPsA+ENvsXkOO8wEu+C6RXltAG4/c=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.16 h1:jg16PhLPUiHIj8zYIW6bqzeQSuHVEiWnGA0Brz5Xv2I=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.16/go.mod h1:Uyk1zE1VVdsHSU7096h/rwnXDzOzYQVl+FNPhPw7ShY=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.59.0 h1:Cso4Ev/XauMVsbwdhYEoxg8rxZWw43CFqqaPB5w3W2c=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.59.0/go.mod h1:BSPI0EfnYUuNHPS0uqIo5VrRwzie+Fp+YhQOUs16sKI=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.61.0 h1:Wb544Wh+xfSXqJ/j3R4aX9wrKUoZsJNmilBYZb3mKQ4=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.61.0/go.mod h1:BSPI0EfnYUuNHPS0uqIo5VrRwzie+Fp+YhQOUs16sKI=
github.com/aws/aws-sdk-go-v2/service/sso v1.22.5 h1:zCsFCKvbj25i7p1u94imVoO447I/sFv8qq+lGJhRN0c=
github.com/aws/aws-sdk-go-v2/service/sso v1.22.5/go.mod h1:ZeDX1SnKsVlejeuz41GiajjZpRSWR7/42q/EyA/QEiM=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.5 h1:SKvPgvdvmiTWoi0GAJ7AsJfOz3ngVkD/ERbs5pUnHNI=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.5/go.mod h1:20sz31hv/WsPa3HhU3hfrIet2kxM4Pe0r20eBZ20Tac=
-github.com/aws/aws-sdk-go-v2/service/sts v1.30.4 h1:iAckBT2OeEK/kBDyN/jDtpEExhjeeA/Im2q4X0rJZT8=
-github.com/aws/aws-sdk-go-v2/service/sts v1.30.4/go.mod h1:vmSqFK+BVIwVpDAGZB3CoCXHzurt4qBE8lf+I/kRTh0=
+github.com/aws/aws-sdk-go-v2/service/sts v1.30.5 h1:OMsEmCyz2i89XwRwPouAJvhj81wINh+4UK+k/0Yo/q8=
+github.com/aws/aws-sdk-go-v2/service/sts v1.30.5/go.mod h1:vmSqFK+BVIwVpDAGZB3CoCXHzurt4qBE8lf+I/kRTh0=
github.com/aws/smithy-go v1.20.4 h1:2HK1zBdPgRbjFOHlfeQZfpC4r72MOb9bZkiFwggKO+4=
github.com/aws/smithy-go v1.20.4/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
|
fix
|
update aws-sdk-go-v2 monorepo (#13986)
|
4593e581151287fe954f7207ced218764adb64f8
|
2019-04-02 14:52:35
|
Xiang Dai
|
logcli: remove config part
| false
|
diff --git a/cmd/logcli/config.go b/cmd/logcli/config.go
deleted file mode 100644
index f465470f3cd79..0000000000000
--- a/cmd/logcli/config.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package main
-
-// Config is the root config for Logcli.
-type Config struct {
- Addr string `yaml:"addr,omitempty"`
- Username string `yaml:"username,omitempty"`
- Password string `yaml:"password,omitempty"`
-}
-
-func getConfig(configFile string) (*Config, error) {
- var config Config
-
- // if not specify config file, keep same with default value
- if configFile == "" {
- config = Config{Addr: "https://logs-us-west1.grafana.net"}
- return &config, nil
- }
-
- return &config, nil
-}
diff --git a/cmd/logcli/logcli-config.yaml b/cmd/logcli/logcli-config.yaml
deleted file mode 100644
index 5e499bb6839c9..0000000000000
--- a/cmd/logcli/logcli-config.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-addr: https://logs-us-west1.grafana.net
-
-username:
-
-password:
diff --git a/cmd/logcli/main.go b/cmd/logcli/main.go
index 3ca1db269b296..e3b2e8982b7de 100644
--- a/cmd/logcli/main.go
+++ b/cmd/logcli/main.go
@@ -10,9 +10,9 @@ import (
var (
app = kingpin.New("logcli", "A command-line for loki.")
- config = app.Flag("config", "Logcli config.").Default("").String()
-
- addr, username, password *string
+ addr = app.Flag("addr", "Server address.").Default("https://logs-us-west1.grafana.net").Envar("GRAFANA_ADDR").String()
+ username = app.Flag("username", "Username for HTTP basic auth.").Default("").Envar("GRAFANA_USERNAME").String()
+ password = app.Flag("password", "Password for HTTP basic auth.").Default("").Envar("GRAFANA_PASSWORD").String()
queryCmd = app.Command("query", "Run a LogQL query.")
queryStr = queryCmd.Arg("query", "eg '{foo=\"bar\",baz=\"blip\"}'").Required().String()
@@ -27,17 +27,6 @@ var (
)
func main() {
- // get val from config file
- cfg, err := getConfig(*config)
- if err != nil {
- fmt.Println(err)
- os.Exit(1)
- }
-
- addr = app.Flag("addr", "Server address.").Default(cfg.Addr).Envar("GRAFANA_ADDR").String()
- username = app.Flag("username", "Username for HTTP basic auth.").Default(cfg.Username).Envar("GRAFANA_USERNAME").String()
- password = app.Flag("password", "Password for HTTP basic auth.").Default(cfg.Password).Envar("GRAFANA_PASSWORD").String()
-
switch kingpin.MustParse(app.Parse(os.Args[1:])) {
case queryCmd.FullCommand():
if *addr == "" {
|
logcli
|
remove config part
|
95dac7c16af9f2a73c8d8fab1db29bdecb8ad68a
|
2025-01-11 00:06:20
|
renovate[bot]
|
fix(deps): update module github.com/baidubce/bce-sdk-go to v0.9.213 (#15686)
| false
|
diff --git a/go.mod b/go.mod
index 35a7765481829..e8e0ef98a1e41 100644
--- a/go.mod
+++ b/go.mod
@@ -21,7 +21,7 @@ require (
github.com/alicebob/miniredis/v2 v2.34.0
github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible
github.com/aws/aws-sdk-go v1.55.5
- github.com/baidubce/bce-sdk-go v0.9.212
+ github.com/baidubce/bce-sdk-go v0.9.213
github.com/bmatcuk/doublestar/v4 v4.7.1
github.com/c2h5oh/datasize v0.0.0-20231215233829-aa82cc1e6500
github.com/cespare/xxhash/v2 v2.3.0
diff --git a/go.sum b/go.sum
index ac4401d52bbde..20884796f08f3 100644
--- a/go.sum
+++ b/go.sum
@@ -224,8 +224,8 @@ github.com/aws/smithy-go v1.22.1 h1:/HPHZQ0g7f4eUeK6HKglFz8uwVfZKgoI25rb/J+dnro=
github.com/aws/smithy-go v1.22.1/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
github.com/axiomhq/hyperloglog v0.2.3 h1:2ZGwz3FGcx77e9/aNjqJijsGhH6RZOlglzxnDpVBCQY=
github.com/axiomhq/hyperloglog v0.2.3/go.mod h1:DLUK9yIzpU5B6YFLjxTIcbHu1g4Y1WQb1m5RH3radaM=
-github.com/baidubce/bce-sdk-go v0.9.212 h1:B3PUoaFi4m13wP7gWObznjPLZ5umQ1BHjO/UoSsj3x4=
-github.com/baidubce/bce-sdk-go v0.9.212/go.mod h1:zbYJMQwE4IZuyrJiFO8tO8NbtYiKTFTbwh4eIsqjVdg=
+github.com/baidubce/bce-sdk-go v0.9.213 h1:4IxEiHvtMj5tJ9BCyre87bk7eAY/0TpzB4RVy/eSnos=
+github.com/baidubce/bce-sdk-go v0.9.213/go.mod h1:zbYJMQwE4IZuyrJiFO8tO8NbtYiKTFTbwh4eIsqjVdg=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
diff --git a/vendor/github.com/baidubce/bce-sdk-go/bce/config.go b/vendor/github.com/baidubce/bce-sdk-go/bce/config.go
index 6ecf3aa8f2fbf..41a7dc993fdde 100644
--- a/vendor/github.com/baidubce/bce-sdk-go/bce/config.go
+++ b/vendor/github.com/baidubce/bce-sdk-go/bce/config.go
@@ -26,7 +26,7 @@ import (
// Constants and default values for the package bce
const (
- SDK_VERSION = "0.9.212"
+ SDK_VERSION = "0.9.213"
URI_PREFIX = "/" // now support uri without prefix "v1" so just set root path
DEFAULT_DOMAIN = "baidubce.com"
DEFAULT_PROTOCOL = "http"
diff --git a/vendor/modules.txt b/vendor/modules.txt
index db7a74ff4da60..e7a56831d678c 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -490,7 +490,7 @@ github.com/aws/smithy-go/transport/http/internal/io
# github.com/axiomhq/hyperloglog v0.2.3
## explicit; go 1.23
github.com/axiomhq/hyperloglog
-# github.com/baidubce/bce-sdk-go v0.9.212
+# github.com/baidubce/bce-sdk-go v0.9.213
## explicit; go 1.11
github.com/baidubce/bce-sdk-go/auth
github.com/baidubce/bce-sdk-go/bce
|
fix
|
update module github.com/baidubce/bce-sdk-go to v0.9.213 (#15686)
|
0c28cc7682194528b7a1821bc3fa68e64c8c73de
|
2024-08-12 19:12:21
|
renovate[bot]
|
chore(deps): update dependency go to v1.22.6 (#13842)
| false
|
diff --git a/cmd/chunks-inspect/go.mod b/cmd/chunks-inspect/go.mod
index 96cb79b2de859..e29979ad9f514 100644
--- a/cmd/chunks-inspect/go.mod
+++ b/cmd/chunks-inspect/go.mod
@@ -2,7 +2,7 @@ module github.com/grafana/loki/cmd/chunks-inspect
go 1.22
-toolchain go1.22.2
+toolchain go1.22.6
require (
github.com/golang/snappy v0.0.4
diff --git a/cmd/segment-inspect/go.mod b/cmd/segment-inspect/go.mod
index d441f6f046c2f..2694e216da3e8 100644
--- a/cmd/segment-inspect/go.mod
+++ b/cmd/segment-inspect/go.mod
@@ -2,7 +2,7 @@ module github.com/grafana/loki/cmd/segment-inspect
go 1.22
-toolchain go1.22.2
+toolchain go1.22.6
require (
github.com/dustin/go-humanize v1.0.1
diff --git a/go.mod b/go.mod
index 9cc38aac4084f..4e00cc85c8a1b 100644
--- a/go.mod
+++ b/go.mod
@@ -2,7 +2,7 @@ module github.com/grafana/loki/v3
go 1.21.8
-toolchain go1.22.4
+toolchain go1.22.6
require (
cloud.google.com/go/bigtable v1.18.1
|
chore
|
update dependency go to v1.22.6 (#13842)
|
bde42414a67d90790f494a4acf50a3dc1e33c8db
|
2024-08-29 19:27:50
|
renovate[bot]
|
chore(deps): update anchore/sbom-action action to v0.17.2 (#13985)
| false
|
diff --git a/.github/workflows/syft-sbom-ci.yml b/.github/workflows/syft-sbom-ci.yml
index c06d47904b514..ed1748a641cf2 100644
--- a/.github/workflows/syft-sbom-ci.yml
+++ b/.github/workflows/syft-sbom-ci.yml
@@ -14,7 +14,7 @@ jobs:
uses: actions/checkout@v4
- name: Anchore SBOM Action
- uses: anchore/[email protected]
+ uses: anchore/[email protected]
with:
artifact-name: ${{ github.event.repository.name }}-spdx.json
|
chore
|
update anchore/sbom-action action to v0.17.2 (#13985)
|
ab10bc69d70847a24778bb933bc246e2cf0a7e39
|
2021-10-11 12:27:04
|
Dylan Guedes
|
loki: Change how push API checks for contentType (#4443)
| false
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 20ae757029b6a..880107c65ff21 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,6 @@
## Main
* [4400](https://github.com/grafana/loki/pull/4400) **trevorwhitney**: Config: automatically apply memberlist config too all rings when provided
+* [4443](https://github.com/grafana/loki/pull/4443) **DylanGuedes**: Loki: Change how push API checks for contentType
# 2.3.0 (2021/08/06)
diff --git a/pkg/loghttp/push/push.go b/pkg/loghttp/push/push.go
index dcd0eceb6493d..fba001e26d6ba 100644
--- a/pkg/loghttp/push/push.go
+++ b/pkg/loghttp/push/push.go
@@ -5,6 +5,7 @@ import (
"fmt"
"io"
"math"
+ "mime"
"net/http"
"time"
@@ -78,8 +79,14 @@ func ParseRequest(logger log.Logger, userID string, r *http.Request, tenantsRete
req logproto.PushRequest
)
+ contentType, _ /* params */, err := mime.ParseMediaType(contentType)
+ if err != nil {
+ return nil, err
+ }
+
switch contentType {
case applicationJSON:
+
var err error
// todo once https://github.com/weaveworks/common/commit/73225442af7da93ec8f6a6e2f7c8aafaee3f8840 is in Loki.
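With mime.ParseMediaType in front of the switch, the handler now matches on the bare media type, so `application/json; charset=utf-8` is accepted while a malformed header is rejected up front. A small standalone illustration of what the call returns:

// Standalone illustration of the mime.ParseMediaType call added above: it
// separates the media type from its parameters before the content-type switch.
package main

import (
	"fmt"
	"mime"
)

func main() {
	ct, params, err := mime.ParseMediaType("application/json; charset=utf-8")
	fmt.Println(ct, params, err) // application/json map[charset:utf-8] <nil>

	// A parameter without '=' (as in the new negative test case) fails to parse,
	// and ParseRequest now returns that error to the caller.
	_, _, err = mime.ParseMediaType("application/json; charsetutf-8")
	fmt.Println(err != nil) // true
}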
diff --git a/pkg/loghttp/push/push_test.go b/pkg/loghttp/push/push_test.go
index 60e7ef67e88aa..c0970bd450d4b 100644
--- a/pkg/loghttp/push/push_test.go
+++ b/pkg/loghttp/push/push_test.go
@@ -72,6 +72,27 @@ func TestParseRequest(t *testing.T) {
contentEncoding: `snappy`,
valid: false,
},
+ {
+ path: `/loki/api/v1/push`,
+ body: gzipString(`{"streams": [{ "stream": { "foo": "bar2" }, "values": [ [ "1570818238000000000", "fizzbuzz" ] ] }]}`),
+ contentType: `application/json; charset=utf-8`,
+ contentEncoding: `gzip`,
+ valid: true,
+ },
+ {
+ path: `/loki/api/v1/push`,
+ body: gzipString(`{"streams": [{ "stream": { "foo": "bar2" }, "values": [ [ "1570818238000000000", "fizzbuzz" ] ] }]}`),
+ contentType: `application/jsonn; charset=utf-8`,
+ contentEncoding: `gzip`,
+ valid: false,
+ },
+ {
+ path: `/loki/api/v1/push`,
+ body: gzipString(`{"streams": [{ "stream": { "foo4": "bar2" }, "values": [ [ "1570818238000000000", "fizzbuzz" ] ] }]}`),
+ contentType: `application/json; charsetutf-8`,
+ contentEncoding: `gzip`,
+ valid: false,
+ },
}
// Testing input array
|
loki
|
Change how push API checks for contentType (#4443)
|
1b87aeab4ef976b37e18d7225203c10f8b13fcbe
|
2023-05-23 01:01:30
|
Periklis Tsirakidis
|
operator: Add support for configuring HTTP server timeouts (#9405)
| false
|
diff --git a/operator/CHANGELOG.md b/operator/CHANGELOG.md
index 4322497787d9a..1bb660a6bb6fa 100644
--- a/operator/CHANGELOG.md
+++ b/operator/CHANGELOG.md
@@ -1,5 +1,6 @@
## Main
+- [9405](https://github.com/grafana/loki/pull/9405) **periklis**: Add support for configuring HTTP server timeouts
- [9378](https://github.com/grafana/loki/pull/9378) **aminesnow**: Add zone aware API spec validation
- [9408](https://github.com/grafana/loki/pull/9408) **JoaoBraveCoding**: Add PodAntiAffinity overwrites per component
- [9429](https://github.com/grafana/loki/pull/9429) **aminesnow**: Add default TopologySpreadContraints to Gateway
diff --git a/operator/apis/loki/v1/lokistack_types.go b/operator/apis/loki/v1/lokistack_types.go
index 13487fef96cd0..ffa87248a2805 100644
--- a/operator/apis/loki/v1/lokistack_types.go
+++ b/operator/apis/loki/v1/lokistack_types.go
@@ -561,7 +561,7 @@ type QueryLimitSpec struct {
//
// +optional
// +kubebuilder:validation:Optional
- // +kubebuilder:default:="1m"
+ // +kubebuilder:default:="3m"
// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Query Timeout"
QueryTimeout string `json:"queryTimeout,omitempty"`
}
@@ -913,6 +913,8 @@ const (
ReasonMissingGatewayOpenShiftBaseDomain LokiStackConditionReason = "MissingGatewayOpenShiftBaseDomain"
// ReasonFailedCertificateRotation when the reconciler cannot rotate any of the required TLS certificates.
ReasonFailedCertificateRotation LokiStackConditionReason = "FailedCertificateRotation"
+ // ReasonQueryTimeoutInvalid when the QueryTimeout can not be parsed.
+ ReasonQueryTimeoutInvalid LokiStackConditionReason = "ReasonQueryTimeoutInvalid"
)
// PodStatusMap defines the type for mapping pod status to pod name.
diff --git a/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml
index 2eeebf0f1928b..b2a2045c762ad 100644
--- a/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml
+++ b/operator/bundle/community-openshift/manifests/loki-operator.clusterserviceversion.yaml
@@ -150,7 +150,7 @@ metadata:
categories: OpenShift Optional, Logging & Tracing
certified: "false"
containerImage: docker.io/grafana/loki-operator:main-ac1c1fd
- createdAt: "2023-05-11T08:04:29Z"
+ createdAt: "2023-05-22T15:22:48Z"
description: The Community Loki Operator provides Kubernetes native deployment
and management of Loki and related logging components.
operators.operatorframework.io/builder: operator-sdk-unknown
diff --git a/operator/bundle/community-openshift/manifests/loki.grafana.com_lokistacks.yaml b/operator/bundle/community-openshift/manifests/loki.grafana.com_lokistacks.yaml
index ff8522e4a84e1..658b0644090f4 100644
--- a/operator/bundle/community-openshift/manifests/loki.grafana.com_lokistacks.yaml
+++ b/operator/bundle/community-openshift/manifests/loki.grafana.com_lokistacks.yaml
@@ -151,7 +151,7 @@ spec:
format: int32
type: integer
queryTimeout:
- default: 1m
+ default: 3m
description: Timeout when querying ingesters or storage
during the execution of a query request.
type: string
@@ -264,7 +264,7 @@ spec:
format: int32
type: integer
queryTimeout:
- default: 1m
+ default: 3m
description: Timeout when querying ingesters or storage
during the execution of a query request.
type: string
diff --git a/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml
index 3463595b0b2de..8242afb0436a3 100644
--- a/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml
+++ b/operator/bundle/community/manifests/loki-operator.clusterserviceversion.yaml
@@ -150,7 +150,7 @@ metadata:
categories: OpenShift Optional, Logging & Tracing
certified: "false"
containerImage: docker.io/grafana/loki-operator:main-ac1c1fd
- createdAt: "2023-05-11T08:04:26Z"
+ createdAt: "2023-05-22T15:22:44Z"
description: The Community Loki Operator provides Kubernetes native deployment
and management of Loki and related logging components.
operators.operatorframework.io/builder: operator-sdk-unknown
diff --git a/operator/bundle/community/manifests/loki.grafana.com_lokistacks.yaml b/operator/bundle/community/manifests/loki.grafana.com_lokistacks.yaml
index 54f79ddb76bf9..60f83f9f07462 100644
--- a/operator/bundle/community/manifests/loki.grafana.com_lokistacks.yaml
+++ b/operator/bundle/community/manifests/loki.grafana.com_lokistacks.yaml
@@ -151,7 +151,7 @@ spec:
format: int32
type: integer
queryTimeout:
- default: 1m
+ default: 3m
description: Timeout when querying ingesters or storage
during the execution of a query request.
type: string
@@ -264,7 +264,7 @@ spec:
format: int32
type: integer
queryTimeout:
- default: 1m
+ default: 3m
description: Timeout when querying ingesters or storage
during the execution of a query request.
type: string
diff --git a/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml b/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml
index 6a519756003af..8c34ad05b308f 100644
--- a/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml
+++ b/operator/bundle/openshift/manifests/loki-operator.clusterserviceversion.yaml
@@ -150,7 +150,7 @@ metadata:
categories: OpenShift Optional, Logging & Tracing
certified: "false"
containerImage: quay.io/openshift-logging/loki-operator:v0.1.0
- createdAt: "2023-05-11T08:04:32Z"
+ createdAt: "2023-05-22T15:22:53Z"
description: |
The Loki Operator for OCP provides a means for configuring and managing a Loki stack for cluster logging.
## Prerequisites and Requirements
diff --git a/operator/bundle/openshift/manifests/loki.grafana.com_lokistacks.yaml b/operator/bundle/openshift/manifests/loki.grafana.com_lokistacks.yaml
index 24037878ac6d2..ad8189744c786 100644
--- a/operator/bundle/openshift/manifests/loki.grafana.com_lokistacks.yaml
+++ b/operator/bundle/openshift/manifests/loki.grafana.com_lokistacks.yaml
@@ -151,7 +151,7 @@ spec:
format: int32
type: integer
queryTimeout:
- default: 1m
+ default: 3m
description: Timeout when querying ingesters or storage
during the execution of a query request.
type: string
@@ -264,7 +264,7 @@ spec:
format: int32
type: integer
queryTimeout:
- default: 1m
+ default: 3m
description: Timeout when querying ingesters or storage
during the execution of a query request.
type: string
diff --git a/operator/config/crd/bases/loki.grafana.com_lokistacks.yaml b/operator/config/crd/bases/loki.grafana.com_lokistacks.yaml
index 3f1ab640fabc8..4821a9d89b1a7 100644
--- a/operator/config/crd/bases/loki.grafana.com_lokistacks.yaml
+++ b/operator/config/crd/bases/loki.grafana.com_lokistacks.yaml
@@ -134,7 +134,7 @@ spec:
format: int32
type: integer
queryTimeout:
- default: 1m
+ default: 3m
description: Timeout when querying ingesters or storage
during the execution of a query request.
type: string
@@ -247,7 +247,7 @@ spec:
format: int32
type: integer
queryTimeout:
- default: 1m
+ default: 3m
description: Timeout when querying ingesters or storage
during the execution of a query request.
type: string
diff --git a/operator/docs/operator/api.md b/operator/docs/operator/api.md
index 7aaa5e4bca670..e5dc354985664 100644
--- a/operator/docs/operator/api.md
+++ b/operator/docs/operator/api.md
@@ -1302,6 +1302,21 @@ the component onto it.</p>
the component onto it.</p>
</td>
</tr>
+<tr>
+<td>
+<code>podAntiAffinity</code><br/>
+<em>
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#podantiaffinity-v1-core">
+Kubernetes core/v1.PodAntiAffinity
+</a>
+</em>
+</td>
+<td>
+<em>(Optional)</em>
+<p>PodAntiAffinity defines the pod anti affinity scheduling rules to schedule pods
+of a component.</p>
+</td>
+</tr>
</tbody>
</table>
@@ -1553,6 +1568,9 @@ for the ruler is missing.</p>
</tr><tr><td><p>"PendingComponents"</p></td>
<td><p>ReasonPendingComponents when all/some LokiStack components pending dependencies</p>
</td>
+</tr><tr><td><p>"ReasonQueryTimeoutInvalid"</p></td>
+<td><p>ReasonQueryTimeoutInvalid when the QueryTimeout can not be parsed.</p>
+</td>
</tr><tr><td><p>"ReadyComponents"</p></td>
<td><p>ReasonReadyComponents when all LokiStack components are ready to serve traffic.</p>
</td>
@@ -3329,7 +3347,8 @@ int32
</td>
<td>
<em>(Optional)</em>
-<p>Zones defines an array of ZoneSpec that the scheduler will try to satisfy.</p>
+<p>Zones defines an array of ZoneSpec that the scheduler will try to satisfy.
+IMPORTANT: Make sure that the replication factor defined is less than or equal to the number of available zones.</p>
</td>
</tr>
</tbody>
diff --git a/operator/internal/handlers/lokistack_create_or_update.go b/operator/internal/handlers/lokistack_create_or_update.go
index c18d634b3f179..7f10e1c0f3e3c 100644
--- a/operator/internal/handlers/lokistack_create_or_update.go
+++ b/operator/internal/handlers/lokistack_create_or_update.go
@@ -269,6 +269,16 @@ func CreateOrUpdateLokiStack(
certRotationRequiredAt = stack.Annotations[manifests.AnnotationCertRotationRequiredAt]
}
+ timeoutConfig, err := manifests.NewTimeoutConfig(stack.Spec.Limits)
+ if err != nil {
+ ll.Error(err, "failed to parse query timeout")
+ return &status.DegradedError{
+ Message: fmt.Sprintf("Error parsing query timeout: %s", err),
+ Reason: lokiv1.ReasonQueryTimeoutInvalid,
+ Requeue: false,
+ }
+ }
+
// Here we will translate the lokiv1.LokiStack options into manifest options
opts := manifests.Options{
Name: req.Name,
@@ -286,6 +296,7 @@ func CreateOrUpdateLokiStack(
Spec: rulerConfig,
Secret: rulerSecret,
},
+ Timeouts: timeoutConfig,
Tenants: manifests.Tenants{
Secrets: tenantSecrets,
Configs: tenantConfigs,
diff --git a/operator/internal/handlers/lokistack_create_or_update_test.go b/operator/internal/handlers/lokistack_create_or_update_test.go
index 933d704ba27c4..7c5a099d30805 100644
--- a/operator/internal/handlers/lokistack_create_or_update_test.go
+++ b/operator/internal/handlers/lokistack_create_or_update_test.go
@@ -1385,6 +1385,78 @@ func TestCreateOrUpdateLokiStack_MissingTenantsSpec_SetDegraded(t *testing.T) {
require.Equal(t, degradedErr, err)
}
+func TestCreateOrUpdateLokiStack_WhenInvalidQueryTimeout_SetDegraded(t *testing.T) {
+ sw := &k8sfakes.FakeStatusWriter{}
+ k := &k8sfakes.FakeClient{}
+ r := ctrl.Request{
+ NamespacedName: types.NamespacedName{
+ Name: "my-stack",
+ Namespace: "some-ns",
+ },
+ }
+
+ degradedErr := &status.DegradedError{
+ Message: `Error parsing query timeout: time: invalid duration "invalid"`,
+ Reason: lokiv1.ReasonQueryTimeoutInvalid,
+ Requeue: false,
+ }
+
+ stack := &lokiv1.LokiStack{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "LokiStack",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "my-stack",
+ Namespace: "some-ns",
+ UID: "b23f9a38-9672-499f-8c29-15ede74d3ece",
+ },
+ Spec: lokiv1.LokiStackSpec{
+ Size: lokiv1.SizeOneXExtraSmall,
+ Storage: lokiv1.ObjectStorageSpec{
+ Schemas: []lokiv1.ObjectStorageSchema{
+ {
+ Version: lokiv1.ObjectStorageSchemaV12,
+ EffectiveDate: "2023-05-22",
+ },
+ },
+ Secret: lokiv1.ObjectStorageSecretSpec{
+ Name: defaultSecret.Name,
+ Type: lokiv1.ObjectStorageSecretS3,
+ },
+ },
+ Tenants: &lokiv1.TenantsSpec{
+ Mode: "openshift",
+ },
+ Limits: &lokiv1.LimitsSpec{
+ Global: &lokiv1.LimitsTemplateSpec{
+ QueryLimits: &lokiv1.QueryLimitSpec{
+ QueryTimeout: "invalid",
+ },
+ },
+ },
+ },
+ }
+
+ // Create looks up the CR first, so we need to return our fake stack
+ k.GetStub = func(_ context.Context, name types.NamespacedName, object client.Object, _ ...client.GetOption) error {
+ if r.Name == name.Name && r.Namespace == name.Namespace {
+ k.SetClientObject(object, stack)
+ }
+ if defaultSecret.Name == name.Name {
+ k.SetClientObject(object, &defaultSecret)
+ }
+ return nil
+ }
+
+ k.StatusStub = func() client.StatusWriter { return sw }
+
+ err := handlers.CreateOrUpdateLokiStack(context.TODO(), logger, r, k, scheme, featureGates)
+
+ // make sure error is returned
+ require.Error(t, err)
+ require.Equal(t, degradedErr, err)
+}
+
func TestCreateOrUpdateLokiStack_RemovesRulerResourcesWhenDisabled(t *testing.T) {
sw := &k8sfakes.FakeStatusWriter{}
k := &k8sfakes.FakeClient{}
diff --git a/operator/internal/manifests/build_test.go b/operator/internal/manifests/build_test.go
index ff150f696c8d7..b6d8b2ad0ebe2 100644
--- a/operator/internal/manifests/build_test.go
+++ b/operator/internal/manifests/build_test.go
@@ -36,6 +36,7 @@ func TestApplyUserOptions_OverrideDefaults(t *testing.T) {
},
},
},
+ Timeouts: defaultTimeoutConfig,
}
err := ApplyDefaultSettings(&opt)
defs := internal.StackSizeTable[size]
@@ -78,6 +79,7 @@ func TestApplyUserOptions_AlwaysSetCompactorReplicasToOne(t *testing.T) {
},
},
},
+ Timeouts: defaultTimeoutConfig,
}
err := ApplyDefaultSettings(&opt)
defs := internal.StackSizeTable[size]
@@ -232,6 +234,7 @@ func TestBuildAll_WithFeatureGates_ServiceMonitors(t *testing.T) {
ServingCertsService: false,
},
},
+ Timeouts: defaultTimeoutConfig,
},
},
{
@@ -250,6 +253,7 @@ func TestBuildAll_WithFeatureGates_ServiceMonitors(t *testing.T) {
ServingCertsService: false,
},
},
+ Timeouts: defaultTimeoutConfig,
},
},
}
@@ -292,6 +296,7 @@ func TestBuildAll_WithFeatureGates_OpenShift_ServingCertsService(t *testing.T) {
ServingCertsService: false,
},
},
+ Timeouts: defaultTimeoutConfig,
},
},
{
@@ -309,6 +314,7 @@ func TestBuildAll_WithFeatureGates_OpenShift_ServingCertsService(t *testing.T) {
ServingCertsService: true,
},
},
+ Timeouts: defaultTimeoutConfig,
},
},
}
@@ -349,6 +355,7 @@ func TestBuildAll_WithFeatureGates_HTTPEncryption(t *testing.T) {
Gates: configv1.FeatureGates{
HTTPEncryption: true,
},
+ Timeouts: defaultTimeoutConfig,
}
err := ApplyDefaultSettings(&opts)
@@ -422,6 +429,7 @@ func TestBuildAll_WithFeatureGates_ServiceMonitorTLSEndpoints(t *testing.T) {
HTTPEncryption: true,
ServiceMonitorTLSEndpoints: true,
},
+ Timeouts: defaultTimeoutConfig,
}
err := ApplyDefaultSettings(&opts)
@@ -526,6 +534,7 @@ func TestBuildAll_WithFeatureGates_GRPCEncryption(t *testing.T) {
Gates: configv1.FeatureGates{
GRPCEncryption: false,
},
+ Timeouts: defaultTimeoutConfig,
},
},
{
@@ -568,6 +577,7 @@ func TestBuildAll_WithFeatureGates_GRPCEncryption(t *testing.T) {
Gates: configv1.FeatureGates{
GRPCEncryption: true,
},
+ Timeouts: defaultTimeoutConfig,
},
},
}
@@ -692,6 +702,7 @@ func TestBuildAll_WithFeatureGates_RuntimeSeccompProfile(t *testing.T) {
Gates: configv1.FeatureGates{
RuntimeSeccompProfile: false,
},
+ Timeouts: defaultTimeoutConfig,
},
},
{
@@ -734,6 +745,7 @@ func TestBuildAll_WithFeatureGates_RuntimeSeccompProfile(t *testing.T) {
Gates: configv1.FeatureGates{
RuntimeSeccompProfile: true,
},
+ Timeouts: defaultTimeoutConfig,
},
},
}
@@ -797,6 +809,7 @@ func TestBuildAll_WithFeatureGates_LokiStackGateway(t *testing.T) {
HTTPEncryption: true,
ServiceMonitorTLSEndpoints: false,
},
+ Timeouts: defaultTimeoutConfig,
},
},
{
@@ -835,6 +848,7 @@ func TestBuildAll_WithFeatureGates_LokiStackGateway(t *testing.T) {
HTTPEncryption: true,
ServiceMonitorTLSEndpoints: true,
},
+ Timeouts: defaultTimeoutConfig,
},
},
}
@@ -873,6 +887,7 @@ func TestBuildAll_WithFeatureGates_LokiStackAlerts(t *testing.T) {
ServiceMonitors: false,
LokiStackAlerts: false,
},
+ Timeouts: defaultTimeoutConfig,
},
},
{
@@ -887,6 +902,7 @@ func TestBuildAll_WithFeatureGates_LokiStackAlerts(t *testing.T) {
ServiceMonitors: true,
LokiStackAlerts: true,
},
+ Timeouts: defaultTimeoutConfig,
},
},
}
diff --git a/operator/internal/manifests/config.go b/operator/internal/manifests/config.go
index 4cc36623085e8..6ac16a2add034 100644
--- a/operator/internal/manifests/config.go
+++ b/operator/internal/manifests/config.go
@@ -172,6 +172,7 @@ func ConfigOptions(opt Options) config.Options {
IngesterMemoryRequest: opt.ResourceRequirements.Ingester.Requests.Memory().Value(),
},
ObjectStorage: opt.ObjectStorage,
+ HTTPTimeouts: opt.Timeouts.Loki,
EnableRemoteReporting: opt.Gates.GrafanaLabsUsageReport,
Ruler: config.Ruler{
Enabled: rulerEnabled,
diff --git a/operator/internal/manifests/config_test.go b/operator/internal/manifests/config_test.go
index de235af16f141..f0df0593a43d0 100644
--- a/operator/internal/manifests/config_test.go
+++ b/operator/internal/manifests/config_test.go
@@ -4,6 +4,7 @@ import (
"encoding/json"
"math/rand"
"testing"
+ "time"
"github.com/google/uuid"
@@ -41,11 +42,22 @@ func TestConfigOptions_UserOptionsTakePrecedence(t *testing.T) {
assert.JSONEq(t, string(expected), string(actual))
}
+func testTimeoutConfig() TimeoutConfig {
+ return TimeoutConfig{
+ Loki: config.HTTPTimeoutConfig{
+ IdleTimeout: 1 * time.Second,
+ ReadTimeout: 1 * time.Minute,
+ WriteTimeout: 10 * time.Minute,
+ },
+ }
+}
+
func randomConfigOptions() Options {
return Options{
Name: uuid.New().String(),
Namespace: uuid.New().String(),
Image: uuid.New().String(),
+ Timeouts: testTimeoutConfig(),
Stack: lokiv1.LokiStackSpec{
Size: lokiv1.SizeOneXExtraSmall,
Storage: lokiv1.ObjectStorageSpec{},
@@ -256,6 +268,7 @@ func TestConfigOptions_GossipRingConfig(t *testing.T) {
Name: "my-stack",
Namespace: "my-ns",
Stack: tc.spec,
+ Timeouts: testTimeoutConfig(),
}
options := ConfigOptions(inOpt)
require.Equal(t, tc.wantOptions, options.GossipRing)
@@ -361,7 +374,8 @@ func TestConfigOptions_RetentionConfig(t *testing.T) {
t.Parallel()
inOpt := Options{
- Stack: tc.spec,
+ Stack: tc.spec,
+ Timeouts: testTimeoutConfig(),
}
options := ConfigOptions(inOpt)
require.Equal(t, tc.wantOptions, options.Retention)
@@ -383,6 +397,7 @@ func TestConfigOptions_RulerAlertManager(t *testing.T) {
Mode: lokiv1.Static,
},
},
+ Timeouts: testTimeoutConfig(),
},
wantOptions: nil,
},
@@ -394,6 +409,7 @@ func TestConfigOptions_RulerAlertManager(t *testing.T) {
Mode: lokiv1.Dynamic,
},
},
+ Timeouts: testTimeoutConfig(),
},
wantOptions: nil,
},
@@ -405,6 +421,7 @@ func TestConfigOptions_RulerAlertManager(t *testing.T) {
Mode: lokiv1.OpenshiftLogging,
},
},
+ Timeouts: testTimeoutConfig(),
OpenShiftOptions: openshift.Options{
BuildOpts: openshift.BuildOptions{
AlertManagerEnabled: true,
@@ -426,6 +443,7 @@ func TestConfigOptions_RulerAlertManager(t *testing.T) {
Mode: lokiv1.OpenshiftNetwork,
},
},
+ Timeouts: testTimeoutConfig(),
OpenShiftOptions: openshift.Options{
BuildOpts: openshift.BuildOptions{
AlertManagerEnabled: true,
@@ -469,6 +487,7 @@ func TestConfigOptions_RulerAlertManager_UserOverride(t *testing.T) {
Mode: lokiv1.Static,
},
},
+ Timeouts: testTimeoutConfig(),
},
wantOptions: nil,
},
@@ -480,6 +499,7 @@ func TestConfigOptions_RulerAlertManager_UserOverride(t *testing.T) {
Mode: lokiv1.Dynamic,
},
},
+ Timeouts: testTimeoutConfig(),
},
wantOptions: nil,
},
@@ -494,6 +514,7 @@ func TestConfigOptions_RulerAlertManager_UserOverride(t *testing.T) {
Enabled: true,
},
},
+ Timeouts: testTimeoutConfig(),
Ruler: Ruler{
Spec: &lokiv1.RulerConfigSpec{
AlertManagerSpec: &lokiv1.AlertManagerSpec{
@@ -530,6 +551,7 @@ func TestConfigOptions_RulerAlertManager_UserOverride(t *testing.T) {
Enabled: true,
},
},
+ Timeouts: testTimeoutConfig(),
Ruler: Ruler{
Spec: &lokiv1.RulerConfigSpec{
AlertManagerSpec: &lokiv1.AlertManagerSpec{
@@ -584,6 +606,7 @@ func TestConfigOptions_RulerOverrides_OCPApplicationTenant(t *testing.T) {
Mode: lokiv1.Static,
},
},
+ Timeouts: testTimeoutConfig(),
},
wantOptions: nil,
},
@@ -595,6 +618,7 @@ func TestConfigOptions_RulerOverrides_OCPApplicationTenant(t *testing.T) {
Mode: lokiv1.Dynamic,
},
},
+ Timeouts: testTimeoutConfig(),
},
wantOptions: nil,
},
@@ -609,6 +633,7 @@ func TestConfigOptions_RulerOverrides_OCPApplicationTenant(t *testing.T) {
Enabled: true,
},
},
+ Timeouts: testTimeoutConfig(),
Ruler: Ruler{
Spec: &lokiv1.RulerConfigSpec{
AlertManagerSpec: &lokiv1.AlertManagerSpec{
@@ -662,6 +687,7 @@ func TestConfigOptions_RulerOverrides_OCPApplicationTenant(t *testing.T) {
Enabled: true,
},
},
+ Timeouts: testTimeoutConfig(),
Ruler: Ruler{
Spec: &lokiv1.RulerConfigSpec{
AlertManagerSpec: &lokiv1.AlertManagerSpec{
@@ -711,6 +737,7 @@ func TestConfigOptions_RulerOverrides(t *testing.T) {
Mode: lokiv1.Static,
},
},
+ Timeouts: testTimeoutConfig(),
},
wantOptions: nil,
},
@@ -722,6 +749,7 @@ func TestConfigOptions_RulerOverrides(t *testing.T) {
Mode: lokiv1.Dynamic,
},
},
+ Timeouts: testTimeoutConfig(),
},
wantOptions: nil,
},
@@ -736,6 +764,7 @@ func TestConfigOptions_RulerOverrides(t *testing.T) {
Enabled: true,
},
},
+ Timeouts: testTimeoutConfig(),
Ruler: Ruler{
Spec: &lokiv1.RulerConfigSpec{
AlertManagerSpec: &lokiv1.AlertManagerSpec{
@@ -867,6 +896,7 @@ func TestConfigOptions_RulerOverrides(t *testing.T) {
Enabled: true,
},
},
+ Timeouts: testTimeoutConfig(),
Ruler: Ruler{
Spec: &lokiv1.RulerConfigSpec{
AlertManagerSpec: &lokiv1.AlertManagerSpec{
@@ -917,6 +947,7 @@ func TestConfigOptions_RulerOverrides_OCPUserWorkloadOnlyEnabled(t *testing.T) {
Mode: lokiv1.Static,
},
},
+ Timeouts: testTimeoutConfig(),
},
wantOptions: nil,
wantOverridesOptions: nil,
@@ -929,6 +960,7 @@ func TestConfigOptions_RulerOverrides_OCPUserWorkloadOnlyEnabled(t *testing.T) {
Mode: lokiv1.Dynamic,
},
},
+ Timeouts: testTimeoutConfig(),
},
wantOptions: nil,
wantOverridesOptions: nil,
@@ -944,6 +976,7 @@ func TestConfigOptions_RulerOverrides_OCPUserWorkloadOnlyEnabled(t *testing.T) {
Enabled: true,
},
},
+ Timeouts: testTimeoutConfig(),
Ruler: Ruler{
Spec: &lokiv1.RulerConfigSpec{
AlertManagerSpec: &lokiv1.AlertManagerSpec{
@@ -1003,6 +1036,8 @@ func TestConfigOptions_RulerOverrides_OCPUserWorkloadOnlyEnabled(t *testing.T) {
Enabled: true,
},
},
+
+ Timeouts: testTimeoutConfig(),
Ruler: Ruler{
Spec: &lokiv1.RulerConfigSpec{
AlertManagerSpec: &lokiv1.AlertManagerSpec{
@@ -1131,10 +1166,27 @@ func TestConfigOptions_Replication(t *testing.T) {
t.Parallel()
inOpt := Options{
- Stack: tc.spec,
+ Stack: tc.spec,
+ Timeouts: testTimeoutConfig(),
}
options := ConfigOptions(inOpt)
require.Equal(t, tc.wantOptions, *options.Stack.Replication)
})
}
}
+
+func TestConfigOptions_ServerOptions(t *testing.T) {
+ opt := Options{
+ Stack: lokiv1.LokiStackSpec{},
+ Timeouts: testTimeoutConfig(),
+ }
+ got := ConfigOptions(opt)
+
+ want := config.HTTPTimeoutConfig{
+ IdleTimeout: time.Second,
+ ReadTimeout: time.Minute,
+ WriteTimeout: 10 * time.Minute,
+ }
+
+ require.Equal(t, want, got.HTTPTimeouts)
+}
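For context on what the three values carried by HTTPTimeoutConfig control, they line up with the standard Go HTTP server timeouts that Loki's server block ultimately configures. A background sketch (not operator code) reusing the values asserted in the test above:

// Background sketch only: shows the net/http meaning of the idle, read and
// write timeouts that TestConfigOptions_ServerOptions asserts on above.
package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	srv := &http.Server{
		Addr:         ":3100",
		IdleTimeout:  1 * time.Second,  // how long keep-alive connections may sit idle
		ReadTimeout:  1 * time.Minute,  // covers reading the full request, headers and body
		WriteTimeout: 10 * time.Minute, // generous so long-running range queries can stream
	}
	fmt.Printf("idle=%s read=%s write=%s\n", srv.IdleTimeout, srv.ReadTimeout, srv.WriteTimeout)
	// srv.ListenAndServe() would start serving; omitted so this sketch only prints.
}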
diff --git a/operator/internal/manifests/gateway.go b/operator/internal/manifests/gateway.go
index 7907b7bf9211e..cc6d7444b093a 100644
--- a/operator/internal/manifests/gateway.go
+++ b/operator/internal/manifests/gateway.go
@@ -142,8 +142,11 @@ func NewGatewayDeployment(opts Options, sha1C string) *appsv1.Deployment {
fmt.Sprintf("--logs.read.endpoint=http://%s:%d", fqdn(serviceNameQueryFrontendHTTP(opts.Name), opts.Namespace), httpPort),
fmt.Sprintf("--logs.tail.endpoint=http://%s:%d", fqdn(serviceNameQueryFrontendHTTP(opts.Name), opts.Namespace), httpPort),
fmt.Sprintf("--logs.write.endpoint=http://%s:%d", fqdn(serviceNameDistributorHTTP(opts.Name), opts.Namespace), httpPort),
+ fmt.Sprintf("--logs.write-timeout=%s", opts.Timeouts.Gateway.UpstreamWriteTimeout),
fmt.Sprintf("--rbac.config=%s", path.Join(gateway.LokiGatewayMountDir, gateway.LokiGatewayRbacFileName)),
fmt.Sprintf("--tenants.config=%s", path.Join(gateway.LokiGatewayMountDir, gateway.LokiGatewayTenantFileName)),
+ fmt.Sprintf("--server.read-timeout=%s", opts.Timeouts.Gateway.ReadTimeout),
+ fmt.Sprintf("--server.write-timeout=%s", opts.Timeouts.Gateway.WriteTimeout),
},
Ports: []corev1.ContainerPort{
{
diff --git a/operator/internal/manifests/gateway_tenants.go b/operator/internal/manifests/gateway_tenants.go
index 1d28c8b19c959..25805690ca6aa 100644
--- a/operator/internal/manifests/gateway_tenants.go
+++ b/operator/internal/manifests/gateway_tenants.go
@@ -3,12 +3,13 @@ package manifests
import (
"github.com/ViaQ/logerr/v2/kverrors"
+ "github.com/imdario/mergo"
+ monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
+
configv1 "github.com/grafana/loki/operator/apis/config/v1"
lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
"github.com/grafana/loki/operator/internal/manifests/internal/config"
"github.com/grafana/loki/operator/internal/manifests/openshift"
- "github.com/imdario/mergo"
- monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
@@ -35,6 +36,7 @@ func ApplyGatewayDefaultOptions(opts *Options) error {
GatewayName(opts.Name),
serviceNameGatewayHTTP(opts.Name),
gatewayHTTPPortName,
+ opts.Timeouts.Gateway.WriteTimeout,
ComponentLabels(LabelGatewayComponent, opts.Name),
RulerName(opts.Name),
)
diff --git a/operator/internal/manifests/gateway_tenants_test.go b/operator/internal/manifests/gateway_tenants_test.go
index a8d582e67ae0d..843194954a01b 100644
--- a/operator/internal/manifests/gateway_tenants_test.go
+++ b/operator/internal/manifests/gateway_tenants_test.go
@@ -3,6 +3,7 @@ package manifests
import (
"path"
"testing"
+ "time"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
"github.com/stretchr/testify/require"
@@ -58,6 +59,11 @@ func TestApplyGatewayDefaultsOptions(t *testing.T) {
Mode: lokiv1.Static,
},
},
+ Timeouts: TimeoutConfig{
+ Gateway: GatewayTimeoutConfig{
+ WriteTimeout: 1 * time.Minute,
+ },
+ },
},
want: &Options{
Name: "lokistack-ocp",
@@ -73,6 +79,11 @@ func TestApplyGatewayDefaultsOptions(t *testing.T) {
Mode: lokiv1.Static,
},
},
+ Timeouts: TimeoutConfig{
+ Gateway: GatewayTimeoutConfig{
+ WriteTimeout: 1 * time.Minute,
+ },
+ },
OpenShiftOptions: openshift.Options{
BuildOpts: openshift.BuildOptions{
LokiStackName: "lokistack-ocp",
@@ -80,6 +91,7 @@ func TestApplyGatewayDefaultsOptions(t *testing.T) {
GatewayName: "lokistack-ocp-gateway",
GatewaySvcName: "lokistack-ocp-gateway-http",
GatewaySvcTargetPort: "public",
+ GatewayRouteTimeout: 75 * time.Second,
RulerName: "lokistack-ocp-ruler",
Labels: ComponentLabels(LabelGatewayComponent, "lokistack-ocp"),
},
@@ -119,6 +131,11 @@ func TestApplyGatewayDefaultsOptions(t *testing.T) {
Mode: lokiv1.Dynamic,
},
},
+ Timeouts: TimeoutConfig{
+ Gateway: GatewayTimeoutConfig{
+ WriteTimeout: 1 * time.Minute,
+ },
+ },
},
want: &Options{
Name: "lokistack-ocp",
@@ -134,6 +151,11 @@ func TestApplyGatewayDefaultsOptions(t *testing.T) {
Mode: lokiv1.Dynamic,
},
},
+ Timeouts: TimeoutConfig{
+ Gateway: GatewayTimeoutConfig{
+ WriteTimeout: 1 * time.Minute,
+ },
+ },
OpenShiftOptions: openshift.Options{
BuildOpts: openshift.BuildOptions{
LokiStackName: "lokistack-ocp",
@@ -141,6 +163,7 @@ func TestApplyGatewayDefaultsOptions(t *testing.T) {
GatewayName: "lokistack-ocp-gateway",
GatewaySvcName: "lokistack-ocp-gateway-http",
GatewaySvcTargetPort: "public",
+ GatewayRouteTimeout: 75 * time.Second,
RulerName: "lokistack-ocp-ruler",
Labels: ComponentLabels(LabelGatewayComponent, "lokistack-ocp"),
},
@@ -163,6 +186,11 @@ func TestApplyGatewayDefaultsOptions(t *testing.T) {
Mode: lokiv1.OpenshiftLogging,
},
},
+ Timeouts: TimeoutConfig{
+ Gateway: GatewayTimeoutConfig{
+ WriteTimeout: 1 * time.Minute,
+ },
+ },
Tenants: Tenants{
Configs: map[string]TenantConfig{
"application": {
@@ -197,6 +225,11 @@ func TestApplyGatewayDefaultsOptions(t *testing.T) {
Mode: lokiv1.OpenshiftLogging,
},
},
+ Timeouts: TimeoutConfig{
+ Gateway: GatewayTimeoutConfig{
+ WriteTimeout: 1 * time.Minute,
+ },
+ },
Tenants: Tenants{
Configs: map[string]TenantConfig{
"application": {
@@ -223,6 +256,7 @@ func TestApplyGatewayDefaultsOptions(t *testing.T) {
GatewayName: "lokistack-ocp-gateway",
GatewaySvcName: "lokistack-ocp-gateway-http",
GatewaySvcTargetPort: "public",
+ GatewayRouteTimeout: 75 * time.Second,
RulerName: "lokistack-ocp-ruler",
Labels: ComponentLabels(LabelGatewayComponent, "lokistack-ocp"),
},
@@ -268,6 +302,11 @@ func TestApplyGatewayDefaultsOptions(t *testing.T) {
Mode: lokiv1.OpenshiftNetwork,
},
},
+ Timeouts: TimeoutConfig{
+ Gateway: GatewayTimeoutConfig{
+ WriteTimeout: 1 * time.Minute,
+ },
+ },
Tenants: Tenants{
Configs: map[string]TenantConfig{
"network": {
@@ -292,6 +331,11 @@ func TestApplyGatewayDefaultsOptions(t *testing.T) {
Mode: lokiv1.OpenshiftNetwork,
},
},
+ Timeouts: TimeoutConfig{
+ Gateway: GatewayTimeoutConfig{
+ WriteTimeout: 1 * time.Minute,
+ },
+ },
Tenants: Tenants{
Configs: map[string]TenantConfig{
"network": {
@@ -308,6 +352,7 @@ func TestApplyGatewayDefaultsOptions(t *testing.T) {
GatewayName: "lokistack-ocp-gateway",
GatewaySvcName: "lokistack-ocp-gateway-http",
GatewaySvcTargetPort: "public",
+ GatewayRouteTimeout: 75 * time.Second,
RulerName: "lokistack-ocp-ruler",
Labels: ComponentLabels(LabelGatewayComponent, "lokistack-ocp"),
},
diff --git a/operator/internal/manifests/gateway_test.go b/operator/internal/manifests/gateway_test.go
index a010a218c5575..6517e4c1a71e7 100644
--- a/operator/internal/manifests/gateway_test.go
+++ b/operator/internal/manifests/gateway_test.go
@@ -48,6 +48,7 @@ func TestNewGatewayDeployment_HasTemplateConfigHashAnnotation(t *testing.T) {
},
},
},
+ Timeouts: defaultTimeoutConfig,
}, sha1C)
expected := "loki.grafana.com/config-hash"
@@ -95,6 +96,7 @@ func TestNewGatewayDeployment_HasNodeSelector(t *testing.T) {
},
},
},
+ Timeouts: defaultTimeoutConfig,
}, "deadbeef")
require.Equal(t, dpl.Spec.Template.Spec.NodeSelector, selector)
@@ -129,6 +131,7 @@ func TestNewGatewayDeployment_HasTemplateCertRotationRequiredAtAnnotation(t *tes
},
},
},
+ Timeouts: defaultTimeoutConfig,
}, sha1C)
expected := "loki.grafana.com/certRotationRequiredAt"
@@ -187,6 +190,7 @@ func TestGatewayConfigMap_ReturnsSHA1OfBinaryContents(t *testing.T) {
},
},
},
+ Timeouts: defaultTimeoutConfig,
Tenants: Tenants{
Secrets: []*TenantSecrets{
{
@@ -221,6 +225,7 @@ func TestBuildGateway_HasConfigForTenantMode(t *testing.T) {
Mode: lokiv1.OpenshiftLogging,
},
},
+ Timeouts: defaultTimeoutConfig,
})
require.NoError(t, err)
@@ -257,6 +262,7 @@ func TestBuildGateway_HasExtraObjectsForTenantMode(t *testing.T) {
Mode: lokiv1.OpenshiftLogging,
},
},
+ Timeouts: defaultTimeoutConfig,
})
require.NoError(t, err)
@@ -292,6 +298,7 @@ func TestBuildGateway_WithExtraObjectsForTenantMode_RouteSvcMatches(t *testing.T
Mode: lokiv1.OpenshiftLogging,
},
},
+ Timeouts: defaultTimeoutConfig,
})
require.NoError(t, err)
@@ -329,6 +336,7 @@ func TestBuildGateway_WithExtraObjectsForTenantMode_ServiceAccountNameMatches(t
Mode: lokiv1.OpenshiftLogging,
},
},
+ Timeouts: defaultTimeoutConfig,
})
require.NoError(t, err)
@@ -367,6 +375,7 @@ func TestBuildGateway_WithExtraObjectsForTenantMode_ReplacesIngressWithRoute(t *
Mode: lokiv1.OpenshiftLogging,
},
},
+ Timeouts: defaultTimeoutConfig,
})
require.NoError(t, err)
@@ -432,6 +441,7 @@ func TestBuildGateway_WithTLSProfile(t *testing.T) {
},
},
},
+ Timeouts: defaultTimeoutConfig,
},
expectedArgs: []string{
"--tls.min-version=min-version",
@@ -462,6 +472,7 @@ func TestBuildGateway_WithTLSProfile(t *testing.T) {
Mode: lokiv1.Dynamic,
},
},
+ Timeouts: defaultTimeoutConfig,
},
expectedArgs: []string{
"--tls.min-version=min-version",
@@ -492,6 +503,7 @@ func TestBuildGateway_WithTLSProfile(t *testing.T) {
Mode: lokiv1.OpenshiftLogging,
},
},
+ Timeouts: defaultTimeoutConfig,
},
expectedArgs: []string{
"--tls.min-version=min-version",
@@ -563,6 +575,7 @@ func TestBuildGateway_WithRulesEnabled(t *testing.T) {
},
},
},
+ Timeouts: defaultTimeoutConfig,
},
missingArgs: []string{
"--logs.rules.endpoint=http://abcd-ruler-http.efgh.svc.cluster.local:3100",
@@ -612,6 +625,7 @@ func TestBuildGateway_WithRulesEnabled(t *testing.T) {
},
},
},
+ Timeouts: defaultTimeoutConfig,
},
wantArgs: []string{
"--logs.rules.endpoint=http://abcd-ruler-http.efgh.svc.cluster.local:3100",
@@ -639,6 +653,7 @@ func TestBuildGateway_WithRulesEnabled(t *testing.T) {
Mode: lokiv1.Dynamic,
},
},
+ Timeouts: defaultTimeoutConfig,
},
wantArgs: []string{
"--logs.rules.endpoint=http://abcd-ruler-http.efgh.svc.cluster.local:3100",
@@ -670,6 +685,7 @@ func TestBuildGateway_WithRulesEnabled(t *testing.T) {
Mode: lokiv1.OpenshiftLogging,
},
},
+ Timeouts: defaultTimeoutConfig,
},
wantArgs: []string{
"--logs.rules.endpoint=https://abcd-ruler-http.efgh.svc.cluster.local:3100",
@@ -701,6 +717,7 @@ func TestBuildGateway_WithRulesEnabled(t *testing.T) {
Mode: lokiv1.OpenshiftLogging,
},
},
+ Timeouts: defaultTimeoutConfig,
},
wantArgs: []string{
"--logs.rules.endpoint=https://abcd-ruler-http.efgh.svc.cluster.local:3100",
@@ -754,6 +771,7 @@ func TestBuildGateway_WithHTTPEncryption(t *testing.T) {
Authentication: []lokiv1.AuthenticationSpec{},
},
},
+ Timeouts: defaultTimeoutConfig,
})
require.NoError(t, err)
@@ -773,8 +791,11 @@ func TestBuildGateway_WithHTTPEncryption(t *testing.T) {
"--logs.read.endpoint=https://abcd-query-frontend-http.efgh.svc.cluster.local:3100",
"--logs.tail.endpoint=https://abcd-query-frontend-http.efgh.svc.cluster.local:3100",
"--logs.write.endpoint=https://abcd-distributor-http.efgh.svc.cluster.local:3100",
+ "--logs.write-timeout=4m0s",
"--rbac.config=/etc/lokistack-gateway/rbac.yaml",
"--tenants.config=/etc/lokistack-gateway/tenants.yaml",
+ "--server.read-timeout=48s",
+ "--server.write-timeout=6m0s",
"--logs.rules.endpoint=https://abcd-ruler-http.efgh.svc.cluster.local:3100",
"--logs.rules.read-only=true",
"--tls.client-auth-type=NoClientCert",
@@ -926,6 +947,7 @@ func TestBuildGateway_PodDisruptionBudget(t *testing.T) {
Mode: lokiv1.OpenshiftLogging,
},
},
+ Timeouts: defaultTimeoutConfig,
}
objs, err := BuildGateway(opts)
require.NoError(t, err)
@@ -957,6 +979,7 @@ func TestBuildGateway_TopologySpreadConstraint(t *testing.T) {
Mode: lokiv1.OpenshiftLogging,
},
},
+ Timeouts: defaultTimeoutConfig,
}, "deadbeef")
require.EqualValues(t, dpl.Spec.Template.Spec.TopologySpreadConstraints, []corev1.TopologySpreadConstraint{
diff --git a/operator/internal/manifests/internal/config/build_test.go b/operator/internal/manifests/internal/config/build_test.go
index 78c8a64da6b58..d6e44811b6416 100644
--- a/operator/internal/manifests/internal/config/build_test.go
+++ b/operator/internal/manifests/internal/config/build_test.go
@@ -2,6 +2,7 @@ package config
import (
"testing"
+ "time"
"github.com/stretchr/testify/require"
"k8s.io/utils/pointer"
@@ -152,8 +153,9 @@ server:
grpc_server_max_recv_msg_size: 104857600
grpc_server_max_send_msg_size: 104857600
http_listen_port: 3100
- http_server_idle_timeout: 120s
- http_server_write_timeout: 1m
+ http_server_idle_timeout: 30s
+ http_server_read_timeout: 30s
+ http_server_write_timeout: 10m0s
log_level: info
storage_config:
boltdb_shipper:
@@ -247,6 +249,11 @@ overrides:
},
},
EnableRemoteReporting: true,
+ HTTPTimeouts: HTTPTimeoutConfig{
+ IdleTimeout: 30 * time.Second,
+ ReadTimeout: 30 * time.Second,
+ WriteTimeout: 10 * time.Minute,
+ },
}
cfg, rCfg, err := Build(opts)
require.NoError(t, err)
@@ -395,8 +402,9 @@ server:
grpc_server_max_recv_msg_size: 104857600
grpc_server_max_send_msg_size: 104857600
http_listen_port: 3100
- http_server_idle_timeout: 120s
- http_server_write_timeout: 1m
+ http_server_idle_timeout: 30s
+ http_server_read_timeout: 30s
+ http_server_write_timeout: 10m0s
log_level: info
storage_config:
boltdb_shipper:
@@ -520,6 +528,11 @@ overrides:
},
},
},
+ HTTPTimeouts: HTTPTimeoutConfig{
+ IdleTimeout: 30 * time.Second,
+ ReadTimeout: 30 * time.Second,
+ WriteTimeout: 10 * time.Minute,
+ },
}
cfg, rCfg, err := Build(opts)
require.NoError(t, err)
@@ -799,8 +812,9 @@ server:
grpc_server_max_recv_msg_size: 104857600
grpc_server_max_send_msg_size: 104857600
http_listen_port: 3100
- http_server_idle_timeout: 120s
- http_server_write_timeout: 1m
+ http_server_idle_timeout: 30s
+ http_server_read_timeout: 30s
+ http_server_write_timeout: 10m0s
log_level: info
storage_config:
boltdb_shipper:
@@ -941,6 +955,11 @@ overrides:
},
},
EnableRemoteReporting: true,
+ HTTPTimeouts: HTTPTimeoutConfig{
+ IdleTimeout: 30 * time.Second,
+ ReadTimeout: 30 * time.Second,
+ WriteTimeout: 10 * time.Minute,
+ },
}
cfg, rCfg, err := Build(opts)
require.NoError(t, err)
@@ -1143,8 +1162,9 @@ server:
grpc_server_max_recv_msg_size: 104857600
grpc_server_max_send_msg_size: 104857600
http_listen_port: 3100
- http_server_idle_timeout: 120s
- http_server_write_timeout: 1m
+ http_server_idle_timeout: 30s
+ http_server_read_timeout: 30s
+ http_server_write_timeout: 10m0s
log_level: info
storage_config:
boltdb_shipper:
@@ -1286,6 +1306,11 @@ overrides:
},
},
EnableRemoteReporting: true,
+ HTTPTimeouts: HTTPTimeoutConfig{
+ IdleTimeout: 30 * time.Second,
+ ReadTimeout: 30 * time.Second,
+ WriteTimeout: 10 * time.Minute,
+ },
}
cfg, rCfg, err := Build(opts)
require.NoError(t, err)
@@ -1501,8 +1526,9 @@ server:
grpc_server_max_recv_msg_size: 104857600
grpc_server_max_send_msg_size: 104857600
http_listen_port: 3100
- http_server_idle_timeout: 120s
- http_server_write_timeout: 1m
+ http_server_idle_timeout: 30s
+ http_server_read_timeout: 30s
+ http_server_write_timeout: 10m0s
log_level: info
storage_config:
boltdb_shipper:
@@ -1661,6 +1687,11 @@ overrides:
},
},
EnableRemoteReporting: true,
+ HTTPTimeouts: HTTPTimeoutConfig{
+ IdleTimeout: 30 * time.Second,
+ ReadTimeout: 30 * time.Second,
+ WriteTimeout: 10 * time.Minute,
+ },
}
cfg, rCfg, err := Build(opts)
require.NoError(t, err)
@@ -1817,8 +1848,9 @@ server:
grpc_server_max_recv_msg_size: 104857600
grpc_server_max_send_msg_size: 104857600
http_listen_port: 3100
- http_server_idle_timeout: 120s
- http_server_write_timeout: 1m
+ http_server_idle_timeout: 30s
+ http_server_read_timeout: 30s
+ http_server_write_timeout: 10m0s
log_level: info
storage_config:
boltdb_shipper:
@@ -1981,6 +2013,11 @@ overrides:
Enabled: true,
DeleteWorkerCount: 50,
},
+ HTTPTimeouts: HTTPTimeoutConfig{
+ IdleTimeout: 30 * time.Second,
+ ReadTimeout: 30 * time.Second,
+ WriteTimeout: 10 * time.Minute,
+ },
}
cfg, rCfg, err := Build(opts)
require.NoError(t, err)
@@ -2209,8 +2246,9 @@ server:
grpc_server_max_recv_msg_size: 104857600
grpc_server_max_send_msg_size: 104857600
http_listen_port: 3100
- http_server_idle_timeout: 120s
- http_server_write_timeout: 1m
+ http_server_idle_timeout: 30s
+ http_server_read_timeout: 30s
+ http_server_write_timeout: 10m0s
log_level: info
storage_config:
boltdb_shipper:
@@ -2386,6 +2424,11 @@ overrides:
},
},
EnableRemoteReporting: true,
+ HTTPTimeouts: HTTPTimeoutConfig{
+ IdleTimeout: 30 * time.Second,
+ ReadTimeout: 30 * time.Second,
+ WriteTimeout: 10 * time.Minute,
+ },
}
cfg, rCfg, err := Build(opts)
require.NoError(t, err)
@@ -2572,8 +2615,9 @@ server:
grpc_server_max_recv_msg_size: 104857600
grpc_server_max_send_msg_size: 104857600
http_listen_port: 3100
- http_server_idle_timeout: 120s
- http_server_write_timeout: 1m
+ http_server_idle_timeout: 30s
+ http_server_read_timeout: 30s
+ http_server_write_timeout: 10m0s
tls_min_version: VersionTLS12
tls_cipher_suites: cipher1,cipher2
http_tls_config:
@@ -2718,6 +2762,11 @@ overrides:
},
},
EnableRemoteReporting: true,
+ HTTPTimeouts: HTTPTimeoutConfig{
+ IdleTimeout: 30 * time.Second,
+ ReadTimeout: 30 * time.Second,
+ WriteTimeout: 10 * time.Minute,
+ },
}
cfg, rCfg, err := Build(opts)
require.NoError(t, err)
@@ -2946,8 +2995,9 @@ server:
grpc_server_max_recv_msg_size: 104857600
grpc_server_max_send_msg_size: 104857600
http_listen_port: 3100
- http_server_idle_timeout: 120s
- http_server_write_timeout: 1m
+ http_server_idle_timeout: 30s
+ http_server_read_timeout: 30s
+ http_server_write_timeout: 10m0s
log_level: info
storage_config:
boltdb_shipper:
@@ -3200,6 +3250,11 @@ overrides:
},
},
EnableRemoteReporting: true,
+ HTTPTimeouts: HTTPTimeoutConfig{
+ IdleTimeout: 30 * time.Second,
+ ReadTimeout: 30 * time.Second,
+ WriteTimeout: 10 * time.Minute,
+ },
}
cfg, rCfg, err := Build(opts)
require.NoError(t, err)
@@ -3350,8 +3405,9 @@ server:
grpc_server_max_recv_msg_size: 104857600
grpc_server_max_send_msg_size: 104857600
http_listen_port: 3100
- http_server_idle_timeout: 120s
- http_server_write_timeout: 1m
+ http_server_idle_timeout: 30s
+ http_server_read_timeout: 30s
+ http_server_write_timeout: 10m0s
log_level: info
storage_config:
boltdb_shipper:
@@ -3446,6 +3502,11 @@ overrides:
},
},
EnableRemoteReporting: true,
+ HTTPTimeouts: HTTPTimeoutConfig{
+ IdleTimeout: 30 * time.Second,
+ ReadTimeout: 30 * time.Second,
+ WriteTimeout: 10 * time.Minute,
+ },
}
cfg, rCfg, err := Build(opts)
require.NoError(t, err)
diff --git a/operator/internal/manifests/internal/config/loki-config.yaml b/operator/internal/manifests/internal/config/loki-config.yaml
index a2db6e3a6eeb8..4e44f081e8938 100644
--- a/operator/internal/manifests/internal/config/loki-config.yaml
+++ b/operator/internal/manifests/internal/config/loki-config.yaml
@@ -422,8 +422,9 @@ server:
grpc_server_max_recv_msg_size: 104857600
grpc_server_max_send_msg_size: 104857600
http_listen_port: 3100
- http_server_idle_timeout: 120s
- http_server_write_timeout: 1m
+ http_server_idle_timeout: {{ .HTTPTimeouts.IdleTimeout }}
+ http_server_read_timeout: {{ .HTTPTimeouts.ReadTimeout }}
+ http_server_write_timeout: {{ .HTTPTimeouts.WriteTimeout }}
{{- if or .Gates.HTTPEncryption .Gates.GRPCEncryption }}
tls_min_version: {{ .TLS.MinTLSVersion }}
tls_cipher_suites: {{ .TLS.CipherSuitesString }}
diff --git a/operator/internal/manifests/internal/config/options.go b/operator/internal/manifests/internal/config/options.go
index 557e7d570616a..3a7698c851ca1 100644
--- a/operator/internal/manifests/internal/config/options.go
+++ b/operator/internal/manifests/internal/config/options.go
@@ -4,6 +4,7 @@ import (
"fmt"
"math"
"strings"
+ "time"
configv1 "github.com/grafana/loki/operator/apis/config/v1"
lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
@@ -31,6 +32,8 @@ type Options struct {
ObjectStorage storage.Options
+ HTTPTimeouts HTTPTimeoutConfig
+
Retention RetentionOptions
Overrides map[string]LokiOverrides
@@ -67,6 +70,13 @@ type GossipRing struct {
MembersDiscoveryAddr string
}
+// HTTPTimeoutConfig defines the HTTP server config options.
+type HTTPTimeoutConfig struct {
+ IdleTimeout time.Duration
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+}
+
// Ruler configuration
type Ruler struct {
Enabled bool
diff --git a/operator/internal/manifests/openshift/build_test.go b/operator/internal/manifests/openshift/build_test.go
index 0f90b7c693505..875138614629c 100644
--- a/operator/internal/manifests/openshift/build_test.go
+++ b/operator/internal/manifests/openshift/build_test.go
@@ -1,17 +1,20 @@
package openshift
import (
+ "fmt"
"testing"
+ "time"
"github.com/stretchr/testify/require"
- lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
+
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
)
func TestBuildGatewayTenantModeObjects_ClusterRoleRefMatches(t *testing.T) {
- opts := NewOptions("abc", "ns", "abc", "abc", "abc", map[string]string{}, "abc").
+ opts := NewOptions("abc", "ns", "abc", "abc", "abc", 1*time.Minute, map[string]string{}, "abc").
WithTenantsForMode(lokiv1.OpenshiftLogging, "example.com", map[string]TenantData{})
objs := BuildGatewayTenantModeObjects(*opts)
@@ -23,7 +26,7 @@ func TestBuildGatewayTenantModeObjects_ClusterRoleRefMatches(t *testing.T) {
}
func TestBuildGatewayObjects_MonitoringClusterRoleRefMatches(t *testing.T) {
- opts := NewOptions("abc", "ns", "abc", "abc", "abc", map[string]string{}, "abc")
+ opts := NewOptions("abc", "ns", "abc", "abc", "abc", 1*time.Minute, map[string]string{}, "abc")
objs := BuildGatewayObjects(*opts)
cr := objs[2].(*rbacv1.Role)
@@ -33,8 +36,23 @@ func TestBuildGatewayObjects_MonitoringClusterRoleRefMatches(t *testing.T) {
require.Equal(t, cr.Name, rb.RoleRef.Name)
}
+func TestBuildGatewayObjets_RouteWithTimeoutAnnotation(t *testing.T) {
+ gwWriteTimeout := 1 * time.Minute
+ opts := NewOptions("abc", "ns", "abc", "abc", "abc", gwWriteTimeout, map[string]string{}, "abc")
+
+ objs := BuildGatewayObjects(*opts)
+ a := objs[0].GetAnnotations()
+
+ got, ok := a[annotationGatewayRouteTimeout]
+ require.True(t, ok)
+
+ routeTimeout := gwWriteTimeout + gatewayRouteTimeoutExtension
+ want := fmt.Sprintf("%.fs", routeTimeout.Seconds())
+ require.Equal(t, want, got)
+}
+
func TestBuildRulerObjects_ClusterRoleRefMatches(t *testing.T) {
- opts := NewOptions("abc", "ns", "abc", "abc", "abc", map[string]string{}, "abc")
+ opts := NewOptions("abc", "ns", "abc", "abc", "abc", 1*time.Minute, map[string]string{}, "abc")
objs := BuildRulerObjects(*opts)
sa := objs[1].(*corev1.ServiceAccount)
diff --git a/operator/internal/manifests/openshift/options.go b/operator/internal/manifests/openshift/options.go
index 8bca7b82fb384..2ebf5ebde1f46 100644
--- a/operator/internal/manifests/openshift/options.go
+++ b/operator/internal/manifests/openshift/options.go
@@ -3,6 +3,7 @@ package openshift
import (
"fmt"
"math/rand"
+ "time"
lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
)
@@ -41,6 +42,7 @@ type BuildOptions struct {
GatewayName string
GatewaySvcName string
GatewaySvcTargetPort string
+ GatewayRouteTimeout time.Duration
RulerName string
Labels map[string]string
AlertManagerEnabled bool
@@ -56,6 +58,7 @@ type TenantData struct {
func NewOptions(
stackName, stackNamespace string,
gwName, gwSvcName, gwPortName string,
+ gwWriteTimeout time.Duration,
gwLabels map[string]string,
rulerName string,
) *Options {
@@ -66,6 +69,7 @@ func NewOptions(
GatewayName: gwName,
GatewaySvcName: gwSvcName,
GatewaySvcTargetPort: gwPortName,
+ GatewayRouteTimeout: gwWriteTimeout + gatewayRouteTimeoutExtension,
Labels: gwLabels,
RulerName: rulerName,
},
diff --git a/operator/internal/manifests/openshift/route.go b/operator/internal/manifests/openshift/route.go
index 18a9f12d71b84..140595b9bed66 100644
--- a/operator/internal/manifests/openshift/route.go
+++ b/operator/internal/manifests/openshift/route.go
@@ -1,6 +1,8 @@
package openshift
import (
+ "fmt"
+
routev1 "github.com/openshift/api/route/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
@@ -19,6 +21,9 @@ func BuildRoute(opts Options) client.Object {
Name: routeName(opts),
Namespace: opts.BuildOpts.LokiStackNamespace,
Labels: opts.BuildOpts.Labels,
+ Annotations: map[string]string{
+ annotationGatewayRouteTimeout: fmt.Sprintf("%.fs", opts.BuildOpts.GatewayRouteTimeout.Seconds()),
+ },
},
Spec: routev1.RouteSpec{
To: routev1.RouteTargetReference{
diff --git a/operator/internal/manifests/openshift/var.go b/operator/internal/manifests/openshift/var.go
index cda53dc0d0844..ab4ec0996e598 100644
--- a/operator/internal/manifests/openshift/var.go
+++ b/operator/internal/manifests/openshift/var.go
@@ -2,6 +2,13 @@ package openshift
import (
"fmt"
+ "time"
+)
+
+const (
+ annotationGatewayRouteTimeout = "haproxy.router.openshift.io/timeout"
+
+ gatewayRouteTimeoutExtension = 15 * time.Second
)
var (
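For reference, a minimal standalone sketch of how the route annotation value above is computed; the helper name `routeTimeoutAnnotation` is illustrative and not part of the operator code, while the `%.fs` formatting, the annotation key, and the 15-second extension come straight from `route.go` and `var.go` above.

```
package main

import (
	"fmt"
	"time"
)

// The OpenShift route timeout is the gateway write timeout plus a fixed
// 15-second extension, rendered in whole seconds for the
// haproxy.router.openshift.io/timeout annotation.
const routeTimeoutExtension = 15 * time.Second

func routeTimeoutAnnotation(gatewayWriteTimeout time.Duration) string {
	routeTimeout := gatewayWriteTimeout + routeTimeoutExtension
	return fmt.Sprintf("%.fs", routeTimeout.Seconds())
}

func main() {
	// With the 1m write timeout used in the route annotation test above, this prints "75s".
	fmt.Println(routeTimeoutAnnotation(1 * time.Minute))
}
```

The extension presumably keeps the router's timeout slightly above the gateway's own write timeout, so the route does not cut off requests the gateway would still serve.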
diff --git a/operator/internal/manifests/options.go b/operator/internal/manifests/options.go
index 3d9547b610ac8..24e07e375ef47 100644
--- a/operator/internal/manifests/options.go
+++ b/operator/internal/manifests/options.go
@@ -2,6 +2,9 @@ package manifests
import (
"strings"
+ "time"
+
+ "github.com/grafana/loki/operator/internal/manifests/internal/config"
configv1 "github.com/grafana/loki/operator/apis/config/v1"
lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
@@ -34,11 +37,26 @@ type Options struct {
OpenShiftOptions openshift.Options
+ Timeouts TimeoutConfig
+
Tenants Tenants
TLSProfile TLSProfileSpec
}
+// GatewayTimeoutConfig contains the HTTP server timeout options for the lokistack-gateway component.
+type GatewayTimeoutConfig struct {
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+ UpstreamWriteTimeout time.Duration
+}
+
+// TimeoutConfig contains the server timeout options for the Loki components and the lokistack-gateway.
+type TimeoutConfig struct {
+ Loki config.HTTPTimeoutConfig
+ Gateway GatewayTimeoutConfig
+}
+
// Tenants contains the configuration per tenant and secrets for authn/authz.
// Secrets are required only for modes static and dynamic to reconcile the OIDC provider.
// Configs are required only for all modes to reconcile rules and gateway configuration.
@@ -105,3 +123,67 @@ type TLSProfileSpec struct {
func (o Options) TLSCipherSuites() string {
return strings.Join(o.TLSProfile.Ciphers, ",")
}
+
+// NewTimeoutConfig creates a TimeoutConfig from the QueryTimeout values in the spec's limits.
+func NewTimeoutConfig(s *lokiv1.LimitsSpec) (TimeoutConfig, error) {
+ if s == nil {
+ return defaultTimeoutConfig, nil
+ }
+
+ if s.Global == nil && s.Tenants == nil {
+ return defaultTimeoutConfig, nil
+ }
+
+ queryTimeout := lokiDefaultQueryTimeout
+	if s.Global != nil && s.Global.QueryLimits != nil && s.Global.QueryLimits.QueryTimeout != "" {
+ var err error
+ globalQueryTimeout, err := time.ParseDuration(s.Global.QueryLimits.QueryTimeout)
+ if err != nil {
+ return TimeoutConfig{}, err
+ }
+
+ if globalQueryTimeout > queryTimeout {
+ queryTimeout = globalQueryTimeout
+ }
+ }
+
+ for _, tLimit := range s.Tenants {
+ if tLimit.QueryLimits == nil || tLimit.QueryLimits.QueryTimeout == "" {
+ continue
+ }
+
+ tenantQueryTimeout, err := time.ParseDuration(tLimit.QueryLimits.QueryTimeout)
+ if err != nil {
+ return TimeoutConfig{}, err
+ }
+
+ if tenantQueryTimeout > queryTimeout {
+ queryTimeout = tenantQueryTimeout
+ }
+ }
+
+ return calculateHTTPTimeouts(queryTimeout), nil
+}
+
+func calculateHTTPTimeouts(queryTimeout time.Duration) TimeoutConfig {
+ idleTimeout := lokiDefaultHTTPIdleTimeout
+ if queryTimeout < idleTimeout {
+ idleTimeout = queryTimeout
+ }
+
+ readTimeout := queryTimeout / 10
+ writeTimeout := queryTimeout + lokiQueryWriteDuration
+
+ return TimeoutConfig{
+ Loki: config.HTTPTimeoutConfig{
+ IdleTimeout: idleTimeout,
+ ReadTimeout: readTimeout,
+ WriteTimeout: writeTimeout,
+ },
+ Gateway: GatewayTimeoutConfig{
+ ReadTimeout: readTimeout + gatewayReadDuration,
+ WriteTimeout: writeTimeout + gatewayWriteDuration,
+ UpstreamWriteTimeout: writeTimeout,
+ },
+ }
+}
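To make the derivation above concrete, here is a minimal standalone sketch (illustrative names, not operator code) that restates `calculateHTTPTimeouts` for the default 3-minute query timeout defined in `var.go` further down in this diff; the resulting 48s, 6m0s and 4m0s gateway values match the `--server.read-timeout`, `--server.write-timeout` and `--logs.write-timeout` flags asserted in `gateway_test.go` above.

```
package main

import (
	"fmt"
	"time"
)

func main() {
	// Default constants: 3m query timeout, 30s idle cap, 1m Loki write headroom,
	// 30s/2m gateway read/write headroom.
	queryTimeout := 3 * time.Minute

	idle := 30 * time.Second
	if queryTimeout < idle {
		idle = queryTimeout
	}
	read := queryTimeout / 10           // 18s
	write := queryTimeout + time.Minute // 4m0s

	fmt.Println("loki idle:", idle)                    // 30s
	fmt.Println("loki read:", read)                    // 18s
	fmt.Println("loki write:", write)                  // 4m0s
	fmt.Println("gateway read:", read+30*time.Second)  // 48s
	fmt.Println("gateway write:", write+2*time.Minute) // 6m0s
	fmt.Println("gateway upstream write:", write)      // 4m0s
}
```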
diff --git a/operator/internal/manifests/options_test.go b/operator/internal/manifests/options_test.go
new file mode 100644
index 0000000000000..6d49649620437
--- /dev/null
+++ b/operator/internal/manifests/options_test.go
@@ -0,0 +1,142 @@
+package manifests
+
+import (
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/grafana/loki/operator/internal/manifests/internal/config"
+
+ lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
+)
+
+func TestNewTimeoutConfig_ReturnsDefaults_WhenLimitsSpecEmpty(t *testing.T) {
+ s := lokiv1.LokiStack{}
+
+ got, err := NewTimeoutConfig(s.Spec.Limits)
+ require.NoError(t, err)
+ require.Equal(t, defaultTimeoutConfig, got)
+}
+
+func TestNewTimeoutConfig_ReturnsCustomConfig_WhenLimitsSpecNotEmpty(t *testing.T) {
+ s := lokiv1.LokiStack{
+ Spec: lokiv1.LokiStackSpec{
+ Limits: &lokiv1.LimitsSpec{
+ Global: &lokiv1.LimitsTemplateSpec{
+ QueryLimits: &lokiv1.QueryLimitSpec{
+ QueryTimeout: "10m",
+ },
+ },
+ },
+ },
+ }
+
+ got, err := NewTimeoutConfig(s.Spec.Limits)
+ require.NoError(t, err)
+
+ want := TimeoutConfig{
+ Loki: config.HTTPTimeoutConfig{
+ IdleTimeout: 30 * time.Second,
+ ReadTimeout: 1 * time.Minute,
+ WriteTimeout: 11 * time.Minute,
+ },
+ Gateway: GatewayTimeoutConfig{
+ ReadTimeout: 1*time.Minute + gatewayReadDuration,
+ WriteTimeout: 11*time.Minute + gatewayWriteDuration,
+ UpstreamWriteTimeout: 11 * time.Minute,
+ },
+ }
+
+ require.Equal(t, want, got)
+}
+
+func TestNewTimeoutConfig_ReturnsCustomConfig_WhenLimitsSpecNotEmpty_UseMaxTenantQueryTimeout(t *testing.T) {
+ s := lokiv1.LokiStack{
+ Spec: lokiv1.LokiStackSpec{
+ Limits: &lokiv1.LimitsSpec{
+ Global: &lokiv1.LimitsTemplateSpec{
+ QueryLimits: &lokiv1.QueryLimitSpec{
+ QueryTimeout: "10m",
+ },
+ },
+ Tenants: map[string]lokiv1.LimitsTemplateSpec{
+ "tenant-a": {
+ QueryLimits: &lokiv1.QueryLimitSpec{
+ QueryTimeout: "10m",
+ },
+ },
+ "tenant-b": {
+ QueryLimits: &lokiv1.QueryLimitSpec{
+ QueryTimeout: "20m",
+ },
+ },
+ },
+ },
+ },
+ }
+
+ got, err := NewTimeoutConfig(s.Spec.Limits)
+ require.NoError(t, err)
+
+ want := TimeoutConfig{
+ Loki: config.HTTPTimeoutConfig{
+ IdleTimeout: 30 * time.Second,
+ ReadTimeout: 2 * time.Minute,
+ WriteTimeout: 21 * time.Minute,
+ },
+ Gateway: GatewayTimeoutConfig{
+ ReadTimeout: 2*time.Minute + gatewayReadDuration,
+ WriteTimeout: 21*time.Minute + gatewayWriteDuration,
+ UpstreamWriteTimeout: 21 * time.Minute,
+ },
+ }
+
+ require.Equal(t, want, got)
+}
+
+func TestNewTimeoutConfig_ReturnsDefaults_WhenGlobalQueryTimeoutParseError(t *testing.T) {
+ s := lokiv1.LokiStack{
+ Spec: lokiv1.LokiStackSpec{
+ Limits: &lokiv1.LimitsSpec{
+ Global: &lokiv1.LimitsTemplateSpec{
+ QueryLimits: &lokiv1.QueryLimitSpec{
+ QueryTimeout: "invalid",
+ },
+ },
+ },
+ },
+ }
+
+ _, err := NewTimeoutConfig(s.Spec.Limits)
+ require.Error(t, err)
+}
+
+func TestNewTimeoutConfig_ReturnsDefaults_WhenTenantQueryTimeoutParseError(t *testing.T) {
+ s := lokiv1.LokiStack{
+ Spec: lokiv1.LokiStackSpec{
+ Limits: &lokiv1.LimitsSpec{
+ Global: &lokiv1.LimitsTemplateSpec{
+ QueryLimits: &lokiv1.QueryLimitSpec{
+ QueryTimeout: "10m",
+ },
+ },
+ Tenants: map[string]lokiv1.LimitsTemplateSpec{
+ "tenant-a": {
+ QueryLimits: &lokiv1.QueryLimitSpec{
+ QueryTimeout: "invalid",
+ },
+ },
+ "tenant-b": {
+ QueryLimits: &lokiv1.QueryLimitSpec{
+ QueryTimeout: "20m",
+ },
+ },
+ },
+ },
+ },
+ }
+
+ _, err := NewTimeoutConfig(s.Spec.Limits)
+ require.Error(t, err)
+}
diff --git a/operator/internal/manifests/service_test.go b/operator/internal/manifests/service_test.go
index f66a42906d2fe..58b3bc0001736 100644
--- a/operator/internal/manifests/service_test.go
+++ b/operator/internal/manifests/service_test.go
@@ -54,6 +54,7 @@ func TestServicesMatchPorts(t *testing.T) {
},
},
},
+ Timeouts: defaultTimeoutConfig,
}
sha1C := "deadbef"
@@ -183,6 +184,7 @@ func TestServicesMatchLabels(t *testing.T) {
},
},
},
+ Timeouts: defaultTimeoutConfig,
}
sha1C := "deadbef"
@@ -305,6 +307,7 @@ func TestServices_WithEncryption(t *testing.T) {
},
},
},
+ Timeouts: defaultTimeoutConfig,
TLSProfile: TLSProfileSpec{
MinTLSVersion: "VersionTLS12",
Ciphers: []string{"cipher1", "cipher2"},
diff --git a/operator/internal/manifests/var.go b/operator/internal/manifests/var.go
index b80f04b90f9aa..d94fcf0bb1ab7 100644
--- a/operator/internal/manifests/var.go
+++ b/operator/internal/manifests/var.go
@@ -3,6 +3,7 @@ package manifests
import (
"fmt"
"path"
+ "time"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
corev1 "k8s.io/api/core/v1"
@@ -109,7 +110,20 @@ const (
kubernetesInstanceLabel = "app.kubernetes.io/instance"
)
+const (
+ // lokiDefaultQueryTimeout contains the default query timeout. It should match the value mentioned in the CRD
+	// definition and also the default in `sizes.go`.
+ lokiDefaultQueryTimeout = 3 * time.Minute
+ lokiDefaultHTTPIdleTimeout = 30 * time.Second
+ lokiQueryWriteDuration = 1 * time.Minute
+
+ gatewayReadDuration = 30 * time.Second
+ gatewayWriteDuration = 2 * time.Minute
+)
+
var (
+ defaultTimeoutConfig = calculateHTTPTimeouts(lokiDefaultQueryTimeout)
+
defaultConfigMapMode = int32(420)
volumeFileSystemMode = corev1.PersistentVolumeFilesystem
podAntiAffinityComponents = map[string]struct{}{
|
operator
|
Add support for configuring HTTP server timeouts (#9405)
|
4721d7efd308e7d85fe03464041179bb1414fe8c
|
2023-03-23 15:48:23
|
Gerard Vanloo
|
operator: Remove mutations to non-updatable statefulset fields (#8875)
| false
|
diff --git a/operator/CHANGELOG.md b/operator/CHANGELOG.md
index 82e5f658f3906..8133f2755a1c0 100644
--- a/operator/CHANGELOG.md
+++ b/operator/CHANGELOG.md
@@ -1,5 +1,6 @@
## Main
+- [8875](https://github.com/grafana/loki/pull/8875) **Red-GV**: Remove mutations to non-updatable statefulset fields
- [7451](https://github.com/grafana/loki/pull/7451) **btaani**: Add support for rules configmap sharding
- [8672](https://github.com/grafana/loki/pull/8672) **periklis**: Add support for memberlist bind network configuration
- [8748](https://github.com/grafana/loki/pull/8748) **periklis**: Add alertingrule tenant id label for all rules
diff --git a/operator/internal/manifests/mutate.go b/operator/internal/manifests/mutate.go
index 6ba0c77c891c4..d87979104de17 100644
--- a/operator/internal/manifests/mutate.go
+++ b/operator/internal/manifests/mutate.go
@@ -234,13 +234,7 @@ func mutateStatefulSet(existing, desired *appsv1.StatefulSet) error {
if existing.CreationTimestamp.IsZero() {
existing.Spec.Selector = desired.Spec.Selector
}
- existing.Spec.PodManagementPolicy = desired.Spec.PodManagementPolicy
existing.Spec.Replicas = desired.Spec.Replicas
- for i := range existing.Spec.VolumeClaimTemplates {
- existing.Spec.VolumeClaimTemplates[i].TypeMeta = desired.Spec.VolumeClaimTemplates[i].TypeMeta
- existing.Spec.VolumeClaimTemplates[i].ObjectMeta = desired.Spec.VolumeClaimTemplates[i].ObjectMeta
- existing.Spec.VolumeClaimTemplates[i].Spec = desired.Spec.VolumeClaimTemplates[i].Spec
- }
if err := mergeWithOverride(&existing.Spec.Template, desired.Spec.Template); err != nil {
return err
}
|
operator
|
Remove mutations to non-updatable statefulset fields (#8875)
|
8df5803d9a088995e2f8280bab951b3b31d80e28
|
2023-01-13 03:26:01
|
Aditya C S
|
feat(logql): Support drop labels in logql pipeline (#7975)
| false
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5b84271fd7db8..c716baebddd1e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -19,6 +19,7 @@
* [7964](https://github.com/grafana/loki/pull/7964) **slim-bean**: Add a `since` query parameter to allow querying based on relative time.
* [7989](https://github.com/grafana/loki/pull/7989) **liguozhong**: logql support `sort` and `sort_desc`.
* [7997](https://github.com/grafana/loki/pull/7997) **kavirajk**: fix(promtail): Fix cri tags extra new lines when joining partial lines
+* [7975](https://github.com/grafana/loki/pull/7975) **adityacs**: Support drop labels in logql
* [7946](https://github.com/grafana/loki/pull/7946) **ashwanthgoli** config: Add support for named stores
* [8027](https://github.com/grafana/loki/pull/8027) **kavirajk**: chore(promtail): Make `batchwait` and `batchsize` config explicit with yaml tags
* [7978](https://github.com/grafana/loki/pull/7978) **chaudum**: Shut down query frontend gracefully to allow inflight requests to complete.
diff --git a/docs/sources/logql/log_queries.md b/docs/sources/logql/log_queries.md
index 54937dc1b35fb..f2c78eb5c5603 100644
--- a/docs/sources/logql/log_queries.md
+++ b/docs/sources/logql/log_queries.md
@@ -556,3 +556,55 @@ In both cases, if the destination label doesn't exist, then a new one is created
The renaming form `dst=src` will _drop_ the `src` label after remapping it to the `dst` label. However, the _template_ form will preserve the referenced labels, such that `dst="{{.src}}"` results in both `dst` and `src` having the same value.
> A single label name can only appear once per expression. This means `| label_format foo=bar,foo="new"` is not allowed but you can use two expressions for the desired effect: `| label_format foo=bar | label_format foo="new"`
+
+### Drop Labels expression
+
+**Syntax**: `|drop name, other_name, some_name="some_value"`
+
+The `=` operator after the label name is a **label matching operator**.
+The following label matching operators are supported:
+
+- `=`: exactly equal
+- `!=`: not equal
+- `=~`: regex matches
+- `!~`: regex does not match
+
+The `| drop` expression will drop the given labels in the pipeline. For example, for the query `{job="varlogs"}|json|drop level, method="GET"`, with the log line below
+
+```
+{"level": "info", "method": "GET", "path": "/", "host": "grafana.net", "status": "200"}
+```
+
+the result will be
+
+```
+{host="grafana.net", path="/", status="200"} {"level": "info", "method": "GET", "path": "/", "host": "grafana.net", "status": "200"}
+```
+
+Similarly, this expression can be used to drop the `__error__` label as well. For example, for the query `{job="varlogs"}|json|drop __error__`, with the log line below
+
+```
+INFO GET / loki.net 200
+```
+
+the result will be
+
+```
+{} INFO GET / loki.net 200
+```
+
+Example with a regex matcher and multiple names
+
+For the query `{job="varlogs"}|json|drop level, path, app=~"some-api.*"`, with the log lines below
+
+```
+{"app": "some-api-service", "level": "info", "method": "GET", "path": "/", "host": "grafana.net", "status": "200"}
+{"app": "other-service", "level": "info", "method": "GET", "path": "/", "host": "grafana.net", "status": "200"}
+```
+
+the result will be
+
+```
+{host="grafana.net", job="varlogs", method="GET", status="200"} {"app": "some-api-service", "level": "info", "method": "GET", "path": "/", "host": "grafana.net", "status": "200"}
+{app="other-service", host="grafana.net", job="varlogs", method="GET", status="200"} {"app": "other-service", "level": "info", "method": "GET", "path": "/", "host": "grafana.net", "status": "200"}
+```
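For completeness, a minimal Go sketch of driving the new drop stage directly, mirroring the unit tests added below; the import paths assume this commit's tree and the example labels are made up.

```
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"

	"github.com/grafana/loki/pkg/logql/log"
)

func main() {
	// Drop "level" unconditionally, and "method" only when its value is "GET".
	stage := log.NewDropLabels([]log.DropLabel{
		log.NewDropLabel(nil, "level"),
		log.NewDropLabel(labels.MustNewMatcher(labels.MatchEqual, "method", "GET"), ""),
	})

	lbs := labels.Labels{
		{Name: "host", Value: "grafana.net"},
		{Name: "level", Value: "info"},
		{Name: "method", Value: "GET"},
		{Name: "status", Value: "200"},
	}
	builder := log.NewBaseLabelsBuilder().ForLabels(lbs, lbs.Hash())
	builder.Reset()

	line, _ := stage.Process(0, []byte(`some log line`), builder)
	fmt.Println(string(line), builder.LabelsResult().Labels())
}
```

Dropping by name is unconditional, while a matcher only drops the label when its current value matches, which is the same split the documentation above describes.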
diff --git a/pkg/logql/log/drop_labels.go b/pkg/logql/log/drop_labels.go
new file mode 100644
index 0000000000000..eb697cd229ad2
--- /dev/null
+++ b/pkg/logql/log/drop_labels.go
@@ -0,0 +1,85 @@
+package log
+
+import (
+ "github.com/grafana/loki/pkg/logqlmodel"
+ "github.com/prometheus/prometheus/model/labels"
+)
+
+type DropLabels struct {
+ dropLabels []DropLabel
+}
+
+type DropLabel struct {
+ Matcher *labels.Matcher
+ Name string
+}
+
+func NewDropLabel(matcher *labels.Matcher, name string) DropLabel {
+ return DropLabel{
+ Matcher: matcher,
+ Name: name,
+ }
+}
+
+func NewDropLabels(dl []DropLabel) *DropLabels {
+ return &DropLabels{dropLabels: dl}
+}
+
+func (dl *DropLabels) Process(ts int64, line []byte, lbls *LabelsBuilder) ([]byte, bool) {
+ for _, dropLabel := range dl.dropLabels {
+ if dropLabel.Matcher != nil {
+ dropLabelMatches(dropLabel.Matcher, lbls)
+ continue
+ }
+ name := dropLabel.Name
+ dropLabelNames(name, lbls)
+ }
+ return line, true
+}
+
+func (dl *DropLabels) RequiredLabelNames() []string { return []string{} }
+
+func isErrorLabel(name string) bool {
+ return name == logqlmodel.ErrorLabel
+}
+
+func isErrorDetailsLabel(name string) bool {
+ return name == logqlmodel.ErrorDetailsLabel
+}
+
+func dropLabelNames(name string, lbls *LabelsBuilder) {
+ if isErrorLabel(name) {
+ lbls.ResetError()
+ return
+ }
+ if isErrorDetailsLabel(name) {
+ lbls.ResetErrorDetails()
+ return
+ }
+ if _, ok := lbls.Get(name); ok {
+ lbls.Del(name)
+ }
+}
+
+func dropLabelMatches(matcher *labels.Matcher, lbls *LabelsBuilder) {
+ var value string
+ name := matcher.Name
+ if isErrorLabel(name) {
+ value = lbls.GetErr()
+ if matcher.Matches(value) {
+ lbls.ResetError()
+ }
+ return
+ }
+ if isErrorDetailsLabel(name) {
+ value = lbls.GetErrorDetails()
+ if matcher.Matches(value) {
+ lbls.ResetErrorDetails()
+ }
+ return
+ }
+ value, _ = lbls.Get(name)
+ if matcher.Matches(value) {
+ lbls.Del(name)
+ }
+}
diff --git a/pkg/logql/log/drop_labels_test.go b/pkg/logql/log/drop_labels_test.go
new file mode 100644
index 0000000000000..19e275e2d46ea
--- /dev/null
+++ b/pkg/logql/log/drop_labels_test.go
@@ -0,0 +1,160 @@
+package log
+
+import (
+ "sort"
+ "testing"
+
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/stretchr/testify/require"
+
+ "github.com/grafana/loki/pkg/logqlmodel"
+)
+
+func Test_DropLabels(t *testing.T) {
+ tests := []struct {
+ Name string
+ dropLabels []DropLabel
+ err string
+ errDetails string
+ lbs labels.Labels
+ want labels.Labels
+ }{
+ {
+ "drop by name",
+ []DropLabel{
+ {
+ nil,
+ "app",
+ },
+ {
+ nil,
+ "namespace",
+ },
+ },
+ "",
+ "",
+ labels.Labels{
+ {Name: "app", Value: "foo"},
+ {Name: "namespace", Value: "prod"},
+ {Name: "pod_uuid", Value: "foo"},
+ },
+ labels.Labels{
+ {Name: "pod_uuid", Value: "foo"},
+ },
+ },
+ {
+ "drop by __error__",
+ []DropLabel{
+ {
+ labels.MustNewMatcher(labels.MatchEqual, logqlmodel.ErrorLabel, errJSON),
+ "",
+ },
+ {
+ nil,
+ "__error_details__",
+ },
+ },
+ errJSON,
+ "json error",
+ labels.Labels{
+ {Name: "app", Value: "foo"},
+ {Name: "namespace", Value: "prod"},
+ {Name: "pod_uuid", Value: "foo"},
+ },
+ labels.Labels{
+ {Name: "app", Value: "foo"},
+ {Name: "namespace", Value: "prod"},
+ {Name: "pod_uuid", Value: "foo"},
+ },
+ },
+ {
+ "drop with wrong __error__ value",
+ []DropLabel{
+ {
+ labels.MustNewMatcher(labels.MatchEqual, logqlmodel.ErrorLabel, errLogfmt),
+ "",
+ },
+ },
+ errJSON,
+ "json error",
+ labels.Labels{
+ {Name: "app", Value: "foo"},
+ {Name: "namespace", Value: "prod"},
+ {Name: "pod_uuid", Value: "foo"},
+ },
+ labels.Labels{
+ {Name: "app", Value: "foo"},
+ {Name: "namespace", Value: "prod"},
+ {Name: "pod_uuid", Value: "foo"},
+ {Name: logqlmodel.ErrorLabel, Value: errJSON},
+ {Name: logqlmodel.ErrorDetailsLabel, Value: "json error"},
+ },
+ },
+ {
+ "drop by __error_details__",
+ []DropLabel{
+ {
+ labels.MustNewMatcher(labels.MatchRegexp, logqlmodel.ErrorDetailsLabel, "expecting json.*"),
+ "",
+ },
+ {
+ nil,
+ "__error__",
+ },
+ },
+ errJSON,
+ "expecting json object but it is not",
+ labels.Labels{
+ {Name: "app", Value: "foo"},
+ {Name: "namespace", Value: "prod"},
+ {Name: "pod_uuid", Value: "foo"},
+ },
+ labels.Labels{
+ {Name: "app", Value: "foo"},
+ {Name: "namespace", Value: "prod"},
+ {Name: "pod_uuid", Value: "foo"},
+ },
+ },
+ {
+ "drop labels with names and matcher",
+ []DropLabel{
+ {
+ labels.MustNewMatcher(labels.MatchEqual, logqlmodel.ErrorLabel, errJSON),
+ "",
+ },
+ {
+ nil,
+ "__error_details__",
+ },
+ {
+ nil,
+ "app",
+ },
+ {
+ nil,
+ "namespace",
+ },
+ },
+ errJSON,
+ "json error",
+ labels.Labels{
+ {Name: "app", Value: "foo"},
+ {Name: "namespace", Value: "prod"},
+ {Name: "pod_uuid", Value: "foo"},
+ },
+ labels.Labels{
+ {Name: "pod_uuid", Value: "foo"},
+ },
+ },
+ }
+ for _, tt := range tests {
+ dropLabels := NewDropLabels(tt.dropLabels)
+ lbls := NewBaseLabelsBuilder().ForLabels(tt.lbs, tt.lbs.Hash())
+ lbls.Reset()
+ lbls.SetErr(tt.err)
+ lbls.SetErrorDetails(tt.errDetails)
+ dropLabels.Process(0, []byte(""), lbls)
+ sort.Sort(tt.want)
+ require.Equal(t, tt.want, lbls.LabelsResult().Labels())
+ }
+}
diff --git a/pkg/logql/log/labels.go b/pkg/logql/log/labels.go
index 99684f58182c1..1081787db4367 100644
--- a/pkg/logql/log/labels.go
+++ b/pkg/logql/log/labels.go
@@ -166,6 +166,16 @@ func (b *LabelsBuilder) SetErrorDetails(desc string) *LabelsBuilder {
return b
}
+func (b *LabelsBuilder) ResetError() *LabelsBuilder {
+ b.err = ""
+ return b
+}
+
+func (b *LabelsBuilder) ResetErrorDetails() *LabelsBuilder {
+ b.errDetails = ""
+ return b
+}
+
func (b *LabelsBuilder) GetErrorDetails() string {
return b.errDetails
}
diff --git a/pkg/logql/log/pipeline_test.go b/pkg/logql/log/pipeline_test.go
index 24d2c05da6db0..f900978bbe94a 100644
--- a/pkg/logql/log/pipeline_test.go
+++ b/pkg/logql/log/pipeline_test.go
@@ -1,6 +1,7 @@
package log
import (
+ "sort"
"testing"
"time"
@@ -134,6 +135,109 @@ var (
resSample float64
)
+func TestDropLabelsPipeline(t *testing.T) {
+ tests := []struct {
+ name string
+ stages []Stage
+ lines [][]byte
+ wantLine [][]byte
+ wantLabels []labels.Labels
+ }{
+ {
+ "drop __error__",
+ []Stage{
+ NewLogfmtParser(),
+ NewJSONParser(),
+ NewDropLabels([]DropLabel{
+ {
+ nil,
+ "__error__",
+ },
+ {
+ nil,
+ "__error_details__",
+ },
+ }),
+ },
+ [][]byte{
+ []byte(`level=info ts=2020-10-18T18:04:22.147378997Z caller=metrics.go:81 status=200`),
+ []byte(`{"app":"foo","namespace":"prod","pod":{"uuid":"foo","deployment":{"ref":"foobar"}}}`),
+ },
+ [][]byte{
+ []byte(`level=info ts=2020-10-18T18:04:22.147378997Z caller=metrics.go:81 status=200`),
+ []byte(`{"app":"foo","namespace":"prod","pod":{"uuid":"foo","deployment":{"ref":"foobar"}}}`),
+ },
+ []labels.Labels{
+ {
+ {Name: "level", Value: "info"},
+ {Name: "ts", Value: "2020-10-18T18:04:22.147378997Z"},
+ {Name: "caller", Value: "metrics.go:81"},
+ {Name: "status", Value: "200"},
+ },
+ {
+ {Name: "app", Value: "foo"},
+ {Name: "namespace", Value: "prod"},
+ {Name: "pod_uuid", Value: "foo"},
+ {Name: "pod_deployment_ref", Value: "foobar"},
+ },
+ },
+ },
+ {
+ "drop __error__ with matching value",
+ []Stage{
+ NewLogfmtParser(),
+ NewJSONParser(),
+ NewDropLabels([]DropLabel{
+ {
+ labels.MustNewMatcher(labels.MatchEqual, logqlmodel.ErrorLabel, errLogfmt),
+ "",
+ },
+ {
+ labels.MustNewMatcher(labels.MatchEqual, "status", "200"),
+ "",
+ },
+ {
+ nil,
+ "app",
+ },
+ }),
+ },
+ [][]byte{
+ []byte(`level=info ts=2020-10-18T18:04:22.147378997Z caller=metrics.go:81 status=200`),
+ []byte(`{"app":"foo","namespace":"prod","pod":{"uuid":"foo","deployment":{"ref":"foobar"}}}`),
+ },
+ [][]byte{
+ []byte(`level=info ts=2020-10-18T18:04:22.147378997Z caller=metrics.go:81 status=200`),
+ []byte(`{"app":"foo","namespace":"prod","pod":{"uuid":"foo","deployment":{"ref":"foobar"}}}`),
+ },
+ []labels.Labels{
+ {
+ {Name: "level", Value: "info"},
+ {Name: "ts", Value: "2020-10-18T18:04:22.147378997Z"},
+ {Name: "caller", Value: "metrics.go:81"},
+ {Name: logqlmodel.ErrorLabel, Value: errJSON},
+ {Name: logqlmodel.ErrorDetailsLabel, Value: "expecting json object(6), but it is not"},
+ },
+ {
+ {Name: "namespace", Value: "prod"},
+ {Name: "pod_uuid", Value: "foo"},
+ {Name: "pod_deployment_ref", Value: "foobar"},
+ {Name: logqlmodel.ErrorDetailsLabel, Value: "logfmt syntax error at pos 2 : unexpected '\"'"},
+ },
+ },
+ },
+ }
+ for _, tt := range tests {
+ p := NewPipeline(tt.stages)
+ sp := p.ForStream(labels.Labels{})
+ for i, line := range tt.lines {
+ _, finalLbs, _ := sp.Process(0, line)
+ sort.Sort(tt.wantLabels[i])
+ require.Equal(t, tt.wantLabels[i], finalLbs.Labels())
+ }
+ }
+
+}
func Benchmark_Pipeline(b *testing.B) {
b.ReportAllocs()
diff --git a/pkg/logql/syntax/ast.go b/pkg/logql/syntax/ast.go
index c7832352dcd58..f9dc906b20d5f 100644
--- a/pkg/logql/syntax/ast.go
+++ b/pkg/logql/syntax/ast.go
@@ -446,6 +446,44 @@ func (e *DecolorizeExpr) String() string {
}
func (e *DecolorizeExpr) Walk(f WalkFn) { f(e) }
+type DropLabelsExpr struct {
+ dropLabels []log.DropLabel
+ implicit
+}
+
+func newDropLabelsExpr(dropLabels []log.DropLabel) *DropLabelsExpr {
+ return &DropLabelsExpr{dropLabels: dropLabels}
+}
+
+func (e *DropLabelsExpr) Shardable() bool { return true }
+
+func (e *DropLabelsExpr) Stage() (log.Stage, error) {
+ return log.NewDropLabels(e.dropLabels), nil
+}
+func (e *DropLabelsExpr) String() string {
+ var sb strings.Builder
+
+ sb.WriteString(fmt.Sprintf("%s %s ", OpPipe, OpDrop))
+
+ for i, dropLabel := range e.dropLabels {
+ if dropLabel.Matcher != nil {
+ sb.WriteString(dropLabel.Matcher.String())
+ if i+1 != len(e.dropLabels) {
+ sb.WriteString(",")
+ }
+ }
+ if dropLabel.Name != "" {
+ sb.WriteString(dropLabel.Name)
+ if i+1 != len(e.dropLabels) {
+ sb.WriteString(",")
+ }
+ }
+ }
+ str := sb.String()
+ return str
+}
+func (e *DropLabelsExpr) Walk(f WalkFn) { f(e) }
+
func (e *LineFmtExpr) Shardable() bool { return true }
func (e *LineFmtExpr) Walk(f WalkFn) { f(e) }
@@ -460,7 +498,6 @@ func (e *LineFmtExpr) String() string {
type LabelFmtExpr struct {
Formats []log.LabelFmt
-
implicit
}
@@ -480,7 +517,9 @@ func (e *LabelFmtExpr) Stage() (log.Stage, error) {
func (e *LabelFmtExpr) String() string {
var sb strings.Builder
+
sb.WriteString(fmt.Sprintf("%s %s ", OpPipe, OpFmtLabel))
+
for i, f := range e.Formats {
sb.WriteString(f.Name)
sb.WriteString("=")
@@ -725,6 +764,9 @@ const (
// function filters
OpFilterIP = "ip"
+
+ // drop labels
+ OpDrop = "drop"
)
func IsComparisonOperator(op string) bool {
diff --git a/pkg/logql/syntax/expr.y b/pkg/logql/syntax/expr.y
index 4f88935a69865..65a6c44d509b2 100644
--- a/pkg/logql/syntax/expr.y
+++ b/pkg/logql/syntax/expr.y
@@ -59,6 +59,9 @@ import (
UnwrapExpr *UnwrapExpr
DecolorizeExpr *DecolorizeExpr
OffsetExpr *OffsetExpr
+ DropLabel log.DropLabel
+ DropLabels []log.DropLabel
+ DropLabelsExpr *DropLabelsExpr
}
%start root
@@ -98,6 +101,9 @@ import (
%type <LineFilter> lineFilter
%type <LineFormatExpr> lineFormatExpr
%type <DecolorizeExpr> decolorizeExpr
+%type <DropLabelsExpr> dropLabelsExpr
+%type <DropLabels> dropLabels
+%type <DropLabel> dropLabel
%type <LabelFormatExpr> labelFormatExpr
%type <LabelFormat> labelFormat
%type <LabelsFormat> labelsFormat
@@ -117,7 +123,7 @@ import (
BYTES_OVER_TIME BYTES_RATE BOOL JSON REGEXP LOGFMT PIPE LINE_FMT LABEL_FMT UNWRAP AVG_OVER_TIME SUM_OVER_TIME MIN_OVER_TIME
MAX_OVER_TIME STDVAR_OVER_TIME STDDEV_OVER_TIME QUANTILE_OVER_TIME BYTES_CONV DURATION_CONV DURATION_SECONDS_CONV
FIRST_OVER_TIME LAST_OVER_TIME ABSENT_OVER_TIME VECTOR LABEL_REPLACE UNPACK OFFSET PATTERN IP ON IGNORING GROUP_LEFT GROUP_RIGHT
- DECOLORIZE
+ DECOLORIZE DROP
// Operators are listed with increasing precedence.
%left <binOp> OR
@@ -254,6 +260,7 @@ pipelineStage:
| PIPE lineFormatExpr { $$ = $2 }
| PIPE decolorizeExpr { $$ = $2 }
| PIPE labelFormatExpr { $$ = $2 }
+ | PIPE dropLabelsExpr { $$ = $2 }
;
filterOp:
@@ -296,11 +303,12 @@ labelsFormat:
| labelsFormat COMMA error
;
-labelFormatExpr: LABEL_FMT labelsFormat { $$ = newLabelFmtExpr($2) };
+labelFormatExpr:
+ LABEL_FMT labelsFormat { $$ = newLabelFmtExpr($2) };
labelFilter:
matcher { $$ = log.NewStringLabelFilter($1) }
- | ipLabelFilter { $$ = $1 }
+ | ipLabelFilter { $$ = $1 }
| unitFilter { $$ = $1 }
| numberFilter { $$ = $1 }
| OPEN_PARENTHESIS labelFilter CLOSE_PARENTHESIS { $$ = $2 }
@@ -358,6 +366,17 @@ numberFilter:
| IDENTIFIER CMP_EQ NUMBER { $$ = log.NewNumericLabelFilter(log.LabelFilterEqual, $1, mustNewFloat($3))}
;
+dropLabel:
+ IDENTIFIER { $$ = log.NewDropLabel(nil, $1) }
+ | matcher { $$ = log.NewDropLabel($1, "") }
+
+dropLabels:
+ dropLabel { $$ = []log.DropLabel{$1}}
+ | dropLabels COMMA dropLabel { $$ = append($1, $3) }
+ ;
+
+dropLabelsExpr: DROP dropLabels { $$ = newDropLabelsExpr($2) }
+
// Operator precedence only works if each of these is listed separately.
binOpExpr:
expr OR binOpModifier expr { $$ = mustNewBinOpExpr("or", $3, $1, $4) }
diff --git a/pkg/logql/syntax/expr.y.go b/pkg/logql/syntax/expr.y.go
index 529e4f5a9cda6..ede5c71cec941 100644
--- a/pkg/logql/syntax/expr.y.go
+++ b/pkg/logql/syntax/expr.y.go
@@ -6,7 +6,6 @@ package syntax
import __yyfmt__ "fmt"
//line pkg/logql/syntax/expr.y:2
-
import (
"github.com/grafana/loki/pkg/logql/log"
"github.com/prometheus/prometheus/model/labels"
@@ -65,6 +64,9 @@ type exprSymType struct {
UnwrapExpr *UnwrapExpr
DecolorizeExpr *DecolorizeExpr
OffsetExpr *OffsetExpr
+ DropLabel log.DropLabel
+ DropLabels []log.DropLabel
+ DropLabelsExpr *DropLabelsExpr
}
const BYTES = 57346
@@ -138,21 +140,22 @@ const IGNORING = 57413
const GROUP_LEFT = 57414
const GROUP_RIGHT = 57415
const DECOLORIZE = 57416
-const OR = 57417
-const AND = 57418
-const UNLESS = 57419
-const CMP_EQ = 57420
-const NEQ = 57421
-const LT = 57422
-const LTE = 57423
-const GT = 57424
-const GTE = 57425
-const ADD = 57426
-const SUB = 57427
-const MUL = 57428
-const DIV = 57429
-const MOD = 57430
-const POW = 57431
+const DROP = 57417
+const OR = 57418
+const AND = 57419
+const UNLESS = 57420
+const CMP_EQ = 57421
+const NEQ = 57422
+const LT = 57423
+const LTE = 57424
+const GT = 57425
+const GTE = 57426
+const ADD = 57427
+const SUB = 57428
+const MUL = 57429
+const DIV = 57430
+const MOD = 57431
+const POW = 57432
var exprToknames = [...]string{
"$end",
@@ -229,6 +232,7 @@ var exprToknames = [...]string{
"GROUP_LEFT",
"GROUP_RIGHT",
"DECOLORIZE",
+ "DROP",
"OR",
"AND",
"UNLESS",
@@ -252,7 +256,7 @@ const exprEofCode = 1
const exprErrCode = 2
const exprInitialStackSize = 16
-//line pkg/logql/syntax/expr.y:509
+//line pkg/logql/syntax/expr.y:528
//line yacctab:1
var exprExca = [...]int8{
@@ -263,112 +267,113 @@ var exprExca = [...]int8{
const exprPrivate = 57344
-const exprLast = 551
+const exprLast = 561
var exprAct = [...]int16{
- 258, 204, 82, 4, 185, 64, 173, 5, 178, 213,
- 73, 120, 56, 63, 261, 143, 75, 2, 51, 52,
- 53, 54, 55, 56, 266, 78, 48, 49, 50, 57,
- 58, 61, 62, 59, 60, 51, 52, 53, 54, 55,
- 56, 49, 50, 57, 58, 61, 62, 59, 60, 51,
- 52, 53, 54, 55, 56, 57, 58, 61, 62, 59,
- 60, 51, 52, 53, 54, 55, 56, 157, 158, 107,
- 187, 141, 142, 111, 53, 54, 55, 56, 330, 139,
- 141, 142, 71, 155, 156, 147, 131, 264, 145, 69,
- 70, 152, 71, 263, 67, 330, 264, 92, 261, 69,
- 70, 71, 312, 348, 83, 84, 154, 350, 69, 70,
- 159, 160, 161, 162, 163, 164, 165, 166, 167, 168,
- 169, 170, 171, 172, 345, 206, 304, 262, 338, 304,
- 203, 182, 337, 128, 206, 71, 193, 188, 191, 192,
- 189, 190, 69, 70, 333, 267, 140, 72, 262, 133,
- 195, 124, 327, 211, 207, 335, 314, 72, 200, 205,
- 108, 216, 208, 311, 263, 263, 72, 263, 206, 200,
- 275, 128, 115, 117, 116, 321, 125, 127, 266, 200,
- 296, 224, 225, 226, 305, 175, 263, 275, 275, 124,
- 71, 270, 320, 319, 118, 295, 119, 69, 70, 273,
- 72, 201, 126, 203, 256, 259, 128, 265, 71, 268,
- 145, 107, 271, 111, 272, 69, 70, 260, 257, 71,
- 175, 269, 128, 206, 124, 229, 69, 70, 219, 275,
- 279, 281, 284, 286, 318, 289, 287, 307, 308, 309,
- 124, 206, 174, 261, 209, 81, 128, 83, 84, 71,
- 215, 144, 206, 275, 135, 72, 69, 70, 277, 13,
- 297, 215, 299, 301, 124, 303, 107, 146, 344, 285,
- 302, 313, 298, 72, 134, 107, 176, 174, 315, 128,
- 283, 294, 66, 215, 72, 115, 117, 116, 293, 125,
- 127, 275, 128, 175, 215, 317, 276, 124, 223, 324,
- 325, 215, 282, 215, 107, 326, 175, 118, 13, 119,
- 124, 328, 329, 280, 72, 126, 146, 334, 222, 274,
- 217, 238, 214, 197, 239, 237, 16, 221, 220, 194,
- 340, 151, 341, 342, 13, 234, 150, 196, 235, 233,
- 230, 149, 6, 88, 346, 87, 21, 22, 23, 36,
- 45, 46, 37, 39, 40, 38, 41, 42, 43, 44,
- 24, 25, 176, 174, 80, 227, 218, 210, 202, 231,
- 26, 27, 28, 29, 30, 31, 32, 137, 79, 228,
- 33, 34, 35, 47, 19, 212, 236, 153, 343, 77,
- 332, 136, 253, 13, 138, 254, 252, 349, 331, 310,
- 232, 6, 300, 17, 18, 21, 22, 23, 36, 45,
- 46, 37, 39, 40, 38, 41, 42, 43, 44, 24,
- 25, 250, 86, 247, 251, 249, 248, 246, 85, 26,
- 27, 28, 29, 30, 31, 32, 291, 292, 347, 33,
- 34, 35, 47, 19, 148, 244, 336, 241, 245, 243,
- 242, 240, 13, 89, 3, 323, 322, 288, 278, 255,
- 6, 74, 17, 18, 21, 22, 23, 36, 45, 46,
+ 265, 210, 82, 4, 121, 64, 175, 190, 187, 219,
+ 73, 75, 2, 63, 180, 5, 145, 56, 78, 48,
+ 49, 50, 57, 58, 61, 62, 59, 60, 51, 52,
+ 53, 54, 55, 56, 49, 50, 57, 58, 61, 62,
+ 59, 60, 51, 52, 53, 54, 55, 56, 57, 58,
+ 61, 62, 59, 60, 51, 52, 53, 54, 55, 56,
+ 51, 52, 53, 54, 55, 56, 159, 160, 268, 107,
+ 193, 143, 144, 111, 53, 54, 55, 56, 273, 141,
+ 143, 144, 271, 67, 130, 149, 245, 71, 203, 246,
+ 244, 154, 71, 270, 69, 70, 147, 320, 177, 69,
+ 70, 156, 125, 157, 158, 161, 162, 163, 164, 165,
+ 166, 167, 168, 169, 170, 171, 172, 173, 174, 133,
+ 212, 71, 338, 92, 81, 212, 83, 84, 69, 70,
+ 338, 209, 184, 312, 192, 312, 71, 199, 194, 197,
+ 198, 195, 196, 69, 70, 201, 274, 142, 341, 108,
+ 268, 243, 282, 72, 212, 217, 176, 329, 72, 269,
+ 71, 211, 269, 222, 213, 214, 358, 69, 70, 212,
+ 282, 270, 353, 270, 268, 328, 241, 319, 202, 242,
+ 240, 282, 135, 230, 231, 232, 327, 72, 71, 130,
+ 130, 83, 84, 66, 282, 69, 70, 270, 130, 326,
+ 270, 282, 72, 346, 177, 345, 284, 125, 125, 271,
+ 263, 266, 177, 272, 71, 275, 125, 107, 278, 111,
+ 279, 69, 70, 267, 147, 264, 72, 276, 116, 118,
+ 117, 206, 126, 128, 273, 343, 286, 288, 291, 293,
+ 89, 239, 322, 192, 303, 296, 300, 212, 294, 221,
+ 119, 209, 120, 304, 72, 221, 71, 130, 127, 129,
+ 280, 178, 176, 69, 70, 221, 335, 305, 292, 307,
+ 309, 177, 311, 107, 290, 125, 235, 310, 321, 306,
+ 72, 225, 107, 282, 289, 323, 221, 206, 283, 212,
+ 93, 94, 95, 96, 97, 98, 99, 100, 101, 102,
+ 103, 104, 105, 106, 221, 287, 130, 332, 333, 277,
+ 206, 221, 107, 334, 215, 137, 136, 313, 13, 336,
+ 337, 302, 72, 223, 125, 342, 148, 146, 178, 176,
+ 220, 237, 207, 301, 16, 13, 229, 228, 348, 227,
+ 349, 350, 13, 148, 226, 200, 153, 152, 151, 88,
+ 6, 87, 354, 80, 21, 22, 23, 36, 45, 46,
+ 37, 39, 40, 38, 41, 42, 43, 44, 24, 25,
+ 315, 316, 317, 356, 352, 325, 281, 238, 26, 27,
+ 28, 29, 30, 31, 32, 236, 139, 233, 33, 34,
+ 35, 47, 19, 224, 218, 216, 208, 79, 234, 351,
+ 138, 260, 13, 140, 261, 259, 155, 340, 77, 339,
+ 6, 318, 17, 18, 21, 22, 23, 36, 45, 46,
+ 37, 39, 40, 38, 41, 42, 43, 44, 24, 25,
+ 257, 308, 254, 258, 256, 255, 253, 86, 26, 27,
+ 28, 29, 30, 31, 32, 298, 299, 357, 33, 34,
+ 35, 47, 19, 130, 150, 251, 85, 248, 252, 250,
+ 249, 247, 13, 355, 344, 331, 330, 295, 285, 262,
+ 6, 125, 17, 18, 21, 22, 23, 36, 45, 46,
37, 39, 40, 38, 41, 42, 43, 44, 24, 25,
- 290, 199, 198, 186, 121, 197, 196, 183, 26, 27,
- 28, 29, 30, 31, 32, 181, 180, 339, 33, 34,
- 35, 47, 19, 93, 94, 95, 96, 97, 98, 99,
- 100, 101, 102, 103, 104, 105, 106, 316, 179, 79,
- 186, 17, 18, 122, 177, 110, 184, 114, 113, 112,
- 65, 129, 123, 130, 109, 91, 90, 11, 10, 9,
- 132, 20, 12, 15, 8, 306, 14, 7, 76, 68,
+ 205, 204, 116, 118, 117, 203, 126, 128, 26, 27,
+ 28, 29, 30, 31, 32, 202, 3, 185, 33, 34,
+ 35, 47, 19, 74, 119, 297, 120, 183, 188, 122,
+ 182, 347, 127, 129, 324, 191, 181, 79, 188, 123,
+ 179, 110, 17, 18, 186, 114, 189, 115, 113, 112,
+ 65, 131, 124, 132, 109, 91, 90, 11, 10, 9,
+ 134, 20, 12, 15, 8, 314, 14, 7, 76, 68,
1,
}
var exprPact = [...]int16{
- 319, -1000, -49, -1000, -1000, 235, 319, -1000, -1000, -1000,
- -1000, -1000, -1000, 373, 341, 222, -1000, 421, 415, 322,
- 320, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ 327, -1000, -57, -1000, -1000, 146, 327, -1000, -1000, -1000,
+ -1000, -1000, -1000, 392, 330, 101, -1000, 449, 430, 328,
+ 326, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 54, 54,
- 54, 54, 54, 54, 54, 54, 54, 54, 54, 54,
- 54, 54, 54, 235, -1000, 68, 241, -1000, 80, -1000,
- -1000, -1000, -1000, 250, 230, -49, 375, -1000, -1000, 67,
- 244, 437, 318, 313, 308, -1000, -1000, 319, 380, 319,
- 13, -5, -1000, 319, 319, 319, 319, 319, 319, 319,
- 319, 319, 319, 319, 319, 319, 319, -1000, -1000, -1000,
- -1000, 287, -1000, -1000, -1000, 513, -1000, 490, -1000, 489,
- -1000, -1000, -1000, -1000, 217, 481, -1000, 515, 58, -1000,
- -1000, -1000, 306, -1000, -1000, -1000, -1000, -1000, 514, 480,
- 479, 476, 475, 177, 349, 194, 293, 220, 348, 378,
- 298, 296, 347, 204, -35, 305, 304, 295, 275, -23,
- -23, -12, -12, -77, -77, -77, -77, -66, -66, -66,
- -66, -66, -66, 287, 217, 217, 217, 346, -1000, 367,
- -1000, -1000, 201, -1000, 321, -1000, 357, 331, 317, 443,
- 441, 419, 417, 388, 453, -1000, -1000, -1000, -1000, -1000,
- -1000, 79, 293, 176, 118, 87, 128, 121, 167, 79,
- 319, 175, 300, 272, -1000, -1000, 234, -1000, 452, -1000,
- 289, 278, 256, 245, 274, 287, 166, 513, 451, -1000,
- 478, 431, 265, -1000, -1000, -1000, 258, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 146, -1000, 174, 448, -1000, 113, -1000,
+ -1000, -1000, -1000, 292, 291, -57, 384, -1000, -1000, 67,
+ 320, 447, 325, 324, 323, -1000, -1000, 327, 399, 327,
+ 33, -6, -1000, 327, 327, 327, 327, 327, 327, 327,
+ 327, 327, 327, 327, 327, 327, 327, -1000, -1000, -1000,
+ -1000, 185, -1000, -1000, -1000, -1000, 521, -1000, 514, -1000,
+ 511, -1000, -1000, -1000, -1000, 301, 501, -1000, 523, 520,
+ 58, -1000, -1000, -1000, 322, -1000, -1000, -1000, -1000, -1000,
+ 522, 499, 489, 485, 484, 308, 377, 242, 303, 290,
+ 376, 387, 306, 299, 374, 257, -43, 321, 316, 314,
+ 313, -31, -31, -13, -13, -73, -73, -73, -73, -25,
+ -25, -25, -25, -25, -25, 185, 301, 301, 301, 368,
+ -1000, 386, -1000, -1000, 252, -1000, 366, -1000, 319, 358,
+ -1000, 67, -1000, 172, 82, 453, 451, 428, 426, 397,
+ 463, -1000, -1000, -1000, -1000, -1000, -1000, 166, 303, 107,
+ 150, 200, 184, 122, 285, 166, 327, 236, 357, 264,
+ -1000, -1000, 182, -1000, 462, -1000, 281, 260, 250, 244,
+ 193, 185, 79, 521, 461, -1000, 513, 440, 520, 310,
+ -1000, -1000, -1000, 298, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
- -1000, -1000, -1000, -1000, -1000, 171, -1000, 156, 205, 46,
- 205, 394, -53, 217, -53, 117, 179, 390, 139, 78,
- -1000, -1000, 132, -1000, 319, 512, -1000, -1000, 276, 210,
- -1000, 169, -1000, -1000, 168, -1000, 151, -1000, -1000, -1000,
- -1000, -1000, -1000, 450, 449, -1000, 79, 46, 205, 46,
- -1000, -1000, 287, -1000, -53, -1000, 129, -1000, -1000, -1000,
- 31, 389, 381, 120, 79, 131, -1000, 440, -1000, -1000,
- -1000, -1000, 108, 104, -1000, 46, -1000, 492, 48, 46,
- -26, -53, -53, 379, -1000, -1000, 249, -1000, -1000, 100,
- 46, -1000, -1000, -53, 432, -1000, -1000, 84, 391, 83,
- -1000,
+ -1000, -1000, 220, -1000, 229, 78, 46, 78, 423, 1,
+ 301, 1, 126, 312, 402, 153, 73, -1000, -1000, 218,
+ -1000, 327, 519, -1000, -1000, 356, 175, -1000, 162, -1000,
+ -1000, 151, -1000, 133, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, 460, 459, -1000, 166, 46, 78, 46, -1000, -1000,
+ 185, -1000, 1, -1000, 243, -1000, -1000, -1000, 83, 400,
+ 398, 124, 166, 211, -1000, 458, -1000, -1000, -1000, -1000,
+ 181, 179, -1000, 46, -1000, 516, 75, 46, 28, 1,
+ 1, 390, -1000, -1000, 355, -1000, -1000, 148, 46, -1000,
+ -1000, 1, 457, -1000, -1000, 354, 441, 142, -1000,
}
var exprPgo = [...]int16{
- 0, 550, 16, 549, 2, 9, 454, 3, 15, 11,
- 548, 547, 546, 545, 7, 544, 543, 542, 541, 540,
- 539, 538, 537, 453, 536, 535, 534, 13, 5, 533,
- 532, 531, 6, 530, 94, 529, 528, 527, 4, 526,
- 525, 8, 524, 1, 523, 484, 0,
+ 0, 560, 11, 559, 2, 9, 506, 3, 16, 4,
+ 558, 557, 556, 555, 15, 554, 553, 552, 551, 550,
+ 549, 548, 547, 240, 546, 545, 544, 13, 5, 543,
+ 542, 541, 6, 540, 83, 539, 538, 537, 536, 7,
+ 535, 8, 534, 531, 14, 530, 1, 529, 519, 0,
}
var exprR1 = [...]int8{
@@ -376,23 +381,23 @@ var exprR1 = [...]int8{
7, 6, 6, 6, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
- 43, 43, 43, 13, 13, 13, 11, 11, 11, 11,
+ 46, 46, 46, 13, 13, 13, 11, 11, 11, 11,
15, 15, 15, 15, 15, 15, 22, 3, 3, 3,
3, 14, 14, 14, 10, 10, 9, 9, 9, 9,
- 27, 27, 28, 28, 28, 28, 28, 28, 28, 19,
- 34, 34, 33, 33, 26, 26, 26, 26, 26, 40,
- 35, 36, 38, 38, 39, 39, 39, 37, 32, 32,
- 32, 32, 32, 32, 32, 32, 32, 41, 41, 42,
- 42, 45, 45, 44, 44, 31, 31, 31, 31, 31,
- 31, 31, 29, 29, 29, 29, 29, 29, 29, 30,
- 30, 30, 30, 30, 30, 30, 20, 20, 20, 20,
- 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
- 20, 24, 24, 25, 25, 25, 25, 23, 23, 23,
- 23, 23, 23, 23, 23, 21, 21, 21, 17, 18,
- 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
- 16, 12, 12, 12, 12, 12, 12, 12, 12, 12,
- 12, 12, 12, 12, 12, 12, 46, 5, 5, 4,
- 4, 4, 4,
+ 27, 27, 28, 28, 28, 28, 28, 28, 28, 28,
+ 19, 34, 34, 33, 33, 26, 26, 26, 26, 26,
+ 43, 35, 36, 41, 41, 42, 42, 42, 40, 32,
+ 32, 32, 32, 32, 32, 32, 32, 32, 44, 44,
+ 45, 45, 48, 48, 47, 47, 31, 31, 31, 31,
+ 31, 31, 31, 29, 29, 29, 29, 29, 29, 29,
+ 30, 30, 30, 30, 30, 30, 30, 39, 39, 38,
+ 38, 37, 20, 20, 20, 20, 20, 20, 20, 20,
+ 20, 20, 20, 20, 20, 20, 20, 24, 24, 25,
+ 25, 25, 25, 23, 23, 23, 23, 23, 23, 23,
+ 23, 21, 21, 21, 17, 18, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 49, 5, 5, 4, 4, 4, 4,
}
var exprR2 = [...]int8{
@@ -403,98 +408,98 @@ var exprR2 = [...]int8{
3, 6, 3, 1, 1, 1, 4, 6, 5, 7,
4, 5, 5, 6, 7, 7, 12, 1, 1, 1,
1, 3, 3, 2, 1, 3, 3, 3, 3, 3,
- 1, 2, 1, 2, 2, 2, 2, 2, 2, 1,
- 2, 5, 1, 2, 1, 1, 2, 1, 2, 2,
- 2, 1, 3, 3, 1, 3, 3, 2, 1, 1,
- 1, 1, 3, 2, 3, 3, 3, 3, 1, 1,
- 3, 6, 6, 1, 1, 3, 3, 3, 3, 3,
+ 1, 2, 1, 2, 2, 2, 2, 2, 2, 2,
+ 1, 2, 5, 1, 2, 1, 1, 2, 1, 2,
+ 2, 2, 1, 3, 3, 1, 3, 3, 2, 1,
+ 1, 1, 1, 3, 2, 3, 3, 3, 3, 1,
+ 1, 3, 6, 6, 1, 1, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
- 3, 3, 3, 3, 3, 3, 4, 4, 4, 4,
- 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
- 4, 0, 1, 5, 4, 5, 4, 1, 1, 2,
- 4, 5, 2, 4, 5, 1, 2, 2, 4, 1,
+ 3, 3, 3, 3, 3, 3, 3, 1, 1, 1,
+ 3, 2, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 0, 1, 5,
+ 4, 5, 4, 1, 1, 2, 4, 5, 2, 4,
+ 5, 1, 2, 2, 4, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 2, 1, 3, 4,
- 4, 3, 3,
+ 1, 1, 2, 1, 3, 4, 4, 3, 3,
}
var exprChk = [...]int16{
-1000, -1, -2, -6, -7, -14, 23, -11, -15, -20,
- -21, -22, -17, 15, -12, -16, 7, 84, 85, 65,
+ -21, -22, -17, 15, -12, -16, 7, 85, 86, 65,
-18, 27, 28, 29, 41, 42, 51, 52, 53, 54,
55, 56, 57, 61, 62, 63, 30, 33, 36, 34,
- 35, 37, 38, 39, 40, 31, 32, 64, 75, 76,
- 77, 84, 85, 86, 87, 88, 89, 78, 79, 82,
- 83, 80, 81, -27, -28, -33, 47, -34, -3, 21,
- 22, 14, 79, -7, -6, -2, -10, 16, -9, 5,
+ 35, 37, 38, 39, 40, 31, 32, 64, 76, 77,
+ 78, 85, 86, 87, 88, 89, 90, 79, 80, 83,
+ 84, 81, 82, -27, -28, -33, 47, -34, -3, 21,
+ 22, 14, 80, -7, -6, -2, -10, 16, -9, 5,
23, 23, -4, 25, 26, 7, 7, 23, 23, -23,
-24, -25, 43, -23, -23, -23, -23, -23, -23, -23,
-23, -23, -23, -23, -23, -23, -23, -28, -34, -26,
- -40, -32, -35, -36, -37, 44, 46, 45, 66, 68,
- -9, -45, -44, -30, 23, 48, 74, 49, 5, -31,
- -29, 6, -19, 69, 24, 24, 16, 2, 19, 12,
- 79, 13, 14, -8, 7, -14, 23, -7, 7, 23,
- 23, 23, -7, 7, -2, 70, 71, 72, 73, -2,
- -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
- -2, -2, -2, -32, 76, 19, 75, -42, -41, 5,
- 6, 6, -32, 6, -39, -38, 5, 12, 79, 82,
- 83, 80, 81, 78, 23, -9, 6, 6, 6, 6,
- 2, 24, 19, 9, -43, -27, 47, -14, -8, 24,
- 19, -7, 7, -5, 24, 5, -5, 24, 19, 24,
- 23, 23, 23, 23, -32, -32, -32, 19, 12, 24,
- 19, 12, 69, 8, 4, 7, 69, 8, 4, 7,
+ -43, -32, -35, -36, -40, -37, 44, 46, 45, 66,
+ 68, -9, -48, -47, -30, 23, 48, 74, 49, 75,
+ 5, -31, -29, 6, -19, 69, 24, 24, 16, 2,
+ 19, 12, 80, 13, 14, -8, 7, -14, 23, -7,
+ 7, 23, 23, 23, -7, 7, -2, 70, 71, 72,
+ 73, -2, -2, -2, -2, -2, -2, -2, -2, -2,
+ -2, -2, -2, -2, -2, -32, 77, 19, 76, -45,
+ -44, 5, 6, 6, -32, 6, -42, -41, 5, -38,
+ -39, 5, -9, 12, 80, 83, 84, 81, 82, 79,
+ 23, -9, 6, 6, 6, 6, 2, 24, 19, 9,
+ -46, -27, 47, -14, -8, 24, 19, -7, 7, -5,
+ 24, 5, -5, 24, 19, 24, 23, 23, 23, 23,
+ -32, -32, -32, 19, 12, 24, 19, 12, 19, 69,
+ 8, 4, 7, 69, 8, 4, 7, 8, 4, 7,
8, 4, 7, 8, 4, 7, 8, 4, 7, 8,
- 4, 7, 8, 4, 7, 6, -4, -8, -46, -43,
- -27, 67, 9, 47, 9, -43, 50, 24, -43, -27,
- 24, -4, -7, 24, 19, 19, 24, 24, 6, -5,
- 24, -5, 24, 24, -5, 24, -5, -41, 6, -38,
- 2, 5, 6, 23, 23, 24, 24, -43, -27, -43,
- 8, -46, -32, -46, 9, 5, -13, 58, 59, 60,
- 9, 24, 24, -43, 24, -7, 5, 19, 24, 24,
- 24, 24, 6, 6, -4, -43, -46, 23, -46, -43,
- 47, 9, 9, 24, -4, 24, 6, 24, 24, 5,
- -43, -46, -46, 9, 19, 24, -46, 6, 19, 6,
- 24,
+ 4, 7, 6, -4, -8, -49, -46, -27, 67, 9,
+ 47, 9, -46, 50, 24, -46, -27, 24, -4, -7,
+ 24, 19, 19, 24, 24, 6, -5, 24, -5, 24,
+ 24, -5, 24, -5, -44, 6, -41, 2, 5, 6,
+ -39, 23, 23, 24, 24, -46, -27, -46, 8, -49,
+ -32, -49, 9, 5, -13, 58, 59, 60, 9, 24,
+ 24, -46, 24, -7, 5, 19, 24, 24, 24, 24,
+ 6, 6, -4, -46, -49, 23, -49, -46, 47, 9,
+ 9, 24, -4, 24, 6, 24, 24, 5, -46, -49,
+ -49, 9, 19, 24, -49, 6, 19, 6, 24,
}
var exprDef = [...]int16{
0, -2, 1, 2, 3, 11, 0, 4, 5, 6,
- 7, 8, 9, 0, 0, 0, 165, 0, 0, 0,
- 0, 181, 182, 183, 184, 185, 186, 187, 188, 189,
- 190, 191, 192, 193, 194, 195, 170, 171, 172, 173,
- 174, 175, 176, 177, 178, 179, 180, 169, 151, 151,
- 151, 151, 151, 151, 151, 151, 151, 151, 151, 151,
- 151, 151, 151, 12, 70, 72, 0, 82, 0, 57,
+ 7, 8, 9, 0, 0, 0, 171, 0, 0, 0,
+ 0, 187, 188, 189, 190, 191, 192, 193, 194, 195,
+ 196, 197, 198, 199, 200, 201, 176, 177, 178, 179,
+ 180, 181, 182, 183, 184, 185, 186, 175, 157, 157,
+ 157, 157, 157, 157, 157, 157, 157, 157, 157, 157,
+ 157, 157, 157, 12, 70, 72, 0, 83, 0, 57,
58, 59, 60, 3, 2, 0, 0, 63, 64, 0,
- 0, 0, 0, 0, 0, 166, 167, 0, 0, 0,
- 157, 158, 152, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 71, 83, 73,
- 74, 75, 76, 77, 78, 84, 85, 0, 87, 0,
- 98, 99, 100, 101, 0, 0, 91, 0, 0, 113,
- 114, 80, 0, 79, 10, 13, 61, 62, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 3, 165, 0,
- 0, 0, 3, 0, 136, 0, 0, 159, 162, 137,
- 138, 139, 140, 141, 142, 143, 144, 145, 146, 147,
- 148, 149, 150, 103, 0, 0, 0, 89, 109, 108,
- 86, 88, 0, 90, 97, 94, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 65, 66, 67, 68, 69,
- 39, 46, 0, 14, 0, 0, 0, 0, 0, 50,
- 0, 3, 165, 0, 201, 197, 0, 202, 0, 168,
- 0, 0, 0, 0, 104, 105, 106, 0, 0, 102,
- 0, 0, 0, 120, 127, 134, 0, 119, 126, 133,
- 115, 122, 129, 116, 123, 130, 117, 124, 131, 118,
- 125, 132, 121, 128, 135, 0, 48, 0, 15, 18,
- 34, 0, 22, 0, 26, 0, 0, 0, 0, 0,
- 38, 52, 3, 51, 0, 0, 199, 200, 0, 0,
- 154, 0, 156, 160, 0, 163, 0, 110, 107, 95,
- 96, 92, 93, 0, 0, 81, 47, 19, 35, 36,
- 196, 23, 42, 27, 30, 40, 0, 43, 44, 45,
- 16, 0, 0, 0, 53, 3, 198, 0, 153, 155,
- 161, 164, 0, 0, 49, 37, 31, 0, 17, 20,
- 0, 24, 28, 0, 54, 55, 0, 111, 112, 0,
- 21, 25, 29, 32, 0, 41, 33, 0, 0, 0,
- 56,
+ 0, 0, 0, 0, 0, 172, 173, 0, 0, 0,
+ 163, 164, 158, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 71, 84, 73,
+ 74, 75, 76, 77, 78, 79, 85, 86, 0, 88,
+ 0, 99, 100, 101, 102, 0, 0, 92, 0, 0,
+ 0, 114, 115, 81, 0, 80, 10, 13, 61, 62,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 3,
+ 171, 0, 0, 0, 3, 0, 142, 0, 0, 165,
+ 168, 143, 144, 145, 146, 147, 148, 149, 150, 151,
+ 152, 153, 154, 155, 156, 104, 0, 0, 0, 90,
+ 110, 109, 87, 89, 0, 91, 98, 95, 0, 141,
+ 139, 137, 138, 0, 0, 0, 0, 0, 0, 0,
+ 0, 65, 66, 67, 68, 69, 39, 46, 0, 14,
+ 0, 0, 0, 0, 0, 50, 0, 3, 171, 0,
+ 207, 203, 0, 208, 0, 174, 0, 0, 0, 0,
+ 105, 106, 107, 0, 0, 103, 0, 0, 0, 0,
+ 121, 128, 135, 0, 120, 127, 134, 116, 123, 130,
+ 117, 124, 131, 118, 125, 132, 119, 126, 133, 122,
+ 129, 136, 0, 48, 0, 15, 18, 34, 0, 22,
+ 0, 26, 0, 0, 0, 0, 0, 38, 52, 3,
+ 51, 0, 0, 205, 206, 0, 0, 160, 0, 162,
+ 166, 0, 169, 0, 111, 108, 96, 97, 93, 94,
+ 140, 0, 0, 82, 47, 19, 35, 36, 202, 23,
+ 42, 27, 30, 40, 0, 43, 44, 45, 16, 0,
+ 0, 0, 53, 3, 204, 0, 159, 161, 167, 170,
+ 0, 0, 49, 37, 31, 0, 17, 20, 0, 24,
+ 28, 0, 54, 55, 0, 112, 113, 0, 21, 25,
+ 29, 32, 0, 41, 33, 0, 0, 0, 56,
}
var exprTok1 = [...]int8{
@@ -510,7 +515,7 @@ var exprTok2 = [...]int8{
52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
- 82, 83, 84, 85, 86, 87, 88, 89,
+ 82, 83, 84, 85, 86, 87, 88, 89, 90,
}
var exprTok3 = [...]int8{
@@ -856,1212 +861,1248 @@ exprdefault:
case 1:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:132
+//line pkg/logql/syntax/expr.y:138
{
exprlex.(*parser).expr = exprDollar[1].Expr
}
case 2:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:135
+//line pkg/logql/syntax/expr.y:141
{
exprVAL.Expr = exprDollar[1].LogExpr
}
case 3:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:136
+//line pkg/logql/syntax/expr.y:142
{
exprVAL.Expr = exprDollar[1].MetricExpr
}
case 4:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:140
+//line pkg/logql/syntax/expr.y:146
{
exprVAL.MetricExpr = exprDollar[1].RangeAggregationExpr
}
case 5:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:141
+//line pkg/logql/syntax/expr.y:147
{
exprVAL.MetricExpr = exprDollar[1].VectorAggregationExpr
}
case 6:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:142
+//line pkg/logql/syntax/expr.y:148
{
exprVAL.MetricExpr = exprDollar[1].BinOpExpr
}
case 7:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:143
+//line pkg/logql/syntax/expr.y:149
{
exprVAL.MetricExpr = exprDollar[1].LiteralExpr
}
case 8:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:144
+//line pkg/logql/syntax/expr.y:150
{
exprVAL.MetricExpr = exprDollar[1].LabelReplaceExpr
}
case 9:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:145
+//line pkg/logql/syntax/expr.y:151
{
exprVAL.MetricExpr = exprDollar[1].VectorExpr
}
case 10:
exprDollar = exprS[exprpt-3 : exprpt+1]
-//line pkg/logql/syntax/expr.y:146
+//line pkg/logql/syntax/expr.y:152
{
exprVAL.MetricExpr = exprDollar[2].MetricExpr
}
case 11:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:150
+//line pkg/logql/syntax/expr.y:156
{
exprVAL.LogExpr = newMatcherExpr(exprDollar[1].Selector)
}
case 12:
exprDollar = exprS[exprpt-2 : exprpt+1]
-//line pkg/logql/syntax/expr.y:151
+//line pkg/logql/syntax/expr.y:157
{
exprVAL.LogExpr = newPipelineExpr(newMatcherExpr(exprDollar[1].Selector), exprDollar[2].PipelineExpr)
}
case 13:
exprDollar = exprS[exprpt-3 : exprpt+1]
-//line pkg/logql/syntax/expr.y:152
+//line pkg/logql/syntax/expr.y:158
{
exprVAL.LogExpr = exprDollar[2].LogExpr
}
case 14:
exprDollar = exprS[exprpt-2 : exprpt+1]
-//line pkg/logql/syntax/expr.y:156
+//line pkg/logql/syntax/expr.y:162
{
exprVAL.LogRangeExpr = newLogRange(newMatcherExpr(exprDollar[1].Selector), exprDollar[2].duration, nil, nil)
}
case 15:
exprDollar = exprS[exprpt-3 : exprpt+1]
-//line pkg/logql/syntax/expr.y:157
+//line pkg/logql/syntax/expr.y:163
{
exprVAL.LogRangeExpr = newLogRange(newMatcherExpr(exprDollar[1].Selector), exprDollar[2].duration, nil, exprDollar[3].OffsetExpr)
}
case 16:
exprDollar = exprS[exprpt-4 : exprpt+1]
-//line pkg/logql/syntax/expr.y:158
+//line pkg/logql/syntax/expr.y:164
{
exprVAL.LogRangeExpr = newLogRange(newMatcherExpr(exprDollar[2].Selector), exprDollar[4].duration, nil, nil)
}
case 17:
exprDollar = exprS[exprpt-5 : exprpt+1]
-//line pkg/logql/syntax/expr.y:159
+//line pkg/logql/syntax/expr.y:165
{
exprVAL.LogRangeExpr = newLogRange(newMatcherExpr(exprDollar[2].Selector), exprDollar[4].duration, nil, exprDollar[5].OffsetExpr)
}
case 18:
exprDollar = exprS[exprpt-3 : exprpt+1]
-//line pkg/logql/syntax/expr.y:160
+//line pkg/logql/syntax/expr.y:166
{
exprVAL.LogRangeExpr = newLogRange(newMatcherExpr(exprDollar[1].Selector), exprDollar[2].duration, exprDollar[3].UnwrapExpr, nil)
}
case 19:
exprDollar = exprS[exprpt-4 : exprpt+1]
-//line pkg/logql/syntax/expr.y:161
+//line pkg/logql/syntax/expr.y:167
{
exprVAL.LogRangeExpr = newLogRange(newMatcherExpr(exprDollar[1].Selector), exprDollar[2].duration, exprDollar[4].UnwrapExpr, exprDollar[3].OffsetExpr)
}
case 20:
exprDollar = exprS[exprpt-5 : exprpt+1]
-//line pkg/logql/syntax/expr.y:162
+//line pkg/logql/syntax/expr.y:168
{
exprVAL.LogRangeExpr = newLogRange(newMatcherExpr(exprDollar[2].Selector), exprDollar[4].duration, exprDollar[5].UnwrapExpr, nil)
}
case 21:
exprDollar = exprS[exprpt-6 : exprpt+1]
-//line pkg/logql/syntax/expr.y:163
+//line pkg/logql/syntax/expr.y:169
{
exprVAL.LogRangeExpr = newLogRange(newMatcherExpr(exprDollar[2].Selector), exprDollar[4].duration, exprDollar[6].UnwrapExpr, exprDollar[5].OffsetExpr)
}
case 22:
exprDollar = exprS[exprpt-3 : exprpt+1]
-//line pkg/logql/syntax/expr.y:164
+//line pkg/logql/syntax/expr.y:170
{
exprVAL.LogRangeExpr = newLogRange(newMatcherExpr(exprDollar[1].Selector), exprDollar[3].duration, exprDollar[2].UnwrapExpr, nil)
}
case 23:
exprDollar = exprS[exprpt-4 : exprpt+1]
-//line pkg/logql/syntax/expr.y:165
+//line pkg/logql/syntax/expr.y:171
{
exprVAL.LogRangeExpr = newLogRange(newMatcherExpr(exprDollar[1].Selector), exprDollar[3].duration, exprDollar[2].UnwrapExpr, exprDollar[4].OffsetExpr)
}
case 24:
exprDollar = exprS[exprpt-5 : exprpt+1]
-//line pkg/logql/syntax/expr.y:166
+//line pkg/logql/syntax/expr.y:172
{
exprVAL.LogRangeExpr = newLogRange(newMatcherExpr(exprDollar[2].Selector), exprDollar[5].duration, exprDollar[3].UnwrapExpr, nil)
}
case 25:
exprDollar = exprS[exprpt-6 : exprpt+1]
-//line pkg/logql/syntax/expr.y:167
+//line pkg/logql/syntax/expr.y:173
{
exprVAL.LogRangeExpr = newLogRange(newMatcherExpr(exprDollar[2].Selector), exprDollar[5].duration, exprDollar[3].UnwrapExpr, exprDollar[6].OffsetExpr)
}
case 26:
exprDollar = exprS[exprpt-3 : exprpt+1]
-//line pkg/logql/syntax/expr.y:168
+//line pkg/logql/syntax/expr.y:174
{
exprVAL.LogRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(exprDollar[1].Selector), exprDollar[2].PipelineExpr), exprDollar[3].duration, nil, nil)
}
case 27:
exprDollar = exprS[exprpt-4 : exprpt+1]
-//line pkg/logql/syntax/expr.y:169
+//line pkg/logql/syntax/expr.y:175
{
exprVAL.LogRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(exprDollar[1].Selector), exprDollar[2].PipelineExpr), exprDollar[3].duration, nil, exprDollar[4].OffsetExpr)
}
case 28:
exprDollar = exprS[exprpt-5 : exprpt+1]
-//line pkg/logql/syntax/expr.y:170
+//line pkg/logql/syntax/expr.y:176
{
exprVAL.LogRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(exprDollar[2].Selector), exprDollar[3].PipelineExpr), exprDollar[5].duration, nil, nil)
}
case 29:
exprDollar = exprS[exprpt-6 : exprpt+1]
-//line pkg/logql/syntax/expr.y:171
+//line pkg/logql/syntax/expr.y:177
{
exprVAL.LogRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(exprDollar[2].Selector), exprDollar[3].PipelineExpr), exprDollar[5].duration, nil, exprDollar[6].OffsetExpr)
}
case 30:
exprDollar = exprS[exprpt-4 : exprpt+1]
-//line pkg/logql/syntax/expr.y:172
+//line pkg/logql/syntax/expr.y:178
{
exprVAL.LogRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(exprDollar[1].Selector), exprDollar[2].PipelineExpr), exprDollar[4].duration, exprDollar[3].UnwrapExpr, nil)
}
case 31:
exprDollar = exprS[exprpt-5 : exprpt+1]
-//line pkg/logql/syntax/expr.y:173
+//line pkg/logql/syntax/expr.y:179
{
exprVAL.LogRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(exprDollar[1].Selector), exprDollar[2].PipelineExpr), exprDollar[4].duration, exprDollar[3].UnwrapExpr, exprDollar[5].OffsetExpr)
}
case 32:
exprDollar = exprS[exprpt-6 : exprpt+1]
-//line pkg/logql/syntax/expr.y:174
+//line pkg/logql/syntax/expr.y:180
{
exprVAL.LogRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(exprDollar[2].Selector), exprDollar[3].PipelineExpr), exprDollar[6].duration, exprDollar[4].UnwrapExpr, nil)
}
case 33:
exprDollar = exprS[exprpt-7 : exprpt+1]
-//line pkg/logql/syntax/expr.y:175
+//line pkg/logql/syntax/expr.y:181
{
exprVAL.LogRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(exprDollar[2].Selector), exprDollar[3].PipelineExpr), exprDollar[6].duration, exprDollar[4].UnwrapExpr, exprDollar[7].OffsetExpr)
}
case 34:
exprDollar = exprS[exprpt-3 : exprpt+1]
-//line pkg/logql/syntax/expr.y:176
+//line pkg/logql/syntax/expr.y:182
{
exprVAL.LogRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(exprDollar[1].Selector), exprDollar[3].PipelineExpr), exprDollar[2].duration, nil, nil)
}
case 35:
exprDollar = exprS[exprpt-4 : exprpt+1]
-//line pkg/logql/syntax/expr.y:177
+//line pkg/logql/syntax/expr.y:183
{
exprVAL.LogRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(exprDollar[1].Selector), exprDollar[4].PipelineExpr), exprDollar[2].duration, nil, exprDollar[3].OffsetExpr)
}
case 36:
exprDollar = exprS[exprpt-4 : exprpt+1]
-//line pkg/logql/syntax/expr.y:178
+//line pkg/logql/syntax/expr.y:184
{
exprVAL.LogRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(exprDollar[1].Selector), exprDollar[3].PipelineExpr), exprDollar[2].duration, exprDollar[4].UnwrapExpr, nil)
}
case 37:
exprDollar = exprS[exprpt-5 : exprpt+1]
-//line pkg/logql/syntax/expr.y:179
+//line pkg/logql/syntax/expr.y:185
{
exprVAL.LogRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(exprDollar[1].Selector), exprDollar[4].PipelineExpr), exprDollar[2].duration, exprDollar[5].UnwrapExpr, exprDollar[3].OffsetExpr)
}
case 38:
exprDollar = exprS[exprpt-3 : exprpt+1]
-//line pkg/logql/syntax/expr.y:180
+//line pkg/logql/syntax/expr.y:186
{
exprVAL.LogRangeExpr = exprDollar[2].LogRangeExpr
}
case 40:
exprDollar = exprS[exprpt-3 : exprpt+1]
-//line pkg/logql/syntax/expr.y:185
+//line pkg/logql/syntax/expr.y:191
{
exprVAL.UnwrapExpr = newUnwrapExpr(exprDollar[3].str, "")
}
case 41:
exprDollar = exprS[exprpt-6 : exprpt+1]
-//line pkg/logql/syntax/expr.y:186
+//line pkg/logql/syntax/expr.y:192
{
exprVAL.UnwrapExpr = newUnwrapExpr(exprDollar[5].str, exprDollar[3].ConvOp)
}
case 42:
exprDollar = exprS[exprpt-3 : exprpt+1]
-//line pkg/logql/syntax/expr.y:187
+//line pkg/logql/syntax/expr.y:193
{
exprVAL.UnwrapExpr = exprDollar[1].UnwrapExpr.addPostFilter(exprDollar[3].LabelFilter)
}
case 43:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:191
+//line pkg/logql/syntax/expr.y:197
{
exprVAL.ConvOp = OpConvBytes
}
case 44:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:192
+//line pkg/logql/syntax/expr.y:198
{
exprVAL.ConvOp = OpConvDuration
}
case 45:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:193
+//line pkg/logql/syntax/expr.y:199
{
exprVAL.ConvOp = OpConvDurationSeconds
}
case 46:
exprDollar = exprS[exprpt-4 : exprpt+1]
-//line pkg/logql/syntax/expr.y:197
+//line pkg/logql/syntax/expr.y:203
{
exprVAL.RangeAggregationExpr = newRangeAggregationExpr(exprDollar[3].LogRangeExpr, exprDollar[1].RangeOp, nil, nil)
}
case 47:
exprDollar = exprS[exprpt-6 : exprpt+1]
-//line pkg/logql/syntax/expr.y:198
+//line pkg/logql/syntax/expr.y:204
{
exprVAL.RangeAggregationExpr = newRangeAggregationExpr(exprDollar[5].LogRangeExpr, exprDollar[1].RangeOp, nil, &exprDollar[3].str)
}
case 48:
exprDollar = exprS[exprpt-5 : exprpt+1]
-//line pkg/logql/syntax/expr.y:199
+//line pkg/logql/syntax/expr.y:205
{
exprVAL.RangeAggregationExpr = newRangeAggregationExpr(exprDollar[3].LogRangeExpr, exprDollar[1].RangeOp, exprDollar[5].Grouping, nil)
}
case 49:
exprDollar = exprS[exprpt-7 : exprpt+1]
-//line pkg/logql/syntax/expr.y:200
+//line pkg/logql/syntax/expr.y:206
{
exprVAL.RangeAggregationExpr = newRangeAggregationExpr(exprDollar[5].LogRangeExpr, exprDollar[1].RangeOp, exprDollar[7].Grouping, &exprDollar[3].str)
}
case 50:
exprDollar = exprS[exprpt-4 : exprpt+1]
-//line pkg/logql/syntax/expr.y:205
+//line pkg/logql/syntax/expr.y:211
{
exprVAL.VectorAggregationExpr = mustNewVectorAggregationExpr(exprDollar[3].MetricExpr, exprDollar[1].VectorOp, nil, nil)
}
case 51:
exprDollar = exprS[exprpt-5 : exprpt+1]
-//line pkg/logql/syntax/expr.y:206
+//line pkg/logql/syntax/expr.y:212
{
exprVAL.VectorAggregationExpr = mustNewVectorAggregationExpr(exprDollar[4].MetricExpr, exprDollar[1].VectorOp, exprDollar[2].Grouping, nil)
}
case 52:
exprDollar = exprS[exprpt-5 : exprpt+1]
-//line pkg/logql/syntax/expr.y:207
+//line pkg/logql/syntax/expr.y:213
{
exprVAL.VectorAggregationExpr = mustNewVectorAggregationExpr(exprDollar[3].MetricExpr, exprDollar[1].VectorOp, exprDollar[5].Grouping, nil)
}
case 53:
exprDollar = exprS[exprpt-6 : exprpt+1]
-//line pkg/logql/syntax/expr.y:209
+//line pkg/logql/syntax/expr.y:215
{
exprVAL.VectorAggregationExpr = mustNewVectorAggregationExpr(exprDollar[5].MetricExpr, exprDollar[1].VectorOp, nil, &exprDollar[3].str)
}
case 54:
exprDollar = exprS[exprpt-7 : exprpt+1]
-//line pkg/logql/syntax/expr.y:210
+//line pkg/logql/syntax/expr.y:216
{
exprVAL.VectorAggregationExpr = mustNewVectorAggregationExpr(exprDollar[5].MetricExpr, exprDollar[1].VectorOp, exprDollar[7].Grouping, &exprDollar[3].str)
}
case 55:
exprDollar = exprS[exprpt-7 : exprpt+1]
-//line pkg/logql/syntax/expr.y:211
+//line pkg/logql/syntax/expr.y:217
{
exprVAL.VectorAggregationExpr = mustNewVectorAggregationExpr(exprDollar[6].MetricExpr, exprDollar[1].VectorOp, exprDollar[2].Grouping, &exprDollar[4].str)
}
case 56:
exprDollar = exprS[exprpt-12 : exprpt+1]
-//line pkg/logql/syntax/expr.y:216
+//line pkg/logql/syntax/expr.y:222
{
exprVAL.LabelReplaceExpr = mustNewLabelReplaceExpr(exprDollar[3].MetricExpr, exprDollar[5].str, exprDollar[7].str, exprDollar[9].str, exprDollar[11].str)
}
case 57:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:220
+//line pkg/logql/syntax/expr.y:226
{
exprVAL.Filter = labels.MatchRegexp
}
case 58:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:221
+//line pkg/logql/syntax/expr.y:227
{
exprVAL.Filter = labels.MatchEqual
}
case 59:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:222
+//line pkg/logql/syntax/expr.y:228
{
exprVAL.Filter = labels.MatchNotRegexp
}
case 60:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:223
+//line pkg/logql/syntax/expr.y:229
{
exprVAL.Filter = labels.MatchNotEqual
}
case 61:
exprDollar = exprS[exprpt-3 : exprpt+1]
-//line pkg/logql/syntax/expr.y:227
+//line pkg/logql/syntax/expr.y:233
{
exprVAL.Selector = exprDollar[2].Matchers
}
case 62:
exprDollar = exprS[exprpt-3 : exprpt+1]
-//line pkg/logql/syntax/expr.y:228
+//line pkg/logql/syntax/expr.y:234
{
exprVAL.Selector = exprDollar[2].Matchers
}
case 63:
exprDollar = exprS[exprpt-2 : exprpt+1]
-//line pkg/logql/syntax/expr.y:229
+//line pkg/logql/syntax/expr.y:235
{
}
case 64:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:233
+//line pkg/logql/syntax/expr.y:239
{
exprVAL.Matchers = []*labels.Matcher{exprDollar[1].Matcher}
}
case 65:
exprDollar = exprS[exprpt-3 : exprpt+1]
-//line pkg/logql/syntax/expr.y:234
+//line pkg/logql/syntax/expr.y:240
{
exprVAL.Matchers = append(exprDollar[1].Matchers, exprDollar[3].Matcher)
}
case 66:
exprDollar = exprS[exprpt-3 : exprpt+1]
-//line pkg/logql/syntax/expr.y:238
+//line pkg/logql/syntax/expr.y:244
{
exprVAL.Matcher = mustNewMatcher(labels.MatchEqual, exprDollar[1].str, exprDollar[3].str)
}
case 67:
exprDollar = exprS[exprpt-3 : exprpt+1]
-//line pkg/logql/syntax/expr.y:239
+//line pkg/logql/syntax/expr.y:245
{
exprVAL.Matcher = mustNewMatcher(labels.MatchNotEqual, exprDollar[1].str, exprDollar[3].str)
}
case 68:
exprDollar = exprS[exprpt-3 : exprpt+1]
-//line pkg/logql/syntax/expr.y:240
+//line pkg/logql/syntax/expr.y:246
{
exprVAL.Matcher = mustNewMatcher(labels.MatchRegexp, exprDollar[1].str, exprDollar[3].str)
}
case 69:
exprDollar = exprS[exprpt-3 : exprpt+1]
-//line pkg/logql/syntax/expr.y:241
+//line pkg/logql/syntax/expr.y:247
{
exprVAL.Matcher = mustNewMatcher(labels.MatchNotRegexp, exprDollar[1].str, exprDollar[3].str)
}
case 70:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:245
+//line pkg/logql/syntax/expr.y:251
{
exprVAL.PipelineExpr = MultiStageExpr{exprDollar[1].PipelineStage}
}
case 71:
exprDollar = exprS[exprpt-2 : exprpt+1]
-//line pkg/logql/syntax/expr.y:246
+//line pkg/logql/syntax/expr.y:252
{
exprVAL.PipelineExpr = append(exprDollar[1].PipelineExpr, exprDollar[2].PipelineStage)
}
case 72:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:250
+//line pkg/logql/syntax/expr.y:256
{
exprVAL.PipelineStage = exprDollar[1].LineFilters
}
case 73:
exprDollar = exprS[exprpt-2 : exprpt+1]
-//line pkg/logql/syntax/expr.y:251
+//line pkg/logql/syntax/expr.y:257
{
exprVAL.PipelineStage = exprDollar[2].LabelParser
}
case 74:
exprDollar = exprS[exprpt-2 : exprpt+1]
-//line pkg/logql/syntax/expr.y:252
+//line pkg/logql/syntax/expr.y:258
{
exprVAL.PipelineStage = exprDollar[2].JSONExpressionParser
}
case 75:
exprDollar = exprS[exprpt-2 : exprpt+1]
-//line pkg/logql/syntax/expr.y:253
+//line pkg/logql/syntax/expr.y:259
{
exprVAL.PipelineStage = &LabelFilterExpr{LabelFilterer: exprDollar[2].LabelFilter}
}
case 76:
exprDollar = exprS[exprpt-2 : exprpt+1]
-//line pkg/logql/syntax/expr.y:254
+//line pkg/logql/syntax/expr.y:260
{
exprVAL.PipelineStage = exprDollar[2].LineFormatExpr
}
case 77:
exprDollar = exprS[exprpt-2 : exprpt+1]
-//line pkg/logql/syntax/expr.y:255
+//line pkg/logql/syntax/expr.y:261
{
exprVAL.PipelineStage = exprDollar[2].DecolorizeExpr
}
case 78:
exprDollar = exprS[exprpt-2 : exprpt+1]
-//line pkg/logql/syntax/expr.y:256
+//line pkg/logql/syntax/expr.y:262
{
exprVAL.PipelineStage = exprDollar[2].LabelFormatExpr
}
case 79:
+ exprDollar = exprS[exprpt-2 : exprpt+1]
+//line pkg/logql/syntax/expr.y:263
+ {
+ exprVAL.PipelineStage = exprDollar[2].DropLabelsExpr
+ }
+ case 80:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:260
+//line pkg/logql/syntax/expr.y:267
{
exprVAL.FilterOp = OpFilterIP
}
- case 80:
+ case 81:
exprDollar = exprS[exprpt-2 : exprpt+1]
-//line pkg/logql/syntax/expr.y:264
+//line pkg/logql/syntax/expr.y:271
{
exprVAL.LineFilter = newLineFilterExpr(exprDollar[1].Filter, "", exprDollar[2].str)
}
- case 81:
+ case 82:
exprDollar = exprS[exprpt-5 : exprpt+1]
-//line pkg/logql/syntax/expr.y:265
+//line pkg/logql/syntax/expr.y:272
{
exprVAL.LineFilter = newLineFilterExpr(exprDollar[1].Filter, exprDollar[2].FilterOp, exprDollar[4].str)
}
- case 82:
+ case 83:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:269
+//line pkg/logql/syntax/expr.y:276
{
exprVAL.LineFilters = exprDollar[1].LineFilter
}
- case 83:
+ case 84:
exprDollar = exprS[exprpt-2 : exprpt+1]
-//line pkg/logql/syntax/expr.y:270
+//line pkg/logql/syntax/expr.y:277
{
exprVAL.LineFilters = newNestedLineFilterExpr(exprDollar[1].LineFilters, exprDollar[2].LineFilter)
}
- case 84:
+ case 85:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:274
+//line pkg/logql/syntax/expr.y:281
{
exprVAL.LabelParser = newLabelParserExpr(OpParserTypeJSON, "")
}
- case 85:
+ case 86:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:275
+//line pkg/logql/syntax/expr.y:282
{
exprVAL.LabelParser = newLabelParserExpr(OpParserTypeLogfmt, "")
}
- case 86:
+ case 87:
exprDollar = exprS[exprpt-2 : exprpt+1]
-//line pkg/logql/syntax/expr.y:276
+//line pkg/logql/syntax/expr.y:283
{
exprVAL.LabelParser = newLabelParserExpr(OpParserTypeRegexp, exprDollar[2].str)
}
- case 87:
+ case 88:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:277
+//line pkg/logql/syntax/expr.y:284
{
exprVAL.LabelParser = newLabelParserExpr(OpParserTypeUnpack, "")
}
- case 88:
+ case 89:
exprDollar = exprS[exprpt-2 : exprpt+1]
-//line pkg/logql/syntax/expr.y:278
+//line pkg/logql/syntax/expr.y:285
{
exprVAL.LabelParser = newLabelParserExpr(OpParserTypePattern, exprDollar[2].str)
}
- case 89:
+ case 90:
exprDollar = exprS[exprpt-2 : exprpt+1]
-//line pkg/logql/syntax/expr.y:282
+//line pkg/logql/syntax/expr.y:289
{
exprVAL.JSONExpressionParser = newJSONExpressionParser(exprDollar[2].JSONExpressionList)
}
- case 90:
+ case 91:
exprDollar = exprS[exprpt-2 : exprpt+1]
-//line pkg/logql/syntax/expr.y:284
+//line pkg/logql/syntax/expr.y:291
{
exprVAL.LineFormatExpr = newLineFmtExpr(exprDollar[2].str)
}
- case 91:
+ case 92:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:286
+//line pkg/logql/syntax/expr.y:293
{
exprVAL.DecolorizeExpr = newDecolorizeExpr()
}
- case 92:
+ case 93:
exprDollar = exprS[exprpt-3 : exprpt+1]
-//line pkg/logql/syntax/expr.y:289
+//line pkg/logql/syntax/expr.y:296
{
exprVAL.LabelFormat = log.NewRenameLabelFmt(exprDollar[1].str, exprDollar[3].str)
}
- case 93:
+ case 94:
exprDollar = exprS[exprpt-3 : exprpt+1]
-//line pkg/logql/syntax/expr.y:290
+//line pkg/logql/syntax/expr.y:297
{
exprVAL.LabelFormat = log.NewTemplateLabelFmt(exprDollar[1].str, exprDollar[3].str)
}
- case 94:
+ case 95:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:294
+//line pkg/logql/syntax/expr.y:301
{
exprVAL.LabelsFormat = []log.LabelFmt{exprDollar[1].LabelFormat}
}
- case 95:
+ case 96:
exprDollar = exprS[exprpt-3 : exprpt+1]
-//line pkg/logql/syntax/expr.y:295
+//line pkg/logql/syntax/expr.y:302
{
exprVAL.LabelsFormat = append(exprDollar[1].LabelsFormat, exprDollar[3].LabelFormat)
}
- case 97:
+ case 98:
exprDollar = exprS[exprpt-2 : exprpt+1]
-//line pkg/logql/syntax/expr.y:299
+//line pkg/logql/syntax/expr.y:307
{
exprVAL.LabelFormatExpr = newLabelFmtExpr(exprDollar[2].LabelsFormat)
}
- case 98:
+ case 99:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:302
+//line pkg/logql/syntax/expr.y:310
{
exprVAL.LabelFilter = log.NewStringLabelFilter(exprDollar[1].Matcher)
}
- case 99:
+ case 100:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:303
+//line pkg/logql/syntax/expr.y:311
{
exprVAL.LabelFilter = exprDollar[1].IPLabelFilter
}
- case 100:
+ case 101:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:304
+//line pkg/logql/syntax/expr.y:312
{
exprVAL.LabelFilter = exprDollar[1].UnitFilter
}
- case 101:
+ case 102:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:305
+//line pkg/logql/syntax/expr.y:313
{
exprVAL.LabelFilter = exprDollar[1].NumberFilter
}
- case 102:
+ case 103:
exprDollar = exprS[exprpt-3 : exprpt+1]
-//line pkg/logql/syntax/expr.y:306
+//line pkg/logql/syntax/expr.y:314
{
exprVAL.LabelFilter = exprDollar[2].LabelFilter
}
- case 103:
+ case 104:
exprDollar = exprS[exprpt-2 : exprpt+1]
-//line pkg/logql/syntax/expr.y:307
+//line pkg/logql/syntax/expr.y:315
{
exprVAL.LabelFilter = log.NewAndLabelFilter(exprDollar[1].LabelFilter, exprDollar[2].LabelFilter)
}
- case 104:
+ case 105:
exprDollar = exprS[exprpt-3 : exprpt+1]
-//line pkg/logql/syntax/expr.y:308
+//line pkg/logql/syntax/expr.y:316
{
exprVAL.LabelFilter = log.NewAndLabelFilter(exprDollar[1].LabelFilter, exprDollar[3].LabelFilter)
}
- case 105:
+ case 106:
exprDollar = exprS[exprpt-3 : exprpt+1]
-//line pkg/logql/syntax/expr.y:309
+//line pkg/logql/syntax/expr.y:317
{
exprVAL.LabelFilter = log.NewAndLabelFilter(exprDollar[1].LabelFilter, exprDollar[3].LabelFilter)
}
- case 106:
+ case 107:
exprDollar = exprS[exprpt-3 : exprpt+1]
-//line pkg/logql/syntax/expr.y:310
+//line pkg/logql/syntax/expr.y:318
{
exprVAL.LabelFilter = log.NewOrLabelFilter(exprDollar[1].LabelFilter, exprDollar[3].LabelFilter)
}
- case 107:
+ case 108:
exprDollar = exprS[exprpt-3 : exprpt+1]
-//line pkg/logql/syntax/expr.y:314
+//line pkg/logql/syntax/expr.y:322
{
exprVAL.JSONExpression = log.NewJSONExpr(exprDollar[1].str, exprDollar[3].str)
}
- case 108:
+ case 109:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:315
+//line pkg/logql/syntax/expr.y:323
{
exprVAL.JSONExpression = log.NewJSONExpr(exprDollar[1].str, exprDollar[1].str)
}
- case 109:
+ case 110:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:318
+//line pkg/logql/syntax/expr.y:326
{
exprVAL.JSONExpressionList = []log.JSONExpression{exprDollar[1].JSONExpression}
}
- case 110:
+ case 111:
exprDollar = exprS[exprpt-3 : exprpt+1]
-//line pkg/logql/syntax/expr.y:319
+//line pkg/logql/syntax/expr.y:327
{
exprVAL.JSONExpressionList = append(exprDollar[1].JSONExpressionList, exprDollar[3].JSONExpression)
}
- case 111:
+ case 112:
exprDollar = exprS[exprpt-6 : exprpt+1]
-//line pkg/logql/syntax/expr.y:323
+//line pkg/logql/syntax/expr.y:331
{
exprVAL.IPLabelFilter = log.NewIPLabelFilter(exprDollar[5].str, exprDollar[1].str, log.LabelFilterEqual)
}
- case 112:
+ case 113:
exprDollar = exprS[exprpt-6 : exprpt+1]
-//line pkg/logql/syntax/expr.y:324
+//line pkg/logql/syntax/expr.y:332
{
exprVAL.IPLabelFilter = log.NewIPLabelFilter(exprDollar[5].str, exprDollar[1].str, log.LabelFilterNotEqual)
}
- case 113:
+ case 114:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:328
+//line pkg/logql/syntax/expr.y:336
{
exprVAL.UnitFilter = exprDollar[1].DurationFilter
}
- case 114:
+ case 115:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:329
+//line pkg/logql/syntax/expr.y:337
{
exprVAL.UnitFilter = exprDollar[1].BytesFilter
}
- case 115:
- exprDollar = exprS[exprpt-3 : exprpt+1]
-//line pkg/logql/syntax/expr.y:332
- {
- exprVAL.DurationFilter = log.NewDurationLabelFilter(log.LabelFilterGreaterThan, exprDollar[1].str, exprDollar[3].duration)
- }
case 116:
exprDollar = exprS[exprpt-3 : exprpt+1]
-//line pkg/logql/syntax/expr.y:333
+//line pkg/logql/syntax/expr.y:340
{
- exprVAL.DurationFilter = log.NewDurationLabelFilter(log.LabelFilterGreaterThanOrEqual, exprDollar[1].str, exprDollar[3].duration)
+ exprVAL.DurationFilter = log.NewDurationLabelFilter(log.LabelFilterGreaterThan, exprDollar[1].str, exprDollar[3].duration)
}
case 117:
exprDollar = exprS[exprpt-3 : exprpt+1]
-//line pkg/logql/syntax/expr.y:334
+//line pkg/logql/syntax/expr.y:341
{
- exprVAL.DurationFilter = log.NewDurationLabelFilter(log.LabelFilterLesserThan, exprDollar[1].str, exprDollar[3].duration)
+ exprVAL.DurationFilter = log.NewDurationLabelFilter(log.LabelFilterGreaterThanOrEqual, exprDollar[1].str, exprDollar[3].duration)
}
case 118:
exprDollar = exprS[exprpt-3 : exprpt+1]
-//line pkg/logql/syntax/expr.y:335
+//line pkg/logql/syntax/expr.y:342
{
- exprVAL.DurationFilter = log.NewDurationLabelFilter(log.LabelFilterLesserThanOrEqual, exprDollar[1].str, exprDollar[3].duration)
+ exprVAL.DurationFilter = log.NewDurationLabelFilter(log.LabelFilterLesserThan, exprDollar[1].str, exprDollar[3].duration)
}
case 119:
exprDollar = exprS[exprpt-3 : exprpt+1]
-//line pkg/logql/syntax/expr.y:336
+//line pkg/logql/syntax/expr.y:343
{
- exprVAL.DurationFilter = log.NewDurationLabelFilter(log.LabelFilterNotEqual, exprDollar[1].str, exprDollar[3].duration)
+ exprVAL.DurationFilter = log.NewDurationLabelFilter(log.LabelFilterLesserThanOrEqual, exprDollar[1].str, exprDollar[3].duration)
}
case 120:
exprDollar = exprS[exprpt-3 : exprpt+1]
-//line pkg/logql/syntax/expr.y:337
+//line pkg/logql/syntax/expr.y:344
{
- exprVAL.DurationFilter = log.NewDurationLabelFilter(log.LabelFilterEqual, exprDollar[1].str, exprDollar[3].duration)
+ exprVAL.DurationFilter = log.NewDurationLabelFilter(log.LabelFilterNotEqual, exprDollar[1].str, exprDollar[3].duration)
}
case 121:
exprDollar = exprS[exprpt-3 : exprpt+1]
-//line pkg/logql/syntax/expr.y:338
+//line pkg/logql/syntax/expr.y:345
{
exprVAL.DurationFilter = log.NewDurationLabelFilter(log.LabelFilterEqual, exprDollar[1].str, exprDollar[3].duration)
}
case 122:
exprDollar = exprS[exprpt-3 : exprpt+1]
-//line pkg/logql/syntax/expr.y:342
+//line pkg/logql/syntax/expr.y:346
{
- exprVAL.BytesFilter = log.NewBytesLabelFilter(log.LabelFilterGreaterThan, exprDollar[1].str, exprDollar[3].bytes)
+ exprVAL.DurationFilter = log.NewDurationLabelFilter(log.LabelFilterEqual, exprDollar[1].str, exprDollar[3].duration)
}
case 123:
exprDollar = exprS[exprpt-3 : exprpt+1]
-//line pkg/logql/syntax/expr.y:343
+//line pkg/logql/syntax/expr.y:350
{
- exprVAL.BytesFilter = log.NewBytesLabelFilter(log.LabelFilterGreaterThanOrEqual, exprDollar[1].str, exprDollar[3].bytes)
+ exprVAL.BytesFilter = log.NewBytesLabelFilter(log.LabelFilterGreaterThan, exprDollar[1].str, exprDollar[3].bytes)
}
case 124:
exprDollar = exprS[exprpt-3 : exprpt+1]
-//line pkg/logql/syntax/expr.y:344
+//line pkg/logql/syntax/expr.y:351
{
- exprVAL.BytesFilter = log.NewBytesLabelFilter(log.LabelFilterLesserThan, exprDollar[1].str, exprDollar[3].bytes)
+ exprVAL.BytesFilter = log.NewBytesLabelFilter(log.LabelFilterGreaterThanOrEqual, exprDollar[1].str, exprDollar[3].bytes)
}
case 125:
exprDollar = exprS[exprpt-3 : exprpt+1]
-//line pkg/logql/syntax/expr.y:345
+//line pkg/logql/syntax/expr.y:352
{
- exprVAL.BytesFilter = log.NewBytesLabelFilter(log.LabelFilterLesserThanOrEqual, exprDollar[1].str, exprDollar[3].bytes)
+ exprVAL.BytesFilter = log.NewBytesLabelFilter(log.LabelFilterLesserThan, exprDollar[1].str, exprDollar[3].bytes)
}
case 126:
exprDollar = exprS[exprpt-3 : exprpt+1]
-//line pkg/logql/syntax/expr.y:346
+//line pkg/logql/syntax/expr.y:353
{
- exprVAL.BytesFilter = log.NewBytesLabelFilter(log.LabelFilterNotEqual, exprDollar[1].str, exprDollar[3].bytes)
+ exprVAL.BytesFilter = log.NewBytesLabelFilter(log.LabelFilterLesserThanOrEqual, exprDollar[1].str, exprDollar[3].bytes)
}
case 127:
exprDollar = exprS[exprpt-3 : exprpt+1]
-//line pkg/logql/syntax/expr.y:347
+//line pkg/logql/syntax/expr.y:354
{
- exprVAL.BytesFilter = log.NewBytesLabelFilter(log.LabelFilterEqual, exprDollar[1].str, exprDollar[3].bytes)
+ exprVAL.BytesFilter = log.NewBytesLabelFilter(log.LabelFilterNotEqual, exprDollar[1].str, exprDollar[3].bytes)
}
case 128:
exprDollar = exprS[exprpt-3 : exprpt+1]
-//line pkg/logql/syntax/expr.y:348
+//line pkg/logql/syntax/expr.y:355
{
exprVAL.BytesFilter = log.NewBytesLabelFilter(log.LabelFilterEqual, exprDollar[1].str, exprDollar[3].bytes)
}
case 129:
exprDollar = exprS[exprpt-3 : exprpt+1]
-//line pkg/logql/syntax/expr.y:352
+//line pkg/logql/syntax/expr.y:356
{
- exprVAL.NumberFilter = log.NewNumericLabelFilter(log.LabelFilterGreaterThan, exprDollar[1].str, mustNewFloat(exprDollar[3].str))
+ exprVAL.BytesFilter = log.NewBytesLabelFilter(log.LabelFilterEqual, exprDollar[1].str, exprDollar[3].bytes)
}
case 130:
exprDollar = exprS[exprpt-3 : exprpt+1]
-//line pkg/logql/syntax/expr.y:353
+//line pkg/logql/syntax/expr.y:360
{
- exprVAL.NumberFilter = log.NewNumericLabelFilter(log.LabelFilterGreaterThanOrEqual, exprDollar[1].str, mustNewFloat(exprDollar[3].str))
+ exprVAL.NumberFilter = log.NewNumericLabelFilter(log.LabelFilterGreaterThan, exprDollar[1].str, mustNewFloat(exprDollar[3].str))
}
case 131:
exprDollar = exprS[exprpt-3 : exprpt+1]
-//line pkg/logql/syntax/expr.y:354
+//line pkg/logql/syntax/expr.y:361
{
- exprVAL.NumberFilter = log.NewNumericLabelFilter(log.LabelFilterLesserThan, exprDollar[1].str, mustNewFloat(exprDollar[3].str))
+ exprVAL.NumberFilter = log.NewNumericLabelFilter(log.LabelFilterGreaterThanOrEqual, exprDollar[1].str, mustNewFloat(exprDollar[3].str))
}
case 132:
exprDollar = exprS[exprpt-3 : exprpt+1]
-//line pkg/logql/syntax/expr.y:355
+//line pkg/logql/syntax/expr.y:362
{
- exprVAL.NumberFilter = log.NewNumericLabelFilter(log.LabelFilterLesserThanOrEqual, exprDollar[1].str, mustNewFloat(exprDollar[3].str))
+ exprVAL.NumberFilter = log.NewNumericLabelFilter(log.LabelFilterLesserThan, exprDollar[1].str, mustNewFloat(exprDollar[3].str))
}
case 133:
exprDollar = exprS[exprpt-3 : exprpt+1]
-//line pkg/logql/syntax/expr.y:356
+//line pkg/logql/syntax/expr.y:363
{
- exprVAL.NumberFilter = log.NewNumericLabelFilter(log.LabelFilterNotEqual, exprDollar[1].str, mustNewFloat(exprDollar[3].str))
+ exprVAL.NumberFilter = log.NewNumericLabelFilter(log.LabelFilterLesserThanOrEqual, exprDollar[1].str, mustNewFloat(exprDollar[3].str))
}
case 134:
exprDollar = exprS[exprpt-3 : exprpt+1]
-//line pkg/logql/syntax/expr.y:357
+//line pkg/logql/syntax/expr.y:364
{
- exprVAL.NumberFilter = log.NewNumericLabelFilter(log.LabelFilterEqual, exprDollar[1].str, mustNewFloat(exprDollar[3].str))
+ exprVAL.NumberFilter = log.NewNumericLabelFilter(log.LabelFilterNotEqual, exprDollar[1].str, mustNewFloat(exprDollar[3].str))
}
case 135:
exprDollar = exprS[exprpt-3 : exprpt+1]
-//line pkg/logql/syntax/expr.y:358
+//line pkg/logql/syntax/expr.y:365
{
exprVAL.NumberFilter = log.NewNumericLabelFilter(log.LabelFilterEqual, exprDollar[1].str, mustNewFloat(exprDollar[3].str))
}
case 136:
+ exprDollar = exprS[exprpt-3 : exprpt+1]
+//line pkg/logql/syntax/expr.y:366
+ {
+ exprVAL.NumberFilter = log.NewNumericLabelFilter(log.LabelFilterEqual, exprDollar[1].str, mustNewFloat(exprDollar[3].str))
+ }
+ case 137:
+ exprDollar = exprS[exprpt-1 : exprpt+1]
+//line pkg/logql/syntax/expr.y:370
+ {
+ exprVAL.DropLabel = log.NewDropLabel(nil, exprDollar[1].str)
+ }
+ case 138:
+ exprDollar = exprS[exprpt-1 : exprpt+1]
+//line pkg/logql/syntax/expr.y:371
+ {
+ exprVAL.DropLabel = log.NewDropLabel(exprDollar[1].Matcher, "")
+ }
+ case 139:
+ exprDollar = exprS[exprpt-1 : exprpt+1]
+//line pkg/logql/syntax/expr.y:374
+ {
+ exprVAL.DropLabels = []log.DropLabel{exprDollar[1].DropLabel}
+ }
+ case 140:
+ exprDollar = exprS[exprpt-3 : exprpt+1]
+//line pkg/logql/syntax/expr.y:375
+ {
+ exprVAL.DropLabels = append(exprDollar[1].DropLabels, exprDollar[3].DropLabel)
+ }
+ case 141:
+ exprDollar = exprS[exprpt-2 : exprpt+1]
+//line pkg/logql/syntax/expr.y:378
+ {
+ exprVAL.DropLabelsExpr = newDropLabelsExpr(exprDollar[2].DropLabels)
+ }
+ case 142:
exprDollar = exprS[exprpt-4 : exprpt+1]
-//line pkg/logql/syntax/expr.y:363
+//line pkg/logql/syntax/expr.y:382
{
exprVAL.BinOpExpr = mustNewBinOpExpr("or", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr)
}
- case 137:
+ case 143:
exprDollar = exprS[exprpt-4 : exprpt+1]
-//line pkg/logql/syntax/expr.y:364
+//line pkg/logql/syntax/expr.y:383
{
exprVAL.BinOpExpr = mustNewBinOpExpr("and", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr)
}
- case 138:
+ case 144:
exprDollar = exprS[exprpt-4 : exprpt+1]
-//line pkg/logql/syntax/expr.y:365
+//line pkg/logql/syntax/expr.y:384
{
exprVAL.BinOpExpr = mustNewBinOpExpr("unless", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr)
}
- case 139:
+ case 145:
exprDollar = exprS[exprpt-4 : exprpt+1]
-//line pkg/logql/syntax/expr.y:366
+//line pkg/logql/syntax/expr.y:385
{
exprVAL.BinOpExpr = mustNewBinOpExpr("+", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr)
}
- case 140:
+ case 146:
exprDollar = exprS[exprpt-4 : exprpt+1]
-//line pkg/logql/syntax/expr.y:367
+//line pkg/logql/syntax/expr.y:386
{
exprVAL.BinOpExpr = mustNewBinOpExpr("-", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr)
}
- case 141:
+ case 147:
exprDollar = exprS[exprpt-4 : exprpt+1]
-//line pkg/logql/syntax/expr.y:368
+//line pkg/logql/syntax/expr.y:387
{
exprVAL.BinOpExpr = mustNewBinOpExpr("*", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr)
}
- case 142:
+ case 148:
exprDollar = exprS[exprpt-4 : exprpt+1]
-//line pkg/logql/syntax/expr.y:369
+//line pkg/logql/syntax/expr.y:388
{
exprVAL.BinOpExpr = mustNewBinOpExpr("/", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr)
}
- case 143:
+ case 149:
exprDollar = exprS[exprpt-4 : exprpt+1]
-//line pkg/logql/syntax/expr.y:370
+//line pkg/logql/syntax/expr.y:389
{
exprVAL.BinOpExpr = mustNewBinOpExpr("%", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr)
}
- case 144:
+ case 150:
exprDollar = exprS[exprpt-4 : exprpt+1]
-//line pkg/logql/syntax/expr.y:371
+//line pkg/logql/syntax/expr.y:390
{
exprVAL.BinOpExpr = mustNewBinOpExpr("^", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr)
}
- case 145:
+ case 151:
exprDollar = exprS[exprpt-4 : exprpt+1]
-//line pkg/logql/syntax/expr.y:372
+//line pkg/logql/syntax/expr.y:391
{
exprVAL.BinOpExpr = mustNewBinOpExpr("==", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr)
}
- case 146:
+ case 152:
exprDollar = exprS[exprpt-4 : exprpt+1]
-//line pkg/logql/syntax/expr.y:373
+//line pkg/logql/syntax/expr.y:392
{
exprVAL.BinOpExpr = mustNewBinOpExpr("!=", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr)
}
- case 147:
+ case 153:
exprDollar = exprS[exprpt-4 : exprpt+1]
-//line pkg/logql/syntax/expr.y:374
+//line pkg/logql/syntax/expr.y:393
{
exprVAL.BinOpExpr = mustNewBinOpExpr(">", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr)
}
- case 148:
+ case 154:
exprDollar = exprS[exprpt-4 : exprpt+1]
-//line pkg/logql/syntax/expr.y:375
+//line pkg/logql/syntax/expr.y:394
{
exprVAL.BinOpExpr = mustNewBinOpExpr(">=", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr)
}
- case 149:
+ case 155:
exprDollar = exprS[exprpt-4 : exprpt+1]
-//line pkg/logql/syntax/expr.y:376
+//line pkg/logql/syntax/expr.y:395
{
exprVAL.BinOpExpr = mustNewBinOpExpr("<", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr)
}
- case 150:
+ case 156:
exprDollar = exprS[exprpt-4 : exprpt+1]
-//line pkg/logql/syntax/expr.y:377
+//line pkg/logql/syntax/expr.y:396
{
exprVAL.BinOpExpr = mustNewBinOpExpr("<=", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr)
}
- case 151:
+ case 157:
exprDollar = exprS[exprpt-0 : exprpt+1]
-//line pkg/logql/syntax/expr.y:381
+//line pkg/logql/syntax/expr.y:400
{
exprVAL.BoolModifier = &BinOpOptions{VectorMatching: &VectorMatching{Card: CardOneToOne}}
}
- case 152:
+ case 158:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:385
+//line pkg/logql/syntax/expr.y:404
{
exprVAL.BoolModifier = &BinOpOptions{VectorMatching: &VectorMatching{Card: CardOneToOne}, ReturnBool: true}
}
- case 153:
+ case 159:
exprDollar = exprS[exprpt-5 : exprpt+1]
-//line pkg/logql/syntax/expr.y:392
+//line pkg/logql/syntax/expr.y:411
{
exprVAL.OnOrIgnoringModifier = exprDollar[1].BoolModifier
exprVAL.OnOrIgnoringModifier.VectorMatching.On = true
exprVAL.OnOrIgnoringModifier.VectorMatching.MatchingLabels = exprDollar[4].Labels
}
- case 154:
+ case 160:
exprDollar = exprS[exprpt-4 : exprpt+1]
-//line pkg/logql/syntax/expr.y:398
+//line pkg/logql/syntax/expr.y:417
{
exprVAL.OnOrIgnoringModifier = exprDollar[1].BoolModifier
exprVAL.OnOrIgnoringModifier.VectorMatching.On = true
}
- case 155:
+ case 161:
exprDollar = exprS[exprpt-5 : exprpt+1]
-//line pkg/logql/syntax/expr.y:403
+//line pkg/logql/syntax/expr.y:422
{
exprVAL.OnOrIgnoringModifier = exprDollar[1].BoolModifier
exprVAL.OnOrIgnoringModifier.VectorMatching.MatchingLabels = exprDollar[4].Labels
}
- case 156:
+ case 162:
exprDollar = exprS[exprpt-4 : exprpt+1]
-//line pkg/logql/syntax/expr.y:408
+//line pkg/logql/syntax/expr.y:427
{
exprVAL.OnOrIgnoringModifier = exprDollar[1].BoolModifier
}
- case 157:
+ case 163:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:414
+//line pkg/logql/syntax/expr.y:433
{
exprVAL.BinOpModifier = exprDollar[1].BoolModifier
}
- case 158:
+ case 164:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:415
+//line pkg/logql/syntax/expr.y:434
{
exprVAL.BinOpModifier = exprDollar[1].OnOrIgnoringModifier
}
- case 159:
+ case 165:
exprDollar = exprS[exprpt-2 : exprpt+1]
-//line pkg/logql/syntax/expr.y:417
+//line pkg/logql/syntax/expr.y:436
{
exprVAL.BinOpModifier = exprDollar[1].OnOrIgnoringModifier
exprVAL.BinOpModifier.VectorMatching.Card = CardManyToOne
}
- case 160:
+ case 166:
exprDollar = exprS[exprpt-4 : exprpt+1]
-//line pkg/logql/syntax/expr.y:422
+//line pkg/logql/syntax/expr.y:441
{
exprVAL.BinOpModifier = exprDollar[1].OnOrIgnoringModifier
exprVAL.BinOpModifier.VectorMatching.Card = CardManyToOne
}
- case 161:
+ case 167:
exprDollar = exprS[exprpt-5 : exprpt+1]
-//line pkg/logql/syntax/expr.y:427
+//line pkg/logql/syntax/expr.y:446
{
exprVAL.BinOpModifier = exprDollar[1].OnOrIgnoringModifier
exprVAL.BinOpModifier.VectorMatching.Card = CardManyToOne
exprVAL.BinOpModifier.VectorMatching.Include = exprDollar[4].Labels
}
- case 162:
+ case 168:
exprDollar = exprS[exprpt-2 : exprpt+1]
-//line pkg/logql/syntax/expr.y:433
+//line pkg/logql/syntax/expr.y:452
{
exprVAL.BinOpModifier = exprDollar[1].OnOrIgnoringModifier
exprVAL.BinOpModifier.VectorMatching.Card = CardOneToMany
}
- case 163:
+ case 169:
exprDollar = exprS[exprpt-4 : exprpt+1]
-//line pkg/logql/syntax/expr.y:438
+//line pkg/logql/syntax/expr.y:457
{
exprVAL.BinOpModifier = exprDollar[1].OnOrIgnoringModifier
exprVAL.BinOpModifier.VectorMatching.Card = CardOneToMany
}
- case 164:
+ case 170:
exprDollar = exprS[exprpt-5 : exprpt+1]
-//line pkg/logql/syntax/expr.y:443
+//line pkg/logql/syntax/expr.y:462
{
exprVAL.BinOpModifier = exprDollar[1].OnOrIgnoringModifier
exprVAL.BinOpModifier.VectorMatching.Card = CardOneToMany
exprVAL.BinOpModifier.VectorMatching.Include = exprDollar[4].Labels
}
- case 165:
+ case 171:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:451
+//line pkg/logql/syntax/expr.y:470
{
exprVAL.LiteralExpr = mustNewLiteralExpr(exprDollar[1].str, false)
}
- case 166:
+ case 172:
exprDollar = exprS[exprpt-2 : exprpt+1]
-//line pkg/logql/syntax/expr.y:452
+//line pkg/logql/syntax/expr.y:471
{
exprVAL.LiteralExpr = mustNewLiteralExpr(exprDollar[2].str, false)
}
- case 167:
+ case 173:
exprDollar = exprS[exprpt-2 : exprpt+1]
-//line pkg/logql/syntax/expr.y:453
+//line pkg/logql/syntax/expr.y:472
{
exprVAL.LiteralExpr = mustNewLiteralExpr(exprDollar[2].str, true)
}
- case 168:
+ case 174:
exprDollar = exprS[exprpt-4 : exprpt+1]
-//line pkg/logql/syntax/expr.y:457
+//line pkg/logql/syntax/expr.y:476
{
exprVAL.VectorExpr = NewVectorExpr(exprDollar[3].str)
}
- case 169:
+ case 175:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:460
+//line pkg/logql/syntax/expr.y:479
{
exprVAL.Vector = OpTypeVector
}
- case 170:
+ case 176:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:464
+//line pkg/logql/syntax/expr.y:483
{
exprVAL.VectorOp = OpTypeSum
}
- case 171:
+ case 177:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:465
+//line pkg/logql/syntax/expr.y:484
{
exprVAL.VectorOp = OpTypeAvg
}
- case 172:
+ case 178:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:466
+//line pkg/logql/syntax/expr.y:485
{
exprVAL.VectorOp = OpTypeCount
}
- case 173:
+ case 179:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:467
+//line pkg/logql/syntax/expr.y:486
{
exprVAL.VectorOp = OpTypeMax
}
- case 174:
+ case 180:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:468
+//line pkg/logql/syntax/expr.y:487
{
exprVAL.VectorOp = OpTypeMin
}
- case 175:
+ case 181:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:469
+//line pkg/logql/syntax/expr.y:488
{
exprVAL.VectorOp = OpTypeStddev
}
- case 176:
+ case 182:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:470
+//line pkg/logql/syntax/expr.y:489
{
exprVAL.VectorOp = OpTypeStdvar
}
- case 177:
+ case 183:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:471
+//line pkg/logql/syntax/expr.y:490
{
exprVAL.VectorOp = OpTypeBottomK
}
- case 178:
+ case 184:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:472
+//line pkg/logql/syntax/expr.y:491
{
exprVAL.VectorOp = OpTypeTopK
}
- case 179:
+ case 185:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:473
+//line pkg/logql/syntax/expr.y:492
{
exprVAL.VectorOp = OpTypeSort
}
- case 180:
+ case 186:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:474
+//line pkg/logql/syntax/expr.y:493
{
exprVAL.VectorOp = OpTypeSortDesc
}
- case 181:
+ case 187:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:478
+//line pkg/logql/syntax/expr.y:497
{
exprVAL.RangeOp = OpRangeTypeCount
}
- case 182:
+ case 188:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:479
+//line pkg/logql/syntax/expr.y:498
{
exprVAL.RangeOp = OpRangeTypeRate
}
- case 183:
+ case 189:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:480
+//line pkg/logql/syntax/expr.y:499
{
exprVAL.RangeOp = OpRangeTypeRateCounter
}
- case 184:
+ case 190:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:481
+//line pkg/logql/syntax/expr.y:500
{
exprVAL.RangeOp = OpRangeTypeBytes
}
- case 185:
+ case 191:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:482
+//line pkg/logql/syntax/expr.y:501
{
exprVAL.RangeOp = OpRangeTypeBytesRate
}
- case 186:
+ case 192:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:483
+//line pkg/logql/syntax/expr.y:502
{
exprVAL.RangeOp = OpRangeTypeAvg
}
- case 187:
+ case 193:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:484
+//line pkg/logql/syntax/expr.y:503
{
exprVAL.RangeOp = OpRangeTypeSum
}
- case 188:
+ case 194:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:485
+//line pkg/logql/syntax/expr.y:504
{
exprVAL.RangeOp = OpRangeTypeMin
}
- case 189:
+ case 195:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:486
+//line pkg/logql/syntax/expr.y:505
{
exprVAL.RangeOp = OpRangeTypeMax
}
- case 190:
+ case 196:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:487
+//line pkg/logql/syntax/expr.y:506
{
exprVAL.RangeOp = OpRangeTypeStdvar
}
- case 191:
+ case 197:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:488
+//line pkg/logql/syntax/expr.y:507
{
exprVAL.RangeOp = OpRangeTypeStddev
}
- case 192:
+ case 198:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:489
+//line pkg/logql/syntax/expr.y:508
{
exprVAL.RangeOp = OpRangeTypeQuantile
}
- case 193:
+ case 199:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:490
+//line pkg/logql/syntax/expr.y:509
{
exprVAL.RangeOp = OpRangeTypeFirst
}
- case 194:
+ case 200:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:491
+//line pkg/logql/syntax/expr.y:510
{
exprVAL.RangeOp = OpRangeTypeLast
}
- case 195:
+ case 201:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:492
+//line pkg/logql/syntax/expr.y:511
{
exprVAL.RangeOp = OpRangeTypeAbsent
}
- case 196:
+ case 202:
exprDollar = exprS[exprpt-2 : exprpt+1]
-//line pkg/logql/syntax/expr.y:496
+//line pkg/logql/syntax/expr.y:515
{
exprVAL.OffsetExpr = newOffsetExpr(exprDollar[2].duration)
}
- case 197:
+ case 203:
exprDollar = exprS[exprpt-1 : exprpt+1]
-//line pkg/logql/syntax/expr.y:499
+//line pkg/logql/syntax/expr.y:518
{
exprVAL.Labels = []string{exprDollar[1].str}
}
- case 198:
+ case 204:
exprDollar = exprS[exprpt-3 : exprpt+1]
-//line pkg/logql/syntax/expr.y:500
+//line pkg/logql/syntax/expr.y:519
{
exprVAL.Labels = append(exprDollar[1].Labels, exprDollar[3].str)
}
- case 199:
+ case 205:
exprDollar = exprS[exprpt-4 : exprpt+1]
-//line pkg/logql/syntax/expr.y:504
+//line pkg/logql/syntax/expr.y:523
{
exprVAL.Grouping = &Grouping{Without: false, Groups: exprDollar[3].Labels}
}
- case 200:
+ case 206:
exprDollar = exprS[exprpt-4 : exprpt+1]
-//line pkg/logql/syntax/expr.y:505
+//line pkg/logql/syntax/expr.y:524
{
exprVAL.Grouping = &Grouping{Without: true, Groups: exprDollar[3].Labels}
}
- case 201:
+ case 207:
exprDollar = exprS[exprpt-3 : exprpt+1]
-//line pkg/logql/syntax/expr.y:506
+//line pkg/logql/syntax/expr.y:525
{
exprVAL.Grouping = &Grouping{Without: false, Groups: nil}
}
- case 202:
+ case 208:
exprDollar = exprS[exprpt-3 : exprpt+1]
-//line pkg/logql/syntax/expr.y:507
+//line pkg/logql/syntax/expr.y:526
{
exprVAL.Grouping = &Grouping{Without: true, Groups: nil}
}
diff --git a/pkg/logql/syntax/lex.go b/pkg/logql/syntax/lex.go
index ec9d9fdd832b2..5a9783651a9a7 100644
--- a/pkg/logql/syntax/lex.go
+++ b/pkg/logql/syntax/lex.go
@@ -72,6 +72,9 @@ var tokens = map[string]int{
// filter functions
OpFilterIP: IP,
OpDecolorize: DECOLORIZE,
+
+ // drop labels
+ OpDrop: DROP,
}
// functionTokens are tokens that needs to be suffixes with parenthesis
diff --git a/pkg/logql/syntax/prettier.go b/pkg/logql/syntax/prettier.go
index ba4a5f66fdb73..297cd25e8acb5 100644
--- a/pkg/logql/syntax/prettier.go
+++ b/pkg/logql/syntax/prettier.go
@@ -110,6 +110,10 @@ func (e *LabelParserExpr) Pretty(level int) string {
return commonPrefixIndent(level, e)
}
+func (e *DropLabelsExpr) Pretty(level int) string {
+ return commonPrefixIndent(level, e)
+}
+
// e.g: | level!="error"
func (e *LabelFilterExpr) Pretty(level int) string {
return commonPrefixIndent(level, e)
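
The hunks above wire the new `drop` stage into the LogQL lexer (an `OpDrop` entry mapping to a `DROP` token) and give `DropLabelsExpr` the same pretty-printing hook as the other pipeline stages. A minimal, stand-alone sketch of how a keyword map like the one in `lex.go` resolves the new keyword to a parser token; the constants and the identifier fallback here are illustrative, not Loki's actual lexer:

```go
package main

import "fmt"

// Illustrative token values; the real ones come from the generated parser.
const (
	IDENTIFIER = iota
	DROP
)

// A keyword table in the style of the tokens map in lex.go.
var tokens = map[string]int{
	"drop": DROP, // OpDrop -> DROP, added by this change
}

// lookupToken returns the reserved-word token if the word is a keyword,
// otherwise treats it as a plain identifier (hypothetical fallback).
func lookupToken(word string) int {
	if tok, ok := tokens[word]; ok {
		return tok
	}
	return IDENTIFIER
}

func main() {
	fmt.Println(lookupToken("drop"))  // 1 (DROP)
	fmt.Println(lookupToken("level")) // 0 (IDENTIFIER)
}
```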
|
feat
|
Support drop labels in logql pipeline (#7975)
|
08ac5336d57dfebfdd11d0c4d3b9402b1d5fee61
|
2023-03-02 03:17:39
|
Bryan Boreham
|
wal: store pointers in record pool (#8667)
| false
|
diff --git a/clients/pkg/promtail/wal/wal.go b/clients/pkg/promtail/wal/wal.go
index b9ec9a0c6fb7b..7129f2821c1ad 100644
--- a/clients/pkg/promtail/wal/wal.go
+++ b/clients/pkg/promtail/wal/wal.go
@@ -76,17 +76,17 @@ func (w *wrapper) Log(record *wal.Record) error {
// logBatched logs to the WAL both series and records, batching the operation to prevent unnecessary page flushes.
func (w *wrapper) logBatched(record *wal.Record) error {
- seriesBuf := recordPool.GetBytes()[:0]
- entriesBuf := recordPool.GetBytes()[:0]
+ seriesBuf := recordPool.GetBytes()
+ entriesBuf := recordPool.GetBytes()
defer func() {
recordPool.PutBytes(seriesBuf)
recordPool.PutBytes(entriesBuf)
}()
- seriesBuf = record.EncodeSeries(seriesBuf)
- entriesBuf = record.EncodeEntries(wal.CurrentEntriesRec, entriesBuf)
+ *seriesBuf = record.EncodeSeries(*seriesBuf)
+ *entriesBuf = record.EncodeEntries(wal.CurrentEntriesRec, *entriesBuf)
// Always write series then entries
- if err := w.wal.Log(seriesBuf, entriesBuf); err != nil {
+ if err := w.wal.Log(*seriesBuf, *entriesBuf); err != nil {
return err
}
return nil
@@ -94,22 +94,22 @@ func (w *wrapper) logBatched(record *wal.Record) error {
// logSingle logs to the WAL series and records in separate WAL operation. This causes a page flush after each operation.
func (w *wrapper) logSingle(record *wal.Record) error {
- buf := recordPool.GetBytes()[:0]
+ buf := recordPool.GetBytes()
defer func() {
recordPool.PutBytes(buf)
}()
// Always write series then entries.
if len(record.Series) > 0 {
- buf = record.EncodeSeries(buf)
- if err := w.wal.Log(buf); err != nil {
+ *buf = record.EncodeSeries(*buf)
+ if err := w.wal.Log(*buf); err != nil {
return err
}
- buf = buf[:0]
+ *buf = (*buf)[:0]
}
if len(record.RefEntries) > 0 {
- buf = record.EncodeEntries(wal.CurrentEntriesRec, buf)
- if err := w.wal.Log(buf); err != nil {
+ *buf = record.EncodeEntries(wal.CurrentEntriesRec, *buf)
+ if err := w.wal.Log(*buf); err != nil {
return err
}
diff --git a/pkg/ingester/wal.go b/pkg/ingester/wal.go
index dfb248a061775..2a19ee4aa16b5 100644
--- a/pkg/ingester/wal.go
+++ b/pkg/ingester/wal.go
@@ -110,28 +110,28 @@ func (w *walWrapper) Log(record *wal.Record) error {
case <-w.quit:
return nil
default:
- buf := recordPool.GetBytes()[:0]
+ buf := recordPool.GetBytes()
defer func() {
recordPool.PutBytes(buf)
}()
// Always write series then entries.
if len(record.Series) > 0 {
- buf = record.EncodeSeries(buf)
- if err := w.wal.Log(buf); err != nil {
+ *buf = record.EncodeSeries(*buf)
+ if err := w.wal.Log(*buf); err != nil {
return err
}
w.metrics.walRecordsLogged.Inc()
- w.metrics.walLoggedBytesTotal.Add(float64(len(buf)))
- buf = buf[:0]
+ w.metrics.walLoggedBytesTotal.Add(float64(len(*buf)))
+ *buf = (*buf)[:0]
}
if len(record.RefEntries) > 0 {
- buf = record.EncodeEntries(wal.CurrentEntriesRec, buf)
- if err := w.wal.Log(buf); err != nil {
+ *buf = record.EncodeEntries(wal.CurrentEntriesRec, *buf)
+ if err := w.wal.Log(*buf); err != nil {
return err
}
w.metrics.walRecordsLogged.Inc()
- w.metrics.walLoggedBytesTotal.Add(float64(len(buf)))
+ w.metrics.walLoggedBytesTotal.Add(float64(len(*buf)))
}
return nil
}
diff --git a/pkg/ingester/wal/encoding_test.go b/pkg/ingester/wal/encoding_test.go
index 79eff31ec0d3c..113b87bc2d8a6 100644
--- a/pkg/ingester/wal/encoding_test.go
+++ b/pkg/ingester/wal/encoding_test.go
@@ -166,11 +166,11 @@ func Benchmark_EncodeEntries(b *testing.B) {
}
b.ReportAllocs()
b.ResetTimer()
- buf := recordPool.GetBytes()[:0]
+ buf := recordPool.GetBytes()
defer recordPool.PutBytes(buf)
for n := 0; n < b.N; n++ {
- record.EncodeEntries(CurrentEntriesRec, buf)
+ *buf = record.EncodeEntries(CurrentEntriesRec, *buf)
}
}
diff --git a/pkg/ingester/wal/recordpool.go b/pkg/ingester/wal/recordpool.go
index 0c74f54f21f0b..cc22b3f80d248 100644
--- a/pkg/ingester/wal/recordpool.go
+++ b/pkg/ingester/wal/recordpool.go
@@ -26,7 +26,8 @@ func NewRecordPool() *ResettingPool {
},
bPool: &sync.Pool{
New: func() interface{} {
- return make([]byte, 0, 1<<10) // 1kb
+ buf := make([]byte, 0, 1<<10) // 1kb
+ return &buf
},
},
}
@@ -53,10 +54,11 @@ func (p *ResettingPool) PutEntries(es []logproto.Entry) {
p.ePool.Put(es[:0]) // nolint:staticcheck
}
-func (p *ResettingPool) GetBytes() []byte {
- return p.bPool.Get().([]byte)
+func (p *ResettingPool) GetBytes() *[]byte {
+ return p.bPool.Get().(*[]byte)
}
-func (p *ResettingPool) PutBytes(b []byte) {
- p.bPool.Put(b[:0]) // nolint:staticcheck
+func (p *ResettingPool) PutBytes(b *[]byte) {
+ *b = (*b)[:0]
+ p.bPool.Put(b)
}
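
The change above switches the record pool from storing `[]byte` to `*[]byte`. A minimal sketch of the pattern (names are illustrative, not Loki's API): keeping a pointer in `sync.Pool` means `Put` no longer boxes a three-word slice header into an `interface{}`, which is the allocation staticcheck's SA6002 warns about and the reason the old code carried `// nolint:staticcheck`.

```go
package main

import (
	"fmt"
	"sync"
)

// bufPool stores *[]byte so Get/Put move only a pointer through the pool.
var bufPool = sync.Pool{
	New: func() interface{} {
		b := make([]byte, 0, 1<<10) // 1 KiB initial capacity, as in the record pool
		return &b
	},
}

func getBuf() *[]byte { return bufPool.Get().(*[]byte) }

func putBuf(b *[]byte) {
	*b = (*b)[:0] // reset length, keep capacity
	bufPool.Put(b)
}

func main() {
	buf := getBuf()
	*buf = append(*buf, "encoded record"...) // callers dereference before use
	fmt.Println(len(*buf), cap(*buf))
	putBuf(buf)
}
```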
|
wal
|
store pointers in record pool (#8667)
|
f7f09e2226d042f8978f3e9b2b2e62eeb6c289f0
|
2019-05-30 17:31:34
|
Sandeep Sukhani
|
limits: Reject entries based on age set in limits (#631)
| false
|
diff --git a/cmd/loki/loki-local-config.yaml b/cmd/loki/loki-local-config.yaml
index 4e6978c494864..17e3511305ee5 100644
--- a/cmd/loki/loki-local-config.yaml
+++ b/cmd/loki/loki-local-config.yaml
@@ -31,6 +31,8 @@ storage_config:
limits_config:
enforce_metric_name: false
+ reject_old_samples: true
+ reject_old_samples_max_age: 168h
chunk_store_config:
max_look_back_period: 0
diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go
index a768c98bb0d22..a0063b88b26ff 100644
--- a/pkg/distributor/distributor.go
+++ b/pkg/distributor/distributor.go
@@ -5,6 +5,7 @@ import (
"flag"
"hash/fnv"
"sync/atomic"
+ "time"
cortex_client "github.com/cortexproject/cortex/pkg/ingester/client"
"github.com/cortexproject/cortex/pkg/ring"
@@ -21,6 +22,8 @@ import (
"github.com/grafana/loki/pkg/util"
)
+const metricName = "logs"
+
var (
ingesterAppends = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: "loki",
@@ -130,6 +133,21 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log
continue
}
+ entries := make([]logproto.Entry, 0, len(stream.Entries))
+ for _, entry := range stream.Entries {
+ if err := d.overrides.ValidateSample(userID, metricName, cortex_client.Sample{
+ TimestampMs: entry.Timestamp.UnixNano() / int64(time.Millisecond),
+ }); err != nil {
+ validationErr = err
+ continue
+ }
+ entries = append(entries, entry)
+ }
+
+ if len(entries) == 0 {
+ continue
+ }
+ stream.Entries = entries
keys = append(keys, tokenFor(userID, stream.Labels))
streams = append(streams, streamTracker{
stream: stream,
diff --git a/production/helm/loki-stack/Chart.yaml b/production/helm/loki-stack/Chart.yaml
index 1209a6ad8b40a..b4df444c686cb 100644
--- a/production/helm/loki-stack/Chart.yaml
+++ b/production/helm/loki-stack/Chart.yaml
@@ -1,5 +1,5 @@
name: loki-stack
-version: 0.10.0
+version: 0.10.1
appVersion: 0.0.1
kubeVersion: "^1.10.0-0"
description: "Loki: like Prometheus, but for logs."
diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml
index d0ef9035a0572..22ce5789e6d2d 100644
--- a/production/helm/loki/Chart.yaml
+++ b/production/helm/loki/Chart.yaml
@@ -1,5 +1,5 @@
name: loki
-version: 0.9.0
+version: 0.9.1
appVersion: 0.0.1
kubeVersion: "^1.10.0-0"
description: "Loki: like Prometheus, but for logs."
diff --git a/production/helm/loki/values.yaml b/production/helm/loki/values.yaml
index c1825e85b9876..e871de7ab1cb0 100644
--- a/production/helm/loki/values.yaml
+++ b/production/helm/loki/values.yaml
@@ -40,6 +40,8 @@ config:
# consistentreads: true
limits_config:
enforce_metric_name: false
+ reject_old_samples: true
+ reject_old_samples_max_age: 168h
schema_config:
configs:
- from: 2018-04-15
diff --git a/production/ksonnet/loki/config.libsonnet b/production/ksonnet/loki/config.libsonnet
index b1dd463c98450..7eaedd333b723 100644
--- a/production/ksonnet/loki/config.libsonnet
+++ b/production/ksonnet/loki/config.libsonnet
@@ -33,6 +33,8 @@
limits_config: {
enforce_metric_name: false,
+ reject_old_samples: true,
+ reject_old_samples_max_age: '168h',
},
ingester: {
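
For context, the two limits added to the configs above gate ingestion on entry age: the distributor validates each entry's timestamp and keeps only entries newer than the configured maximum age. A rough, stand-alone sketch of that behaviour; this is not Loki's actual validation code (which goes through the Cortex overrides), only an illustration of the effect of `reject_old_samples` and `reject_old_samples_max_age`.

```go
package main

import (
	"fmt"
	"time"
)

// rejectOldEntries keeps only entries newer than now-maxAge when rejection is enabled.
func rejectOldEntries(timestamps []time.Time, rejectOld bool, maxAge time.Duration) []time.Time {
	if !rejectOld {
		return timestamps
	}
	cutoff := time.Now().Add(-maxAge)
	kept := timestamps[:0]
	for _, ts := range timestamps {
		if ts.After(cutoff) {
			kept = append(kept, ts)
		}
	}
	return kept
}

func main() {
	now := time.Now()
	entries := []time.Time{now, now.Add(-200 * time.Hour)} // second entry is older than 168h
	kept := rejectOldEntries(entries, true, 168*time.Hour)
	fmt.Println(len(kept)) // 1
}
```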
|
limits
|
Reject entries based on age set in limits (#631)
|
f5d62bd505c19ceb287bbcb65359156f7273e75f
|
2024-12-10 00:16:29
|
Trevor Whitney
|
feat: present DF bytes values in queryable format (#15272)
| false
|
diff --git a/pkg/querier/queryrange/detected_fields.go b/pkg/querier/queryrange/detected_fields.go
index 3248d3b2cda81..be42d1f0a6868 100644
--- a/pkg/querier/queryrange/detected_fields.go
+++ b/pkg/querier/queryrange/detected_fields.go
@@ -5,6 +5,7 @@ import (
"net/http"
"slices"
"strconv"
+ "strings"
"time"
"github.com/axiomhq/hyperloglog"
@@ -114,7 +115,13 @@ func parseDetectedFieldValues(limit uint32, streams []push.Stream, name string)
parsedLabels, _ := parseEntry(entry, entryLbls)
if vals, ok := parsedLabels[name]; ok {
for _, v := range vals {
- values[v] = struct{}{}
+ // special case bytes values, so they can be directly inserted into a query
+ if bs, err := humanize.ParseBytes(v); err == nil {
+ bsString := strings.Replace(humanize.Bytes(bs), " ", "", 1)
+ values[bsString] = struct{}{}
+ } else {
+ values[v] = struct{}{}
+ }
}
}
}
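
The loop above is the whole change on the read path: any detected-field value that parses as a byte quantity is re-rendered without the space so it can be pasted straight into a LogQL filter. A stand-alone sketch using the same `github.com/dustin/go-humanize` calls; the expected outputs match the test added below.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/dustin/go-humanize"
)

// normalizeBytes re-renders anything humanize recognises as a byte quantity
// ("1,024", "1024 MB", "1024KB", ...) in a compact, query-friendly form.
func normalizeBytes(v string) string {
	bs, err := humanize.ParseBytes(v)
	if err != nil {
		return v // not a bytes value, keep the original
	}
	return strings.Replace(humanize.Bytes(bs), " ", "", 1)
}

func main() {
	for _, v := range []string{"1,024", "1024 MB", "1024KB"} {
		fmt.Println(normalizeBytes(v)) // 1.0kB, 1.0GB, 1.0MB
	}
}
```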
diff --git a/pkg/querier/queryrange/detected_fields_test.go b/pkg/querier/queryrange/detected_fields_test.go
index b0b363e4735d1..22aa63e284888 100644
--- a/pkg/querier/queryrange/detected_fields_test.go
+++ b/pkg/querier/queryrange/detected_fields_test.go
@@ -24,7 +24,7 @@ import (
"github.com/grafana/loki/pkg/push"
)
-func Test_parseDetectedFeilds(t *testing.T) {
+func Test_parseDetectedFields(t *testing.T) {
now := time.Now()
t.Run("when no parsers are supplied", func(t *testing.T) {
@@ -1317,6 +1317,70 @@ func TestQuerier_DetectedFields(t *testing.T) {
}, secondValues)
},
)
+
+ t.Run("correctly formats bytes values for detected fields", func(t *testing.T) {
+ lbls := `{cluster="us-east-1", namespace="mimir-dev", pod="mimir-ruler-nfb37", service_name="mimir-ruler"}`
+ metric, err := parser.ParseMetric(lbls)
+ require.NoError(t, err)
+ now := time.Now()
+
+		infoDetectedFieldMetadata := []push.LabelAdapter{
+ {
+ Name: "detected_level",
+ Value: "info",
+ },
+ }
+
+ lines := []push.Entry{
+ {
+ Timestamp: now,
+ Line: "ts=2024-09-05T15:36:38.757788067Z caller=metrics.go:66 tenant=2419 level=info bytes=1,024",
+				StructuredMetadata: infoDetectedFieldMetadata,
+ },
+ {
+ Timestamp: now,
+ Line: `ts=2024-09-05T15:36:38.698375619Z caller=grpc_logging.go:66 tenant=29 level=info bytes="1024 MB"`,
+				StructuredMetadata: infoDetectedFieldMetadata,
+ },
+ {
+ Timestamp: now,
+ Line: "ts=2024-09-05T15:36:38.629424175Z caller=grpc_logging.go:66 tenant=2919 level=info bytes=1024KB",
+				StructuredMetadata: infoDetectedFieldMetadata,
+ },
+ }
+ stream := push.Stream{
+ Labels: lbls,
+ Entries: lines,
+ Hash: metric.Hash(),
+ }
+
+ handler := NewDetectedFieldsHandler(
+ limitedHandler(stream),
+ logHandler(stream),
+ limits,
+ )
+
+ request := DetectedFieldsRequest{
+ logproto.DetectedFieldsRequest{
+ Start: time.Now().Add(-1 * time.Minute),
+ End: time.Now(),
+ Query: `{cluster="us-east-1"} | logfmt`,
+ LineLimit: 1000,
+ Limit: 3,
+ Values: true,
+ Name: "bytes",
+ },
+ "/loki/api/v1/detected_field/bytes/values",
+ }
+
+ detectedFieldValues := handleRequest(handler, request).Values
+ slices.Sort(detectedFieldValues)
+ require.Equal(t, []string{
+ "1.0GB",
+ "1.0MB",
+ "1.0kB",
+ }, detectedFieldValues)
+ })
}
func BenchmarkQuerierDetectedFields(b *testing.B) {
|
feat
|
present DF bytes values in queryable format (#15272)
|
7a5c37f77ea6cc6f9f4327df3e4da6f07a0cc87f
|
2025-01-24 20:50:47
|
renovate[bot]
|
chore(deps): update helm/kind-action action to v1.12.0 (main) (#15949)
| false
|
diff --git a/.github/workflows/helm-ci.yml b/.github/workflows/helm-ci.yml
index cca0f014b8d1d..704dcf1562489 100644
--- a/.github/workflows/helm-ci.yml
+++ b/.github/workflows/helm-ci.yml
@@ -51,7 +51,7 @@ jobs:
timeout-minutes: 10
- name: Create kind cluster
- uses: helm/[email protected]
+ uses: helm/[email protected]
if: steps.list-changed.outputs.changed == 'true'
- name: Install prometheus operator
|
chore
|
update helm/kind-action action to v1.12.0 (main) (#15949)
|
3a3df62c59d8d2f0cb4b9e39844bda9133caa772
|
2025-03-13 19:15:09
|
Periklis Tsirakidis
|
feat(ingest-limits): Implement global tenant rate limiting (#16727)
| false
|
diff --git a/docs/sources/shared/configuration.md b/docs/sources/shared/configuration.md
index 866e227197fad..d3682e6b1d679 100644
--- a/docs/sources/shared/configuration.md
+++ b/docs/sources/shared/configuration.md
@@ -883,6 +883,16 @@ ingest_limits:
# CLI flag: -ingest-limits.window-size
[window_size: <duration> | default = 1h]
+ # The time window for rate calculation. This should match the window used in
+ # Prometheus rate() queries for consistency.
+ # CLI flag: -ingest-limits.rate-window
+ [rate_window: <duration> | default = 5m]
+
+ # The granularity of time buckets used for sliding window rate calculation.
+ # Smaller buckets provide more precise rate tracking but require more memory.
+ # CLI flag: -ingest-limits.bucket-duration
+ [bucket_duration: <duration> | default = 1m]
+
lifecycler:
ring:
kvstore:
@@ -1186,6 +1196,10 @@ ingest_limits_frontend:
# CLI flag: -ingest-limits-frontend.lifecycler.ID
[id: <string> | default = "<hostname>"]
+ # The period to recheck per tenant ingestion rate limit configuration.
+ # CLI flag: -ingest-limits-frontend.recheck-period
+ [recheck_period: <duration> | default = 10s]
+
ingest_limits_frontend_client:
# Configures client gRPC connections to limits service.
# The CLI flags prefix for this block configuration is:
diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go
index f62276a9203bd..f6ce1aba0147f 100644
--- a/pkg/distributor/distributor.go
+++ b/pkg/distributor/distributor.go
@@ -1161,21 +1161,27 @@ func (d *Distributor) exceedsLimits(ctx context.Context, tenantID string, stream
// limits-frontend. The limits-frontend is responsible for deciding if
// the request would exceed the tenants limits, and if so, which streams
// from the request caused it to exceed its limits.
- streamHashes := make([]*logproto.StreamMetadata, 0, len(streams))
+ streamMetadata := make([]*logproto.StreamMetadata, 0, len(streams))
for _, stream := range streams {
// Add the stream hash to FNV-1.
buf := make([]byte, binary.MaxVarintLen64)
binary.PutUvarint(buf, stream.HashKeyNoShard)
_, _ = h.Write(buf)
+
+ // Calculate the size of the stream.
+ entriesSize, structuredMetadataSize := calculateStreamSizes(stream.Stream)
+
// Add the stream hash to the request. This is sent to limits-frontend.
- streamHashes = append(streamHashes, &logproto.StreamMetadata{
- StreamHash: stream.HashKeyNoShard,
+ streamMetadata = append(streamMetadata, &logproto.StreamMetadata{
+ StreamHash: stream.HashKeyNoShard,
+ EntriesSize: entriesSize,
+ StructuredMetadataSize: structuredMetadataSize,
})
}
req := logproto.ExceedsLimitsRequest{
Tenant: tenantID,
- Streams: streamHashes,
+ Streams: streamMetadata,
}
// Get the limits-frontend instances from the ring.
@@ -1275,6 +1281,8 @@ func (d *Distributor) sendStreamToKafka(ctx context.Context, stream KeyedStream,
return fmt.Errorf("failed to marshal write request to records: %w", err)
}
+ entriesSize, structuredMetadataSize := calculateStreamSizes(stream.Stream)
+
// However, unlike stream records, the distributor writes stream metadata
// records to one of a fixed number of partitions, the size of which is
// determined ahead of time. It does not use a ring. The reason for this
@@ -1286,6 +1294,8 @@ func (d *Distributor) sendStreamToKafka(ctx context.Context, stream KeyedStream,
d.cfg.KafkaConfig.Topic,
tenant,
stream.HashKeyNoShard,
+ entriesSize,
+ structuredMetadataSize,
)
if err != nil {
return fmt.Errorf("failed to marshal metadata: %w", err)
@@ -1403,6 +1413,15 @@ func calculateShards(rate int64, pushSize, desiredRate int) int {
return int(math.Ceil(shards))
}
+func calculateStreamSizes(stream logproto.Stream) (uint64, uint64) {
+ var entriesSize, structuredMetadataSize uint64
+ for _, entry := range stream.Entries {
+ entriesSize += uint64(len(entry.Line))
+ structuredMetadataSize += uint64(util.StructuredMetadataSize(entry.StructuredMetadata))
+ }
+ return entriesSize, structuredMetadataSize
+}
+
// newRingAndLifecycler creates a new distributor ring and lifecycler with all required lifecycler delegates
func newRingAndLifecycler(cfg RingConfig, instanceCount *atomic.Uint32, logger log.Logger, reg prometheus.Registerer, metricsNamespace string) (*ring.Ring, *ring.BasicLifecycler, error) {
kvStore, err := kv.NewClient(cfg.KVStore, ring.GetCodec(), kv.RegistererWithKVName(reg, "distributor-lifecycler"), logger)
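
`calculateStreamSizes` above feeds per-stream byte counts into both the limits-frontend request and the Kafka stream-metadata records. A rough sketch of the accounting it performs; `util.StructuredMetadataSize` is assumed here to sum label name and value lengths, which is my reading rather than something shown in this diff.

```go
package main

import "fmt"

type labelAdapter struct{ Name, Value string }

type entry struct {
	Line               string
	StructuredMetadata []labelAdapter
}

// calculateStreamSizes mirrors the shape of the helper above: line bytes and
// structured-metadata bytes are tracked separately.
func calculateStreamSizes(entries []entry) (entriesSize, structuredMetadataSize uint64) {
	for _, e := range entries {
		entriesSize += uint64(len(e.Line))
		for _, l := range e.StructuredMetadata {
			// Assumed definition of structured-metadata size: name + value bytes.
			structuredMetadataSize += uint64(len(l.Name) + len(l.Value))
		}
	}
	return entriesSize, structuredMetadataSize
}

func main() {
	es, ms := calculateStreamSizes([]entry{
		{Line: "hello world", StructuredMetadata: []labelAdapter{{Name: "trace_id", Value: "abc"}}},
	})
	fmt.Println(es, ms) // 11 11
}
```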
diff --git a/pkg/kafka/encoding.go b/pkg/kafka/encoding.go
index 8c726907d2b73..e14774be6dad8 100644
--- a/pkg/kafka/encoding.go
+++ b/pkg/kafka/encoding.go
@@ -197,14 +197,16 @@ func sovPush(x uint64) (n int) {
// EncodeStreamMetadata encodes the stream metadata into a Kafka record
// using the tenantID as the key and partition as the target partition
-func EncodeStreamMetadata(partition int32, topic string, tenantID string, streamHash uint64) (*kgo.Record, error) {
+func EncodeStreamMetadata(partition int32, topic, tenantID string, streamHash, entriesSize, structuredMetadataSize uint64) (*kgo.Record, error) {
// Validate stream hash
if streamHash == 0 {
return nil, fmt.Errorf("invalid stream hash '%d'", streamHash)
}
metadata := logproto.StreamMetadata{
- StreamHash: streamHash,
+ StreamHash: streamHash,
+ EntriesSize: entriesSize,
+ StructuredMetadataSize: structuredMetadataSize,
}
// Encode the metadata into a byte slice
diff --git a/pkg/kafka/encoding_test.go b/pkg/kafka/encoding_test.go
index fb4243b74f55f..0fd0df0dd7b9b 100644
--- a/pkg/kafka/encoding_test.go
+++ b/pkg/kafka/encoding_test.go
@@ -153,35 +153,51 @@ func generateRandomString(length int) string {
func TestEncodeDecodeStreamMetadata(t *testing.T) {
tests := []struct {
- name string
- hash uint64
- partition int32
- topic string
- tenantID string
- expectErr bool
+ name string
+ hash uint64
+ partition int32
+ topic string
+ tenantID string
+ entriesSize uint64
+ structuredMetadataSize uint64
+ expectErr bool
}{
{
- name: "Valid metadata",
- hash: 12345,
- partition: 1,
- topic: "logs",
- tenantID: "tenant-1",
- expectErr: false,
+ name: "Valid metadata",
+ hash: 12345,
+ partition: 1,
+ topic: "logs",
+ tenantID: "tenant-1",
+ entriesSize: 1024,
+ structuredMetadataSize: 512,
+ expectErr: false,
},
{
- name: "Zero hash - should error",
- hash: 0,
- partition: 3,
- topic: "traces",
- tenantID: "tenant-3",
- expectErr: true,
+ name: "Valid metadata with zero sizes",
+ hash: 67890,
+ partition: 2,
+ topic: "metrics",
+ tenantID: "tenant-2",
+ entriesSize: 0,
+ structuredMetadataSize: 0,
+ expectErr: false,
+ },
+ {
+ name: "Zero hash - should error",
+ hash: 0,
+ partition: 3,
+ topic: "traces",
+ tenantID: "tenant-3",
+ entriesSize: 2048,
+ structuredMetadataSize: 1024,
+ expectErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Encode metadata
- record, err := EncodeStreamMetadata(tt.partition, tt.topic, tt.tenantID, tt.hash)
+ record, err := EncodeStreamMetadata(tt.partition, tt.topic, tt.tenantID, tt.hash, tt.entriesSize, tt.structuredMetadataSize)
if tt.expectErr {
require.Error(t, err)
require.Nil(t, record)
@@ -201,6 +217,8 @@ func TestEncodeDecodeStreamMetadata(t *testing.T) {
// Verify decoded values
require.Equal(t, tt.hash, metadata.StreamHash)
+ require.Equal(t, tt.entriesSize, metadata.EntriesSize)
+ require.Equal(t, tt.structuredMetadataSize, metadata.StructuredMetadataSize)
})
}
diff --git a/pkg/limits/frontend/frontend.go b/pkg/limits/frontend/frontend.go
index 51c7c1b0760e0..a735025f79523 100644
--- a/pkg/limits/frontend/frontend.go
+++ b/pkg/limits/frontend/frontend.go
@@ -11,9 +11,11 @@ import (
"flag"
"fmt"
"net/http"
+ "time"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
+ "github.com/grafana/dskit/limiter"
"github.com/grafana/dskit/ring"
"github.com/grafana/dskit/services"
"github.com/grafana/dskit/user"
@@ -34,11 +36,13 @@ const (
type Config struct {
ClientConfig limits_client.Config `yaml:"client_config"`
LifecyclerConfig ring.LifecyclerConfig `yaml:"lifecycler,omitempty"`
+ RecheckPeriod time.Duration `yaml:"recheck_period"`
}
func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
cfg.ClientConfig.RegisterFlagsWithPrefix("ingest-limits-frontend", f)
cfg.LifecyclerConfig.RegisterFlagsWithPrefix("ingest-limits-frontend.", f, util_log.Logger)
+ f.DurationVar(&cfg.RecheckPeriod, "ingest-limits-frontend.recheck-period", 10*time.Second, "The period to recheck per tenant ingestion rate limit configuration.")
}
func (cfg *Config) Validate() error {
@@ -71,7 +75,8 @@ func New(cfg Config, ringName string, limitsRing ring.ReadRing, limits Limits, l
factory := limits_client.NewPoolFactory(cfg.ClientConfig)
pool := limits_client.NewPool(ringName, cfg.ClientConfig.PoolConfig, limitsRing, factory, logger)
- limitsSrv := NewRingIngestLimitsService(limitsRing, pool, limits, logger, reg)
+ rateLimiter := limiter.NewRateLimiter(newIngestionRateStrategy(limits), cfg.RecheckPeriod)
+ limitsSrv := NewRingIngestLimitsService(limitsRing, pool, limits, rateLimiter, logger, reg)
f := &Frontend{
cfg: cfg,
diff --git a/pkg/limits/frontend/service.go b/pkg/limits/frontend/service.go
index 5236c2c31fe2c..40dc0d297b831 100644
--- a/pkg/limits/frontend/service.go
+++ b/pkg/limits/frontend/service.go
@@ -3,10 +3,12 @@ package frontend
import (
"context"
"fmt"
- "sort"
+ "slices"
"strings"
+ "time"
"github.com/go-kit/log"
+ "github.com/grafana/dskit/limiter"
"github.com/grafana/dskit/ring"
ring_client "github.com/grafana/dskit/ring/client"
"github.com/prometheus/client_golang/prometheus"
@@ -22,12 +24,34 @@ const (
// RejectedStreamReasonExceedsGlobalLimit is the reason for rejecting a stream
// because it exceeds the global per tenant limit.
RejectedStreamReasonExceedsGlobalLimit = "exceeds_global_limit"
+
+ // RejectedStreamReasonRateLimited is the reason for rejecting a stream
+ // because it is rate limited.
+ RejectedStreamReasonRateLimited = "rate_limited"
)
-// Limits is the interface of the limits confgiration
+// Limits is the interface of the limits configuration
// builder to be passed to the frontend service.
type Limits interface {
MaxGlobalStreamsPerUser(userID string) int
+ IngestionRateBytes(userID string) float64
+ IngestionBurstSizeBytes(userID string) int
+}
+
+type ingestionRateStrategy struct {
+ limits Limits
+}
+
+func newIngestionRateStrategy(limits Limits) *ingestionRateStrategy {
+ return &ingestionRateStrategy{limits: limits}
+}
+
+func (s *ingestionRateStrategy) Limit(tenantID string) float64 {
+ return s.limits.IngestionRateBytes(tenantID)
+}
+
+func (s *ingestionRateStrategy) Burst(tenantID string) int {
+ return s.limits.IngestionBurstSizeBytes(tenantID)
}
// IngestLimitsService is responsible for receiving, processing and
@@ -80,19 +104,21 @@ type RingIngestLimitsService struct {
ring ring.ReadRing
pool *ring_client.Pool
- limits Limits
+ limits Limits
+ rateLimiter *limiter.RateLimiter
metrics *metrics
}
// NewRingIngestLimitsService returns a new RingIngestLimitsClient.
-func NewRingIngestLimitsService(ring ring.ReadRing, pool *ring_client.Pool, limits Limits, logger log.Logger, reg prometheus.Registerer) *RingIngestLimitsService {
+func NewRingIngestLimitsService(ring ring.ReadRing, pool *ring_client.Pool, limits Limits, rateLimiter *limiter.RateLimiter, logger log.Logger, reg prometheus.Registerer) *RingIngestLimitsService {
return &RingIngestLimitsService{
- logger: logger,
- ring: ring,
- pool: pool,
- limits: limits,
- metrics: newMetrics(reg),
+ logger: logger,
+ ring: ring,
+ pool: pool,
+ limits: limits,
+ rateLimiter: rateLimiter,
+ metrics: newMetrics(reg),
}
}
@@ -186,25 +212,23 @@ func (s *RingIngestLimitsService) perReplicaSetPartitions(ctx context.Context, r
// Sort partition IDs for each address for consistent ordering
for addr := range partitions {
- sort.Slice(partitions[addr], func(i, j int) bool {
- return partitions[addr][i] < partitions[addr][j]
- })
+ slices.Sort(partitions[addr])
}
return partitions, nil
}
func (s *RingIngestLimitsService) ExceedsLimits(ctx context.Context, req *logproto.ExceedsLimitsRequest) (*logproto.ExceedsLimitsResponse, error) {
- reqStreams := make([]uint64, 0, len(req.Streams))
+ streamHashes := make([]uint64, 0, len(req.Streams))
for _, stream := range req.Streams {
- reqStreams = append(reqStreams, stream.StreamHash)
+ streamHashes = append(streamHashes, stream.StreamHash)
}
resps, err := s.forAllBackends(ctx, func(_ context.Context, client logproto.IngestLimitsClient, partitions []int32) (*logproto.GetStreamUsageResponse, error) {
return client.GetStreamUsage(ctx, &logproto.GetStreamUsageRequest{
Tenant: req.Tenant,
Partitions: partitions,
- StreamHashes: reqStreams,
+ StreamHashes: streamHashes,
})
})
if err != nil {
@@ -213,38 +237,71 @@ func (s *RingIngestLimitsService) ExceedsLimits(ctx context.Context, req *logpro
maxGlobalStreams := s.limits.MaxGlobalStreamsPerUser(req.Tenant)
- var activeStreamsTotal uint64
+ var (
+ activeStreamsTotal uint64
+ tenantRateBytes float64
+ )
for _, resp := range resps {
activeStreamsTotal += resp.Response.ActiveStreams
+ tenantRateBytes += float64(resp.Response.Rate)
}
s.metrics.tenantActiveStreams.WithLabelValues(req.Tenant).Set(float64(activeStreamsTotal))
- if activeStreamsTotal < uint64(maxGlobalStreams) {
- return &logproto.ExceedsLimitsResponse{
- Tenant: req.Tenant,
- }, nil
- }
-
var (
rejectedStreams []*logproto.RejectedStream
uniqueStreamHashes = make(map[uint64]bool)
)
- for _, resp := range resps {
- for _, unknownStream := range resp.Response.UnknownStreams {
- if !uniqueStreamHashes[unknownStream] {
- uniqueStreamHashes[unknownStream] = true
- rejectedStreams = append(rejectedStreams, &logproto.RejectedStream{
- StreamHash: unknownStream,
- Reason: RejectedStreamReasonExceedsGlobalLimit,
- })
+
+ tenantRateLimit := s.rateLimiter.Limit(time.Now(), req.Tenant)
+ if tenantRateBytes > tenantRateLimit {
+ rateLimitedStreams := make([]*logproto.RejectedStream, 0, len(streamHashes))
+ for _, streamHash := range streamHashes {
+ rateLimitedStreams = append(rateLimitedStreams, &logproto.RejectedStream{
+ StreamHash: streamHash,
+ Reason: RejectedStreamReasonRateLimited,
+ })
+ }
+
+ // Count rejections by reason
+ s.metrics.tenantExceedsLimits.WithLabelValues(req.Tenant).Inc()
+ s.metrics.tenantRejectedStreams.WithLabelValues(req.Tenant, RejectedStreamReasonRateLimited).Add(float64(len(rateLimitedStreams)))
+
+ return &logproto.ExceedsLimitsResponse{
+ Tenant: req.Tenant,
+ RejectedStreams: rateLimitedStreams,
+ }, nil
+ }
+
+ // Only process global limit if we're exceeding it
+ if activeStreamsTotal >= uint64(maxGlobalStreams) {
+ for _, resp := range resps {
+ for _, unknownStream := range resp.Response.UnknownStreams {
+ if !uniqueStreamHashes[unknownStream] {
+ uniqueStreamHashes[unknownStream] = true
+ rejectedStreams = append(rejectedStreams, &logproto.RejectedStream{
+ StreamHash: unknownStream,
+ Reason: RejectedStreamReasonExceedsGlobalLimit,
+ })
+ }
}
}
}
if len(rejectedStreams) > 0 {
s.metrics.tenantExceedsLimits.WithLabelValues(req.Tenant).Inc()
- s.metrics.tenantRejectedStreams.WithLabelValues(req.Tenant, RejectedStreamReasonExceedsGlobalLimit).Add(float64(len(rejectedStreams)))
+
+ // Count rejections by reason
+ exceedsLimitCount := 0
+ for _, rejected := range rejectedStreams {
+ if rejected.Reason == RejectedStreamReasonExceedsGlobalLimit {
+ exceedsLimitCount++
+ }
+ }
+
+ if exceedsLimitCount > 0 {
+ s.metrics.tenantRejectedStreams.WithLabelValues(req.Tenant, RejectedStreamReasonExceedsGlobalLimit).Add(float64(exceedsLimitCount))
+ }
}
return &logproto.ExceedsLimitsResponse{
diff --git a/pkg/limits/frontend/service_test.go b/pkg/limits/frontend/service_test.go
index be77fcce8f7e9..5713e68776110 100644
--- a/pkg/limits/frontend/service_test.go
+++ b/pkg/limits/frontend/service_test.go
@@ -2,9 +2,13 @@ package frontend
import (
"context"
+ "fmt"
+ "sort"
"testing"
+ "time"
"github.com/go-kit/log"
+ "github.com/grafana/dskit/limiter"
"github.com/grafana/dskit/ring"
ring_client "github.com/grafana/dskit/ring/client"
"github.com/prometheus/client_golang/prometheus"
@@ -17,12 +21,21 @@ import (
type mockLimits struct {
maxGlobalStreams int
+ ingestionRate float64
}
func (m *mockLimits) MaxGlobalStreamsPerUser(_ string) int {
return m.maxGlobalStreams
}
+func (m *mockLimits) IngestionRateBytes(_ string) float64 {
+ return m.ingestionRate
+}
+
+func (m *mockLimits) IngestionBurstSizeBytes(_ string) int {
+ return 1000
+}
+
type mockReadRing struct {
ring.ReadRing
replicationSet ring.ReplicationSet
@@ -33,14 +46,15 @@ func (m *mockReadRing) GetAllHealthy(_ ring.Operation) (ring.ReplicationSet, err
}
type mockFactory struct {
- clients []logproto.IngestLimitsClient
+ clientsByAddr map[string]logproto.IngestLimitsClient
}
-func (f *mockFactory) FromInstance(_ ring.InstanceDesc) (ring_client.PoolClient, error) {
- for _, c := range f.clients {
- return c.(ring_client.PoolClient), nil
+func (f *mockFactory) FromInstance(inst ring.InstanceDesc) (ring_client.PoolClient, error) {
+ client, ok := f.clientsByAddr[inst.Addr]
+ if !ok {
+ return nil, fmt.Errorf("no client for address %s", inst.Addr)
}
- return nil, nil
+ return client.(ring_client.PoolClient), nil
}
type mockIngestLimitsClient struct {
@@ -54,7 +68,14 @@ func (m *mockIngestLimitsClient) GetAssignedPartitions(_ context.Context, _ *log
}
func (m *mockIngestLimitsClient) GetStreamUsage(_ context.Context, _ *logproto.GetStreamUsageRequest, _ ...grpc.CallOption) (*logproto.GetStreamUsageResponse, error) {
- return m.getStreamUsageResponse, nil
+ // Create a copy of the response to avoid modifying the original
+ resp := &logproto.GetStreamUsageResponse{
+ Tenant: m.getStreamUsageResponse.Tenant,
+ ActiveStreams: m.getStreamUsageResponse.ActiveStreams,
+ Rate: m.getStreamUsageResponse.Rate,
+ UnknownStreams: m.getStreamUsageResponse.UnknownStreams,
+ }
+ return resp, nil
}
func (m *mockIngestLimitsClient) Close() error {
@@ -76,6 +97,7 @@ func TestRingIngestLimitsService_ExceedsLimits(t *testing.T) {
name string
tenant string
maxGlobalStreams int
+ ingestionRate float64
streams []*logproto.StreamMetadata
getStreamUsageResps []*logproto.GetStreamUsageResponse
getAssignedPartitionsResps []*logproto.GetAssignedPartitionsResponse
@@ -85,11 +107,13 @@ func TestRingIngestLimitsService_ExceedsLimits(t *testing.T) {
name: "no streams",
tenant: "test",
maxGlobalStreams: 10,
+ ingestionRate: 100,
streams: []*logproto.StreamMetadata{},
getStreamUsageResps: []*logproto.GetStreamUsageResponse{
{
Tenant: "test",
ActiveStreams: 0,
+ Rate: 10,
},
},
getAssignedPartitionsResps: []*logproto.GetAssignedPartitionsResponse{
@@ -106,6 +130,7 @@ func TestRingIngestLimitsService_ExceedsLimits(t *testing.T) {
name: "under limit",
tenant: "test",
maxGlobalStreams: 10,
+ ingestionRate: 100,
streams: []*logproto.StreamMetadata{
{StreamHash: 1},
{StreamHash: 2},
@@ -114,6 +139,7 @@ func TestRingIngestLimitsService_ExceedsLimits(t *testing.T) {
{
Tenant: "test",
ActiveStreams: 2,
+ Rate: 10,
},
},
getAssignedPartitionsResps: []*logproto.GetAssignedPartitionsResponse{
@@ -130,6 +156,7 @@ func TestRingIngestLimitsService_ExceedsLimits(t *testing.T) {
name: "exceeds limit with new streams",
tenant: "test",
maxGlobalStreams: 5,
+ ingestionRate: 100,
streams: []*logproto.StreamMetadata{
{StreamHash: 6}, // Exceeds limit
{StreamHash: 7}, // Exceeds limit
@@ -138,6 +165,7 @@ func TestRingIngestLimitsService_ExceedsLimits(t *testing.T) {
{
Tenant: "test",
ActiveStreams: 5,
+ Rate: 10,
UnknownStreams: []uint64{6, 7},
},
},
@@ -158,6 +186,7 @@ func TestRingIngestLimitsService_ExceedsLimits(t *testing.T) {
name: "exceeds limit but reject only new streams",
tenant: "test",
maxGlobalStreams: 5,
+ ingestionRate: 100,
streams: []*logproto.StreamMetadata{
{StreamHash: 1},
{StreamHash: 2},
@@ -171,6 +200,7 @@ func TestRingIngestLimitsService_ExceedsLimits(t *testing.T) {
{
Tenant: "test",
ActiveStreams: 5,
+ Rate: 10,
UnknownStreams: []uint64{6, 7},
},
},
@@ -191,6 +221,7 @@ func TestRingIngestLimitsService_ExceedsLimits(t *testing.T) {
name: "empty response from backend",
tenant: "test",
maxGlobalStreams: 10,
+ ingestionRate: 100,
streams: []*logproto.StreamMetadata{
{StreamHash: 1},
},
@@ -206,6 +237,126 @@ func TestRingIngestLimitsService_ExceedsLimits(t *testing.T) {
},
expectedRejections: nil, // No rejections because activeStreamsTotal is 0
},
+ {
+ name: "rate limit not exceeded",
+ tenant: "test",
+ maxGlobalStreams: 10,
+ ingestionRate: 100,
+ streams: []*logproto.StreamMetadata{
+ {StreamHash: 1},
+ {StreamHash: 2},
+ },
+ getStreamUsageResps: []*logproto.GetStreamUsageResponse{
+ {
+ Tenant: "test",
+ ActiveStreams: 2,
+ Rate: 50, // Below the limit of 100 bytes/sec
+ },
+ },
+ getAssignedPartitionsResps: []*logproto.GetAssignedPartitionsResponse{
+ {
+ AssignedPartitions: map[int32]int64{
+ 0: 1,
+ },
+ },
+ },
+ expectedRejections: nil,
+ },
+ {
+ name: "rate limit exceeded",
+ tenant: "test",
+ maxGlobalStreams: 10,
+ ingestionRate: 100,
+ streams: []*logproto.StreamMetadata{
+ {StreamHash: 1},
+ {StreamHash: 2},
+ },
+ getStreamUsageResps: []*logproto.GetStreamUsageResponse{
+ {
+ Tenant: "test",
+ ActiveStreams: 2,
+ Rate: 1500, // Above the limit of 100 bytes/sec
+ },
+ },
+ getAssignedPartitionsResps: []*logproto.GetAssignedPartitionsResponse{
+ {
+ AssignedPartitions: map[int32]int64{
+ 0: 1,
+ },
+ },
+ },
+ expectedRejections: []*logproto.RejectedStream{
+ {StreamHash: 1, Reason: RejectedStreamReasonRateLimited},
+ {StreamHash: 2, Reason: RejectedStreamReasonRateLimited},
+ },
+ },
+ {
+ name: "rate limit exceeded with multiple instances",
+ tenant: "test",
+ maxGlobalStreams: 10,
+ ingestionRate: 100,
+ streams: []*logproto.StreamMetadata{
+ {StreamHash: 1},
+ {StreamHash: 2},
+ },
+ getStreamUsageResps: []*logproto.GetStreamUsageResponse{
+ {
+ Tenant: "test",
+ ActiveStreams: 1,
+ Rate: 600,
+ },
+ {
+ Tenant: "test",
+ ActiveStreams: 1,
+ Rate: 500,
+ },
+ },
+ getAssignedPartitionsResps: []*logproto.GetAssignedPartitionsResponse{
+ {
+ AssignedPartitions: map[int32]int64{
+ 0: 1,
+ },
+ },
+ {
+ AssignedPartitions: map[int32]int64{
+ 1: 1,
+ },
+ },
+ },
+ expectedRejections: []*logproto.RejectedStream{
+ {StreamHash: 1, Reason: RejectedStreamReasonRateLimited},
+ {StreamHash: 2, Reason: RejectedStreamReasonRateLimited},
+ },
+ },
+ {
+ name: "both global limit and rate limit exceeded",
+ tenant: "test",
+ maxGlobalStreams: 5,
+ ingestionRate: 100,
+ streams: []*logproto.StreamMetadata{
+ {StreamHash: 6},
+ {StreamHash: 7},
+ },
+ getStreamUsageResps: []*logproto.GetStreamUsageResponse{
+ {
+ Tenant: "test",
+ ActiveStreams: 5,
+ UnknownStreams: []uint64{6, 7},
+ Rate: 1500, // Above the limit of 100 bytes/sec
+ },
+ },
+ getAssignedPartitionsResps: []*logproto.GetAssignedPartitionsResponse{
+ {
+ AssignedPartitions: map[int32]int64{
+ 0: 1,
+ },
+ },
+ },
+ expectedRejections: []*logproto.RejectedStream{
+ {StreamHash: 6, Reason: RejectedStreamReasonRateLimited},
+ {StreamHash: 7, Reason: RejectedStreamReasonRateLimited},
+ },
+ },
}
for _, tt := range tests {
@@ -213,15 +364,18 @@ func TestRingIngestLimitsService_ExceedsLimits(t *testing.T) {
// Create mock clients that return the test responses
mockClients := make([]logproto.IngestLimitsClient, len(tt.getStreamUsageResps))
mockInstances := make([]ring.InstanceDesc, len(tt.getStreamUsageResps))
+ clientsByAddr := make(map[string]logproto.IngestLimitsClient)
for i, resp := range tt.getStreamUsageResps {
mockClients[i] = &mockIngestLimitsClient{
getStreamUsageResponse: resp,
getAssignedPartitionsResponse: tt.getAssignedPartitionsResps[i],
}
+ addr := fmt.Sprintf("mock-instance-%d", i)
mockInstances[i] = ring.InstanceDesc{
- Addr: "mock-instance",
+ Addr: addr,
}
+ clientsByAddr[addr] = mockClients[i]
}
mockRing := &mockReadRing{
@@ -239,16 +393,19 @@ func TestRingIngestLimitsService_ExceedsLimits(t *testing.T) {
"test",
poolCfg,
ring_client.NewRingServiceDiscovery(mockRing),
- &mockFactory{clients: mockClients},
+ &mockFactory{clientsByAddr: clientsByAddr},
prometheus.NewGauge(prometheus.GaugeOpts{}),
log.NewNopLogger(),
)
mockLimits := &mockLimits{
maxGlobalStreams: tt.maxGlobalStreams,
+ ingestionRate: tt.ingestionRate,
}
- service := NewRingIngestLimitsService(mockRing, mockPool, mockLimits, log.NewNopLogger(), prometheus.NewRegistry())
+ rateLimiter := limiter.NewRateLimiter(newIngestionRateStrategy(mockLimits), 10*time.Second)
+
+ service := NewRingIngestLimitsService(mockRing, mockPool, mockLimits, rateLimiter, log.NewNopLogger(), prometheus.NewRegistry())
req := &logproto.ExceedsLimitsRequest{
Tenant: tt.tenant,
@@ -258,6 +415,26 @@ func TestRingIngestLimitsService_ExceedsLimits(t *testing.T) {
resp, err := service.ExceedsLimits(context.Background(), req)
require.NoError(t, err)
require.Equal(t, tt.tenant, resp.Tenant)
+
+ // Sort the rejected streams for consistent comparison
+ if resp.RejectedStreams != nil {
+ sort.Slice(resp.RejectedStreams, func(i, j int) bool {
+ if resp.RejectedStreams[i].StreamHash == resp.RejectedStreams[j].StreamHash {
+ return resp.RejectedStreams[i].Reason < resp.RejectedStreams[j].Reason
+ }
+ return resp.RejectedStreams[i].StreamHash < resp.RejectedStreams[j].StreamHash
+ })
+ }
+
+ if tt.expectedRejections != nil {
+ sort.Slice(tt.expectedRejections, func(i, j int) bool {
+ if tt.expectedRejections[i].StreamHash == tt.expectedRejections[j].StreamHash {
+ return tt.expectedRejections[i].Reason < tt.expectedRejections[j].Reason
+ }
+ return tt.expectedRejections[i].StreamHash < tt.expectedRejections[j].StreamHash
+ })
+ }
+
require.Equal(t, tt.expectedRejections, resp.RejectedStreams)
})
}
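
The frontend changes above delegate the per-tenant limit lookup to a dskit `limiter.RateLimiter` built from `ingestionRateStrategy`, then compare the rate aggregated from the backends against it. A compact sketch of that wiring, using only the calls visible in the diff; the fixed limit values and the tenant name are illustrative.

```go
package main

import (
	"fmt"
	"time"

	"github.com/grafana/dskit/limiter"
)

// fixedLimits satisfies the strategy contract the frontend wires in:
// Limit returns the per-tenant rate in bytes/sec, Burst the burst size in bytes.
type fixedLimits struct {
	rate  float64
	burst int
}

func (l fixedLimits) Limit(_ string) float64 { return l.rate }
func (l fixedLimits) Burst(_ string) int     { return l.burst }

func main() {
	// The recheck period plays the role of -ingest-limits-frontend.recheck-period.
	rl := limiter.NewRateLimiter(fixedLimits{rate: 100, burst: 1000}, 10*time.Second)

	// Pretend the backends reported an aggregated tenant rate of 1500 bytes/sec.
	tenantRateBytes := 1500.0
	if tenantRateBytes > rl.Limit(time.Now(), "tenant-1") {
		fmt.Println("reject all streams in the request: rate_limited")
	}
}
```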
diff --git a/pkg/limits/ingest_limits.go b/pkg/limits/ingest_limits.go
index edfd89d9fe11e..62d5eb33cdeb2 100644
--- a/pkg/limits/ingest_limits.go
+++ b/pkg/limits/ingest_limits.go
@@ -6,8 +6,6 @@ import (
"flag"
"fmt"
"net/http"
- "strconv"
- "strings"
"sync"
"time"
@@ -57,6 +55,13 @@ var (
[]string{"tenant"},
nil,
)
+
+ tenantIngestedBytesTotal = prometheus.NewDesc(
+ constants.Loki+"_ingest_limits_ingested_bytes_total",
+ "The total number of bytes ingested per tenant within the active window. This is not a global total, as tenants can be sharded over multiple pods.",
+ []string{"tenant"},
+ nil,
+ )
)
// Config represents the configuration for the ingest limits service.
@@ -68,6 +73,17 @@ type Config struct {
// Stream metadata older than WindowSize will be evicted from the metadata map.
WindowSize time.Duration `yaml:"window_size"`
+ // RateWindow defines the time window for rate calculation.
+ // This should match the window used in Prometheus rate() queries for consistency,
+ // when using the `loki_ingest_limits_ingested_bytes_total` metric.
+ // Defaults to 5 minutes if not specified.
+ RateWindow time.Duration `yaml:"rate_window"`
+
+ // BucketDuration defines the granularity of time buckets used for sliding window rate calculation.
+ // Smaller buckets provide more precise rate tracking but require more memory.
+ // Defaults to 1 minute if not specified.
+ BucketDuration time.Duration `yaml:"bucket_duration"`
+
// LifecyclerConfig is the config to build a ring lifecycler.
LifecyclerConfig ring.LifecyclerConfig `yaml:"lifecycler,omitempty"`
@@ -82,10 +98,27 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
cfg.LifecyclerConfig.RegisterFlagsWithPrefix("ingest-limits.", f, util_log.Logger)
f.BoolVar(&cfg.Enabled, "ingest-limits.enabled", false, "Enable the ingest limits service.")
f.DurationVar(&cfg.WindowSize, "ingest-limits.window-size", 1*time.Hour, "The time window for which stream metadata is considered active.")
+ f.DurationVar(&cfg.RateWindow, "ingest-limits.rate-window", 5*time.Minute, "The time window for rate calculation. This should match the window used in Prometheus rate() queries for consistency.")
+ f.DurationVar(&cfg.BucketDuration, "ingest-limits.bucket-duration", 1*time.Minute, "The granularity of time buckets used for sliding window rate calculation. Smaller buckets provide more precise rate tracking but require more memory.")
f.IntVar(&cfg.NumPartitions, "ingest-limits.num-partitions", 64, "The number of partitions for the Kafka topic used to read and write stream metadata. It is fixed, not a maximum.")
}
func (cfg *Config) Validate() error {
+ if cfg.WindowSize <= 0 {
+ return errors.New("window-size must be greater than 0")
+ }
+ if cfg.RateWindow <= 0 {
+ return errors.New("rate-window must be greater than 0")
+ }
+ if cfg.BucketDuration <= 0 {
+ return errors.New("bucket-duration must be greater than 0")
+ }
+ if cfg.RateWindow < cfg.BucketDuration {
+ return errors.New("rate-window must be greater than or equal to bucket-duration")
+ }
+ if cfg.NumPartitions <= 0 {
+ return errors.New("num-partitions must be greater than 0")
+ }
return nil
}
@@ -123,6 +156,15 @@ func newMetrics(reg prometheus.Registerer) *metrics {
type streamMetadata struct {
hash uint64
lastSeenAt int64
+ totalSize uint64
+ // Add a slice to track bytes per time interval for sliding window rate calculation
+ rateBuckets []rateBucket
+}
+
+// rateBucket represents the bytes received during a specific time interval
+type rateBucket struct {
+ timestamp int64 // start of the interval
+ size uint64 // bytes received during this interval
}
// IngestLimits is a service that manages stream metadata limits.
@@ -211,6 +253,7 @@ func (s *IngestLimits) Describe(descs chan<- *prometheus.Desc) {
descs <- tenantPartitionDesc
descs <- tenantRecordedStreamsDesc
descs <- tenantActiveStreamsDesc
+ descs <- tenantIngestedBytesTotal
}
func (s *IngestLimits) Collect(m chan<- prometheus.Metric) {
@@ -221,8 +264,9 @@ func (s *IngestLimits) Collect(m chan<- prometheus.Metric) {
for tenant, partitions := range s.metadata {
var (
- recorded int
- active int
+ recorded int
+ active int
+ totalBytes uint64
)
for partitionID, partition := range partitions {
@@ -235,6 +279,7 @@ func (s *IngestLimits) Collect(m chan<- prometheus.Metric) {
for _, stream := range partition {
if stream.lastSeenAt >= cutoff {
active++
+ totalBytes += stream.totalSize
}
}
}
@@ -242,6 +287,7 @@ func (s *IngestLimits) Collect(m chan<- prometheus.Metric) {
m <- prometheus.MustNewConstMetric(tenantPartitionDesc, prometheus.GaugeValue, float64(len(partitions)), tenant)
m <- prometheus.MustNewConstMetric(tenantRecordedStreamsDesc, prometheus.GaugeValue, float64(recorded), tenant)
m <- prometheus.MustNewConstMetric(tenantActiveStreamsDesc, prometheus.GaugeValue, float64(active), tenant)
+ m <- prometheus.MustNewConstMetric(tenantIngestedBytesTotal, prometheus.CounterValue, float64(totalBytes), tenant)
}
}
@@ -385,24 +431,29 @@ func (s *IngestLimits) evictOldStreams(_ context.Context) {
for tenant, partitions := range s.metadata {
evicted := 0
for partitionID, streams := range partitions {
- for i, stream := range streams {
- if stream.lastSeenAt < cutoff {
- // Delete the element without allocating or copying into
- // a new backing array https://go.dev/wiki/SliceTricks#delete.
- s.metadata[tenant][partitionID] = append(
- s.metadata[tenant][partitionID][:i],
- s.metadata[tenant][partitionID][i+1:]...,
- )
+ // Create a new slice with only active streams
+ activeStreams := make([]streamMetadata, 0, len(streams))
+ for _, stream := range streams {
+ if stream.lastSeenAt >= cutoff {
+ activeStreams = append(activeStreams, stream)
+ } else {
evicted++
}
}
+ s.metadata[tenant][partitionID] = activeStreams
+
+ // If no active streams in this partition, delete it
+ if len(activeStreams) == 0 {
+ delete(s.metadata[tenant], partitionID)
+ }
}
+
+ // If no partitions left for this tenant, delete the tenant
if len(s.metadata[tenant]) == 0 {
delete(s.metadata, tenant)
}
- s.metrics.tenantStreamEvictionsTotal.
- WithLabelValues(tenant).
- Add(float64(evicted))
+
+ s.metrics.tenantStreamEvictionsTotal.WithLabelValues(tenant).Add(float64(evicted))
}
}
@@ -450,18 +501,67 @@ func (s *IngestLimits) updateMetadata(rec *logproto.StreamMetadata, tenant strin
// Use the provided lastSeenAt timestamp as the last seen time
recordTime := lastSeenAt.UnixNano()
+ recTotalSize := rec.EntriesSize + rec.StructuredMetadataSize
+
+ // Get the bucket for this timestamp using the configured interval duration
+ bucketStart := lastSeenAt.Truncate(s.cfg.BucketDuration).UnixNano()
+
+ // Calculate the rate window cutoff for cleaning up old buckets
+ rateWindowCutoff := lastSeenAt.Add(-s.cfg.RateWindow).UnixNano()
for i, stream := range s.metadata[tenant][partition] {
if stream.hash == rec.StreamHash {
- stream.lastSeenAt = recordTime
- s.metadata[tenant][partition][i] = stream
+ // Update total size
+ totalSize := stream.totalSize + recTotalSize
+
+ // Update or add size for the current bucket
+ updated := false
+ sb := make([]rateBucket, 0, len(stream.rateBuckets)+1)
+
+ // Only keep buckets within the rate window and update the current bucket
+ for _, bucket := range stream.rateBuckets {
+ // Clean up buckets outside the rate window
+ if bucket.timestamp < rateWindowCutoff {
+ continue
+ }
+
+ if bucket.timestamp == bucketStart {
+ // Update existing bucket
+ sb = append(sb, rateBucket{
+ timestamp: bucketStart,
+ size: bucket.size + recTotalSize,
+ })
+ updated = true
+ } else {
+ // Keep other buckets within the rate window as is
+ sb = append(sb, bucket)
+ }
+ }
+
+ // Add new bucket if it wasn't updated
+ if !updated {
+ sb = append(sb, rateBucket{
+ timestamp: bucketStart,
+ size: recTotalSize,
+ })
+ }
+
+ s.metadata[tenant][partition][i] = streamMetadata{
+ hash: stream.hash,
+ lastSeenAt: recordTime,
+ totalSize: totalSize,
+ rateBuckets: sb,
+ }
return
}
}
+ // Create new stream metadata with the initial interval
s.metadata[tenant][partition] = append(s.metadata[tenant][partition], streamMetadata{
- hash: rec.StreamHash,
- lastSeenAt: recordTime,
+ hash: rec.StreamHash,
+ lastSeenAt: recordTime,
+ totalSize: recTotalSize,
+ rateBuckets: []rateBucket{{timestamp: bucketStart, size: recTotalSize}},
})
}
@@ -493,74 +593,69 @@ func (s *IngestLimits) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// Get the cutoff time for active streams
cutoff := time.Now().Add(-s.cfg.WindowSize).UnixNano()
+ // Get the rate window cutoff for rate calculations
+ rateWindowCutoff := time.Now().Add(-s.cfg.BucketDuration).UnixNano()
+
// Calculate stream counts and status per tenant
type tenantLimits struct {
- Tenant string `json:"tenant"`
- ActiveStreams uint64 `json:"activeStreams"`
- AssignedStreams []uint64 `json:"assignedStreams"`
+ Tenant string `json:"tenant"`
+ ActiveStreams uint64 `json:"activeStreams"`
+ Rate float64 `json:"rate"`
}
// Get tenant and partitions from query parameters
params := r.URL.Query()
tenant := params.Get("tenant")
- partitionsStr := params.Get("partitions")
-
- var requestedPartitions []int32
- if partitionsStr != "" {
- // Split comma-separated partition list
- partitionStrs := strings.Split(partitionsStr, ",")
- requestedPartitions = make([]int32, 0, len(partitionStrs))
-
- // Convert each partition string to int32
- for _, p := range partitionStrs {
- if val, err := strconv.ParseInt(strings.TrimSpace(p), 10, 32); err == nil {
- requestedPartitions = append(requestedPartitions, int32(val))
- }
- }
- }
-
- partitions := s.metadata[tenant]
-
var (
- activeStreams uint64
- assignedStreams = make([]uint64, 0)
- response = make(map[string]tenantLimits)
+ activeStreams uint64
+ totalSize uint64
+ response tenantLimits
)
- for _, requestedID := range requestedPartitions {
- // Consider the recorded stream if it's partition
- // is one of the partitions we are still assigned to.
- assigned := false
- for assignedID := range partitions {
- if requestedID == assignedID {
- assigned = true
- break
- }
- }
-
- if !assigned {
- continue
- }
-
- // If the stream is written into a partition we are
- // assigned to and has been seen within the window,
- // it is an active stream.
- for _, stream := range partitions[requestedID] {
+ for _, partitions := range s.metadata[tenant] {
+ for _, stream := range partitions {
if stream.lastSeenAt >= cutoff {
activeStreams++
- assignedStreams = append(assignedStreams, stream.hash)
+
+ // Calculate size only within the rate window
+ for _, bucket := range stream.rateBuckets {
+ if bucket.timestamp >= rateWindowCutoff {
+ totalSize += bucket.size
+ }
+ }
}
}
}
- if activeStreams > 0 || len(assignedStreams) > 0 {
- response[tenant] = tenantLimits{
- Tenant: tenant,
- ActiveStreams: activeStreams,
- AssignedStreams: assignedStreams,
+ // Calculate rate using only data from within the rate window
+ calculatedRate := float64(totalSize) / s.cfg.WindowSize.Seconds()
+
+ if activeStreams > 0 {
+ response = tenantLimits{
+ Tenant: tenant,
+ ActiveStreams: activeStreams,
+ Rate: calculatedRate,
+ }
+ } else {
+ // If no active streams found, return zeros
+ response = tenantLimits{
+ Tenant: tenant,
+ ActiveStreams: 0,
+ Rate: 0,
}
}
+ // Log the calculated values for debugging
+ level.Debug(s.logger).Log(
+ "msg", "HTTP endpoint calculated stream usage",
+ "tenant", tenant,
+ "active_streams", activeStreams,
+ "total_size", util.HumanizeBytes(totalSize),
+ "rate_window_seconds", s.cfg.RateWindow.Seconds(),
+ "calculated_rate", util.HumanizeBytes(uint64(calculatedRate)),
+ )
+
+ // Use util.WriteJSONResponse to write the JSON response
util.WriteJSONResponse(w, response)
}
@@ -584,6 +679,9 @@ func (s *IngestLimits) GetStreamUsage(_ context.Context, req *logproto.GetStream
// Get the cutoff time for active streams
cutoff := time.Now().Add(-s.cfg.WindowSize).UnixNano()
+ // Calculate the rate window cutoff in nanoseconds
+ rateWindowCutoff := time.Now().Add(-s.cfg.RateWindow).UnixNano()
+
// Get the tenant's streams
partitions := s.metadata[req.Tenant]
if partitions == nil {
@@ -598,7 +696,11 @@ func (s *IngestLimits) GetStreamUsage(_ context.Context, req *logproto.GetStream
// across all assigned partitions and record
// the streams that have been seen within the
// window
- var activeStreams uint64
+ var (
+ activeStreams uint64
+ totalSize uint64
+ )
+
for _, requestedID := range req.Partitions {
// Consider the recorded stream if it's partition
// is one of the partitions we are still assigned to.
@@ -618,21 +720,30 @@ func (s *IngestLimits) GetStreamUsage(_ context.Context, req *logproto.GetStream
// assigned to and has been seen within the window,
// it is an active stream.
for _, stream := range partitions[requestedID] {
- if stream.lastSeenAt >= cutoff {
- activeStreams++
+ if stream.lastSeenAt < cutoff {
+ continue
+ }
+
+ activeStreams++
+
+ // Calculate size only within the rate window
+ for _, bucket := range stream.rateBuckets {
+ if bucket.timestamp >= rateWindowCutoff {
+ totalSize += bucket.size
+ }
}
}
}
// Get the unknown streams
var unknownStreams []uint64
- for _, reqHash := range req.StreamHashes {
+ for _, streamHash := range req.StreamHashes {
found := false
outer:
for _, streams := range partitions {
for _, stream := range streams {
- if stream.hash == reqHash && stream.lastSeenAt >= cutoff {
+ if stream.hash == streamHash && stream.lastSeenAt >= cutoff {
found = true
break outer
}
@@ -640,13 +751,28 @@ func (s *IngestLimits) GetStreamUsage(_ context.Context, req *logproto.GetStream
}
if !found {
- unknownStreams = append(unknownStreams, reqHash)
+ unknownStreams = append(unknownStreams, streamHash)
+ continue
}
}
+ // Calculate rate using only data from within the rate window
+ rate := float64(totalSize) / s.cfg.RateWindow.Seconds()
+
+ // Debug logging to help diagnose rate calculation issues
+ level.Debug(s.logger).Log(
+ "msg", "calculated stream usage",
+ "tenant", req.Tenant,
+ "active_streams", activeStreams,
+ "total_size", util.HumanizeBytes(totalSize),
+ "rate_window_seconds", s.cfg.RateWindow.Seconds(),
+ "calculated_rate", util.HumanizeBytes(uint64(rate)),
+ )
+
return &logproto.GetStreamUsageResponse{
Tenant: req.Tenant,
ActiveStreams: activeStreams,
UnknownStreams: unknownStreams,
+ Rate: int64(rate),
}, nil
}
diff --git a/pkg/limits/ingest_limits_test.go b/pkg/limits/ingest_limits_test.go
index 88c1d2b834804..a3f5730eee47d 100644
--- a/pkg/limits/ingest_limits_test.go
+++ b/pkg/limits/ingest_limits_test.go
@@ -11,6 +11,7 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/stretchr/testify/require"
+ "github.com/grafana/loki/v3/pkg/kafka"
"github.com/grafana/loki/v3/pkg/logproto"
)
@@ -22,6 +23,8 @@ func TestIngestLimits_GetStreamUsage(t *testing.T) {
assignedPartitionIDs []int32
metadata map[string]map[int32][]streamMetadata
windowSize time.Duration
+ rateWindow time.Duration
+ bucketDuration time.Duration
// Request data for GetStreamUsage.
tenantID string
@@ -30,154 +33,245 @@ func TestIngestLimits_GetStreamUsage(t *testing.T) {
// Expectations.
expectedActive uint64
+ expectedRate int64
expectedUnknownStreams []uint64
}{
{
- name: "tenant not found",
+ name: "tenant not found",
+ // setup data
assignedPartitionIDs: []int32{0},
metadata: map[string]map[int32][]streamMetadata{
"tenant2": {
0: []streamMetadata{
- {hash: 4, lastSeenAt: time.Now().UnixNano()},
- {hash: 5, lastSeenAt: time.Now().UnixNano()},
+ {hash: 4, lastSeenAt: time.Now().UnixNano(), totalSize: 1000, rateBuckets: []rateBucket{{timestamp: time.Now().UnixNano(), size: 1000}}},
+ {hash: 5, lastSeenAt: time.Now().UnixNano(), totalSize: 2000, rateBuckets: []rateBucket{{timestamp: time.Now().UnixNano(), size: 2000}}},
},
},
},
- windowSize: time.Hour,
+ windowSize: time.Hour,
+ rateWindow: 5 * time.Minute,
+ bucketDuration: time.Minute,
+ // request data
tenantID: "tenant1",
partitionIDs: []int32{0},
streamHashes: []uint64{4, 5},
},
{
- name: "all streams active",
+ name: "all streams active",
+ // setup data
assignedPartitionIDs: []int32{0},
metadata: map[string]map[int32][]streamMetadata{
"tenant1": {
0: []streamMetadata{
- {hash: 1, lastSeenAt: time.Now().UnixNano()},
- {hash: 2, lastSeenAt: time.Now().UnixNano()},
- {hash: 3, lastSeenAt: time.Now().UnixNano()},
- {hash: 4, lastSeenAt: time.Now().UnixNano()},
+ {hash: 1, lastSeenAt: time.Now().UnixNano(), totalSize: 1000, rateBuckets: []rateBucket{{timestamp: time.Now().UnixNano(), size: 1000}}},
+ {hash: 2, lastSeenAt: time.Now().UnixNano(), totalSize: 2000, rateBuckets: []rateBucket{{timestamp: time.Now().UnixNano(), size: 2000}}},
+ {hash: 3, lastSeenAt: time.Now().UnixNano(), totalSize: 3000, rateBuckets: []rateBucket{{timestamp: time.Now().UnixNano(), size: 3000}}},
+ {hash: 4, lastSeenAt: time.Now().UnixNano(), totalSize: 4000, rateBuckets: []rateBucket{{timestamp: time.Now().UnixNano(), size: 4000}}},
},
},
},
windowSize: time.Hour,
- tenantID: "tenant1",
- partitionIDs: []int32{0},
- streamHashes: []uint64{1, 2, 3, 4},
+ rateWindow: 5 * time.Minute,
+ bucketDuration: time.Minute,
+ // request data
+ tenantID: "tenant1",
+ partitionIDs: []int32{0},
+ streamHashes: []uint64{1, 2, 3, 4},
+ // expectations
expectedActive: 4,
+ expectedRate: int64(10000) / int64(5*60), // 10000 bytes / 5 minutes in seconds
},
{
- name: "mixed active and expired streams",
+ name: "mixed active and expired streams",
+ // setup data
assignedPartitionIDs: []int32{0},
metadata: map[string]map[int32][]streamMetadata{
"tenant1": {
0: []streamMetadata{
- {hash: 1, lastSeenAt: time.Now().UnixNano()},
- {hash: 2, lastSeenAt: time.Now().Add(-2 * time.Hour).UnixNano()}, // expired
- {hash: 3, lastSeenAt: time.Now().UnixNano()},
- {hash: 4, lastSeenAt: time.Now().Add(-2 * time.Hour).UnixNano()}, // expired
- {hash: 5, lastSeenAt: time.Now().UnixNano()},
+ {hash: 1, lastSeenAt: time.Now().UnixNano(), totalSize: 1000, rateBuckets: []rateBucket{{timestamp: time.Now().UnixNano(), size: 1000}}},
+ {hash: 2, lastSeenAt: time.Now().Add(-2 * time.Hour).UnixNano(), totalSize: 2000}, // expired
+ {hash: 3, lastSeenAt: time.Now().UnixNano(), totalSize: 3000, rateBuckets: []rateBucket{{timestamp: time.Now().UnixNano(), size: 3000}}},
+ {hash: 4, lastSeenAt: time.Now().Add(-2 * time.Hour).UnixNano(), totalSize: 4000}, // expired
+ {hash: 5, lastSeenAt: time.Now().UnixNano(), totalSize: 5000, rateBuckets: []rateBucket{{timestamp: time.Now().UnixNano(), size: 5000}}},
},
},
},
windowSize: time.Hour,
- tenantID: "tenant1",
- partitionIDs: []int32{0},
- streamHashes: []uint64{1, 3, 5},
+ rateWindow: 5 * time.Minute,
+ bucketDuration: time.Minute,
+ // request data
+ tenantID: "tenant1",
+ partitionIDs: []int32{0},
+ streamHashes: []uint64{1, 3, 5},
+ // expectations
expectedActive: 3,
+ expectedRate: int64(9000) / int64(5*60), // 9000 bytes / 5 minutes in seconds
},
{
- name: "all streams expired",
+ name: "all streams expired",
+ // setup data
assignedPartitionIDs: []int32{0},
metadata: map[string]map[int32][]streamMetadata{
"tenant1": {
0: []streamMetadata{
- {hash: 1, lastSeenAt: time.Now().Add(-2 * time.Hour).UnixNano()},
- {hash: 2, lastSeenAt: time.Now().Add(-2 * time.Hour).UnixNano()},
+ {hash: 1, lastSeenAt: time.Now().Add(-2 * time.Hour).UnixNano(), totalSize: 1000},
+ {hash: 2, lastSeenAt: time.Now().Add(-2 * time.Hour).UnixNano(), totalSize: 2000},
},
},
},
- windowSize: time.Hour,
- tenantID: "tenant1",
+ windowSize: time.Hour,
+ rateWindow: 5 * time.Minute,
+ bucketDuration: time.Minute,
+ // request data
+ tenantID: "tenant1",
+ // expectations
+ expectedActive: 0,
+ expectedRate: 0,
},
{
- name: "empty stream hashes",
+ name: "empty stream hashes",
+ // setup data
assignedPartitionIDs: []int32{0},
metadata: map[string]map[int32][]streamMetadata{
"tenant1": {
0: []streamMetadata{
- {hash: 1, lastSeenAt: time.Now().UnixNano()},
- {hash: 2, lastSeenAt: time.Now().UnixNano()},
+ {hash: 1, lastSeenAt: time.Now().UnixNano(), totalSize: 1000, rateBuckets: []rateBucket{{timestamp: time.Now().UnixNano(), size: 1000}}},
+ {hash: 2, lastSeenAt: time.Now().UnixNano(), totalSize: 2000, rateBuckets: []rateBucket{{timestamp: time.Now().UnixNano(), size: 2000}}},
},
},
},
windowSize: time.Hour,
- tenantID: "tenant1",
- partitionIDs: []int32{0},
- streamHashes: []uint64{},
+ rateWindow: 5 * time.Minute,
+ bucketDuration: time.Minute,
+ // request data
+ tenantID: "tenant1",
+ partitionIDs: []int32{0},
+ streamHashes: []uint64{},
+			// expectations
expectedActive: 2,
+ expectedRate: int64(3000) / int64(5*60), // 3000 bytes / 5 minutes in seconds
},
{
- name: "unknown streams requested",
+ name: "unknown streams requested",
+ // setup data
assignedPartitionIDs: []int32{0},
metadata: map[string]map[int32][]streamMetadata{
"tenant1": {
0: []streamMetadata{
- {hash: 1, lastSeenAt: time.Now().UnixNano()},
- {hash: 2, lastSeenAt: time.Now().UnixNano()},
- {hash: 3, lastSeenAt: time.Now().UnixNano()},
- {hash: 4, lastSeenAt: time.Now().UnixNano()},
- {hash: 5, lastSeenAt: time.Now().UnixNano()},
+ {hash: 1, lastSeenAt: time.Now().UnixNano(), totalSize: 1000, rateBuckets: []rateBucket{{timestamp: time.Now().UnixNano(), size: 1000}}},
+ {hash: 2, lastSeenAt: time.Now().UnixNano(), totalSize: 2000, rateBuckets: []rateBucket{{timestamp: time.Now().UnixNano(), size: 2000}}},
+ {hash: 3, lastSeenAt: time.Now().UnixNano(), totalSize: 3000, rateBuckets: []rateBucket{{timestamp: time.Now().UnixNano(), size: 3000}}},
+ {hash: 4, lastSeenAt: time.Now().UnixNano(), totalSize: 4000, rateBuckets: []rateBucket{{timestamp: time.Now().UnixNano(), size: 4000}}},
+ {hash: 5, lastSeenAt: time.Now().UnixNano(), totalSize: 5000, rateBuckets: []rateBucket{{timestamp: time.Now().UnixNano(), size: 5000}}},
},
},
},
- windowSize: time.Hour,
- tenantID: "tenant1",
- partitionIDs: []int32{0},
- streamHashes: []uint64{6, 7, 8},
+ windowSize: time.Hour,
+ rateWindow: 5 * time.Minute,
+ bucketDuration: time.Minute,
+ // request data
+ tenantID: "tenant1",
+ partitionIDs: []int32{0},
+ streamHashes: []uint64{6, 7, 8},
+			// expectations
expectedActive: 5,
expectedUnknownStreams: []uint64{6, 7, 8},
+ expectedRate: int64(15000) / int64(5*60), // 15000 bytes / 5 minutes in seconds
},
{
- name: "multiple assigned partitions",
+ name: "multiple assigned partitions",
+ // setup data
assignedPartitionIDs: []int32{0, 1},
metadata: map[string]map[int32][]streamMetadata{
"tenant1": {
0: []streamMetadata{
- {hash: 1, lastSeenAt: time.Now().UnixNano()},
- {hash: 2, lastSeenAt: time.Now().UnixNano()},
+ {hash: 1, lastSeenAt: time.Now().UnixNano(), totalSize: 1000, rateBuckets: []rateBucket{{timestamp: time.Now().UnixNano(), size: 1000}}},
+ {hash: 2, lastSeenAt: time.Now().UnixNano(), totalSize: 2000, rateBuckets: []rateBucket{{timestamp: time.Now().UnixNano(), size: 2000}}},
},
1: []streamMetadata{
- {hash: 3, lastSeenAt: time.Now().UnixNano()},
- {hash: 4, lastSeenAt: time.Now().UnixNano()},
- {hash: 5, lastSeenAt: time.Now().UnixNano()},
+ {hash: 3, lastSeenAt: time.Now().UnixNano(), totalSize: 3000, rateBuckets: []rateBucket{{timestamp: time.Now().UnixNano(), size: 3000}}},
+ {hash: 4, lastSeenAt: time.Now().UnixNano(), totalSize: 4000, rateBuckets: []rateBucket{{timestamp: time.Now().UnixNano(), size: 4000}}},
+ {hash: 5, lastSeenAt: time.Now().UnixNano(), totalSize: 5000, rateBuckets: []rateBucket{{timestamp: time.Now().UnixNano(), size: 5000}}},
},
},
},
- tenantID: "tenant1",
- partitionIDs: []int32{0, 1},
- streamHashes: []uint64{1, 2, 3, 4, 5},
windowSize: time.Hour,
+ rateWindow: 5 * time.Minute,
+ bucketDuration: time.Minute,
+ // request data
+ tenantID: "tenant1",
+ partitionIDs: []int32{0, 1},
+ streamHashes: []uint64{1, 2, 3, 4, 5},
+ // expectations
expectedActive: 5,
+ expectedRate: int64(15000) / int64(5*60), // 15000 bytes / 5 minutes in seconds
},
{
- name: "multiple partitions with unasigned partitions",
+			name:                 "multiple partitions with unassigned partitions",
+ // setup data
assignedPartitionIDs: []int32{0},
metadata: map[string]map[int32][]streamMetadata{
"tenant1": {
0: []streamMetadata{
- {hash: 1, lastSeenAt: time.Now().UnixNano()},
- {hash: 2, lastSeenAt: time.Now().UnixNano()},
+ {hash: 1, lastSeenAt: time.Now().UnixNano(), totalSize: 1000, rateBuckets: []rateBucket{{timestamp: time.Now().UnixNano(), size: 1000}}},
+ {hash: 2, lastSeenAt: time.Now().UnixNano(), totalSize: 2000, rateBuckets: []rateBucket{{timestamp: time.Now().UnixNano(), size: 2000}}},
},
},
},
- windowSize: time.Hour,
- tenantID: "tenant1",
- partitionIDs: []int32{0, 1},
- streamHashes: []uint64{1, 2, 3, 4, 5},
+ windowSize: time.Hour,
+ rateWindow: 5 * time.Minute,
+ bucketDuration: time.Minute,
+ // request data
+ tenantID: "tenant1",
+ partitionIDs: []int32{0, 1},
+ streamHashes: []uint64{1, 2, 3, 4, 5},
+ // expectations
expectedActive: 2,
expectedUnknownStreams: []uint64{3, 4, 5},
+ expectedRate: int64(3000) / int64(5*60), // 3000 bytes / 5 minutes in seconds
+ },
+ {
+ name: "mixed buckets within and outside rate window",
+ // setup data
+ assignedPartitionIDs: []int32{0},
+ metadata: map[string]map[int32][]streamMetadata{
+ "tenant1": {
+ 0: []streamMetadata{
+ {
+ hash: 1,
+ lastSeenAt: time.Now().UnixNano(),
+ totalSize: 5000, // Total size includes all buckets
+ rateBuckets: []rateBucket{
+ {timestamp: time.Now().Add(-10 * time.Minute).UnixNano(), size: 1000}, // Outside rate window
+ {timestamp: time.Now().Add(-6 * time.Minute).UnixNano(), size: 1500}, // Outside rate window
+ {timestamp: time.Now().Add(-4 * time.Minute).UnixNano(), size: 1000}, // Inside rate window
+ {timestamp: time.Now().Add(-2 * time.Minute).UnixNano(), size: 1500}, // Inside rate window
+ },
+ },
+ {
+ hash: 2,
+ lastSeenAt: time.Now().UnixNano(),
+ totalSize: 4000, // Total size includes all buckets
+ rateBuckets: []rateBucket{
+ {timestamp: time.Now().Add(-8 * time.Minute).UnixNano(), size: 1000}, // Outside rate window
+ {timestamp: time.Now().Add(-3 * time.Minute).UnixNano(), size: 1500}, // Inside rate window
+ {timestamp: time.Now().Add(-1 * time.Minute).UnixNano(), size: 1500}, // Inside rate window
+ },
+ },
+ },
+ },
+ },
+ windowSize: time.Hour,
+ rateWindow: 5 * time.Minute,
+ bucketDuration: time.Minute,
+ // request data
+ tenantID: "tenant1",
+ partitionIDs: []int32{0},
+ streamHashes: []uint64{1, 2},
+ // expectations
+ expectedActive: 2,
+ // Only count size from buckets within rate window: 1000 + 1500 + 1500 + 1500 = 5500
+ expectedRate: int64(5500) / int64(5*60), // 5500 bytes / 5 minutes in seconds = 18.33, truncated to 18
},
}
@@ -185,7 +279,9 @@ func TestIngestLimits_GetStreamUsage(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
s := &IngestLimits{
cfg: Config{
- WindowSize: tt.windowSize,
+ WindowSize: tt.windowSize,
+ RateWindow: tt.rateWindow,
+ BucketDuration: tt.bucketDuration,
LifecyclerConfig: ring.LifecyclerConfig{
RingConfig: ring.Config{
KVStore: kv.Config{
@@ -206,45 +302,51 @@ func TestIngestLimits_GetStreamUsage(t *testing.T) {
metadata: tt.metadata,
partitionManager: NewPartitionManager(log.NewNopLogger()),
}
+
// Assign the Partition IDs.
partitions := make(map[string][]int32)
partitions["test"] = make([]int32, 0, len(tt.assignedPartitionIDs))
partitions["test"] = append(partitions["test"], tt.assignedPartitionIDs...)
s.partitionManager.Assign(context.Background(), nil, partitions)
+
// Call GetStreamUsage.
req := &logproto.GetStreamUsageRequest{
Tenant: tt.tenantID,
Partitions: tt.partitionIDs,
StreamHashes: tt.streamHashes,
}
+
resp, err := s.GetStreamUsage(context.Background(), req)
require.NoError(t, err)
require.NotNil(t, resp)
require.Equal(t, tt.tenantID, resp.Tenant)
require.Equal(t, tt.expectedActive, resp.ActiveStreams)
require.Len(t, resp.UnknownStreams, len(tt.expectedUnknownStreams))
+ require.Equal(t, tt.expectedRate, resp.Rate)
})
}
}
func TestIngestLimits_GetStreamUsage_Concurrent(t *testing.T) {
- // Setup test data with a mix of active and expired streams
now := time.Now()
+	// Setup test data with a mix of active and expired streams
metadata := map[string]map[int32][]streamMetadata{
"tenant1": {
0: []streamMetadata{
- {hash: 1, lastSeenAt: now.UnixNano()}, // active
- {hash: 2, lastSeenAt: now.Add(-30 * time.Minute).UnixNano()}, // active
- {hash: 3, lastSeenAt: now.Add(-2 * time.Hour).UnixNano()}, // expired
- {hash: 4, lastSeenAt: now.Add(-45 * time.Minute).UnixNano()}, // active
- {hash: 5, lastSeenAt: now.Add(-3 * time.Hour).UnixNano()}, // expired
+ {hash: 1, lastSeenAt: now.UnixNano(), totalSize: 1000, rateBuckets: []rateBucket{{timestamp: now.UnixNano(), size: 1000}}}, // active
+ {hash: 2, lastSeenAt: now.Add(-30 * time.Minute).UnixNano(), totalSize: 2000, rateBuckets: []rateBucket{{timestamp: now.UnixNano(), size: 2000}}}, // active
+ {hash: 3, lastSeenAt: now.Add(-2 * time.Hour).UnixNano(), totalSize: 3000}, // expired
+ {hash: 4, lastSeenAt: now.Add(-45 * time.Minute).UnixNano(), totalSize: 4000, rateBuckets: []rateBucket{{timestamp: now.UnixNano(), size: 4000}}}, // active
+ {hash: 5, lastSeenAt: now.Add(-3 * time.Hour).UnixNano(), totalSize: 5000}, // expired
},
},
}
s := &IngestLimits{
cfg: Config{
- WindowSize: time.Hour,
+ WindowSize: time.Hour,
+ RateWindow: 5 * time.Minute,
+ BucketDuration: time.Minute,
LifecyclerConfig: ring.LifecyclerConfig{
RingConfig: ring.Config{
KVStore: kv.Config{
@@ -283,6 +385,9 @@ func TestIngestLimits_GetStreamUsage_Concurrent(t *testing.T) {
require.NotNil(t, resp)
require.Equal(t, "tenant1", resp.Tenant)
require.Equal(t, uint64(3), resp.ActiveStreams) // Should count only the 3 active streams
+
+ expectedRate := int64(7000) / int64(5*60)
+ require.Equal(t, expectedRate, resp.Rate)
}()
}
@@ -293,6 +398,7 @@ func TestIngestLimits_GetStreamUsage_Concurrent(t *testing.T) {
}
func TestIngestLimits_UpdateMetadata(t *testing.T) {
+
tests := []struct {
name string
@@ -300,6 +406,10 @@ func TestIngestLimits_UpdateMetadata(t *testing.T) {
assignedPartitionIDs []int32
metadata map[string]map[int32][]streamMetadata
+ // config
+ bucketDuration time.Duration
+ rateWindow time.Duration
+
// The test case.
tenantID string
partitionID int32
@@ -316,13 +426,24 @@ func TestIngestLimits_UpdateMetadata(t *testing.T) {
tenantID: "tenant1",
partitionID: 0,
lastSeenAt: time.Unix(100, 0),
+ bucketDuration: time.Minute,
+ rateWindow: 5 * time.Minute,
updateMetadata: &logproto.StreamMetadata{
- StreamHash: 123,
+ StreamHash: 123,
+ EntriesSize: 1000,
+ StructuredMetadataSize: 500,
},
expected: map[string]map[int32][]streamMetadata{
"tenant1": {
0: {
- {hash: 123, lastSeenAt: time.Unix(100, 0).UnixNano()},
+ {
+ hash: 123,
+ lastSeenAt: time.Unix(100, 0).UnixNano(),
+ totalSize: 1500,
+ rateBuckets: []rateBucket{
+ {timestamp: time.Unix(100, 0).Truncate(time.Minute).UnixNano(), size: 1500},
+ },
+ },
},
},
},
@@ -333,23 +454,48 @@ func TestIngestLimits_UpdateMetadata(t *testing.T) {
metadata: map[string]map[int32][]streamMetadata{
"tenant1": {
0: {
- {hash: 123, lastSeenAt: time.Unix(100, 0).UnixNano()},
+ {
+ hash: 123,
+ lastSeenAt: time.Unix(100, 0).UnixNano(),
+ totalSize: 1000,
+ rateBuckets: []rateBucket{
+ {timestamp: time.Unix(100, 0).Truncate(time.Minute).UnixNano(), size: 1000},
+ },
+ },
},
},
},
tenantID: "tenant1",
partitionID: 1,
updateMetadata: &logproto.StreamMetadata{
- StreamHash: 456,
+ StreamHash: 456,
+ EntriesSize: 2000,
+ StructuredMetadataSize: 1000,
},
- lastSeenAt: time.Unix(200, 0),
+ lastSeenAt: time.Unix(200, 0),
+ bucketDuration: time.Minute,
+ rateWindow: 5 * time.Minute,
expected: map[string]map[int32][]streamMetadata{
"tenant1": {
0: {
- {hash: 123, lastSeenAt: time.Unix(100, 0).UnixNano()},
+ {
+ hash: 123,
+ lastSeenAt: time.Unix(100, 0).UnixNano(),
+ totalSize: 1000,
+ rateBuckets: []rateBucket{
+ {timestamp: time.Unix(100, 0).Truncate(time.Minute).UnixNano(), size: 1000},
+ },
+ },
},
1: {
- {hash: 456, lastSeenAt: time.Unix(200, 0).UnixNano()},
+ {
+ hash: 456,
+ lastSeenAt: time.Unix(200, 0).UnixNano(),
+ totalSize: 3000,
+ rateBuckets: []rateBucket{
+ {timestamp: time.Unix(200, 0).Truncate(time.Minute).UnixNano(), size: 3000},
+ },
+ },
},
},
},
@@ -360,20 +506,39 @@ func TestIngestLimits_UpdateMetadata(t *testing.T) {
metadata: map[string]map[int32][]streamMetadata{
"tenant1": {
0: {
- {hash: 123, lastSeenAt: time.Unix(100, 0).UnixNano()},
+ {
+ hash: 123,
+ lastSeenAt: time.Unix(100, 0).UnixNano(),
+ totalSize: 1000,
+ rateBuckets: []rateBucket{
+ {timestamp: time.Unix(100, 0).Truncate(time.Minute).UnixNano(), size: 1000},
+ },
+ },
},
},
},
tenantID: "tenant1",
partitionID: 0,
updateMetadata: &logproto.StreamMetadata{
- StreamHash: 123,
+ StreamHash: 123,
+ EntriesSize: 3000,
+ StructuredMetadataSize: 1500,
},
- lastSeenAt: time.Unix(300, 0),
+ lastSeenAt: time.Unix(300, 0),
+ bucketDuration: time.Minute,
+ rateWindow: 5 * time.Minute,
expected: map[string]map[int32][]streamMetadata{
"tenant1": {
0: {
- {hash: 123, lastSeenAt: time.Unix(300, 0).UnixNano()},
+ {
+ hash: 123,
+ lastSeenAt: time.Unix(300, 0).UnixNano(),
+ totalSize: 5500,
+ rateBuckets: []rateBucket{
+ {timestamp: time.Unix(100, 0).Truncate(time.Minute).UnixNano(), size: 1000},
+ {timestamp: time.Unix(300, 0).Truncate(time.Minute).UnixNano(), size: 4500},
+ },
+ },
},
},
},
@@ -384,21 +549,173 @@ func TestIngestLimits_UpdateMetadata(t *testing.T) {
metadata: map[string]map[int32][]streamMetadata{
"tenant1": {
0: {
- {hash: 123, lastSeenAt: time.Unix(100, 0).UnixNano()},
- {hash: 456, lastSeenAt: time.Unix(200, 0).UnixNano()},
+ {
+ hash: 123,
+ lastSeenAt: time.Unix(100, 0).UnixNano(),
+ totalSize: 1000,
+ rateBuckets: []rateBucket{
+ {timestamp: time.Unix(100, 0).Truncate(time.Minute).UnixNano(), size: 1000},
+ },
+ },
+ {
+ hash: 456,
+ lastSeenAt: time.Unix(200, 0).UnixNano(),
+ totalSize: 3000,
+ rateBuckets: []rateBucket{
+ {timestamp: time.Unix(200, 0).Truncate(time.Minute).UnixNano(), size: 3000},
+ },
+ },
},
},
},
tenantID: "tenant1",
partitionID: 0,
updateMetadata: &logproto.StreamMetadata{
- StreamHash: 123,
+ StreamHash: 123,
+ EntriesSize: 4000,
+ StructuredMetadataSize: 2000,
},
- lastSeenAt: time.Unix(400, 0),
+ lastSeenAt: time.Unix(400, 0),
+ bucketDuration: time.Minute,
+ rateWindow: 5 * time.Minute,
expected: map[string]map[int32][]streamMetadata{
"tenant1": {
0: {
- {hash: 456, lastSeenAt: time.Unix(200, 0).UnixNano()},
+ {
+ hash: 456,
+ lastSeenAt: time.Unix(200, 0).UnixNano(),
+ totalSize: 3000,
+ rateBuckets: []rateBucket{
+ {timestamp: time.Unix(200, 0).Truncate(time.Minute).UnixNano(), size: 3000},
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "update existing bucket",
+ tenantID: "tenant1",
+ bucketDuration: time.Minute,
+ rateWindow: 5 * time.Minute,
+ updateMetadata: &logproto.StreamMetadata{
+ StreamHash: 888,
+ EntriesSize: 1000,
+ StructuredMetadataSize: 500,
+ },
+ assignedPartitionIDs: []int32{0},
+ lastSeenAt: time.Unix(852, 0),
+ metadata: map[string]map[int32][]streamMetadata{
+ "tenant1": {
+ 0: {
+ {
+ hash: 888,
+ lastSeenAt: time.Unix(850, 0).UnixNano(),
+ totalSize: 1500,
+ rateBuckets: []rateBucket{
+ {timestamp: time.Unix(850, 0).Truncate(time.Minute).UnixNano(), size: 1500},
+ },
+ },
+ },
+ },
+ },
+ expected: map[string]map[int32][]streamMetadata{
+ "tenant1": {
+ 0: {
+ {
+ hash: 888,
+ lastSeenAt: time.Unix(852, 0).UnixNano(),
+ totalSize: 3000,
+ rateBuckets: []rateBucket{
+ {timestamp: time.Unix(850, 0).Truncate(time.Minute).UnixNano(), size: 3000},
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "clean up buckets outside rate window",
+ tenantID: "tenant1",
+ bucketDuration: time.Minute,
+ rateWindow: 5 * time.Minute,
+ updateMetadata: &logproto.StreamMetadata{
+ StreamHash: 999,
+ EntriesSize: 2000,
+ StructuredMetadataSize: 1000,
+ },
+ assignedPartitionIDs: []int32{0},
+ lastSeenAt: time.Unix(1000, 0), // Current time reference
+ metadata: map[string]map[int32][]streamMetadata{
+ "tenant1": {
+ 0: {
+ {
+ hash: 999,
+ lastSeenAt: time.Unix(950, 0).UnixNano(),
+ totalSize: 5000,
+ rateBuckets: []rateBucket{
+ {timestamp: time.Unix(1000, 0).Add(-5 * time.Minute).Truncate(time.Minute).UnixNano(), size: 1000}, // Old, outside window
+ {timestamp: time.Unix(1000, 0).Add(-10 * time.Minute).Truncate(time.Minute).UnixNano(), size: 1500}, // Outside rate window (>5 min old from 1000)
+ {timestamp: time.Unix(950, 0).Truncate(time.Minute).UnixNano(), size: 2500}, // Recent, within window
+ },
+ },
+ },
+ },
+ },
+ expected: map[string]map[int32][]streamMetadata{
+ "tenant1": {
+ 0: {
+ {
+ hash: 999,
+ lastSeenAt: time.Unix(1000, 0).UnixNano(),
+ totalSize: 8000, // Old total + new 3000
+ rateBuckets: []rateBucket{
+ {timestamp: time.Unix(950, 0).Truncate(time.Minute).UnixNano(), size: 2500},
+ {timestamp: time.Unix(1000, 0).Truncate(time.Minute).UnixNano(), size: 3000},
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "update same minute bucket",
+ tenantID: "tenant1",
+ bucketDuration: time.Minute,
+ rateWindow: 5 * time.Minute,
+ updateMetadata: &logproto.StreamMetadata{
+ StreamHash: 555,
+ EntriesSize: 1000,
+ StructuredMetadataSize: 500,
+ },
+ assignedPartitionIDs: []int32{0},
+ lastSeenAt: time.Unix(1100, 0),
+ metadata: map[string]map[int32][]streamMetadata{
+ "tenant1": {
+ 0: {
+ {
+ hash: 555,
+ lastSeenAt: time.Unix(1080, 0).UnixNano(), // Same minute as new data
+ totalSize: 2000,
+ rateBuckets: []rateBucket{
+ {timestamp: time.Unix(1080, 0).Truncate(time.Minute).UnixNano(), size: 2000},
+ },
+ },
+ },
+ },
+ },
+ expected: map[string]map[int32][]streamMetadata{
+ "tenant1": {
+ 0: {
+ {
+ hash: 555,
+ lastSeenAt: time.Unix(1100, 0).UnixNano(),
+ totalSize: 3500, // 2000 + 1500
+ rateBuckets: []rateBucket{
+ // Same bucket as before but updated with new size
+ {timestamp: time.Unix(1100, 0).Truncate(time.Minute).UnixNano(), size: 3500},
+ },
+ },
},
},
},
@@ -408,6 +725,10 @@ func TestIngestLimits_UpdateMetadata(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := &IngestLimits{
+ cfg: Config{
+ BucketDuration: tt.bucketDuration,
+ RateWindow: tt.rateWindow,
+ },
metadata: tt.metadata,
metrics: newMetrics(prometheus.NewRegistry()),
partitionManager: NewPartitionManager(log.NewNopLogger()),
@@ -420,7 +741,320 @@ func TestIngestLimits_UpdateMetadata(t *testing.T) {
s.updateMetadata(tt.updateMetadata, tt.tenantID, tt.partitionID, tt.lastSeenAt)
+			// For tests with rateBuckets, check each bucket explicitly
+ if len(tt.expected) > 0 {
+ for tenant, partitions := range tt.expected {
+ for partition, streams := range partitions {
+ for i, expectedStream := range streams {
+ if len(expectedStream.rateBuckets) > 0 {
+ require.Equal(t, len(expectedStream.rateBuckets), len(s.metadata[tenant][partition][i].rateBuckets),
+ "Number of size buckets does not match for stream %d", expectedStream.hash)
+
+ // Check each bucket
+ for j, expectedBucket := range expectedStream.rateBuckets {
+ require.Equal(t, expectedBucket.timestamp, s.metadata[tenant][partition][i].rateBuckets[j].timestamp,
+ "Bucket timestamp mismatch for stream %d, bucket %d", expectedStream.hash, j)
+ require.Equal(t, expectedBucket.size, s.metadata[tenant][partition][i].rateBuckets[j].size,
+ "Bucket size mismatch for stream %d, bucket %d", expectedStream.hash, j)
+ }
+ }
+ }
+ }
+ }
+ }
+
require.Equal(t, tt.expected, s.metadata)
})
}
}
+
+func TestNewIngestLimits(t *testing.T) {
+ cfg := Config{
+ KafkaConfig: kafka.Config{
+ Topic: "test-topic",
+ },
+ WindowSize: time.Hour,
+ LifecyclerConfig: ring.LifecyclerConfig{
+ RingConfig: ring.Config{
+ KVStore: kv.Config{
+ Store: "inmemory",
+ },
+ ReplicationFactor: 1,
+ },
+ NumTokens: 1,
+ ID: "test",
+ Zone: "test",
+ FinalSleep: 0,
+ HeartbeatPeriod: 100 * time.Millisecond,
+ ObservePeriod: 100 * time.Millisecond,
+ },
+ }
+ s, err := NewIngestLimits(cfg, log.NewNopLogger(), prometheus.NewRegistry())
+ require.NoError(t, err)
+ require.NotNil(t, s)
+ require.NotNil(t, s.client)
+
+ require.Equal(t, cfg, s.cfg)
+
+ require.NotNil(t, s.metadata)
+ require.NotNil(t, s.lifecycler)
+}
+
+func TestIngestLimits_evictOldStreams(t *testing.T) {
+ tests := []struct {
+ name string
+ initialMetadata map[string]map[int32][]streamMetadata
+ windowSize time.Duration
+ assignedPartitionIDs []int32
+ expectedMetadata map[string]map[int32][]streamMetadata
+ expectedEvictions map[string]int
+ }{
+ {
+ name: "all streams active",
+ initialMetadata: map[string]map[int32][]streamMetadata{
+ "tenant1": {
+ 0: []streamMetadata{
+ {hash: 1, lastSeenAt: time.Now().UnixNano(), totalSize: 1000},
+ {hash: 2, lastSeenAt: time.Now().UnixNano(), totalSize: 2000},
+ },
+ },
+ },
+ windowSize: time.Hour,
+ assignedPartitionIDs: []int32{0},
+ expectedMetadata: map[string]map[int32][]streamMetadata{
+ "tenant1": {
+ 0: []streamMetadata{
+ {hash: 1, lastSeenAt: time.Now().UnixNano(), totalSize: 1000},
+ {hash: 2, lastSeenAt: time.Now().UnixNano(), totalSize: 2000},
+ },
+ },
+ },
+ expectedEvictions: map[string]int{
+ "tenant1": 0,
+ },
+ },
+ {
+ name: "all streams expired",
+ initialMetadata: map[string]map[int32][]streamMetadata{
+ "tenant1": {
+ 0: []streamMetadata{
+ {hash: 1, lastSeenAt: time.Now().Add(-2 * time.Hour).UnixNano(), totalSize: 1000},
+ {hash: 2, lastSeenAt: time.Now().Add(-2 * time.Hour).UnixNano(), totalSize: 2000},
+ },
+ },
+ },
+ windowSize: time.Hour,
+ assignedPartitionIDs: []int32{0},
+ expectedMetadata: map[string]map[int32][]streamMetadata{},
+ expectedEvictions: map[string]int{
+ "tenant1": 2,
+ },
+ },
+ {
+ name: "mixed active and expired streams",
+ initialMetadata: map[string]map[int32][]streamMetadata{
+ "tenant1": {
+ 0: []streamMetadata{
+ {hash: 1, lastSeenAt: time.Now().UnixNano(), totalSize: 1000},
+ {hash: 2, lastSeenAt: time.Now().Add(-2 * time.Hour).UnixNano(), totalSize: 2000},
+ {hash: 3, lastSeenAt: time.Now().UnixNano(), totalSize: 3000},
+ },
+ },
+ },
+ windowSize: time.Hour,
+ assignedPartitionIDs: []int32{0},
+ expectedMetadata: map[string]map[int32][]streamMetadata{
+ "tenant1": {
+ 0: []streamMetadata{
+ {hash: 1, lastSeenAt: time.Now().UnixNano(), totalSize: 1000},
+ {hash: 3, lastSeenAt: time.Now().UnixNano(), totalSize: 3000},
+ },
+ },
+ },
+ expectedEvictions: map[string]int{
+ "tenant1": 1,
+ },
+ },
+ {
+ name: "multiple tenants with mixed streams",
+ initialMetadata: map[string]map[int32][]streamMetadata{
+ "tenant1": {
+ 0: []streamMetadata{
+ {hash: 1, lastSeenAt: time.Now().UnixNano(), totalSize: 1000},
+ {hash: 2, lastSeenAt: time.Now().Add(-2 * time.Hour).UnixNano(), totalSize: 2000},
+ },
+ },
+ "tenant2": {
+ 0: []streamMetadata{
+ {hash: 3, lastSeenAt: time.Now().Add(-2 * time.Hour).UnixNano(), totalSize: 3000},
+ {hash: 4, lastSeenAt: time.Now().Add(-2 * time.Hour).UnixNano(), totalSize: 4000},
+ },
+ },
+ "tenant3": {
+ 0: []streamMetadata{
+ {hash: 5, lastSeenAt: time.Now().UnixNano(), totalSize: 5000},
+ },
+ },
+ },
+ windowSize: time.Hour,
+ assignedPartitionIDs: []int32{0},
+ expectedMetadata: map[string]map[int32][]streamMetadata{
+ "tenant1": {
+ 0: []streamMetadata{
+ {hash: 1, lastSeenAt: time.Now().UnixNano(), totalSize: 1000},
+ },
+ },
+ "tenant3": {
+ 0: []streamMetadata{
+ {hash: 5, lastSeenAt: time.Now().UnixNano(), totalSize: 5000},
+ },
+ },
+ },
+ expectedEvictions: map[string]int{
+ "tenant1": 1,
+ "tenant2": 2,
+ "tenant3": 0,
+ },
+ },
+ {
+ name: "multiple partitions with some empty after eviction",
+ initialMetadata: map[string]map[int32][]streamMetadata{
+ "tenant1": {
+ 0: []streamMetadata{
+ {hash: 1, lastSeenAt: time.Now().UnixNano(), totalSize: 1000},
+ {hash: 2, lastSeenAt: time.Now().Add(-2 * time.Hour).UnixNano(), totalSize: 2000},
+ },
+ 1: []streamMetadata{
+ {hash: 3, lastSeenAt: time.Now().Add(-2 * time.Hour).UnixNano(), totalSize: 3000},
+ },
+ 2: []streamMetadata{
+ {hash: 4, lastSeenAt: time.Now().UnixNano(), totalSize: 4000},
+ },
+ },
+ },
+ windowSize: time.Hour,
+ assignedPartitionIDs: []int32{0, 1, 2},
+ expectedMetadata: map[string]map[int32][]streamMetadata{
+ "tenant1": {
+ 0: []streamMetadata{
+ {hash: 1, lastSeenAt: time.Now().UnixNano(), totalSize: 1000},
+ },
+ 2: []streamMetadata{
+ {hash: 4, lastSeenAt: time.Now().UnixNano(), totalSize: 4000},
+ },
+ },
+ },
+ expectedEvictions: map[string]int{
+ "tenant1": 2,
+ },
+ },
+ {
+ name: "unassigned partitions should still be evicted",
+ initialMetadata: map[string]map[int32][]streamMetadata{
+ "tenant1": {
+ 0: []streamMetadata{
+ {hash: 1, lastSeenAt: time.Now().UnixNano(), totalSize: 1000},
+ },
+ 1: []streamMetadata{
+ {hash: 2, lastSeenAt: time.Now().Add(-2 * time.Hour).UnixNano(), totalSize: 2000},
+ },
+ },
+ },
+ windowSize: time.Hour,
+ assignedPartitionIDs: []int32{0},
+ expectedMetadata: map[string]map[int32][]streamMetadata{
+ "tenant1": {
+ 0: []streamMetadata{
+ {hash: 1, lastSeenAt: time.Now().UnixNano(), totalSize: 1000},
+ },
+ },
+ },
+ expectedEvictions: map[string]int{
+ "tenant1": 1,
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ // Create a registry to capture metrics
+ reg := prometheus.NewRegistry()
+
+ // Create IngestLimits instance with mock data
+ s := &IngestLimits{
+ cfg: Config{
+ WindowSize: tt.windowSize,
+ },
+ logger: log.NewNopLogger(),
+ metrics: newMetrics(reg),
+ metadata: deepCopyMetadata(tt.initialMetadata),
+ partitionManager: NewPartitionManager(log.NewNopLogger()),
+ }
+
+ // Assign the Partition IDs.
+ partitions := make(map[string][]int32)
+ partitions["test"] = make([]int32, 0, len(tt.assignedPartitionIDs))
+ for _, partitionID := range tt.assignedPartitionIDs {
+ partitions["test"] = append(partitions["test"], partitionID)
+ }
+ s.partitionManager.Assign(context.Background(), nil, partitions)
+
+ // Call evictOldStreams
+ s.evictOldStreams(context.Background())
+
+ // Verify metadata after eviction
+ require.Equal(t, len(tt.expectedMetadata), len(s.metadata), "number of tenants after eviction")
+
+ for tenant, expectedPartitions := range tt.expectedMetadata {
+ require.Contains(t, s.metadata, tenant, "tenant should exist after eviction")
+
+ actualPartitions := s.metadata[tenant]
+ require.Equal(t, len(expectedPartitions), len(actualPartitions),
+ "number of partitions for tenant %s after eviction", tenant)
+
+ for partitionID, expectedStreams := range expectedPartitions {
+ require.Contains(t, actualPartitions, partitionID,
+ "partition %d should exist for tenant %s after eviction", partitionID, tenant)
+
+ actualStreams := actualPartitions[partitionID]
+ require.Equal(t, len(expectedStreams), len(actualStreams),
+ "number of streams for tenant %s partition %d after eviction", tenant, partitionID)
+
+ // Check that all expected streams exist
+ // Note: We don't check exact lastSeenAt timestamps as they're generated at test time
+ streamMap := make(map[uint64]bool)
+ for _, stream := range actualStreams {
+ streamMap[stream.hash] = true
+ }
+
+ for _, expectedStream := range expectedStreams {
+ require.True(t, streamMap[expectedStream.hash],
+ "stream with hash %d should exist for tenant %s partition %d after eviction",
+ expectedStream.hash, tenant, partitionID)
+ }
+ }
+ }
+
+ // Verify that tenants not in expectedMetadata don't exist in actual metadata
+ for tenant := range tt.initialMetadata {
+ if _, exists := tt.expectedMetadata[tenant]; !exists {
+ require.NotContains(t, s.metadata, tenant,
+ "tenant %s should not exist after eviction", tenant)
+ }
+ }
+ })
+ }
+}
+
+// Helper function to deep copy metadata map for testing
+func deepCopyMetadata(src map[string]map[int32][]streamMetadata) map[string]map[int32][]streamMetadata {
+ dst := make(map[string]map[int32][]streamMetadata)
+ for tenant, partitions := range src {
+ dst[tenant] = make(map[int32][]streamMetadata)
+ for partitionID, streams := range partitions {
+ dst[tenant][partitionID] = make([]streamMetadata, len(streams))
+ copy(dst[tenant][partitionID], streams)
+ }
+ }
+ return dst
+}
diff --git a/pkg/logproto/logproto.pb.go b/pkg/logproto/logproto.pb.go
index b45bf8c4e1399..39b3708cab5c4 100644
--- a/pkg/logproto/logproto.pb.go
+++ b/pkg/logproto/logproto.pb.go
@@ -231,7 +231,7 @@ func (m *StreamRatesResponse) GetStreamRates() []*StreamRate {
type StreamMetadata struct {
StreamHash uint64 `protobuf:"varint,1,opt,name=streamHash,proto3" json:"streamHash,omitempty"`
- LineSize uint64 `protobuf:"varint,2,opt,name=lineSize,proto3" json:"lineSize,omitempty"`
+ EntriesSize uint64 `protobuf:"varint,2,opt,name=entriesSize,proto3" json:"entriesSize,omitempty"`
StructuredMetadataSize uint64 `protobuf:"varint,3,opt,name=structuredMetadataSize,proto3" json:"structuredMetadataSize,omitempty"`
}
@@ -274,9 +274,9 @@ func (m *StreamMetadata) GetStreamHash() uint64 {
return 0
}
-func (m *StreamMetadata) GetLineSize() uint64 {
+func (m *StreamMetadata) GetEntriesSize() uint64 {
if m != nil {
- return m.LineSize
+ return m.EntriesSize
}
return 0
}
@@ -503,7 +503,8 @@ func (m *GetStreamUsageRequest) GetStreamHashes() []uint64 {
type GetStreamUsageResponse struct {
Tenant string `protobuf:"bytes,1,opt,name=tenant,proto3" json:"tenant,omitempty"`
ActiveStreams uint64 `protobuf:"varint,2,opt,name=activeStreams,proto3" json:"activeStreams,omitempty"`
- UnknownStreams []uint64 `protobuf:"varint,3,rep,packed,name=unknownStreams,proto3" json:"unknownStreams,omitempty"`
+ Rate int64 `protobuf:"varint,3,opt,name=rate,proto3" json:"rate,omitempty"`
+ UnknownStreams []uint64 `protobuf:"varint,4,rep,packed,name=unknownStreams,proto3" json:"unknownStreams,omitempty"`
}
func (m *GetStreamUsageResponse) Reset() { *m = GetStreamUsageResponse{} }
@@ -552,6 +553,13 @@ func (m *GetStreamUsageResponse) GetActiveStreams() uint64 {
return 0
}
+func (m *GetStreamUsageResponse) GetRate() int64 {
+ if m != nil {
+ return m.Rate
+ }
+ return 0
+}
+
func (m *GetStreamUsageResponse) GetUnknownStreams() []uint64 {
if m != nil {
return m.UnknownStreams
@@ -3579,202 +3587,203 @@ func init() {
func init() { proto.RegisterFile("pkg/logproto/logproto.proto", fileDescriptor_c28a5f14f1f4c79a) }
var fileDescriptor_c28a5f14f1f4c79a = []byte{
- // 3117 bytes of a gzipped FileDescriptorProto
+ // 3121 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x1a, 0x4b, 0x6c, 0x1b, 0xc7,
0x95, 0xcb, 0x9f, 0xc8, 0x47, 0x4a, 0x96, 0x47, 0xb2, 0x4c, 0xc8, 0x36, 0xa9, 0x0c, 0x52, 0xdb,
0x89, 0x1d, 0xd1, 0x56, 0x1a, 0x37, 0x71, 0x9a, 0xa6, 0xa6, 0x64, 0x2b, 0x76, 0xe4, 0x4f, 0x46,
0xb6, 0x93, 0x16, 0x0d, 0x82, 0x35, 0x39, 0xa2, 0x36, 0x22, 0x77, 0xe9, 0xdd, 0xa1, 0x6d, 0xf5,
- 0x50, 0xf4, 0xd0, 0x6b, 0xd1, 0x00, 0x45, 0xd1, 0xf6, 0x52, 0xa0, 0x40, 0x81, 0x16, 0x05, 0x72,
- 0x29, 0x7a, 0xe8, 0xa1, 0x68, 0x2f, 0x05, 0x9a, 0xde, 0xd2, 0x53, 0x83, 0x1c, 0xd8, 0x46, 0xb9,
- 0x14, 0x02, 0x0a, 0xe4, 0xd4, 0x02, 0x39, 0x15, 0xf3, 0xdb, 0x9d, 0x5d, 0x92, 0x51, 0xe4, 0xba,
- 0x48, 0x72, 0x21, 0x67, 0xde, 0xbc, 0x79, 0x33, 0xef, 0x33, 0xef, 0xbd, 0x79, 0x3b, 0x70, 0xa4,
- 0xb7, 0xd5, 0xae, 0x77, 0xbc, 0x76, 0xcf, 0xf7, 0x98, 0x17, 0x36, 0x16, 0xc5, 0x2f, 0x2a, 0xe8,
- 0xfe, 0xfc, 0x6c, 0xdb, 0x6b, 0x7b, 0x12, 0x87, 0xb7, 0xe4, 0xf8, 0x7c, 0xad, 0xed, 0x79, 0xed,
- 0x0e, 0xad, 0x8b, 0xde, 0x9d, 0xfe, 0x46, 0x9d, 0x39, 0x5d, 0x1a, 0x30, 0xbb, 0xdb, 0x53, 0x08,
- 0x0b, 0x8a, 0xfa, 0xdd, 0x4e, 0xd7, 0x6b, 0xd1, 0x4e, 0x3d, 0x60, 0x36, 0x0b, 0xe4, 0xaf, 0xc2,
- 0x98, 0xe1, 0x18, 0xbd, 0x7e, 0xb0, 0x29, 0x7e, 0x14, 0xf0, 0x0c, 0x07, 0x06, 0xcc, 0xf3, 0xed,
- 0x36, 0xad, 0x37, 0x37, 0xfb, 0xee, 0x56, 0xbd, 0x69, 0x37, 0x37, 0x69, 0xdd, 0xa7, 0x41, 0xbf,
- 0xc3, 0x02, 0xd9, 0x61, 0xdb, 0x3d, 0xaa, 0xc8, 0xe0, 0xdf, 0x5a, 0x70, 0x68, 0xcd, 0xbe, 0x43,
- 0x3b, 0x37, 0xbd, 0xdb, 0x76, 0xa7, 0x4f, 0x03, 0x42, 0x83, 0x9e, 0xe7, 0x06, 0x14, 0x2d, 0x43,
- 0xbe, 0xc3, 0x07, 0x82, 0x8a, 0xb5, 0x90, 0x39, 0x59, 0x5a, 0x3a, 0xb5, 0x18, 0x32, 0x39, 0x72,
- 0x82, 0x84, 0x06, 0x17, 0x5d, 0xe6, 0x6f, 0x13, 0x35, 0x75, 0xfe, 0x36, 0x94, 0x0c, 0x30, 0x9a,
- 0x86, 0xcc, 0x16, 0xdd, 0xae, 0x58, 0x0b, 0xd6, 0xc9, 0x22, 0xe1, 0x4d, 0x74, 0x16, 0x72, 0xf7,
- 0x38, 0x99, 0x4a, 0x7a, 0xc1, 0x3a, 0x59, 0x5a, 0x3a, 0x12, 0x2d, 0x72, 0xcb, 0x75, 0xee, 0xf6,
- 0xa9, 0x98, 0xad, 0x16, 0x92, 0x98, 0xe7, 0xd3, 0xcf, 0x5a, 0xf8, 0x14, 0x1c, 0x1c, 0x1a, 0x47,
- 0x73, 0x90, 0x17, 0x18, 0x72, 0xc7, 0x45, 0xa2, 0x7a, 0x78, 0x16, 0xd0, 0x3a, 0xf3, 0xa9, 0xdd,
- 0x25, 0x36, 0xe3, 0xfb, 0xbd, 0xdb, 0xa7, 0x01, 0xc3, 0x57, 0x61, 0x26, 0x06, 0x55, 0x6c, 0x9f,
- 0x83, 0x52, 0x10, 0x81, 0x15, 0xef, 0xb3, 0xd1, 0xb6, 0xa2, 0x39, 0xc4, 0x44, 0xc4, 0xdf, 0xb3,
- 0x60, 0x4a, 0x8e, 0x5d, 0xa5, 0xcc, 0x6e, 0xd9, 0xcc, 0x46, 0x55, 0x00, 0x89, 0xf1, 0x92, 0x1d,
- 0x6c, 0x0a, 0xa6, 0xb3, 0xc4, 0x80, 0xa0, 0x79, 0x28, 0x74, 0x1c, 0x97, 0xae, 0x3b, 0xdf, 0x96,
- 0xec, 0x67, 0x49, 0xd8, 0x47, 0xe7, 0x60, 0x2e, 0x60, 0x7e, 0xbf, 0xc9, 0xfa, 0x3e, 0x6d, 0x69,
- 0x8a, 0x02, 0x33, 0x23, 0x30, 0xc7, 0x8c, 0xe2, 0x3b, 0x30, 0x7b, 0xf1, 0x41, 0x93, 0xd2, 0x56,
- 0xb0, 0xe6, 0x74, 0x1d, 0xa6, 0xb9, 0xe5, 0xb2, 0x61, 0xd4, 0xb5, 0x5d, 0xa6, 0x84, 0xaf, 0x7a,
- 0x68, 0x09, 0x26, 0xe4, 0x8e, 0x82, 0x4a, 0x5a, 0xb0, 0x5a, 0x49, 0xb2, 0xaa, 0xc9, 0x13, 0x8d,
- 0x88, 0x03, 0x38, 0x94, 0x58, 0x43, 0xc9, 0x6e, 0xdc, 0x22, 0x0d, 0x38, 0xe0, 0xd3, 0x37, 0x69,
- 0x93, 0xd1, 0xd6, 0xfa, 0xb8, 0xc5, 0x48, 0x0c, 0x81, 0x24, 0x27, 0xe0, 0x97, 0x60, 0x2a, 0x8e,
- 0xb2, 0xa7, 0x78, 0xe7, 0x20, 0xef, 0x53, 0x3b, 0xf0, 0x5c, 0x21, 0xdc, 0x22, 0x51, 0x3d, 0xbe,
- 0xfd, 0x55, 0xca, 0x24, 0x91, 0x5b, 0x81, 0xdd, 0xa6, 0x7b, 0xc9, 0xa8, 0x0a, 0xd0, 0xb3, 0x7d,
- 0xe6, 0x30, 0xc7, 0x73, 0xe5, 0xce, 0x73, 0xc4, 0x80, 0x20, 0x0c, 0xe5, 0x68, 0x59, 0x1a, 0x54,
- 0x32, 0x0b, 0x99, 0x93, 0x59, 0x12, 0x83, 0xe1, 0xef, 0xc0, 0x5c, 0x72, 0xd1, 0x3d, 0x84, 0xf6,
- 0x38, 0x4c, 0xda, 0x4d, 0xe6, 0xdc, 0xa3, 0x91, 0xc8, 0x38, 0x87, 0x71, 0x20, 0x3a, 0x0e, 0x53,
- 0x7d, 0x77, 0xcb, 0xf5, 0xee, 0xbb, 0x1a, 0x4d, 0xae, 0x9e, 0x80, 0xe2, 0x9f, 0x59, 0x00, 0x91,
- 0xe9, 0xee, 0x29, 0xbb, 0xd3, 0x70, 0x30, 0xea, 0x5d, 0xf3, 0xd6, 0x37, 0x6d, 0xbf, 0xa5, 0x36,
- 0x30, 0x3c, 0x80, 0x10, 0x64, 0x7d, 0x9b, 0x49, 0xd3, 0xcc, 0x10, 0xd1, 0x36, 0xd8, 0xca, 0xc6,
- 0xd8, 0x9a, 0x83, 0x3c, 0x77, 0x58, 0x34, 0xa8, 0xe4, 0x16, 0xac, 0x93, 0x93, 0x44, 0xf5, 0x70,
- 0x15, 0x8e, 0xae, 0x52, 0x76, 0x21, 0x08, 0x9c, 0xb6, 0x4b, 0x5b, 0x37, 0x42, 0xe9, 0xea, 0xe3,
- 0xfa, 0x37, 0x0b, 0x8e, 0x8d, 0x41, 0x50, 0x82, 0xf4, 0x00, 0xd9, 0x43, 0xa3, 0xea, 0x00, 0xbf,
- 0x18, 0x19, 0xda, 0x27, 0x12, 0x59, 0x1c, 0x1e, 0x92, 0x0e, 0x6d, 0x04, 0xe9, 0xf9, 0x8b, 0x70,
- 0x78, 0x0c, 0xba, 0xe9, 0xe8, 0x72, 0xd2, 0xd1, 0xcd, 0x9a, 0x8e, 0x2e, 0x63, 0xfa, 0xb2, 0x7f,
- 0x67, 0xa0, 0xfc, 0x4a, 0x9f, 0xfa, 0xdb, 0xda, 0x0e, 0xab, 0x50, 0x08, 0x68, 0x87, 0x36, 0x99,
- 0xe7, 0x4b, 0x9b, 0x68, 0xa4, 0x2b, 0x16, 0x09, 0x61, 0x9c, 0x54, 0x87, 0x1f, 0x3c, 0x41, 0x6a,
- 0x92, 0xc8, 0x0e, 0x3a, 0x0f, 0xb9, 0x80, 0xd9, 0x3e, 0x13, 0x5a, 0x28, 0x2d, 0xcd, 0x2f, 0xca,
- 0x18, 0xb3, 0xa8, 0x63, 0xcc, 0xe2, 0x4d, 0x1d, 0x63, 0x1a, 0x85, 0x77, 0x06, 0xb5, 0xd4, 0x5b,
- 0x7f, 0xaf, 0x59, 0x44, 0x4e, 0x41, 0xe7, 0x20, 0x43, 0xdd, 0x96, 0xd0, 0xd4, 0xa7, 0x9d, 0xc9,
- 0x27, 0xa0, 0xb3, 0x50, 0x6c, 0x39, 0x3e, 0x6d, 0x72, 0xce, 0x85, 0x3e, 0xa7, 0x96, 0x66, 0x22,
- 0x49, 0xaf, 0xe8, 0x21, 0x12, 0x61, 0xa1, 0xd3, 0x90, 0x0f, 0xb8, 0xd1, 0x04, 0x95, 0x09, 0xee,
- 0xa4, 0x1b, 0xb3, 0xbb, 0x83, 0xda, 0xb4, 0x84, 0x9c, 0xf6, 0xba, 0x0e, 0xa3, 0xdd, 0x1e, 0xdb,
- 0x26, 0x0a, 0x07, 0x3d, 0x09, 0x13, 0x2d, 0xda, 0xa1, 0xdc, 0x13, 0x17, 0x84, 0x22, 0xa7, 0x0d,
- 0xf2, 0x62, 0x80, 0x68, 0x04, 0xf4, 0x3a, 0x64, 0x7b, 0x1d, 0xdb, 0xad, 0x14, 0x05, 0x17, 0x53,
- 0x11, 0xe2, 0x8d, 0x8e, 0xed, 0x36, 0x9e, 0x7b, 0x7f, 0x50, 0x7b, 0xa6, 0xed, 0xb0, 0xcd, 0xfe,
- 0x9d, 0xc5, 0xa6, 0xd7, 0xad, 0xb7, 0x7d, 0x7b, 0xc3, 0x76, 0xed, 0x7a, 0xc7, 0xdb, 0x72, 0xea,
- 0xf7, 0x9e, 0xae, 0xf3, 0xc8, 0x79, 0xb7, 0x4f, 0x7d, 0x87, 0xfa, 0x75, 0x4e, 0x66, 0x51, 0xa8,
- 0x84, 0x4f, 0x25, 0x82, 0x2c, 0xba, 0xc2, 0x03, 0x83, 0xe7, 0xd3, 0x65, 0x1e, 0x56, 0x83, 0x0a,
- 0x88, 0x55, 0x0e, 0x47, 0xab, 0x08, 0x38, 0xa1, 0x1b, 0xab, 0xbe, 0xd7, 0xef, 0x35, 0x0e, 0xec,
- 0x0e, 0x6a, 0x26, 0x3e, 0x31, 0x3b, 0x57, 0xb2, 0x85, 0xfc, 0xf4, 0x04, 0x7e, 0x3b, 0x03, 0x68,
- 0xdd, 0xee, 0xf6, 0x3a, 0x74, 0x5f, 0xea, 0x0f, 0x15, 0x9d, 0x7e, 0x68, 0x45, 0x67, 0xf6, 0xab,
- 0xe8, 0x48, 0x6b, 0xd9, 0xfd, 0x69, 0x2d, 0xf7, 0x69, 0xb5, 0x96, 0xff, 0xdc, 0x6b, 0x0d, 0x57,
- 0x20, 0xcb, 0x29, 0xf3, 0xc3, 0xed, 0xdb, 0xf7, 0x85, 0x6e, 0xca, 0x84, 0x37, 0xf1, 0x1a, 0xe4,
- 0x25, 0x5f, 0x3c, 0xa6, 0xc7, 0x95, 0x17, 0x3f, 0xb7, 0x91, 0xe2, 0x32, 0x5a, 0x25, 0xd3, 0x91,
- 0x4a, 0x32, 0x42, 0xd8, 0xf8, 0xf7, 0x16, 0x4c, 0x2a, 0x8b, 0x50, 0xae, 0xed, 0x4e, 0x14, 0xa5,
- 0xa5, 0x3f, 0x3b, 0x9c, 0x8c, 0xd2, 0x17, 0x5a, 0x76, 0x8f, 0x51, 0xbf, 0x51, 0x7f, 0x67, 0x50,
- 0xb3, 0xde, 0x1f, 0xd4, 0x4e, 0x8c, 0x13, 0x9a, 0x4e, 0x1b, 0x75, 0x22, 0xa3, 0x09, 0xa3, 0x53,
- 0x62, 0x77, 0x2c, 0x50, 0x66, 0x75, 0x60, 0x51, 0x66, 0x9b, 0x97, 0xdd, 0x36, 0x0d, 0x38, 0xe5,
- 0x2c, 0xb7, 0x08, 0x22, 0x71, 0x38, 0x9b, 0xf7, 0x6d, 0xdf, 0x75, 0xdc, 0xb6, 0x0c, 0x38, 0x45,
- 0x12, 0xf6, 0xf1, 0x4f, 0x2c, 0x98, 0x89, 0x99, 0xb5, 0x62, 0xe2, 0x59, 0xc8, 0x07, 0x5c, 0x53,
- 0x9a, 0x07, 0xc3, 0x28, 0xd6, 0x05, 0xbc, 0x31, 0xa5, 0x36, 0x9f, 0x97, 0x7d, 0xa2, 0xf0, 0x1f,
- 0xdd, 0xd6, 0xfe, 0x64, 0x41, 0x59, 0x64, 0x8c, 0xfa, 0xac, 0x21, 0xc8, 0xba, 0x76, 0x97, 0x2a,
- 0x55, 0x89, 0xb6, 0x91, 0x46, 0xf2, 0xe5, 0x0a, 0x3a, 0x8d, 0xdc, 0xaf, 0x83, 0xb5, 0x1e, 0xda,
- 0xc1, 0x5a, 0xd1, 0xb9, 0x9b, 0x85, 0x1c, 0x37, 0xef, 0x6d, 0xe1, 0x5c, 0x8b, 0x44, 0x76, 0xf0,
- 0x09, 0x98, 0x54, 0x5c, 0x44, 0x39, 0xc4, 0xc8, 0xcc, 0xb7, 0x0b, 0x79, 0xa9, 0x09, 0xf4, 0x38,
- 0x14, 0xc3, 0x3b, 0x86, 0xe0, 0x36, 0xd3, 0xc8, 0xef, 0x0e, 0x6a, 0x69, 0x16, 0x90, 0x68, 0x00,
- 0xd5, 0xcc, 0x20, 0x65, 0x35, 0x8a, 0xbb, 0x83, 0x9a, 0x04, 0xa8, 0x78, 0x85, 0x8e, 0x42, 0x76,
- 0x93, 0x67, 0x0c, 0x22, 0x09, 0x6d, 0x14, 0x76, 0x07, 0x35, 0xd1, 0x27, 0xe2, 0x17, 0xaf, 0x42,
- 0x79, 0x8d, 0xb6, 0xed, 0xe6, 0xb6, 0x5a, 0x34, 0x8c, 0x79, 0x7c, 0x41, 0x4b, 0xd3, 0x78, 0x0c,
- 0xca, 0xe1, 0x8a, 0x6f, 0xa8, 0xbc, 0x26, 0x43, 0x4a, 0x21, 0xec, 0x6a, 0x80, 0x7f, 0x6a, 0x81,
- 0xb2, 0x01, 0x84, 0x8d, 0x6b, 0x08, 0xf7, 0x85, 0xb0, 0x3b, 0xa8, 0x29, 0x88, 0xbe, 0x65, 0xa0,
- 0xe7, 0x61, 0x22, 0x10, 0x2b, 0xea, 0xbc, 0xd2, 0x34, 0x2d, 0x31, 0xd0, 0x38, 0xc0, 0x4d, 0x64,
- 0x77, 0x50, 0xd3, 0x88, 0x44, 0x37, 0xd0, 0x62, 0x2c, 0x15, 0x92, 0x8c, 0x4d, 0xed, 0x0e, 0x6a,
- 0x06, 0xd4, 0x4c, 0x8d, 0xf0, 0xc7, 0x16, 0x94, 0x6e, 0xda, 0x4e, 0x68, 0x42, 0x15, 0xad, 0xa2,
- 0xc8, 0x57, 0x4b, 0x00, 0xb7, 0xc4, 0x16, 0xed, 0xd8, 0xdb, 0x97, 0x3c, 0x5f, 0xd0, 0x9d, 0x24,
- 0x61, 0x3f, 0x8a, 0xe1, 0xd9, 0x91, 0x31, 0x3c, 0xb7, 0x7f, 0xd7, 0xfe, 0xff, 0x75, 0xa4, 0x57,
- 0xb2, 0x85, 0xf4, 0x74, 0x06, 0xbf, 0x6d, 0x41, 0x59, 0x32, 0xaf, 0x2c, 0xef, 0x5b, 0x90, 0x97,
- 0xb2, 0x11, 0xec, 0x7f, 0x82, 0x63, 0x3a, 0xb5, 0x1f, 0xa7, 0xa4, 0x68, 0xa2, 0x17, 0x61, 0xaa,
- 0xe5, 0x7b, 0xbd, 0x5e, 0xf2, 0xde, 0x60, 0xac, 0xb2, 0x62, 0x8e, 0x93, 0x04, 0x3a, 0xfe, 0x8b,
- 0x05, 0x93, 0xca, 0x99, 0x28, 0x75, 0x85, 0x22, 0xb6, 0x1e, 0x3a, 0x7a, 0xa6, 0xf7, 0x1b, 0x3d,
- 0xe7, 0x20, 0xdf, 0xe6, 0xf1, 0x45, 0x3b, 0x24, 0xd5, 0xdb, 0x5f, 0x54, 0xc5, 0x57, 0x60, 0x4a,
- 0xb3, 0x32, 0xc6, 0xa3, 0xce, 0x27, 0x3d, 0xea, 0xe5, 0x16, 0x75, 0x99, 0xb3, 0xe1, 0x84, 0x3e,
- 0x52, 0xe1, 0xe3, 0x1f, 0x58, 0x30, 0x9d, 0x44, 0x41, 0x2b, 0x89, 0x1b, 0xff, 0xf1, 0xf1, 0xe4,
- 0xcc, 0xcb, 0xbe, 0x26, 0xad, 0xae, 0xfc, 0xcf, 0xec, 0x75, 0xe5, 0x8f, 0x65, 0xc2, 0x45, 0xe5,
- 0x15, 0xf0, 0x8f, 0x2d, 0x98, 0x8c, 0xe9, 0x12, 0x3d, 0x0b, 0xd9, 0x0d, 0xdf, 0xeb, 0xee, 0x4b,
- 0x51, 0x62, 0x06, 0xfa, 0x32, 0xa4, 0x99, 0xb7, 0x2f, 0x35, 0xa5, 0x99, 0xc7, 0xb5, 0xa4, 0xd8,
- 0xcf, 0xc8, 0x1b, 0x8b, 0xec, 0xe1, 0x67, 0xa0, 0x28, 0x18, 0xba, 0x61, 0x3b, 0xfe, 0xc8, 0x80,
- 0x31, 0x9a, 0xa1, 0xe7, 0xe1, 0x80, 0x74, 0x86, 0xa3, 0x27, 0x97, 0x47, 0x4d, 0x2e, 0xeb, 0xc9,
- 0x47, 0x20, 0x27, 0x92, 0x0e, 0x3e, 0x85, 0x5f, 0xbe, 0xf5, 0x14, 0xde, 0xc6, 0x87, 0x60, 0x86,
- 0x9f, 0x41, 0xea, 0x07, 0xcb, 0x5e, 0xdf, 0x65, 0xfa, 0x86, 0x74, 0x1a, 0x66, 0xe3, 0x60, 0x65,
- 0x25, 0xb3, 0x90, 0x6b, 0x72, 0x80, 0xa0, 0x31, 0x49, 0x64, 0x07, 0xff, 0xc2, 0x02, 0xb4, 0x4a,
- 0x99, 0x58, 0xe5, 0xf2, 0x4a, 0x78, 0x3c, 0xe6, 0xa1, 0xd0, 0xb5, 0x59, 0x73, 0x93, 0xfa, 0x81,
- 0xce, 0x5f, 0x74, 0xff, 0xb3, 0x48, 0x3c, 0xf1, 0x59, 0x98, 0x89, 0xed, 0x52, 0xf1, 0x34, 0x0f,
- 0x85, 0xa6, 0x82, 0xa9, 0x90, 0x17, 0xf6, 0xf1, 0x6f, 0xd2, 0x50, 0xd0, 0x69, 0x1d, 0x3a, 0x0b,
- 0xa5, 0x0d, 0xc7, 0x6d, 0x53, 0xbf, 0xe7, 0x3b, 0x4a, 0x04, 0x59, 0x99, 0xe6, 0x19, 0x60, 0x62,
- 0x76, 0xd0, 0x53, 0x30, 0xd1, 0x0f, 0xa8, 0xff, 0x86, 0x23, 0x4f, 0x7a, 0xb1, 0x31, 0xbb, 0x33,
- 0xa8, 0xe5, 0x6f, 0x05, 0xd4, 0xbf, 0xbc, 0xc2, 0x83, 0x4f, 0x5f, 0xb4, 0x88, 0xfc, 0x6f, 0xa1,
- 0x97, 0x95, 0x99, 0x8a, 0x04, 0xae, 0xf1, 0x15, 0xbe, 0xfd, 0x84, 0xab, 0xeb, 0xf9, 0x5e, 0x97,
- 0xb2, 0x4d, 0xda, 0x0f, 0xea, 0x4d, 0xaf, 0xdb, 0xf5, 0xdc, 0xba, 0x28, 0xea, 0x09, 0xa6, 0x79,
- 0x04, 0xe5, 0xd3, 0x95, 0xe5, 0xde, 0x84, 0x09, 0xb6, 0xe9, 0x7b, 0xfd, 0xf6, 0xa6, 0x08, 0x0c,
- 0x99, 0xc6, 0xf9, 0xfd, 0xd3, 0xd3, 0x14, 0x88, 0x6e, 0xa0, 0xc7, 0xb8, 0xb4, 0x68, 0x73, 0x2b,
- 0xe8, 0x77, 0xe5, 0xad, 0xbb, 0x91, 0xdb, 0x1d, 0xd4, 0xac, 0xa7, 0x48, 0x08, 0xc6, 0x17, 0x60,
- 0x32, 0x96, 0x0a, 0xa3, 0x33, 0x90, 0xf5, 0xe9, 0x86, 0x76, 0x05, 0x68, 0x38, 0x63, 0x96, 0xd1,
- 0x9f, 0xe3, 0x10, 0xf1, 0x8b, 0xbf, 0x9f, 0x86, 0x9a, 0x51, 0x8e, 0xbb, 0xe4, 0xf9, 0x57, 0x29,
- 0xf3, 0x9d, 0xe6, 0x35, 0xbb, 0x1b, 0x96, 0x58, 0x6a, 0x50, 0xea, 0x0a, 0xe0, 0x1b, 0xc6, 0x29,
- 0x82, 0x6e, 0x88, 0x87, 0x8e, 0x01, 0x88, 0x63, 0x27, 0xc7, 0xe5, 0x81, 0x2a, 0x0a, 0x88, 0x18,
- 0x5e, 0x8e, 0x09, 0xbb, 0xbe, 0x4f, 0xe1, 0x28, 0x21, 0x5f, 0x4e, 0x0a, 0x79, 0xdf, 0x74, 0x42,
- 0xc9, 0x9a, 0xc7, 0x25, 0x17, 0x3f, 0x2e, 0xf8, 0x5f, 0x16, 0x54, 0xd7, 0xf4, 0xce, 0x1f, 0x52,
- 0x1c, 0x9a, 0xdf, 0xf4, 0x23, 0xe2, 0x37, 0xf3, 0x08, 0xf9, 0xcd, 0x26, 0xf8, 0xad, 0x02, 0xac,
- 0x39, 0x2e, 0xbd, 0xe4, 0x74, 0x18, 0xf5, 0x47, 0x5c, 0x92, 0x7e, 0x98, 0x89, 0x3c, 0x0e, 0xa1,
- 0x1b, 0x5a, 0x06, 0xcb, 0x86, 0x9b, 0x7f, 0x14, 0x2c, 0xa6, 0x1f, 0x21, 0x8b, 0x99, 0x84, 0x07,
- 0x74, 0x61, 0x62, 0x43, 0xb0, 0x27, 0x23, 0x76, 0xac, 0x30, 0x1c, 0xf1, 0xde, 0xf8, 0x9a, 0x5a,
- 0xfc, 0xdc, 0x1e, 0x09, 0x97, 0x28, 0xf0, 0xd7, 0x83, 0x6d, 0x97, 0xd9, 0x0f, 0x8c, 0xf9, 0x44,
- 0x2f, 0x82, 0x6c, 0x95, 0xd3, 0xe5, 0x46, 0xe6, 0x74, 0x2f, 0xa8, 0x65, 0xfe, 0x97, 0xbc, 0x0e,
- 0xb7, 0x23, 0x07, 0x2b, 0x94, 0xa2, 0x1c, 0xec, 0xf1, 0xbd, 0x8e, 0xbf, 0x3c, 0xf4, 0xe8, 0x64,
- 0xfc, 0x6a, 0x56, 0x0e, 0xaf, 0x66, 0x2d, 0xfa, 0x20, 0x76, 0x2f, 0xc3, 0x7f, 0xb0, 0x60, 0x7a,
- 0x95, 0xb2, 0x78, 0x36, 0xf6, 0x05, 0x52, 0x3e, 0x7e, 0x09, 0x0e, 0x1a, 0xfb, 0x57, 0x72, 0x7a,
- 0x3a, 0x91, 0x82, 0x1d, 0x8a, 0x24, 0x25, 0x64, 0xa0, 0x6e, 0xb6, 0xf1, 0xec, 0xeb, 0x06, 0x94,
- 0x8c, 0x41, 0x74, 0x21, 0x91, 0x77, 0xcd, 0x24, 0xbe, 0xb4, 0xf0, 0xdc, 0xa1, 0x31, 0xab, 0x78,
- 0x92, 0xf7, 0x57, 0x95, 0x55, 0x87, 0x39, 0xca, 0x3a, 0x20, 0xa1, 0x58, 0x41, 0xd6, 0x8c, 0x92,
- 0x02, 0xfa, 0x72, 0x98, 0x80, 0x85, 0x7d, 0xf4, 0x18, 0x64, 0x7d, 0xef, 0xbe, 0x4e, 0xa8, 0x27,
- 0x8d, 0x42, 0xbc, 0x77, 0x9f, 0x88, 0x21, 0xfc, 0x3c, 0x64, 0x88, 0x77, 0x1f, 0x55, 0x01, 0x7c,
- 0xdb, 0x6d, 0xd3, 0xdb, 0xe1, 0x55, 0xae, 0x4c, 0x0c, 0xc8, 0x98, 0x0c, 0x66, 0x19, 0x0e, 0x9a,
- 0x3b, 0x92, 0xea, 0x5e, 0x84, 0x89, 0x57, 0xfa, 0xa6, 0xb8, 0x66, 0x13, 0xe2, 0x92, 0x15, 0x03,
- 0x8d, 0xc4, 0x6d, 0x06, 0x22, 0x38, 0x3a, 0x0a, 0x45, 0x66, 0xdf, 0xe9, 0xd0, 0x6b, 0x91, 0xb3,
- 0x8c, 0x00, 0x7c, 0x94, 0xdf, 0x42, 0x6f, 0x1b, 0xa9, 0x58, 0x04, 0x40, 0x4f, 0xc2, 0x74, 0xb4,
- 0xe7, 0x1b, 0x3e, 0xdd, 0x70, 0x1e, 0x08, 0x0d, 0x97, 0xc9, 0x10, 0x1c, 0x9d, 0x84, 0x03, 0x11,
- 0x6c, 0x5d, 0xa4, 0x3c, 0x59, 0x81, 0x9a, 0x04, 0x73, 0xd9, 0x08, 0x76, 0x2f, 0xde, 0xed, 0xdb,
- 0x1d, 0x71, 0x4c, 0xcb, 0xc4, 0x80, 0xe0, 0x3f, 0x5a, 0x70, 0x50, 0xaa, 0x9a, 0x9f, 0x81, 0x2f,
- 0xa2, 0xd5, 0xff, 0xd2, 0x02, 0x64, 0x72, 0xa0, 0x4c, 0xeb, 0x4b, 0x66, 0x45, 0x8a, 0xe7, 0x54,
- 0x25, 0x71, 0xb9, 0x96, 0xa0, 0xa8, 0xa8, 0x84, 0x21, 0xdf, 0x94, 0x95, 0x37, 0xf1, 0xf1, 0x40,
- 0xde, 0xde, 0x25, 0x84, 0xa8, 0x7f, 0x54, 0x83, 0xdc, 0x9d, 0x6d, 0x46, 0x03, 0x75, 0xf7, 0x16,
- 0x45, 0x07, 0x01, 0x20, 0xf2, 0x8f, 0xaf, 0x45, 0x5d, 0x26, 0xac, 0x26, 0x1b, 0xad, 0xa5, 0x40,
- 0x44, 0x37, 0xf0, 0x7f, 0xd2, 0x30, 0x79, 0xdb, 0xeb, 0xf4, 0xa3, 0xf0, 0xfa, 0x45, 0x0a, 0x2d,
- 0xb1, 0x82, 0x40, 0x4e, 0x17, 0x04, 0x10, 0x64, 0x03, 0x46, 0x7b, 0xc2, 0xb2, 0x32, 0x44, 0xb4,
- 0x11, 0x86, 0x32, 0xb3, 0xfd, 0x36, 0x65, 0xf2, 0x9a, 0x55, 0xc9, 0x8b, 0xfc, 0x37, 0x06, 0x43,
- 0x0b, 0x50, 0xb2, 0xdb, 0x6d, 0x9f, 0xb6, 0x6d, 0x46, 0x1b, 0xdb, 0x95, 0x09, 0xb1, 0x98, 0x09,
- 0x42, 0x57, 0x60, 0xaa, 0x69, 0x37, 0x37, 0x1d, 0xb7, 0x7d, 0xbd, 0x27, 0xbf, 0x94, 0x14, 0x84,
- 0x07, 0x3f, 0xba, 0x68, 0x7e, 0x2b, 0x5e, 0x5c, 0x8e, 0xe1, 0x28, 0x3f, 0x96, 0x98, 0x89, 0x5f,
- 0x83, 0x29, 0x2d, 0x78, 0x65, 0x1e, 0x67, 0x60, 0xe2, 0x9e, 0x80, 0x8c, 0x28, 0xf6, 0x49, 0x54,
- 0x45, 0x4a, 0xa3, 0xc5, 0x3f, 0x6a, 0x68, 0xfe, 0xf1, 0x15, 0xc8, 0x4b, 0x74, 0x74, 0xd4, 0xbc,
- 0x78, 0xc9, 0xdc, 0x93, 0xf7, 0xd5, 0x2d, 0x0a, 0x43, 0x5e, 0x12, 0x52, 0x46, 0x24, 0xec, 0x4c,
- 0x42, 0x88, 0xfa, 0xc7, 0x3f, 0x4a, 0xc3, 0xa1, 0x15, 0xca, 0xc4, 0x27, 0xc4, 0x4b, 0x0e, 0xed,
- 0xb4, 0x3e, 0xd3, 0x9a, 0x40, 0x58, 0xd9, 0xcb, 0x18, 0x95, 0x3d, 0xee, 0xc3, 0x3a, 0x8e, 0x4b,
- 0xd7, 0x8c, 0xd2, 0x50, 0x04, 0x88, 0x64, 0x94, 0x33, 0x8b, 0x46, 0xda, 0x46, 0xf2, 0x86, 0x8d,
- 0x44, 0x05, 0xc1, 0x89, 0x58, 0x0d, 0x53, 0xdf, 0x40, 0x0b, 0xd1, 0xf5, 0x15, 0xff, 0xce, 0x82,
- 0xb9, 0xa4, 0x5c, 0x94, 0x1a, 0x2f, 0x42, 0x7e, 0x43, 0x40, 0x86, 0xcb, 0xce, 0xb1, 0x19, 0xb2,
- 0x72, 0x21, 0x51, 0xcd, 0xca, 0x85, 0x84, 0xa0, 0x27, 0x62, 0x1f, 0xac, 0x1a, 0x33, 0xbb, 0x83,
- 0xda, 0x01, 0x01, 0x30, 0x70, 0x15, 0x33, 0xa7, 0xc3, 0x8d, 0x67, 0xa2, 0x92, 0x88, 0x84, 0x98,
- 0x84, 0x55, 0x7d, 0xf3, 0xcf, 0x16, 0x4c, 0xc6, 0x36, 0x22, 0x44, 0xc4, 0x8f, 0x80, 0x0a, 0x0f,
- 0xb2, 0x83, 0x9e, 0x80, 0x2c, 0xdb, 0xee, 0xa9, 0xa8, 0xd0, 0x38, 0xf4, 0xf1, 0xa0, 0x76, 0x30,
- 0x36, 0xed, 0xe6, 0x76, 0x8f, 0x12, 0x81, 0xc2, 0x4f, 0x4e, 0xd3, 0xf6, 0x5b, 0x8e, 0x6b, 0x77,
- 0x1c, 0xb6, 0xad, 0xbe, 0xb6, 0x9b, 0x20, 0xee, 0x8e, 0x7a, 0xb6, 0x1f, 0xe8, 0x24, 0xb0, 0x28,
- 0xdd, 0x91, 0x02, 0x11, 0xdd, 0x10, 0xc5, 0x9d, 0x2d, 0xca, 0x9a, 0x9b, 0x32, 0x2c, 0xa8, 0xe2,
- 0x8e, 0x80, 0xc4, 0x8a, 0x3b, 0x02, 0x82, 0x7f, 0x6e, 0x45, 0xc6, 0x29, 0xcf, 0xf0, 0xe7, 0xce,
- 0x38, 0xf1, 0x37, 0x22, 0x3b, 0xd1, 0x5b, 0x54, 0x76, 0xf2, 0x22, 0x4c, 0xb5, 0x62, 0x23, 0xe3,
- 0xed, 0x45, 0x16, 0xae, 0x13, 0xe8, 0xb8, 0x1f, 0xe9, 0x51, 0x40, 0xc6, 0xe8, 0x31, 0xa1, 0x9c,
- 0xf4, 0xb0, 0x72, 0x22, 0xa9, 0x67, 0xf6, 0x96, 0xfa, 0x93, 0xc7, 0xa1, 0x18, 0x7e, 0xa4, 0x44,
- 0x25, 0x98, 0xb8, 0x74, 0x9d, 0xbc, 0x7a, 0x81, 0xac, 0x4c, 0xa7, 0x50, 0x19, 0x0a, 0x8d, 0x0b,
- 0xcb, 0x2f, 0x8b, 0x9e, 0xb5, 0xf4, 0xeb, 0xbc, 0x4e, 0x5c, 0x7c, 0xf4, 0x55, 0xc8, 0xc9, 0x6c,
- 0x64, 0x2e, 0x62, 0xce, 0xfc, 0x7e, 0x37, 0x7f, 0x78, 0x08, 0x2e, 0xa5, 0x84, 0x53, 0x67, 0x2c,
- 0x74, 0x0d, 0x4a, 0x02, 0xa8, 0x2a, 0xe4, 0x47, 0x93, 0x85, 0xea, 0x18, 0xa5, 0x63, 0x63, 0x46,
- 0x0d, 0x7a, 0xe7, 0x21, 0x27, 0x05, 0x36, 0x97, 0x48, 0x1a, 0x47, 0xec, 0x26, 0xf6, 0xcd, 0x00,
- 0xa7, 0xd0, 0x73, 0x90, 0xbd, 0x69, 0x3b, 0x1d, 0x64, 0xe4, 0xac, 0x46, 0x61, 0x7b, 0x7e, 0x2e,
- 0x09, 0x36, 0x96, 0x7d, 0x21, 0xac, 0xcf, 0x1f, 0x4e, 0x16, 0x09, 0xf5, 0xf4, 0xca, 0xf0, 0x40,
- 0xb8, 0xf2, 0x75, 0x59, 0x45, 0xd6, 0xa5, 0x2a, 0x74, 0x2c, 0xbe, 0x54, 0xa2, 0xb2, 0x35, 0x5f,
- 0x1d, 0x37, 0x1c, 0x12, 0x5c, 0x83, 0x92, 0x51, 0x26, 0x32, 0xc5, 0x3a, 0x5c, 0xe3, 0x32, 0xc5,
- 0x3a, 0xa2, 0xb6, 0x84, 0x53, 0x68, 0x15, 0x0a, 0xe2, 0xb1, 0x86, 0xcd, 0x02, 0x74, 0x24, 0x99,
- 0xd0, 0x1b, 0x89, 0xdc, 0xfc, 0xd1, 0xd1, 0x83, 0x21, 0xa1, 0xaf, 0x43, 0x71, 0x95, 0x32, 0x15,
- 0xc1, 0x0e, 0x27, 0x43, 0xe0, 0x08, 0x49, 0xc5, 0xc3, 0x28, 0x4e, 0xa1, 0xd7, 0xc4, 0xa5, 0x23,
- 0xee, 0x9e, 0x51, 0x6d, 0x8c, 0x1b, 0x0e, 0xf7, 0xb5, 0x30, 0x1e, 0x21, 0xa4, 0xfc, 0x6a, 0x8c,
- 0xb2, 0xca, 0x1b, 0x6a, 0x63, 0x0e, 0x6c, 0x48, 0xb9, 0xb6, 0xc7, 0x2b, 0x30, 0x9c, 0x5a, 0x7a,
- 0x5d, 0xbf, 0x34, 0x59, 0xb1, 0x99, 0x8d, 0xae, 0xc3, 0x54, 0xf8, 0xf0, 0x45, 0xbc, 0x94, 0x8a,
- 0xd9, 0xfc, 0xd0, 0xb3, 0xac, 0x98, 0xcd, 0x0f, 0x3f, 0xcf, 0xc2, 0xa9, 0xa5, 0x37, 0x61, 0x56,
- 0x7e, 0xf8, 0x93, 0x8f, 0x8f, 0x2e, 0xf9, 0x9e, 0xcb, 0xb8, 0xcf, 0x22, 0x30, 0x19, 0x7b, 0x95,
- 0x84, 0x0c, 0xab, 0x19, 0xf5, 0x24, 0xca, 0x64, 0x65, 0xe4, 0x73, 0x26, 0x9c, 0x5a, 0xfa, 0xab,
- 0x05, 0x65, 0x73, 0x31, 0x74, 0xcb, 0xe0, 0x46, 0x3c, 0xe3, 0x31, 0x25, 0x36, 0xf2, 0x55, 0x91,
- 0xa9, 0x8b, 0xd1, 0x2f, 0x80, 0x70, 0x0a, 0xbd, 0x29, 0x9e, 0x24, 0x0d, 0x3f, 0x26, 0x41, 0xc7,
- 0xf7, 0x7c, 0xb7, 0x22, 0x17, 0x39, 0xf1, 0x29, 0xdf, 0xb7, 0xe0, 0x54, 0xe3, 0xf5, 0x77, 0x3f,
- 0xa8, 0xa6, 0xde, 0xfb, 0xa0, 0x9a, 0xfa, 0xe8, 0x83, 0xaa, 0xf5, 0xdd, 0x9d, 0xaa, 0xf5, 0xab,
- 0x9d, 0xaa, 0xf5, 0xce, 0x4e, 0xd5, 0x7a, 0x77, 0xa7, 0x6a, 0xfd, 0x63, 0xa7, 0x6a, 0xfd, 0x73,
- 0xa7, 0x9a, 0xfa, 0x68, 0xa7, 0x6a, 0xbd, 0xf5, 0x61, 0x35, 0xf5, 0xee, 0x87, 0xd5, 0xd4, 0x7b,
- 0x1f, 0x56, 0x53, 0xdf, 0x3c, 0xb1, 0x77, 0x29, 0x43, 0x86, 0x95, 0xbc, 0xf8, 0x7b, 0xfa, 0xbf,
- 0x01, 0x00, 0x00, 0xff, 0xff, 0xe5, 0xe6, 0x69, 0x24, 0x20, 0x29, 0x00, 0x00,
+ 0xd4, 0x73, 0x81, 0xa2, 0x41, 0x8b, 0xa2, 0xed, 0xa5, 0x40, 0x81, 0x02, 0x2d, 0x0a, 0xe4, 0x52,
+ 0xf4, 0xd0, 0x43, 0xd1, 0x5e, 0x0a, 0x34, 0xbd, 0xa5, 0xa7, 0x06, 0x39, 0xb0, 0x8d, 0x72, 0x29,
+ 0x04, 0x14, 0xc8, 0xa9, 0x05, 0x72, 0x2a, 0xe6, 0xb7, 0x3b, 0xbb, 0x24, 0xa3, 0xc8, 0x75, 0x91,
+ 0xe4, 0x42, 0xee, 0xbc, 0x79, 0xf3, 0x66, 0xde, 0x67, 0xde, 0x7b, 0xf3, 0x66, 0xe0, 0x48, 0x6f,
+ 0xab, 0x5d, 0xef, 0x78, 0xed, 0x9e, 0xef, 0x31, 0x2f, 0xfc, 0x58, 0x14, 0xbf, 0xa8, 0xa0, 0xdb,
+ 0xf3, 0xb3, 0x6d, 0xaf, 0xed, 0x49, 0x1c, 0xfe, 0x25, 0xfb, 0xe7, 0x6b, 0x6d, 0xcf, 0x6b, 0x77,
+ 0x68, 0x5d, 0xb4, 0xee, 0xf4, 0x37, 0xea, 0xcc, 0xe9, 0xd2, 0x80, 0xd9, 0xdd, 0x9e, 0x42, 0x58,
+ 0x50, 0xd4, 0xef, 0x76, 0xba, 0x5e, 0x8b, 0x76, 0xea, 0x01, 0xb3, 0x59, 0x20, 0x7f, 0x15, 0xc6,
+ 0x0c, 0xc7, 0xe8, 0xf5, 0x83, 0x4d, 0xf1, 0xa3, 0x80, 0x67, 0x38, 0x30, 0x60, 0x9e, 0x6f, 0xb7,
+ 0x69, 0xbd, 0xb9, 0xd9, 0x77, 0xb7, 0xea, 0x4d, 0xbb, 0xb9, 0x49, 0xeb, 0x3e, 0x0d, 0xfa, 0x1d,
+ 0x16, 0xc8, 0x06, 0xdb, 0xee, 0x51, 0x45, 0x06, 0xff, 0xd6, 0x82, 0x43, 0x6b, 0xf6, 0x1d, 0xda,
+ 0xb9, 0xe9, 0xdd, 0xb6, 0x3b, 0x7d, 0x1a, 0x10, 0x1a, 0xf4, 0x3c, 0x37, 0xa0, 0x68, 0x19, 0xf2,
+ 0x1d, 0xde, 0x11, 0x54, 0xac, 0x85, 0xcc, 0xc9, 0xd2, 0xd2, 0xa9, 0xc5, 0x90, 0xc9, 0x91, 0x03,
+ 0x24, 0x34, 0xb8, 0xe8, 0x32, 0x7f, 0x9b, 0xa8, 0xa1, 0xf3, 0xb7, 0xa1, 0x64, 0x80, 0xd1, 0x34,
+ 0x64, 0xb6, 0xe8, 0x76, 0xc5, 0x5a, 0xb0, 0x4e, 0x16, 0x09, 0xff, 0x44, 0x67, 0x21, 0x77, 0x8f,
+ 0x93, 0xa9, 0xa4, 0x17, 0xac, 0x93, 0xa5, 0xa5, 0x23, 0xd1, 0x24, 0xb7, 0x5c, 0xe7, 0x6e, 0x9f,
+ 0x8a, 0xd1, 0x6a, 0x22, 0x89, 0x79, 0x3e, 0xfd, 0xac, 0x85, 0x4f, 0xc1, 0xc1, 0xa1, 0x7e, 0x34,
+ 0x07, 0x79, 0x81, 0x21, 0x57, 0x5c, 0x24, 0xaa, 0x85, 0x67, 0x01, 0xad, 0x33, 0x9f, 0xda, 0x5d,
+ 0x62, 0x33, 0xbe, 0xde, 0xbb, 0x7d, 0x1a, 0x30, 0x7c, 0x15, 0x66, 0x62, 0x50, 0xc5, 0xf6, 0x39,
+ 0x28, 0x05, 0x11, 0x58, 0xf1, 0x3e, 0x1b, 0x2d, 0x2b, 0x1a, 0x43, 0x4c, 0x44, 0xfc, 0x5d, 0x0b,
+ 0xa6, 0x64, 0xdf, 0x55, 0xca, 0xec, 0x96, 0xcd, 0x6c, 0x54, 0x05, 0x90, 0x18, 0x2f, 0xd9, 0xc1,
+ 0xa6, 0x60, 0x3a, 0x4b, 0x0c, 0x08, 0x5a, 0x80, 0x12, 0x75, 0x99, 0xef, 0xd0, 0x60, 0xdd, 0xf9,
+ 0xb6, 0x94, 0x40, 0x96, 0x98, 0x20, 0x74, 0x0e, 0xe6, 0x02, 0xe6, 0xf7, 0x9b, 0xac, 0xef, 0xd3,
+ 0x96, 0xa6, 0x2b, 0x90, 0x33, 0x02, 0x79, 0x4c, 0x2f, 0xbe, 0x03, 0xb3, 0x17, 0x1f, 0x34, 0x29,
+ 0x6d, 0x05, 0x6b, 0x4e, 0xd7, 0x61, 0x9a, 0x67, 0x2e, 0x21, 0x46, 0x5d, 0xdb, 0x65, 0x4a, 0x05,
+ 0xaa, 0x85, 0x96, 0x60, 0x42, 0xae, 0x2b, 0xa8, 0xa4, 0x05, 0xc3, 0x95, 0x24, 0xc3, 0x9a, 0x3c,
+ 0xd1, 0x88, 0x38, 0x80, 0x43, 0x89, 0x39, 0x94, 0x04, 0xc7, 0x4d, 0xd2, 0x80, 0x03, 0x3e, 0x7d,
+ 0x93, 0x36, 0x19, 0x6d, 0xad, 0x8f, 0x9b, 0x8c, 0xc4, 0x10, 0x48, 0x72, 0x00, 0x7e, 0x09, 0xa6,
+ 0xe2, 0x28, 0x7b, 0x0a, 0x79, 0x0e, 0xf2, 0x3e, 0xb5, 0x03, 0xcf, 0x15, 0xf2, 0x2d, 0x12, 0xd5,
+ 0xe2, 0xcb, 0x5f, 0xa5, 0x4c, 0x12, 0xb9, 0x15, 0xd8, 0x6d, 0xba, 0x97, 0x8c, 0xaa, 0x00, 0x3d,
+ 0xdb, 0x67, 0x0e, 0x73, 0x3c, 0x57, 0xae, 0x3c, 0x47, 0x0c, 0x08, 0xc2, 0x50, 0x8e, 0xa6, 0xa5,
+ 0x41, 0x25, 0xb3, 0x90, 0x39, 0x99, 0x25, 0x31, 0x18, 0xfe, 0x81, 0x05, 0x73, 0xc9, 0x59, 0xf7,
+ 0x90, 0xda, 0xe3, 0x30, 0x69, 0x37, 0x99, 0x73, 0x8f, 0x46, 0x32, 0xe3, 0x2c, 0xc6, 0x81, 0x08,
+ 0x41, 0xd6, 0xb7, 0x99, 0x34, 0x8b, 0x0c, 0x11, 0xdf, 0xe8, 0x38, 0x4c, 0xf5, 0xdd, 0x2d, 0xd7,
+ 0xbb, 0xef, 0xea, 0xa1, 0x59, 0xb1, 0xa4, 0x04, 0x14, 0xff, 0xcc, 0x02, 0x88, 0xac, 0x7a, 0x4f,
+ 0x81, 0x9e, 0x86, 0x83, 0x51, 0xeb, 0x9a, 0xb7, 0xbe, 0x69, 0xfb, 0x2d, 0xb5, 0xa8, 0xe1, 0x8e,
+ 0x91, 0x0b, 0x8b, 0x58, 0xcd, 0xc6, 0x58, 0x9d, 0x83, 0x3c, 0xf7, 0x65, 0x34, 0xa8, 0xe4, 0x16,
+ 0xac, 0x93, 0x93, 0x44, 0xb5, 0x70, 0x15, 0x8e, 0xae, 0x52, 0x76, 0x21, 0x08, 0x9c, 0xb6, 0x4b,
+ 0x5b, 0x37, 0x42, 0x91, 0xeb, 0x9d, 0xfc, 0x37, 0x0b, 0x8e, 0x8d, 0x41, 0x50, 0xc2, 0xf5, 0x00,
+ 0xd9, 0x43, 0xbd, 0x6a, 0x6f, 0xbf, 0x18, 0x59, 0xdf, 0x27, 0x12, 0x59, 0x1c, 0xee, 0x92, 0xbe,
+ 0x6e, 0x04, 0xe9, 0xf9, 0x8b, 0x70, 0x78, 0x0c, 0xba, 0xe9, 0x03, 0x73, 0xd2, 0x07, 0xce, 0x9a,
+ 0x3e, 0x30, 0x63, 0xba, 0xb9, 0x7f, 0x67, 0xa0, 0xfc, 0x4a, 0x9f, 0xfa, 0xdb, 0xda, 0x38, 0xab,
+ 0x50, 0x08, 0x68, 0x87, 0x36, 0x99, 0xe7, 0x4b, 0x3b, 0x69, 0xa4, 0x2b, 0x16, 0x09, 0x61, 0x9c,
+ 0x54, 0x87, 0xef, 0x46, 0x41, 0x6a, 0x92, 0xc8, 0x06, 0x3a, 0x0f, 0xb9, 0x80, 0xd9, 0x3e, 0x13,
+ 0x5a, 0x28, 0x2d, 0xcd, 0x2f, 0xca, 0xf0, 0xb3, 0xa8, 0xc3, 0xcf, 0xe2, 0x4d, 0x1d, 0x7e, 0x1a,
+ 0x85, 0x77, 0x06, 0xb5, 0xd4, 0x5b, 0x7f, 0xaf, 0x59, 0x44, 0x0e, 0x41, 0xe7, 0x20, 0x43, 0xdd,
+ 0x96, 0xd0, 0xd4, 0xa7, 0x1d, 0xc9, 0x07, 0xa0, 0xb3, 0x50, 0x6c, 0x39, 0x3e, 0x6d, 0x72, 0xce,
+ 0x85, 0x3e, 0xa7, 0x96, 0x66, 0x22, 0x49, 0xaf, 0xe8, 0x2e, 0x12, 0x61, 0xa1, 0xd3, 0x90, 0x0f,
+ 0xb8, 0xd1, 0x04, 0x95, 0x09, 0xee, 0xbf, 0x1b, 0xb3, 0xbb, 0x83, 0xda, 0xb4, 0x84, 0x9c, 0xf6,
+ 0xba, 0x0e, 0xa3, 0xdd, 0x1e, 0xdb, 0x26, 0x0a, 0x07, 0x3d, 0x09, 0x13, 0x2d, 0xda, 0xa1, 0xdc,
+ 0x49, 0x17, 0x84, 0x22, 0xa7, 0x0d, 0xf2, 0xa2, 0x83, 0x68, 0x04, 0xf4, 0x3a, 0x64, 0x7b, 0x1d,
+ 0xdb, 0xad, 0x14, 0x05, 0x17, 0x53, 0x11, 0xe2, 0x8d, 0x8e, 0xed, 0x36, 0x9e, 0x7b, 0x7f, 0x50,
+ 0x7b, 0xa6, 0xed, 0xb0, 0xcd, 0xfe, 0x9d, 0xc5, 0xa6, 0xd7, 0xad, 0xb7, 0x7d, 0x7b, 0xc3, 0x76,
+ 0xed, 0x7a, 0xc7, 0xdb, 0x72, 0xea, 0xf7, 0x9e, 0xae, 0xf3, 0xa0, 0x7a, 0xb7, 0x4f, 0x7d, 0x87,
+ 0xfa, 0x75, 0x4e, 0x66, 0x51, 0xa8, 0x84, 0x0f, 0x25, 0x82, 0x2c, 0xba, 0xc2, 0x63, 0x86, 0xe7,
+ 0xd3, 0x65, 0x1e, 0x71, 0x83, 0x0a, 0x88, 0x59, 0x0e, 0x47, 0xb3, 0x08, 0x38, 0xa1, 0x1b, 0xab,
+ 0xbe, 0xd7, 0xef, 0x35, 0x0e, 0xec, 0x0e, 0x6a, 0x26, 0x3e, 0x31, 0x1b, 0x57, 0xb2, 0x85, 0xfc,
+ 0xf4, 0x04, 0x7e, 0x3b, 0x03, 0x68, 0xdd, 0xee, 0xf6, 0x3a, 0x74, 0x5f, 0xea, 0x0f, 0x15, 0x9d,
+ 0x7e, 0x68, 0x45, 0x67, 0xf6, 0xab, 0xe8, 0x48, 0x6b, 0xd9, 0xfd, 0x69, 0x2d, 0xf7, 0x69, 0xb5,
+ 0x96, 0xff, 0xdc, 0x6b, 0x0d, 0x57, 0x20, 0xcb, 0x29, 0xf3, 0xcd, 0xed, 0xdb, 0xf7, 0x85, 0x6e,
+ 0xca, 0x84, 0x7f, 0xe2, 0x35, 0xc8, 0x4b, 0xbe, 0xd0, 0x7c, 0x52, 0x79, 0xf1, 0x7d, 0x1b, 0x29,
+ 0x2e, 0xa3, 0x55, 0x32, 0x1d, 0xa9, 0x24, 0x23, 0x84, 0x8d, 0x7f, 0x6f, 0xc1, 0xa4, 0xb2, 0x08,
+ 0xe5, 0xda, 0xee, 0x44, 0xa1, 0x5b, 0xfa, 0xb3, 0xc3, 0xc9, 0xd0, 0x7d, 0xa1, 0x65, 0xf7, 0x18,
+ 0xf5, 0x1b, 0xf5, 0x77, 0x06, 0x35, 0xeb, 0xfd, 0x41, 0xed, 0xc4, 0x38, 0xa1, 0xe9, 0x8c, 0x52,
+ 0xe7, 0x38, 0x9a, 0x30, 0x3a, 0x25, 0x56, 0xc7, 0x02, 0x65, 0x56, 0x07, 0x16, 0x65, 0x22, 0x7a,
+ 0xd9, 0x6d, 0xd3, 0x80, 0x53, 0xce, 0x72, 0x8b, 0x20, 0x12, 0x87, 0xb3, 0x79, 0xdf, 0xf6, 0x5d,
+ 0xc7, 0x6d, 0xcb, 0x18, 0x58, 0x24, 0x61, 0x1b, 0xff, 0xc4, 0x82, 0x99, 0x98, 0x59, 0x2b, 0x26,
+ 0x9e, 0x85, 0x7c, 0xc0, 0x35, 0xa5, 0x79, 0x30, 0x8c, 0x62, 0x5d, 0xc0, 0x1b, 0x53, 0x6a, 0xf1,
+ 0x79, 0xd9, 0x26, 0x0a, 0xff, 0xd1, 0x2d, 0xed, 0x4f, 0x16, 0x94, 0x45, 0x32, 0xa9, 0xf7, 0x1a,
+ 0x82, 0xac, 0x6b, 0x77, 0xa9, 0x52, 0x95, 0xf8, 0x36, 0x32, 0x4c, 0x3e, 0x5d, 0x41, 0x67, 0x98,
+ 0xfb, 0x75, 0xb0, 0xd6, 0x43, 0x3b, 0x58, 0x2b, 0xda, 0x77, 0xb3, 0x90, 0xe3, 0xe6, 0xbd, 0x2d,
+ 0x9c, 0x6b, 0x91, 0xc8, 0x06, 0x3e, 0x01, 0x93, 0x8a, 0x8b, 0x28, 0xaf, 0x18, 0x99, 0x14, 0x77,
+ 0x21, 0x2f, 0x35, 0x81, 0x1e, 0x87, 0x62, 0x78, 0xfc, 0x10, 0xdc, 0x66, 0x1a, 0xf9, 0xdd, 0x41,
+ 0x2d, 0xcd, 0x02, 0x12, 0x75, 0xa0, 0x9a, 0x19, 0xa4, 0xac, 0x46, 0x71, 0x77, 0x50, 0x93, 0x00,
+ 0x15, 0xaf, 0xd0, 0x51, 0xc8, 0x6e, 0xf2, 0x8c, 0x41, 0x64, 0xa6, 0x8d, 0xc2, 0xee, 0xa0, 0x26,
+ 0xda, 0x44, 0xfc, 0xe2, 0x55, 0x28, 0xaf, 0xd1, 0xb6, 0xdd, 0xdc, 0x56, 0x93, 0x86, 0x31, 0x8f,
+ 0x4f, 0x68, 0x69, 0x1a, 0x8f, 0x41, 0x39, 0x9c, 0xf1, 0x0d, 0x95, 0xeb, 0x64, 0x48, 0x29, 0x84,
+ 0x5d, 0x0d, 0xf0, 0x4f, 0x2d, 0x50, 0x36, 0x80, 0xb0, 0x71, 0x42, 0xe1, 0xbe, 0x10, 0x76, 0x07,
+ 0x35, 0x05, 0xd1, 0x07, 0x10, 0xf4, 0x3c, 0x4c, 0x04, 0x62, 0x46, 0x9d, 0x6c, 0x9a, 0xa6, 0x25,
+ 0x3a, 0x1a, 0x07, 0xb8, 0x89, 0xec, 0x0e, 0x6a, 0x1a, 0x91, 0xe8, 0x0f, 0xb4, 0x18, 0x4b, 0x85,
+ 0x24, 0x63, 0x53, 0xbb, 0x83, 0x9a, 0x01, 0x35, 0x53, 0x23, 0xfc, 0xb1, 0x05, 0xa5, 0x9b, 0xb6,
+ 0x13, 0x9a, 0x50, 0x45, 0xab, 0x28, 0xf2, 0xd5, 0x12, 0xc0, 0x2d, 0xb1, 0x45, 0x3b, 0xf6, 0xf6,
+ 0x25, 0xcf, 0x17, 0x74, 0x27, 0x49, 0xd8, 0x8e, 0x62, 0x78, 0x76, 0x64, 0x0c, 0xcf, 0xed, 0xdf,
+ 0xb5, 0xff, 0x7f, 0x1d, 0xe9, 0x95, 0x6c, 0x21, 0x3d, 0x9d, 0xc1, 0x6f, 0x5b, 0x50, 0x96, 0xcc,
+ 0x2b, 0xcb, 0xfb, 0x16, 0xe4, 0xa5, 0x6c, 0x04, 0xfb, 0x9f, 0xe0, 0x98, 0x4e, 0xed, 0xc7, 0x29,
+ 0x29, 0x9a, 0xe8, 0x45, 0x98, 0x6a, 0xf9, 0x5e, 0xaf, 0x97, 0x3c, 0x4c, 0x18, 0xb3, 0xac, 0x98,
+ 0xfd, 0x24, 0x81, 0x8e, 0xff, 0x62, 0xc1, 0xa4, 0x72, 0x26, 0x4a, 0x5d, 0xa1, 0x88, 0xad, 0x87,
+ 0x8e, 0x9e, 0xe9, 0xfd, 0x46, 0xcf, 0x39, 0xc8, 0xb7, 0x79, 0x7c, 0xd1, 0x0e, 0x49, 0xb5, 0xf6,
+ 0x17, 0x55, 0xf1, 0x15, 0x98, 0xd2, 0xac, 0x8c, 0xf1, 0xa8, 0xf3, 0x49, 0x8f, 0x7a, 0xb9, 0x45,
+ 0x5d, 0xe6, 0x6c, 0x38, 0xa1, 0x8f, 0x54, 0xf8, 0xf8, 0xfb, 0x16, 0x4c, 0x27, 0x51, 0xd0, 0x4a,
+ 0xa2, 0x18, 0x70, 0x7c, 0x3c, 0x39, 0xb3, 0x0e, 0xa0, 0x49, 0xab, 0x6a, 0xc0, 0x33, 0x7b, 0x55,
+ 0x03, 0x62, 0x99, 0x70, 0x51, 0x79, 0x05, 0xfc, 0x63, 0x0b, 0x26, 0x63, 0xba, 0x44, 0xcf, 0x42,
+ 0x76, 0xc3, 0xf7, 0xba, 0xfb, 0x52, 0x94, 0x18, 0x81, 0xbe, 0x0c, 0x69, 0xe6, 0xed, 0x4b, 0x4d,
+ 0x69, 0xe6, 0x71, 0x2d, 0x29, 0xf6, 0x33, 0xf2, 0xc4, 0x22, 0x5b, 0xf8, 0x19, 0x28, 0x0a, 0x86,
+ 0x6e, 0xd8, 0x8e, 0x3f, 0x32, 0x60, 0x8c, 0x66, 0xe8, 0x79, 0x38, 0x20, 0x9d, 0xe1, 0xe8, 0xc1,
+ 0xe5, 0x51, 0x83, 0xcb, 0x7a, 0xf0, 0x11, 0xc8, 0x89, 0xa4, 0x83, 0x0f, 0xe1, 0x27, 0x72, 0x3d,
+ 0x84, 0x7f, 0xe3, 0x43, 0x30, 0xc3, 0xf7, 0x20, 0xf5, 0x83, 0x65, 0xaf, 0xef, 0x32, 0x7d, 0x42,
+ 0x3a, 0x0d, 0xb3, 0x71, 0xb0, 0xb2, 0x92, 0x59, 0xc8, 0x35, 0x39, 0x40, 0xd0, 0x98, 0x24, 0xb2,
+ 0x81, 0x7f, 0x61, 0x01, 0x5a, 0xa5, 0x4c, 0xcc, 0x72, 0x79, 0x25, 0xdc, 0x1e, 0xf3, 0x50, 0xe8,
+ 0xda, 0xac, 0xb9, 0x49, 0xfd, 0x40, 0xe7, 0x2f, 0xba, 0xfd, 0x59, 0x24, 0x9e, 0xf8, 0x2c, 0xcc,
+ 0xc4, 0x56, 0xa9, 0x78, 0x9a, 0x87, 0x42, 0x53, 0xc1, 0x54, 0xc8, 0x0b, 0xdb, 0xf8, 0x37, 0x69,
+ 0x28, 0xe8, 0xb4, 0x0e, 0x9d, 0x85, 0xd2, 0x86, 0xe3, 0xb6, 0xa9, 0xdf, 0xf3, 0x1d, 0x25, 0x82,
+ 0xac, 0x4c, 0xf3, 0x0c, 0x30, 0x31, 0x1b, 0xe8, 0x29, 0x98, 0xe8, 0x07, 0xd4, 0x7f, 0xc3, 0x91,
+ 0x3b, 0xbd, 0xd8, 0x98, 0xdd, 0x19, 0xd4, 0xf2, 0xb7, 0x02, 0xea, 0x5f, 0x5e, 0xe1, 0xc1, 0xa7,
+ 0x2f, 0xbe, 0x88, 0xfc, 0x6f, 0xa1, 0x97, 0x95, 0x99, 0x8a, 0x04, 0xae, 0xf1, 0x15, 0xbe, 0xfc,
+ 0x84, 0xab, 0xeb, 0xf9, 0x5e, 0x97, 0xb2, 0x4d, 0xda, 0x0f, 0xea, 0x4d, 0xaf, 0xdb, 0xf5, 0xdc,
+ 0xba, 0xa8, 0xf7, 0x09, 0xa6, 0x79, 0x04, 0xe5, 0xc3, 0x95, 0xe5, 0xde, 0x84, 0x09, 0xb6, 0xe9,
+ 0x7b, 0xfd, 0xf6, 0xa6, 0x08, 0x0c, 0x99, 0xc6, 0xf9, 0xfd, 0xd3, 0xd3, 0x14, 0x88, 0xfe, 0x40,
+ 0x8f, 0x71, 0x69, 0xd1, 0xe6, 0x56, 0xd0, 0xef, 0xca, 0x53, 0x77, 0x23, 0xb7, 0x3b, 0xa8, 0x59,
+ 0x4f, 0x91, 0x10, 0x8c, 0x2f, 0xc0, 0x64, 0x2c, 0x15, 0x46, 0x67, 0x20, 0xeb, 0xd3, 0x0d, 0xed,
+ 0x0a, 0xd0, 0x70, 0xc6, 0x2c, 0xa3, 0x3f, 0xc7, 0x21, 0xe2, 0x17, 0x7f, 0x2f, 0x0d, 0x35, 0xa3,
+ 0x52, 0x77, 0xc9, 0xf3, 0xaf, 0x52, 0xe6, 0x3b, 0xcd, 0x6b, 0x76, 0x37, 0xac, 0xbb, 0xd4, 0xa0,
+ 0xd4, 0x15, 0xc0, 0x37, 0x8c, 0x5d, 0x04, 0xdd, 0x10, 0x0f, 0x1d, 0x03, 0x10, 0xdb, 0x4e, 0xf6,
+ 0xcb, 0x0d, 0x55, 0x14, 0x10, 0xd1, 0xbd, 0x1c, 0x13, 0x76, 0x7d, 0x9f, 0xc2, 0x51, 0x42, 0xbe,
+ 0x9c, 0x14, 0xf2, 0xbe, 0xe9, 0x84, 0x92, 0x35, 0xb7, 0x4b, 0x2e, 0xbe, 0x5d, 0xf0, 0xbf, 0x2c,
+ 0xa8, 0xae, 0xe9, 0x95, 0x3f, 0xa4, 0x38, 0x34, 0xbf, 0xe9, 0x47, 0xc4, 0x6f, 0xe6, 0x11, 0xf2,
+ 0x9b, 0x4d, 0xf0, 0x5b, 0x05, 0x58, 0x73, 0x5c, 0x7a, 0xc9, 0xe9, 0x30, 0xea, 0x8f, 0x38, 0x24,
+ 0xfd, 0x30, 0x13, 0x79, 0x1c, 0x42, 0x37, 0xb4, 0x0c, 0x96, 0x0d, 0x37, 0xff, 0x28, 0x58, 0x4c,
+ 0x3f, 0x42, 0x16, 0x33, 0x09, 0x0f, 0xe8, 0xc2, 0xc4, 0x86, 0x60, 0x4f, 0x46, 0xec, 0x58, 0xcd,
+ 0x38, 0xe2, 0xbd, 0xf1, 0x35, 0x35, 0xf9, 0xb9, 0x3d, 0x12, 0x2e, 0x51, 0xfb, 0xaf, 0x07, 0xdb,
+ 0x2e, 0xb3, 0x1f, 0x18, 0xe3, 0x89, 0x9e, 0x04, 0xd9, 0x2a, 0xa7, 0xcb, 0x8d, 0xcc, 0xe9, 0x5e,
+ 0x50, 0xd3, 0xfc, 0x2f, 0x79, 0x1d, 0x6e, 0x47, 0x0e, 0x56, 0x28, 0x45, 0x39, 0xd8, 0xe3, 0x7b,
+ 0x6d, 0x7f, 0xb9, 0xe9, 0xd1, 0xc9, 0xf8, 0xd1, 0xac, 0x1c, 0x1e, 0xcd, 0x5a, 0xf4, 0x41, 0xec,
+ 0x5c, 0x86, 0xff, 0x60, 0xc1, 0xf4, 0x2a, 0x65, 0xf1, 0x6c, 0xec, 0x0b, 0xa4, 0x7c, 0xfc, 0x12,
+ 0x1c, 0x34, 0xd6, 0xaf, 0xe4, 0xf4, 0x74, 0x22, 0x05, 0x3b, 0x14, 0x49, 0x4a, 0xc8, 0x40, 0x9d,
+ 0x6c, 0xe3, 0xd9, 0xd7, 0x0d, 0x28, 0x19, 0x9d, 0xe8, 0x42, 0x22, 0xef, 0x9a, 0x49, 0x5c, 0xc2,
+ 0xf0, 0xdc, 0xa1, 0x31, 0xab, 0x78, 0x92, 0xe7, 0x57, 0x95, 0x55, 0x87, 0x39, 0xca, 0x3a, 0x20,
+ 0xa1, 0x58, 0x41, 0xd6, 0x8c, 0x92, 0x02, 0xfa, 0x72, 0x98, 0x80, 0x85, 0x6d, 0xf4, 0x18, 0x64,
+ 0x7d, 0xef, 0xbe, 0x4e, 0xa8, 0x27, 0x8d, 0xea, 0xbc, 0x77, 0x9f, 0x88, 0x2e, 0xfc, 0x3c, 0x64,
+ 0x88, 0x77, 0x1f, 0x55, 0x01, 0x7c, 0xdb, 0x6d, 0xd3, 0xdb, 0xe1, 0x51, 0xae, 0x4c, 0x0c, 0xc8,
+ 0x98, 0x0c, 0x66, 0x19, 0x0e, 0x9a, 0x2b, 0x92, 0xea, 0x5e, 0x84, 0x89, 0x57, 0xfa, 0xa6, 0xb8,
+ 0x66, 0x13, 0xe2, 0x92, 0x15, 0x03, 0x8d, 0xc4, 0x6d, 0x06, 0x22, 0x38, 0x3a, 0x0a, 0x45, 0x66,
+ 0xdf, 0xe9, 0xd0, 0x6b, 0x91, 0xb3, 0x8c, 0x00, 0xbc, 0x97, 0x9f, 0x42, 0x6f, 0x1b, 0xa9, 0x58,
+ 0x04, 0x40, 0x4f, 0xc2, 0x74, 0xb4, 0xe6, 0x1b, 0x3e, 0xdd, 0x70, 0x1e, 0x08, 0x0d, 0x97, 0xc9,
+ 0x10, 0x1c, 0x9d, 0x84, 0x03, 0x11, 0x6c, 0x5d, 0xa4, 0x3c, 0x59, 0x81, 0x9a, 0x04, 0x73, 0xd9,
+ 0x08, 0x76, 0x2f, 0xde, 0xed, 0xdb, 0x1d, 0xb1, 0x4d, 0xcb, 0xc4, 0x80, 0xe0, 0x3f, 0x5a, 0x70,
+ 0x50, 0xaa, 0x9a, 0xef, 0x81, 0x2f, 0xa2, 0xd5, 0xff, 0xd2, 0x02, 0x64, 0x72, 0xa0, 0x4c, 0xeb,
+ 0x4b, 0x66, 0x45, 0x8a, 0xe7, 0x54, 0x25, 0x71, 0xb8, 0x96, 0xa0, 0xa8, 0xa8, 0x84, 0x21, 0xdf,
+ 0x94, 0x95, 0x37, 0x71, 0x79, 0x20, 0x4f, 0xef, 0x12, 0x42, 0xd4, 0x3f, 0xaa, 0x41, 0xee, 0xce,
+ 0x36, 0xa3, 0x81, 0x3a, 0x7b, 0x8b, 0xa2, 0x83, 0x00, 0x10, 0xf9, 0xc7, 0xe7, 0x52, 0xf7, 0x65,
+ 0x42, 0x0d, 0x6a, 0x2e, 0x05, 0x22, 0xfa, 0x03, 0xff, 0x27, 0x0d, 0x93, 0xb7, 0xbd, 0x4e, 0x3f,
+ 0x0a, 0xaf, 0x5f, 0xa4, 0xd0, 0x12, 0x2b, 0x08, 0xe4, 0x74, 0x41, 0x00, 0x41, 0x36, 0x60, 0xb4,
+ 0x27, 0x2c, 0x2b, 0x43, 0xc4, 0x37, 0xc2, 0x50, 0x66, 0xb6, 0xdf, 0xa6, 0x4c, 0x1e, 0xb3, 0x2a,
+ 0x79, 0x91, 0xff, 0xc6, 0x60, 0x68, 0x01, 0x4a, 0x76, 0xbb, 0xed, 0xd3, 0xb6, 0xcd, 0x68, 0x63,
+ 0xbb, 0x32, 0x21, 0x26, 0x33, 0x41, 0xe8, 0x0a, 0x4c, 0x35, 0xed, 0xe6, 0xa6, 0xe3, 0xb6, 0xaf,
+ 0xf7, 0xe4, 0x4d, 0x49, 0x41, 0x78, 0xf0, 0xa3, 0x8b, 0xe6, 0x35, 0xf2, 0xe2, 0x72, 0x0c, 0x47,
+ 0xf9, 0xb1, 0xc4, 0x48, 0xfc, 0x1a, 0x4c, 0x69, 0xc1, 0x2b, 0xf3, 0x38, 0x03, 0x13, 0xf7, 0x04,
+ 0x64, 0x44, 0xb1, 0x4f, 0xa2, 0x2a, 0x52, 0x1a, 0x2d, 0x7e, 0xa9, 0xa1, 0xf9, 0xc7, 0x57, 0x20,
+ 0x2f, 0xd1, 0xd1, 0x51, 0xf3, 0xe0, 0x25, 0x73, 0x4f, 0xde, 0x56, 0xa7, 0x28, 0x0c, 0x79, 0x49,
+ 0x48, 0x19, 0x91, 0xb0, 0x33, 0x09, 0x21, 0xea, 0x1f, 0xff, 0x28, 0x0d, 0x87, 0x56, 0x28, 0x13,
+ 0xf7, 0x8a, 0x97, 0x1c, 0xda, 0x69, 0x7d, 0xa6, 0x35, 0x81, 0xb0, 0xb2, 0x97, 0x31, 0x2a, 0x7b,
+ 0xdc, 0x87, 0x75, 0x1c, 0x97, 0xae, 0x19, 0xa5, 0xa1, 0x08, 0x10, 0xc9, 0x28, 0x67, 0x16, 0x8d,
+ 0xb4, 0x8d, 0xe4, 0x0d, 0x1b, 0x89, 0x0a, 0x82, 0x13, 0xb1, 0x1a, 0xa6, 0x3e, 0x81, 0x16, 0xa2,
+ 0xe3, 0x2b, 0xfe, 0x9d, 0x05, 0x73, 0x49, 0xb9, 0x28, 0x35, 0x5e, 0x84, 0xfc, 0x86, 0x80, 0x0c,
+ 0x97, 0x9d, 0x63, 0x23, 0x64, 0xe5, 0x42, 0xa2, 0x9a, 0x95, 0x0b, 0x09, 0x41, 0x4f, 0xc4, 0x2e,
+ 0xac, 0x1a, 0x33, 0xbb, 0x83, 0xda, 0x01, 0x01, 0x30, 0x70, 0x15, 0x33, 0xa7, 0xc3, 0x85, 0x67,
+ 0xa2, 0x92, 0x88, 0x84, 0x98, 0x84, 0x55, 0x7d, 0xf3, 0xcf, 0x16, 0x4c, 0xc6, 0x16, 0x22, 0x44,
+ 0xc4, 0xb7, 0x80, 0x0a, 0x0f, 0xb2, 0x81, 0x9e, 0x80, 0x2c, 0xdb, 0xee, 0xa9, 0xa8, 0xd0, 0x38,
+ 0xf4, 0xf1, 0xa0, 0x76, 0x30, 0x36, 0xec, 0xe6, 0x76, 0x8f, 0x12, 0x81, 0xc2, 0x77, 0x4e, 0xd3,
+ 0xf6, 0x5b, 0x8e, 0x6b, 0x77, 0x1c, 0xb6, 0xad, 0xae, 0xe0, 0x4d, 0x10, 0x77, 0x47, 0x3d, 0xdb,
+ 0x0f, 0x74, 0x12, 0x58, 0x94, 0xee, 0x48, 0x81, 0x88, 0xfe, 0x10, 0xc5, 0x9d, 0x2d, 0xca, 0x9a,
+ 0x9b, 0x32, 0x2c, 0xa8, 0xe2, 0x8e, 0x80, 0xc4, 0x8a, 0x3b, 0x02, 0x82, 0x7f, 0x6e, 0x45, 0xc6,
+ 0x29, 0xf7, 0xf0, 0xe7, 0xce, 0x38, 0xf1, 0x37, 0x22, 0x3b, 0xd1, 0x4b, 0x54, 0x76, 0xf2, 0x22,
+ 0x4c, 0xb5, 0x62, 0x3d, 0xe3, 0xed, 0x45, 0x16, 0xae, 0x13, 0xe8, 0xb8, 0x1f, 0xe9, 0x51, 0x40,
+ 0xc6, 0xe8, 0x31, 0xa1, 0x9c, 0xf4, 0xb0, 0x72, 0x22, 0xa9, 0x67, 0xf6, 0x96, 0xfa, 0x93, 0xc7,
+ 0xa1, 0x18, 0x5e, 0x52, 0xa2, 0x12, 0x4c, 0x5c, 0xba, 0x4e, 0x5e, 0xbd, 0x40, 0x56, 0xa6, 0x53,
+ 0xa8, 0x0c, 0x85, 0xc6, 0x85, 0xe5, 0x97, 0x45, 0xcb, 0x5a, 0xfa, 0x75, 0x5e, 0x27, 0x2e, 0x3e,
+ 0xfa, 0x2a, 0xe4, 0x64, 0x36, 0x32, 0x17, 0x31, 0x67, 0xde, 0xdf, 0xcd, 0x1f, 0x1e, 0x82, 0x4b,
+ 0x29, 0xe1, 0xd4, 0x19, 0x0b, 0x5d, 0x83, 0x92, 0x00, 0xaa, 0x0a, 0xf9, 0xd1, 0x64, 0xa1, 0x3a,
+ 0x46, 0xe9, 0xd8, 0x98, 0x5e, 0x83, 0xde, 0x79, 0xc8, 0x49, 0x81, 0xcd, 0x25, 0x92, 0xc6, 0x11,
+ 0xab, 0x89, 0xdd, 0x19, 0xe0, 0x14, 0x7a, 0x0e, 0xb2, 0x37, 0x6d, 0xa7, 0x83, 0x8c, 0x9c, 0xd5,
+ 0x28, 0x6c, 0xcf, 0xcf, 0x25, 0xc1, 0xc6, 0xb4, 0x2f, 0x84, 0xf5, 0xf9, 0xc3, 0xc9, 0x22, 0xa1,
+ 0x1e, 0x5e, 0x19, 0xee, 0x08, 0x67, 0xbe, 0x2e, 0xab, 0xc8, 0xba, 0x54, 0x85, 0x8e, 0xc5, 0xa7,
+ 0x4a, 0x54, 0xb6, 0xe6, 0xab, 0xe3, 0xba, 0x43, 0x82, 0x6b, 0x50, 0x32, 0xca, 0x44, 0xa6, 0x58,
+ 0x87, 0x6b, 0x5c, 0xa6, 0x58, 0x47, 0xd4, 0x96, 0x70, 0x0a, 0xad, 0x42, 0x41, 0x3c, 0xe0, 0xb0,
+ 0x59, 0x80, 0x8e, 0x24, 0x13, 0x7a, 0x23, 0x91, 0x9b, 0x3f, 0x3a, 0xba, 0x33, 0x24, 0xf4, 0x75,
+ 0x28, 0xae, 0x52, 0xa6, 0x22, 0xd8, 0xe1, 0x64, 0x08, 0x1c, 0x21, 0xa9, 0x78, 0x18, 0xc5, 0x29,
+ 0xf4, 0x9a, 0x38, 0x74, 0xc4, 0xdd, 0x33, 0xaa, 0x8d, 0x71, 0xc3, 0xe1, 0xba, 0x16, 0xc6, 0x23,
+ 0x84, 0x94, 0x5f, 0x8d, 0x51, 0x56, 0x79, 0x43, 0x6d, 0xcc, 0x86, 0x0d, 0x29, 0xd7, 0xf6, 0x78,
+ 0x20, 0x86, 0x53, 0x4b, 0xaf, 0xeb, 0x97, 0x26, 0x2b, 0x36, 0xb3, 0xd1, 0x75, 0x98, 0x0a, 0x1f,
+ 0xc3, 0x88, 0x47, 0x54, 0x31, 0x9b, 0x1f, 0x7a, 0xb1, 0x15, 0xb3, 0xf9, 0xe1, 0x97, 0x5b, 0x38,
+ 0xb5, 0xf4, 0x26, 0xcc, 0xca, 0x8b, 0x3f, 0xf9, 0x22, 0xe9, 0x92, 0xef, 0xb9, 0x8c, 0xfb, 0x2c,
+ 0x02, 0x93, 0xb1, 0xa7, 0x4a, 0xc8, 0xb0, 0x9a, 0x51, 0xef, 0xa4, 0x4c, 0x56, 0x46, 0xbe, 0x71,
+ 0xc2, 0xa9, 0xa5, 0xbf, 0x5a, 0x50, 0x36, 0x27, 0x43, 0xb7, 0x0c, 0x6e, 0xc4, 0xd3, 0x1e, 0x53,
+ 0x62, 0x23, 0x9f, 0x1a, 0x99, 0xba, 0x18, 0xfd, 0x2a, 0x08, 0xa7, 0xd0, 0x9b, 0xe2, 0x9d, 0xd2,
+ 0xf0, 0x63, 0x12, 0x74, 0x7c, 0xcf, 0x77, 0x2b, 0x72, 0x92, 0x13, 0x9f, 0xf2, 0x7d, 0x0b, 0x4e,
+ 0x35, 0x5e, 0x7f, 0xf7, 0x83, 0x6a, 0xea, 0xbd, 0x0f, 0xaa, 0xa9, 0x8f, 0x3e, 0xa8, 0x5a, 0xdf,
+ 0xd9, 0xa9, 0x5a, 0xbf, 0xda, 0xa9, 0x5a, 0xef, 0xec, 0x54, 0xad, 0x77, 0x77, 0xaa, 0xd6, 0x3f,
+ 0x76, 0xaa, 0xd6, 0x3f, 0x77, 0xaa, 0xa9, 0x8f, 0x76, 0xaa, 0xd6, 0x5b, 0x1f, 0x56, 0x53, 0xef,
+ 0x7e, 0x58, 0x4d, 0xbd, 0xf7, 0x61, 0x35, 0xf5, 0xcd, 0x13, 0x7b, 0x97, 0x32, 0x64, 0x58, 0xc9,
+ 0x8b, 0xbf, 0xa7, 0xff, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x08, 0x27, 0xb1, 0x4c, 0x3b, 0x29, 0x00,
+ 0x00,
}
func (x Direction) String() string {
@@ -3914,7 +3923,7 @@ func (this *StreamMetadata) Equal(that interface{}) bool {
if this.StreamHash != that1.StreamHash {
return false
}
- if this.LineSize != that1.LineSize {
+ if this.EntriesSize != that1.EntriesSize {
return false
}
if this.StructuredMetadataSize != that1.StructuredMetadataSize {
@@ -4078,6 +4087,9 @@ func (this *GetStreamUsageResponse) Equal(that interface{}) bool {
if this.ActiveStreams != that1.ActiveStreams {
return false
}
+ if this.Rate != that1.Rate {
+ return false
+ }
if len(this.UnknownStreams) != len(that1.UnknownStreams) {
return false
}
@@ -5897,7 +5909,7 @@ func (this *StreamMetadata) GoString() string {
s := make([]string, 0, 7)
s = append(s, "&logproto.StreamMetadata{")
s = append(s, "StreamHash: "+fmt.Sprintf("%#v", this.StreamHash)+",\n")
- s = append(s, "LineSize: "+fmt.Sprintf("%#v", this.LineSize)+",\n")
+ s = append(s, "EntriesSize: "+fmt.Sprintf("%#v", this.EntriesSize)+",\n")
s = append(s, "StructuredMetadataSize: "+fmt.Sprintf("%#v", this.StructuredMetadataSize)+",\n")
s = append(s, "}")
return strings.Join(s, "")
@@ -5955,10 +5967,11 @@ func (this *GetStreamUsageResponse) GoString() string {
if this == nil {
return "nil"
}
- s := make([]string, 0, 7)
+ s := make([]string, 0, 8)
s = append(s, "&logproto.GetStreamUsageResponse{")
s = append(s, "Tenant: "+fmt.Sprintf("%#v", this.Tenant)+",\n")
s = append(s, "ActiveStreams: "+fmt.Sprintf("%#v", this.ActiveStreams)+",\n")
+ s = append(s, "Rate: "+fmt.Sprintf("%#v", this.Rate)+",\n")
s = append(s, "UnknownStreams: "+fmt.Sprintf("%#v", this.UnknownStreams)+",\n")
s = append(s, "}")
return strings.Join(s, "")
@@ -7615,8 +7628,8 @@ func (m *StreamMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i--
dAtA[i] = 0x18
}
- if m.LineSize != 0 {
- i = encodeVarintLogproto(dAtA, i, uint64(m.LineSize))
+ if m.EntriesSize != 0 {
+ i = encodeVarintLogproto(dAtA, i, uint64(m.EntriesSize))
i--
dAtA[i] = 0x10
}
@@ -7854,7 +7867,12 @@ func (m *GetStreamUsageResponse) MarshalToSizedBuffer(dAtA []byte) (int, error)
copy(dAtA[i:], dAtA7[:j6])
i = encodeVarintLogproto(dAtA, i, uint64(j6))
i--
- dAtA[i] = 0x1a
+ dAtA[i] = 0x22
+ }
+ if m.Rate != 0 {
+ i = encodeVarintLogproto(dAtA, i, uint64(m.Rate))
+ i--
+ dAtA[i] = 0x18
}
if m.ActiveStreams != 0 {
i = encodeVarintLogproto(dAtA, i, uint64(m.ActiveStreams))
@@ -10385,8 +10403,8 @@ func (m *StreamMetadata) Size() (n int) {
if m.StreamHash != 0 {
n += 1 + sovLogproto(uint64(m.StreamHash))
}
- if m.LineSize != 0 {
- n += 1 + sovLogproto(uint64(m.LineSize))
+ if m.EntriesSize != 0 {
+ n += 1 + sovLogproto(uint64(m.EntriesSize))
}
if m.StructuredMetadataSize != 0 {
n += 1 + sovLogproto(uint64(m.StructuredMetadataSize))
@@ -10488,6 +10506,9 @@ func (m *GetStreamUsageResponse) Size() (n int) {
if m.ActiveStreams != 0 {
n += 1 + sovLogproto(uint64(m.ActiveStreams))
}
+ if m.Rate != 0 {
+ n += 1 + sovLogproto(uint64(m.Rate))
+ }
if len(m.UnknownStreams) > 0 {
l = 0
for _, e := range m.UnknownStreams {
@@ -11630,7 +11651,7 @@ func (this *StreamMetadata) String() string {
}
s := strings.Join([]string{`&StreamMetadata{`,
`StreamHash:` + fmt.Sprintf("%v", this.StreamHash) + `,`,
- `LineSize:` + fmt.Sprintf("%v", this.LineSize) + `,`,
+ `EntriesSize:` + fmt.Sprintf("%v", this.EntriesSize) + `,`,
`StructuredMetadataSize:` + fmt.Sprintf("%v", this.StructuredMetadataSize) + `,`,
`}`,
}, "")
@@ -11698,6 +11719,7 @@ func (this *GetStreamUsageResponse) String() string {
s := strings.Join([]string{`&GetStreamUsageResponse{`,
`Tenant:` + fmt.Sprintf("%v", this.Tenant) + `,`,
`ActiveStreams:` + fmt.Sprintf("%v", this.ActiveStreams) + `,`,
+ `Rate:` + fmt.Sprintf("%v", this.Rate) + `,`,
`UnknownStreams:` + fmt.Sprintf("%v", this.UnknownStreams) + `,`,
`}`,
}, "")
@@ -12879,9 +12901,9 @@ func (m *StreamMetadata) Unmarshal(dAtA []byte) error {
}
case 2:
if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field LineSize", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field EntriesSize", wireType)
}
- m.LineSize = 0
+ m.EntriesSize = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowLogproto
@@ -12891,7 +12913,7 @@ func (m *StreamMetadata) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.LineSize |= uint64(b&0x7F) << shift
+ m.EntriesSize |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@@ -13599,6 +13621,25 @@ func (m *GetStreamUsageResponse) Unmarshal(dAtA []byte) error {
}
}
case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Rate", wireType)
+ }
+ m.Rate = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLogproto
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Rate |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
if wireType == 0 {
var v uint64
for shift := uint(0); ; shift += 7 {
diff --git a/pkg/logproto/logproto.proto b/pkg/logproto/logproto.proto
index aac51b12168ce..a228f4f878de9 100644
--- a/pkg/logproto/logproto.proto
+++ b/pkg/logproto/logproto.proto
@@ -57,7 +57,7 @@ message StreamRatesResponse {
message StreamMetadata {
uint64 streamHash = 1;
- uint64 lineSize = 2;
+ uint64 entriesSize = 2;
uint64 structuredMetadataSize = 3;
}
@@ -94,7 +94,8 @@ message GetStreamUsageRequest {
message GetStreamUsageResponse {
string tenant = 1;
uint64 activeStreams = 2;
- repeated uint64 unknownStreams = 3;
+ int64 rate = 3;
+ repeated uint64 unknownStreams = 4;
}
message StreamRate {
|
feat
|
Implement global tenant rate limiting (#16727)
|
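The regenerated logproto.pb.go hunk above is mostly opaque hex, but the changed tag bytes follow directly from the protobuf wire format: a field's key byte is (field_number << 3) | wire_type for field numbers below 16. Moving unknownStreams from field 3 to field 4 turns 0x1a into 0x22, and the new varint rate field (number 3) gets 0x18, matching the MarshalToSizedBuffer changes. A minimal standalone Go sketch (not part of the Loki codebase) reproducing those constants:

package main

import "fmt"

// tagByte computes the single-byte protobuf field key
// (field_number << 3) | wire_type, valid for field numbers 1-15.
func tagByte(fieldNumber, wireType int) byte {
	return byte(fieldNumber<<3 | wireType)
}

func main() {
	const (
		varint          = 0 // uint64/int64 fields such as entriesSize and rate
		lengthDelimited = 2 // packed repeated uint64 such as unknownStreams
	)
	fmt.Printf("entriesSize    (field 2, varint): 0x%02x\n", tagByte(2, varint))          // 0x10
	fmt.Printf("rate           (field 3, varint): 0x%02x\n", tagByte(3, varint))          // 0x18
	fmt.Printf("unknownStreams (field 3, packed): 0x%02x\n", tagByte(3, lengthDelimited)) // 0x1a (old)
	fmt.Printf("unknownStreams (field 4, packed): 0x%02x\n", tagByte(4, lengthDelimited)) // 0x22 (new)
}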
c7ad168a33f131f6feeabae4c985874e03e17aca
|
2024-01-12 18:48:33
|
Joao Marcal
|
operator: updates mixins to fix structured metadata dashboards (#11671)
| false
|
diff --git a/operator/CHANGELOG.md b/operator/CHANGELOG.md
index f6cfa9a5cda01..ad9b319b625c9 100644
--- a/operator/CHANGELOG.md
+++ b/operator/CHANGELOG.md
@@ -1,5 +1,6 @@
## Main
+- [11671](https://github.com/grafana/loki/pull/11671) **JoaoBraveCoding**: Update mixins to fix structured metadata dashboards
- [11624](https://github.com/grafana/loki/pull/11624) **xperimental**: React to changes in ConfigMap used for storage CA
- [11481](https://github.com/grafana/loki/pull/11481) **JoaoBraveCoding**: Adds AWS STS support
- [11533](https://github.com/grafana/loki/pull/11533) **periklis**: Add serviceaccount per LokiStack resource
diff --git a/operator/internal/manifests/openshift/internal/dashboards/static/grafana-dashboard-lokistack-reads.json b/operator/internal/manifests/openshift/internal/dashboards/static/grafana-dashboard-lokistack-reads.json
index e1adb4dd6cc0a..df5ea66e6d2a5 100644
--- a/operator/internal/manifests/openshift/internal/dashboards/static/grafana-dashboard-lokistack-reads.json
+++ b/operator/internal/manifests/openshift/internal/dashboards/static/grafana-dashboard-lokistack-reads.json
@@ -217,9 +217,9 @@
"group": "A",
"mode": "normal"
}
- }
- },
- "unit": "s"
+ },
+ "unit": "s"
+ }
},
"fill": 1,
"id": 3,
@@ -493,9 +493,9 @@
"group": "A",
"mode": "normal"
}
- }
- },
- "unit": "s"
+ },
+ "unit": "s"
+ }
},
"fill": 1,
"id": 6,
@@ -769,9 +769,9 @@
"group": "A",
"mode": "normal"
}
- }
- },
- "unit": "s"
+ },
+ "unit": "s"
+ }
},
"fill": 1,
"id": 9,
@@ -1045,9 +1045,9 @@
"group": "A",
"mode": "normal"
}
- }
- },
- "unit": "s"
+ },
+ "unit": "s"
+ }
},
"fill": 1,
"id": 15,
diff --git a/operator/internal/manifests/openshift/internal/dashboards/static/grafana-dashboard-lokistack-writes.json b/operator/internal/manifests/openshift/internal/dashboards/static/grafana-dashboard-lokistack-writes.json
index 58107485d370c..8053d353b1135 100644
--- a/operator/internal/manifests/openshift/internal/dashboards/static/grafana-dashboard-lokistack-writes.json
+++ b/operator/internal/manifests/openshift/internal/dashboards/static/grafana-dashboard-lokistack-writes.json
@@ -66,7 +66,7 @@
"steppedLine": false,
"targets": [
{
- "expr": "sum by (status) (\n label_replace(label_replace(rate(loki_request_duration_seconds_count{namespace=\"$namespace\",job=~\".+-distributor-http\",route=\"loki_api_v1_push\", route=~\"api_prom_push|loki_api_v1_push|/httpgrpc.HTTP/Handle\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n \"status\", \"${1}\", \"status_code\", \"([a-z]+)\"))\n",
+ "expr": "sum by (status) (\n label_replace(label_replace(rate(loki_request_duration_seconds_count{namespace=\"$namespace\",job=~\".+-distributor-http\", route=~\"api_prom_push|loki_api_v1_push|/httpgrpc.HTTP/Handle\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n \"status\", \"${1}\", \"status_code\", \"([a-z]+)\"))\n",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{status}}",
@@ -142,7 +142,7 @@
"steppedLine": false,
"targets": [
{
- "expr": "histogram_quantile(0.99, sum by (le) (namespace_job_route:loki_request_duration_seconds_bucket:sum_rate{namespace=\"$namespace\", job=~\".+-distributor-http\", route=\"loki_api_v1_push\"})) * 1e3",
+ "expr": "histogram_quantile(0.99, sum by (le) (namespace_job_route:loki_request_duration_seconds_bucket:sum_rate{namespace=\"$namespace\", job=~\".+-distributor-http\", route=\"api_prom_push|loki_api_v1_push|/httpgrpc.HTTP/Handle\"})) * 1e3",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "99th Percentile",
@@ -150,7 +150,7 @@
"step": 10
},
{
- "expr": "histogram_quantile(0.50, sum by (le) (namespace_job_route:loki_request_duration_seconds_bucket:sum_rate{namespace=\"$namespace\", job=~\".+-distributor-http\", route=\"loki_api_v1_push\"})) * 1e3",
+ "expr": "histogram_quantile(0.50, sum by (le) (namespace_job_route:loki_request_duration_seconds_bucket:sum_rate{namespace=\"$namespace\", job=~\".+-distributor-http\", route=\"api_prom_push|loki_api_v1_push|/httpgrpc.HTTP/Handle\"})) * 1e3",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "50th Percentile",
@@ -158,7 +158,7 @@
"step": 10
},
{
- "expr": "1e3 * sum(namespace_job_route:loki_request_duration_seconds_sum:sum_rate{namespace=\"$namespace\", job=~\".+-distributor-http\", route=\"loki_api_v1_push\"}) / sum(namespace_job_route:loki_request_duration_seconds_count:sum_rate{namespace=\"$namespace\", job=~\".+-distributor-http\", route=\"loki_api_v1_push\"})",
+ "expr": "1e3 * sum(namespace_job_route:loki_request_duration_seconds_sum:sum_rate{namespace=\"$namespace\", job=~\".+-distributor-http\", route=\"api_prom_push|loki_api_v1_push|/httpgrpc.HTTP/Handle\"}) / sum(namespace_job_route:loki_request_duration_seconds_count:sum_rate{namespace=\"$namespace\", job=~\".+-distributor-http\", route=\"api_prom_push|loki_api_v1_push|/httpgrpc.HTTP/Handle\"})",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "Average",
@@ -246,7 +246,7 @@
"steppedLine": false,
"targets": [
{
- "expr": "sum (rate(loki_distributor_structured_metadata_bytes_received_total{namespace=\"$namespace\",job=~\".+-distributor-http\",route=\"loki_api_v1_push\",}[$__rate_interval])) / sum(rate(loki_distributor_bytes_received_total{namespace=\"$namespace\",job=~\".+-distributor-http\",route=\"loki_api_v1_push\",}[$__rate_interval]))",
+ "expr": "sum (rate(loki_distributor_structured_metadata_bytes_received_total{namespace=\"$namespace\",job=~\".+-distributor-http\",}[$__rate_interval])) / sum(rate(loki_distributor_bytes_received_total{namespace=\"$namespace\",job=~\".+-distributor-http\",}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "bytes",
@@ -322,7 +322,7 @@
"steppedLine": false,
"targets": [
{
- "expr": "sum by (tenant) (rate(loki_distributor_structured_metadata_bytes_received_total{namespace=\"$namespace\",job=~\".+-distributor-http\",route=\"loki_api_v1_push\",}[$__rate_interval])) / ignoring(tenant) group_left sum(rate(loki_distributor_structured_metadata_bytes_received_total{namespace=\"$namespace\",job=~\".+-distributor-http\",route=\"loki_api_v1_push\",}[$__rate_interval]))",
+ "expr": "sum by (tenant) (rate(loki_distributor_structured_metadata_bytes_received_total{namespace=\"$namespace\",job=~\".+-distributor-http\",}[$__rate_interval])) / ignoring(tenant) group_left sum(rate(loki_distributor_structured_metadata_bytes_received_total{namespace=\"$namespace\",job=~\".+-distributor-http\",}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{tenant}}",
diff --git a/operator/jsonnet/config.libsonnet b/operator/jsonnet/config.libsonnet
index efdc1c6103d5c..82dc625da2a4f 100644
--- a/operator/jsonnet/config.libsonnet
+++ b/operator/jsonnet/config.libsonnet
@@ -238,7 +238,6 @@ local utils = (import 'github.com/grafana/jsonnet-libs/mixin-utils/utils.libsonn
distributor:: [
utils.selector.eq('namespace', '$namespace'),
utils.selector.re('job', '.+-distributor-http'),
- utils.selector.eq('route', 'loki_api_v1_push'),
],
ingester:: [
utils.selector.eq('namespace', '$namespace'),
diff --git a/operator/jsonnet/jsonnetfile.json b/operator/jsonnet/jsonnetfile.json
index 4b25fb159b3d8..2bc2549a3c600 100644
--- a/operator/jsonnet/jsonnetfile.json
+++ b/operator/jsonnet/jsonnetfile.json
@@ -8,7 +8,7 @@
"subdir": "production/loki-mixin"
}
},
- "version": "bd505f8e2d37172ff35a89f4ac42efec9566a263"
+ "version": "0694d797dec010393567704211638219c1971b46"
}
],
"legacyImports": true
diff --git a/operator/jsonnet/jsonnetfile.lock.json b/operator/jsonnet/jsonnetfile.lock.json
index 27d2e6e8756c6..3a0710db7565f 100644
--- a/operator/jsonnet/jsonnetfile.lock.json
+++ b/operator/jsonnet/jsonnetfile.lock.json
@@ -38,8 +38,8 @@
"subdir": "production/loki-mixin"
}
},
- "version": "bd505f8e2d37172ff35a89f4ac42efec9566a263",
- "sum": "yiXXBAcWfMkYSJthU2OZSgHHmveWvmRT6aM1V0MaAjs="
+ "version": "0694d797dec010393567704211638219c1971b46",
+ "sum": "Pw/9T/ZRjXLqTivU5xkJnrP5kFdET2FDUjjG1G96GmQ="
},
{
"source": {
|
operator
|
updates mixins to fix structured metadata dashboards (#11671)
|
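For reference, the dashboard queries above use label_replace twice to collapse status_code into a status class: "([0-9]).." with replacement "${1}xx" keeps the first digit of a three-character numeric code, while "([a-z]+)" passes word-valued codes through unchanged. label_replace only rewrites a label when its regex matches the whole source value, so the rough Go equivalent below anchors the patterns explicitly; the sample values are illustrative, not taken from the dashboards.

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Mirrors "([0-9]).." -> "${1}xx" and "([a-z]+)" -> "${1}" from the
	// label_replace calls; label_replace implicitly anchors, hence ^...$.
	classRe := regexp.MustCompile(`^([0-9])..$`)
	wordRe := regexp.MustCompile(`^([a-z]+)$`)

	for _, code := range []string{"204", "500", "success", "error"} {
		status := code
		switch {
		case classRe.MatchString(code):
			status = classRe.ReplaceAllString(code, "${1}xx") // "204" -> "2xx"
		case wordRe.MatchString(code):
			status = wordRe.ReplaceAllString(code, "${1}") // "success" -> "success"
		}
		fmt.Printf("status_code=%q -> status=%q\n", code, status)
	}
}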
760c255431bec88173112a4d69eb935602dab6c1
|
2023-08-25 21:57:17
|
Kaviraj Kanagaraj
|
doc(schema): Start recommending `v12` schema instead of `v11` (#10355)
| false
|
diff --git a/docs/sources/configure/_index.md b/docs/sources/configure/_index.md
index 1821c673865cd..179b30c046873 100644
--- a/docs/sources/configure/_index.md
+++ b/docs/sources/configure/_index.md
@@ -4137,7 +4137,7 @@ The `period_config` block configures what index schemas should be used for from
# If omitted, defaults to the same value as store.
[object_store: <string> | default = ""]
-# The schema version to use, current recommended schema is v11.
+# The schema version to use, current recommended schema is v12.
[schema: <string> | default = ""]
# Configures how the index is updated and stored.
diff --git a/pkg/storage/config/schema_config.go b/pkg/storage/config/schema_config.go
index f035e79f06132..5bf215049d48e 100644
--- a/pkg/storage/config/schema_config.go
+++ b/pkg/storage/config/schema_config.go
@@ -159,7 +159,7 @@ type PeriodConfig struct {
IndexType string `yaml:"store" doc:"description=store and object_store below affect which <storage_config> key is used.\nWhich store to use for the index. Either aws, aws-dynamo, gcp, bigtable, bigtable-hashed, cassandra, boltdb or boltdb-shipper. "`
// type of object client to use; if omitted, defaults to store.
ObjectType string `yaml:"object_store" doc:"description=Which store to use for the chunks. Either aws, azure, gcp, bigtable, gcs, cassandra, swift, filesystem or a named_store (refer to named_stores_config). If omitted, defaults to the same value as store."`
- Schema string `yaml:"schema" doc:"description=The schema version to use, current recommended schema is v11."`
+ Schema string `yaml:"schema" doc:"description=The schema version to use, current recommended schema is v12."`
IndexTables PeriodicTableConfig `yaml:"index" doc:"description=Configures how the index is updated and stored."`
ChunkTables PeriodicTableConfig `yaml:"chunks" doc:"description=Configured how the chunks are updated and stored."`
RowShards uint32 `yaml:"row_shards" doc:"description=How many shards will be created. Only used if schema is v10 or greater."`
|
doc
|
Start recommending `v12` schema instead of `v11` (#10355)
|
f899571ecf9d26a191849dcd5d644833423ecbb3
|
2024-10-30 22:01:11
|
renovate[bot]
|
chore(deps): update anchore/sbom-action action to v0.17.6 (#14649)
| false
|
diff --git a/.github/workflows/syft-sbom-ci.yml b/.github/workflows/syft-sbom-ci.yml
index ed1748a641cf2..1c1686b515e4b 100644
--- a/.github/workflows/syft-sbom-ci.yml
+++ b/.github/workflows/syft-sbom-ci.yml
@@ -14,7 +14,7 @@ jobs:
uses: actions/checkout@v4
- name: Anchore SBOM Action
- uses: anchore/[email protected]
+ uses: anchore/[email protected]
with:
artifact-name: ${{ github.event.repository.name }}-spdx.json
|
chore
|
update anchore/sbom-action action to v0.17.6 (#14649)
|
a355840cec12a9e80696e6b2fc4cbab764b45f69
|
2024-11-07 21:05:13
|
renovate[bot]
|
fix(deps): update aws-sdk-go-v2 monorepo (#14820)
| false
|
diff --git a/tools/lambda-promtail/go.mod b/tools/lambda-promtail/go.mod
index efbddc2890619..872b8bc6a4937 100644
--- a/tools/lambda-promtail/go.mod
+++ b/tools/lambda-promtail/go.mod
@@ -4,9 +4,9 @@ go 1.22
require (
github.com/aws/aws-lambda-go v1.47.0
- github.com/aws/aws-sdk-go-v2 v1.32.3
- github.com/aws/aws-sdk-go-v2/config v1.28.1
- github.com/aws/aws-sdk-go-v2/service/s3 v1.66.2
+ github.com/aws/aws-sdk-go-v2 v1.32.4
+ github.com/aws/aws-sdk-go-v2/config v1.28.2
+ github.com/aws/aws-sdk-go-v2/service/s3 v1.66.3
github.com/go-kit/log v0.2.1
github.com/gogo/protobuf v1.3.2
github.com/golang/snappy v0.0.4
@@ -24,19 +24,19 @@ require (
github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 // indirect
github.com/armon/go-metrics v0.4.1 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.6 // indirect
- github.com/aws/aws-sdk-go-v2/credentials v1.17.42 // indirect
- github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.18 // indirect
- github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.22 // indirect
- github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.22 // indirect
+ github.com/aws/aws-sdk-go-v2/credentials v1.17.43 // indirect
+ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.19 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.23 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.23 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect
- github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.22 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.23 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0 // indirect
- github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.3 // indirect
- github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.3 // indirect
- github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.3 // indirect
- github.com/aws/aws-sdk-go-v2/service/sso v1.24.3 // indirect
- github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.3 // indirect
- github.com/aws/aws-sdk-go-v2/service/sts v1.32.3 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.4 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.4 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.4 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sso v1.24.4 // indirect
+ github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.4 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sts v1.32.4 // indirect
github.com/aws/smithy-go v1.22.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/buger/jsonparser v1.1.1 // indirect
diff --git a/tools/lambda-promtail/go.sum b/tools/lambda-promtail/go.sum
index 6446f0ed7e39b..ef0c3c773a4a0 100644
--- a/tools/lambda-promtail/go.sum
+++ b/tools/lambda-promtail/go.sum
@@ -48,40 +48,40 @@ github.com/aws/aws-lambda-go v1.47.0 h1:0H8s0vumYx/YKs4sE7YM0ktwL2eWse+kfopsRI1s
github.com/aws/aws-lambda-go v1.47.0/go.mod h1:dpMpZgvWx5vuQJfBt0zqBha60q7Dd7RfgJv23DymV8A=
github.com/aws/aws-sdk-go v1.54.19 h1:tyWV+07jagrNiCcGRzRhdtVjQs7Vy41NwsuOcl0IbVI=
github.com/aws/aws-sdk-go v1.54.19/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
-github.com/aws/aws-sdk-go-v2 v1.32.3 h1:T0dRlFBKcdaUPGNtkBSwHZxrtis8CQU17UpNBZYd0wk=
-github.com/aws/aws-sdk-go-v2 v1.32.3/go.mod h1:2SK5n0a2karNTv5tbP1SjsX0uhttou00v/HpXKM1ZUo=
+github.com/aws/aws-sdk-go-v2 v1.32.4 h1:S13INUiTxgrPueTmrm5DZ+MiAo99zYzHEFh1UNkOxNE=
+github.com/aws/aws-sdk-go-v2 v1.32.4/go.mod h1:2SK5n0a2karNTv5tbP1SjsX0uhttou00v/HpXKM1ZUo=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.6 h1:pT3hpW0cOHRJx8Y0DfJUEQuqPild8jRGmSFmBgvydr0=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.6/go.mod h1:j/I2++U0xX+cr44QjHay4Cvxj6FUbnxrgmqN3H1jTZA=
-github.com/aws/aws-sdk-go-v2/config v1.28.1 h1:oxIvOUXy8x0U3fR//0eq+RdCKimWI900+SV+10xsCBw=
-github.com/aws/aws-sdk-go-v2/config v1.28.1/go.mod h1:bRQcttQJiARbd5JZxw6wG0yIK3eLeSCPdg6uqmmlIiI=
-github.com/aws/aws-sdk-go-v2/credentials v1.17.42 h1:sBP0RPjBU4neGpIYyx8mkU2QqLPl5u9cmdTWVzIpHkM=
-github.com/aws/aws-sdk-go-v2/credentials v1.17.42/go.mod h1:FwZBfU530dJ26rv9saAbxa9Ej3eF/AK0OAY86k13n4M=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.18 h1:68jFVtt3NulEzojFesM/WVarlFpCaXLKaBxDpzkQ9OQ=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.18/go.mod h1:Fjnn5jQVIo6VyedMc0/EhPpfNlPl7dHV916O6B+49aE=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.22 h1:Jw50LwEkVjuVzE1NzkhNKkBf9cRN7MtE1F/b2cOKTUM=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.22/go.mod h1:Y/SmAyPcOTmpeVaWSzSKiILfXTVJwrGmYZhcRbhWuEY=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.22 h1:981MHwBaRZM7+9QSR6XamDzF/o7ouUGxFzr+nVSIhrs=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.22/go.mod h1:1RA1+aBEfn+CAB/Mh0MB6LsdCYCnjZm7tKXtnk499ZQ=
+github.com/aws/aws-sdk-go-v2/config v1.28.2 h1:FLvWA97elBiSPdIol4CXfIAY1wlq3KzoSgkMuZSuSe8=
+github.com/aws/aws-sdk-go-v2/config v1.28.2/go.mod h1:hNmQsKfUqpKz2yfnZUB60GCemPmeqAalVTui0gOxjAE=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.43 h1:SEGdVOOE1Wyr2XFKQopQ5GYjym3nYHcphesdt78rNkY=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.43/go.mod h1:3aiza5kSyAE4eujSanOkSkAmX/RnVqslM+GRQ/Xvv4c=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.19 h1:woXadbf0c7enQ2UGCi8gW/WuKmE0xIzxBF/eD94jMKQ=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.19/go.mod h1:zminj5ucw7w0r65bP6nhyOd3xL6veAUMc3ElGMoLVb4=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.23 h1:A2w6m6Tmr+BNXjDsr7M90zkWjsu4JXHwrzPg235STs4=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.23/go.mod h1:35EVp9wyeANdujZruvHiQUAo9E3vbhnIO1mTCAxMlY0=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.23 h1:pgYW9FCabt2M25MoHYCfMrVY2ghiiBKYWUVXfwZs+sU=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.23/go.mod h1:c48kLgzO19wAu3CPkDWC28JbaJ+hfQlsdl7I2+oqIbk=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc=
-github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.22 h1:yV+hCAHZZYJQcwAaszoBNwLbPItHvApxT0kVIw6jRgs=
-github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.22/go.mod h1:kbR1TL8llqB1eGnVbybcA4/wgScxdylOdyAd51yxPdw=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.23 h1:1SZBDiRzzs3sNhOMVApyWPduWYGAX0imGy06XiBnCAM=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.23/go.mod h1:i9TkxgbZmHVh2S0La6CAXtnyFhlCX/pJ0JsOvBAS6Mk=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0 h1:TToQNkvGguu209puTojY/ozlqy2d/SFNcoLIqTFi42g=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0/go.mod h1:0jp+ltwkf+SwG2fm/PKo8t4y8pJSgOCO4D8Lz3k0aHQ=
-github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.3 h1:kT6BcZsmMtNkP/iYMcRG+mIEA/IbeiUimXtGmqF39y0=
-github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.3/go.mod h1:Z8uGua2k4PPaGOYn66pK02rhMrot3Xk3tpBuUFPomZU=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.3 h1:qcxX0JYlgWH3hpPUnd6U0ikcl6LLA9sLkXE2w1fpMvY=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.3/go.mod h1:cLSNEmI45soc+Ef8K/L+8sEA3A3pYFEYf5B5UI+6bH4=
-github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.3 h1:ZC7Y/XgKUxwqcdhO5LE8P6oGP1eh6xlQReWNKfhvJno=
-github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.3/go.mod h1:WqfO7M9l9yUAw0HcHaikwRd/H6gzYdz7vjejCA5e2oY=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.66.2 h1:p9TNFL8bFUMd+38YIpTAXpoxyz0MxC7FlbFEH4P4E1U=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.66.2/go.mod h1:fNjyo0Coen9QTwQLWeV6WO2Nytwiu+cCcWaTdKCAqqE=
-github.com/aws/aws-sdk-go-v2/service/sso v1.24.3 h1:UTpsIf0loCIWEbrqdLb+0RxnTXfWh2vhw4nQmFi4nPc=
-github.com/aws/aws-sdk-go-v2/service/sso v1.24.3/go.mod h1:FZ9j3PFHHAR+w0BSEjK955w5YD2UwB/l/H0yAK3MJvI=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.3 h1:2YCmIXv3tmiItw0LlYf6v7gEHebLY45kBEnPezbUKyU=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.3/go.mod h1:u19stRyNPxGhj6dRm+Cdgu6N75qnbW7+QN0q0dsAk58=
-github.com/aws/aws-sdk-go-v2/service/sts v1.32.3 h1:wVnQ6tigGsRqSWDEEyH6lSAJ9OyFUsSnbaUWChuSGzs=
-github.com/aws/aws-sdk-go-v2/service/sts v1.32.3/go.mod h1:VZa9yTFyj4o10YGsmDO4gbQJUvvhY72fhumT8W4LqsE=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.4 h1:aaPpoG15S2qHkWm4KlEyF01zovK1nW4BBbyXuHNSE90=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.4/go.mod h1:eD9gS2EARTKgGr/W5xwgY/ik9z/zqpW+m/xOQbVxrMk=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.4 h1:tHxQi/XHPK0ctd/wdOw0t7Xrc2OxcRCnVzv8lwWPu0c=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.4/go.mod h1:4GQbF1vJzG60poZqWatZlhP31y8PGCCVTvIGPdaaYJ0=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.4 h1:E5ZAVOmI2apR8ADb72Q63KqwwwdW1XcMeXIlrZ1Psjg=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.4/go.mod h1:wezzqVUOVVdk+2Z/JzQT4NxAU0NbhRe5W8pIE72jsWI=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.66.3 h1:neNOYJl72bHrz9ikAEED4VqWyND/Po0DnEx64RW6YM4=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.66.3/go.mod h1:TMhLIyRIyoGVlaEMAt+ITMbwskSTpcGsCPDq91/ihY0=
+github.com/aws/aws-sdk-go-v2/service/sso v1.24.4 h1:BqE3NRG6bsODh++VMKMsDmFuJTHrdD4rJZqHjDeF6XI=
+github.com/aws/aws-sdk-go-v2/service/sso v1.24.4/go.mod h1:wrMCEwjFPms+V86TCQQeOxQF/If4vT44FGIOFiMC2ck=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.4 h1:zcx9LiGWZ6i6pjdcoE9oXAB6mUdeyC36Ia/QEiIvYdg=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.4/go.mod h1:Tp/ly1cTjRLGBBmNccFumbZ8oqpZlpdhFf80SrRh4is=
+github.com/aws/aws-sdk-go-v2/service/sts v1.32.4 h1:yDxvkz3/uOKfxnv8YhzOi9m+2OGIxF+on3KOISbK5IU=
+github.com/aws/aws-sdk-go-v2/service/sts v1.32.4/go.mod h1:9XEUty5v5UAsMiFOBJrNibZgwCeOma73jgGwwhgffa8=
github.com/aws/smithy-go v1.22.0 h1:uunKnWlcoL3zO7q+gG2Pk53joueEOsnNB28QdMsmiMM=
github.com/aws/smithy-go v1.22.0/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
|
fix
|
update aws-sdk-go-v2 monorepo (#14820)
|
c182940201fb4a51de84532d6c4de03f256e9cc6
|
2023-10-11 14:05:50
|
ngc4579
|
helm: Fix GrafanaAgent tolerations scope (#10813)
| false
|
diff --git a/production/helm/loki/CHANGELOG.md b/production/helm/loki/CHANGELOG.md
index 7f840126e113d..61f132474a82c 100644
--- a/production/helm/loki/CHANGELOG.md
+++ b/production/helm/loki/CHANGELOG.md
@@ -13,7 +13,12 @@ Entries should include a reference to the pull request that introduced the chang
[//]: # (<AUTOMATED_UPDATES_LOCATOR> : do not remove this line. This locator is used by the CI pipeline to automatically create a changelog entry for each new Loki release. Add other chart versions and respective changelog entries bellow this line.)
+## 5.28.0
+
+- [BUGFIX] Fix GrafanaAgent tolerations scope
+
## 5.27.0
+
- [CHANGE] Bump `nginxinc/nginx-unpriviledged` image version to remediate [CVE-2023-4863](https://github.com/advisories/GHSA-j7hp-h8jx-5ppr)
## 5.26.0
diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml
index 6915560bbb059..e42540d27622d 100644
--- a/production/helm/loki/Chart.yaml
+++ b/production/helm/loki/Chart.yaml
@@ -3,7 +3,7 @@ name: loki
description: Helm chart for Grafana Loki in simple, scalable mode
type: application
appVersion: 2.9.1
-version: 5.27.0
+version: 5.28.0
home: https://grafana.github.io/helm-charts
sources:
- https://github.com/grafana/loki
diff --git a/production/helm/loki/README.md b/production/helm/loki/README.md
index da60427876754..05d909ef8f3a5 100644
--- a/production/helm/loki/README.md
+++ b/production/helm/loki/README.md
@@ -1,6 +1,6 @@
# loki
-  
+  
Helm chart for Grafana Loki in simple, scalable mode
diff --git a/production/helm/loki/templates/monitoring/grafana-agent.yaml b/production/helm/loki/templates/monitoring/grafana-agent.yaml
index c9723410285ab..010d9604aab70 100644
--- a/production/helm/loki/templates/monitoring/grafana-agent.yaml
+++ b/production/helm/loki/templates/monitoring/grafana-agent.yaml
@@ -29,11 +29,11 @@ spec:
matchLabels:
{{- include "loki.selectorLabels" $ | nindent 8 }}
{{- end }}
+ {{- end }}
{{- with .tolerations }}
tolerations:
{{- toYaml . | nindent 4 }}
{{- end }}
- {{- end }}
---
|
helm
|
Fix GrafanaAgent tolerations scope (#10813)
|
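The GrafanaAgent fix above only moves an {{- end }}, which is easy to misread: Helm templates are Go text/template, where a with block both rebinds the dot and skips its body when its argument is empty, so a tolerations block nested inside another block silently inherits that block's condition. The chart's full template is not shown in the hunk, so the sketch below is a toy reconstruction of the pitfall under assumed field names (Selector, Tolerations), not the chart's actual logic.

package main

import (
	"os"
	"text/template"
)

type values struct {
	Selector    string
	Tolerations []string
}

// broken: the tolerations block sits inside {{ with .Selector }} ... {{ end }},
// so it is skipped whenever no selector is set, even though $ still reaches
// the root values.
const broken = `{{- with .Selector }}
selector: {{ . }}
{{- with $.Tolerations }}
tolerations: {{ . }}
{{- end }}
{{- end }}
`

// fixed: the outer block is closed first, so tolerations render on their own.
const fixed = `{{- with .Selector }}
selector: {{ . }}
{{- end }}
{{- with .Tolerations }}
tolerations: {{ . }}
{{- end }}
`

func main() {
	v := values{Tolerations: []string{"node-role.kubernetes.io/infra"}} // no selector set
	for _, c := range []struct{ name, text string }{{"broken", broken}, {"fixed", fixed}} {
		t := template.Must(template.New(c.name).Parse(c.text))
		os.Stdout.WriteString("--- " + c.name + " ---\n")
		if err := t.Execute(os.Stdout, v); err != nil {
			panic(err)
		}
	}
}

With no selector set, the broken layout prints nothing for tolerations while the fixed layout still renders them, which is the behaviour the moved {{- end }} corrects.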
2dfc13bb73c73c6c8cfb57c23ce832f902d8a43e
|
2024-07-25 14:23:38
|
George Robinson
|
feat: add loki_ingester_rf1_segment_age_seconds metric (#13653)
| false
|
diff --git a/pkg/ingester-rf1/flush.go b/pkg/ingester-rf1/flush.go
index 2242569c2c25e..aa22166d4fd3e 100644
--- a/pkg/ingester-rf1/flush.go
+++ b/pkg/ingester-rf1/flush.go
@@ -97,12 +97,9 @@ func (i *Ingester) flush(l log.Logger, j int, it *wal.PendingSegment) error {
func (i *Ingester) flushSegment(ctx context.Context, j int, w *wal.SegmentWriter) error {
start := time.Now()
- defer func() {
- i.metrics.flushDuration.Observe(time.Since(start).Seconds())
- w.ReportMetrics()
- }()
i.metrics.flushesTotal.Add(1)
+ defer func() { i.metrics.flushDuration.Observe(time.Since(start).Seconds()) }()
buf := i.flushBuffers[j]
defer buf.Reset()
@@ -111,6 +108,9 @@ func (i *Ingester) flushSegment(ctx context.Context, j int, w *wal.SegmentWriter
return err
}
+ stats := wal.GetSegmentStats(w, time.Now())
+ wal.ReportSegmentStats(stats, i.metrics.segmentMetrics)
+
id := ulid.MustNew(ulid.Timestamp(time.Now()), rand.Reader).String()
if err := i.store.PutObject(ctx, fmt.Sprintf("loki-v2/wal/anon/"+id), buf); err != nil {
i.metrics.flushFailuresTotal.Inc()
@@ -121,7 +121,7 @@ func (i *Ingester) flushSegment(ctx context.Context, j int, w *wal.SegmentWriter
Block: w.Meta(id),
}); err != nil {
i.metrics.flushFailuresTotal.Inc()
- return fmt.Errorf("metastore add block: %w", err)
+ return fmt.Errorf("failed to update metastore: %w", err)
}
return nil
diff --git a/pkg/ingester-rf1/ingester.go b/pkg/ingester-rf1/ingester.go
index 8182449c6d596..0b5a6c5fd724a 100644
--- a/pkg/ingester-rf1/ingester.go
+++ b/pkg/ingester-rf1/ingester.go
@@ -244,7 +244,7 @@ func New(cfg Config, clientConfig client.Config,
MaxAge: cfg.MaxSegmentAge,
MaxSegments: int64(cfg.MaxSegments),
MaxSegmentSize: int64(cfg.MaxSegmentSize),
- }, wal.NewMetrics(registerer))
+ }, wal.NewManagerMetrics(registerer))
if err != nil {
return nil, err
}
diff --git a/pkg/ingester-rf1/metrics.go b/pkg/ingester-rf1/metrics.go
index 817fc9b46d648..91b3d398b7b3c 100644
--- a/pkg/ingester-rf1/metrics.go
+++ b/pkg/ingester-rf1/metrics.go
@@ -3,18 +3,37 @@ package ingesterrf1
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
+
+ "github.com/grafana/loki/v3/pkg/storage/wal"
)
-type flushMetrics struct {
+type ingesterMetrics struct {
+ autoForgetUnhealthyIngestersTotal prometheus.Counter
+ limiterEnabled prometheus.Gauge
+ // Shutdown marker for ingester scale down.
+ shutdownMarker prometheus.Gauge
flushesTotal prometheus.Counter
flushFailuresTotal prometheus.Counter
flushQueues prometheus.Gauge
flushDuration prometheus.Histogram
- flushSizeBytes prometheus.Histogram
+ flushSize prometheus.Histogram
+ segmentMetrics *wal.SegmentMetrics
}
-func newFlushMetrics(r prometheus.Registerer) *flushMetrics {
- return &flushMetrics{
+func newIngesterMetrics(r prometheus.Registerer) *ingesterMetrics {
+ return &ingesterMetrics{
+ autoForgetUnhealthyIngestersTotal: promauto.With(r).NewCounter(prometheus.CounterOpts{
+ Name: "loki_ingester_rf1_autoforget_unhealthy_ingesters_total",
+ Help: "Total number of ingesters automatically forgotten.",
+ }),
+ limiterEnabled: promauto.With(r).NewGauge(prometheus.GaugeOpts{
+ Name: "loki_ingester_rf1_limiter_enabled",
+ Help: "1 if the limiter is enabled, otherwise 0.",
+ }),
+ shutdownMarker: promauto.With(r).NewGauge(prometheus.GaugeOpts{
+ Name: "loki_ingester_rf1_shutdown_marker",
+ Help: "1 if prepare shutdown has been called, 0 otherwise.",
+ }),
flushesTotal: promauto.With(r).NewCounter(prometheus.CounterOpts{
Name: "loki_ingester_rf1_flushes_total",
Help: "The total number of flushes.",
@@ -33,37 +52,12 @@ func newFlushMetrics(r prometheus.Registerer) *flushMetrics {
Buckets: prometheus.ExponentialBuckets(0.001, 4, 8),
NativeHistogramBucketFactor: 1.1,
}),
- flushSizeBytes: promauto.With(r).NewHistogram(prometheus.HistogramOpts{
+ flushSize: promauto.With(r).NewHistogram(prometheus.HistogramOpts{
Name: "loki_ingester_rf1_flush_size_bytes",
Help: "The flush size (as written to object storage).",
Buckets: prometheus.ExponentialBuckets(100, 10, 8),
NativeHistogramBucketFactor: 1.1,
}),
- }
-}
-
-type ingesterMetrics struct {
- autoForgetUnhealthyIngestersTotal prometheus.Counter
- limiterEnabled prometheus.Gauge
- // Shutdown marker for ingester scale down.
- shutdownMarker prometheus.Gauge
- *flushMetrics
-}
-
-func newIngesterMetrics(r prometheus.Registerer) *ingesterMetrics {
- return &ingesterMetrics{
- autoForgetUnhealthyIngestersTotal: promauto.With(r).NewCounter(prometheus.CounterOpts{
- Name: "loki_ingester_rf1_autoforget_unhealthy_ingesters_total",
- Help: "Total number of ingesters automatically forgotten.",
- }),
- limiterEnabled: promauto.With(r).NewGauge(prometheus.GaugeOpts{
- Name: "loki_ingester_rf1_limiter_enabled",
- Help: "1 if the limiter is enabled, otherwise 0.",
- }),
- shutdownMarker: promauto.With(r).NewGauge(prometheus.GaugeOpts{
- Name: "loki_ingester_rf1_shutdown_marker",
- Help: "1 if prepare shutdown has been called, 0 otherwise.",
- }),
- flushMetrics: newFlushMetrics(r),
+ segmentMetrics: wal.NewSegmentMetrics(r),
}
}
diff --git a/pkg/storage/wal/manager.go b/pkg/storage/wal/manager.go
index 34f7ce7bcf7fa..fc23cb21e742f 100644
--- a/pkg/storage/wal/manager.go
+++ b/pkg/storage/wal/manager.go
@@ -12,12 +12,6 @@ import (
"github.com/grafana/loki/v3/pkg/logproto"
)
-const (
- DefaultMaxAge = 500 * time.Millisecond
- DefaultMaxSegments = 10
- DefaultMaxSegmentSize = 8 * 1024 * 1024 // 8MB.
-)
-
var (
// ErrClosed is returned when the WAL is closed. It is a permanent error
// as once closed, a WAL cannot be re-opened.
@@ -109,31 +103,24 @@ type Manager struct {
clock quartz.Clock
}
-// segment is similar to PendingSegment, however it is an internal struct used
-// in the available and pending lists. It contains a single-use result that is
-// returned to callers appending to the WAL and a re-usable segment that is reset
-// after each flush.
+// segment is an internal struct used in the available and pending lists. It
+// contains a single-use result that is returned to callers appending to the
+// WAL and a re-usable segment that is reset after each flush.
type segment struct {
r *AppendResult
w *SegmentWriter
-
- // moved is the time the segment was moved to the pending list. It is used
- // to calculate the age of the segment. A segment is moved when it has
- // exceeded the maximum age or the maximum size.
- moved time.Time
}
// PendingSegment contains a result and the segment to be flushed.
type PendingSegment struct {
Result *AppendResult
Writer *SegmentWriter
- Moved time.Time
}
-func NewManager(cfg Config, metrics *Metrics) (*Manager, error) {
+func NewManager(cfg Config, metrics *ManagerMetrics) (*Manager, error) {
m := Manager{
cfg: cfg,
- metrics: metrics.ManagerMetrics,
+ metrics: metrics,
available: list.New(),
pending: list.New(),
clock: quartz.NewReal(),
@@ -142,7 +129,7 @@ func NewManager(cfg Config, metrics *Metrics) (*Manager, error) {
m.metrics.NumPending.Set(0)
m.metrics.NumFlushing.Set(0)
for i := int64(0); i < cfg.MaxSegments; i++ {
- w, err := NewWalSegmentWriter(metrics.SegmentMetrics)
+ w, err := NewWalSegmentWriter()
if err != nil {
return nil, err
}
@@ -205,11 +192,7 @@ func (m *Manager) NextPending() (*PendingSegment, error) {
m.pending.Remove(el)
m.metrics.NumPending.Dec()
m.metrics.NumFlushing.Inc()
- return &PendingSegment{
- Result: s.r,
- Writer: s.w,
- Moved: s.moved,
- }, nil
+ return &PendingSegment{Result: s.r, Writer: s.w}, nil
}
// Put resets the segment and puts it back in the available list to accept
@@ -229,7 +212,6 @@ func (m *Manager) Put(s *PendingSegment) {
// move the element from the available list to the pending list and sets the
// relevant metrics.
func (m *Manager) move(el *list.Element, s *segment) {
- s.moved = m.clock.Now()
m.pending.PushBack(s)
m.metrics.NumPending.Inc()
m.available.Remove(el)
diff --git a/pkg/storage/wal/manager_test.go b/pkg/storage/wal/manager_test.go
index 34d7565e03b54..93e10fbaa06a9 100644
--- a/pkg/storage/wal/manager_test.go
+++ b/pkg/storage/wal/manager_test.go
@@ -20,7 +20,7 @@ func TestManager_Append(t *testing.T) {
MaxAge: 30 * time.Second,
MaxSegments: 1,
MaxSegmentSize: 1024, // 1KB
- }, NewMetrics(nil))
+ }, NewManagerMetrics(nil))
require.NoError(t, err)
// Append some data.
@@ -59,7 +59,7 @@ func TestManager_AppendFailed(t *testing.T) {
MaxAge: 30 * time.Second,
MaxSegments: 1,
MaxSegmentSize: 1024, // 1KB
- }, NewMetrics(nil))
+ }, NewManagerMetrics(nil))
require.NoError(t, err)
// Append some data.
@@ -92,7 +92,7 @@ func TestManager_AppendFailedWALClosed(t *testing.T) {
MaxAge: 30 * time.Second,
MaxSegments: 10,
MaxSegmentSize: 1024, // 1KB
- }, NewMetrics(nil))
+ }, NewManagerMetrics(nil))
require.NoError(t, err)
// Append some data.
@@ -126,7 +126,7 @@ func TestManager_AppendFailedWALFull(t *testing.T) {
MaxAge: 30 * time.Second,
MaxSegments: 10,
MaxSegmentSize: 1024, // 1KB
- }, NewMetrics(nil))
+ }, NewManagerMetrics(nil))
require.NoError(t, err)
// Should be able to write 100KB of data, 10KB per segment.
@@ -161,7 +161,7 @@ func TestManager_AppendMaxAgeExceeded(t *testing.T) {
MaxAge: 100 * time.Millisecond,
MaxSegments: 1,
MaxSegmentSize: 8 * 1024 * 1024, // 8MB
- }, NewMetrics(nil))
+ }, NewManagerMetrics(nil))
require.NoError(t, err)
// Create a mock clock.
@@ -208,7 +208,7 @@ func TestManager_AppendMaxSizeExceeded(t *testing.T) {
MaxAge: 30 * time.Second,
MaxSegments: 1,
MaxSegmentSize: 1024, // 1KB
- }, NewMetrics(nil))
+ }, NewManagerMetrics(nil))
require.NoError(t, err)
// Append 512B of data.
@@ -250,7 +250,7 @@ func TestManager_NextPending(t *testing.T) {
MaxAge: 30 * time.Second,
MaxSegments: 1,
MaxSegmentSize: 1024, // 1KB
- }, NewMetrics(nil))
+ }, NewManagerMetrics(nil))
require.NoError(t, err)
// There should be no segments waiting to be flushed as no data has been
@@ -286,7 +286,7 @@ func TestManager_NextPendingAge(t *testing.T) {
MaxAge: 100 * time.Millisecond,
MaxSegments: 1,
MaxSegmentSize: 1024, // 1KB
- }, NewMetrics(nil))
+ }, NewManagerMetrics(nil))
require.NoError(t, err)
// Create a mock clock.
@@ -311,7 +311,7 @@ func TestManager_NextPendingAge(t *testing.T) {
s, err := m.NextPending()
require.NoError(t, err)
require.NotNil(t, s)
- require.Equal(t, 100*time.Millisecond, s.Writer.Age(s.Moved))
+ require.Equal(t, 100*time.Millisecond, s.Writer.Age(clock.Now()))
m.Put(s)
// Append 1KB of data using two separate append requests, 1ms apart.
@@ -342,7 +342,7 @@ func TestManager_NextPendingAge(t *testing.T) {
s, err = m.NextPending()
require.NoError(t, err)
require.NotNil(t, s)
- require.Equal(t, time.Millisecond, s.Writer.Age(s.Moved))
+ require.Equal(t, time.Millisecond, s.Writer.Age(clock.Now()))
}
func TestManager_NextPendingMaxAgeExceeded(t *testing.T) {
@@ -350,7 +350,7 @@ func TestManager_NextPendingMaxAgeExceeded(t *testing.T) {
MaxAge: 100 * time.Millisecond,
MaxSegments: 1,
MaxSegmentSize: 1024, // 1KB
- }, NewMetrics(nil))
+ }, NewManagerMetrics(nil))
require.NoError(t, err)
// Create a mock clock.
@@ -392,7 +392,7 @@ func TestManager_NextPendingWALClosed(t *testing.T) {
MaxAge: 30 * time.Second,
MaxSegments: 1,
MaxSegmentSize: 1024, // 1KB
- }, NewMetrics(nil))
+ }, NewManagerMetrics(nil))
require.NoError(t, err)
// Append some data.
@@ -435,7 +435,7 @@ func TestManager_Put(t *testing.T) {
MaxAge: 30 * time.Second,
MaxSegments: 1,
MaxSegmentSize: 1024, // 1KB
- }, NewMetrics(nil))
+ }, NewManagerMetrics(nil))
require.NoError(t, err)
// There should be 1 available and 0 pending segments.
@@ -482,7 +482,7 @@ func TestManager_Metrics(t *testing.T) {
m, err := NewManager(Config{
MaxSegments: 1,
MaxSegmentSize: 1024, // 1KB
- }, NewMetrics(r))
+ }, NewManagerMetrics(r))
require.NoError(t, err)
metricNames := []string{
diff --git a/pkg/storage/wal/metrics.go b/pkg/storage/wal/metrics.go
index a0c0676c6c81c..194580959d518 100644
--- a/pkg/storage/wal/metrics.go
+++ b/pkg/storage/wal/metrics.go
@@ -7,8 +7,8 @@ import (
type ManagerMetrics struct {
NumAvailable prometheus.Gauge
- NumPending prometheus.Gauge
NumFlushing prometheus.Gauge
+ NumPending prometheus.Gauge
}
func NewManagerMetrics(r prometheus.Registerer) *ManagerMetrics {
@@ -17,34 +17,35 @@ func NewManagerMetrics(r prometheus.Registerer) *ManagerMetrics {
Name: "wal_segments_available",
Help: "The number of WAL segments accepting writes.",
}),
- NumPending: promauto.With(r).NewGauge(prometheus.GaugeOpts{
- Name: "wal_segments_pending",
- Help: "The number of WAL segments waiting to be flushed.",
- }),
NumFlushing: promauto.With(r).NewGauge(prometheus.GaugeOpts{
Name: "wal_segments_flushing",
Help: "The number of WAL segments being flushed.",
}),
+ NumPending: promauto.With(r).NewGauge(prometheus.GaugeOpts{
+ Name: "wal_segments_pending",
+ Help: "The number of WAL segments waiting to be flushed.",
+ }),
}
}
type SegmentMetrics struct {
- outputSizeBytes prometheus.Histogram
- inputSizeBytes prometheus.Histogram
- streams prometheus.Histogram
- tenants prometheus.Histogram
+ age prometheus.Histogram
+ size prometheus.Histogram
+ streams prometheus.Histogram
+ tenants prometheus.Histogram
+ writeSize prometheus.Histogram
}
func NewSegmentMetrics(r prometheus.Registerer) *SegmentMetrics {
return &SegmentMetrics{
- outputSizeBytes: promauto.With(r).NewHistogram(prometheus.HistogramOpts{
- Name: "loki_ingester_rf1_segment_output_size_bytes",
- Help: "The segment size as written to disk (compressed).",
- Buckets: prometheus.ExponentialBuckets(100, 10, 8),
+ age: promauto.With(r).NewHistogram(prometheus.HistogramOpts{
+ Name: "loki_ingester_rf1_segment_age_seconds",
+ Help: "The segment age (time between first append and flush).",
+ Buckets: prometheus.ExponentialBuckets(0.001, 4, 8),
NativeHistogramBucketFactor: 1.1,
}),
- inputSizeBytes: promauto.With(r).NewHistogram(prometheus.HistogramOpts{
- Name: "loki_ingester_rf1_segment_input_size_bytes",
+ size: promauto.With(r).NewHistogram(prometheus.HistogramOpts{
+ Name: "loki_ingester_rf1_segment_size_bytes",
Help: "The segment size (uncompressed).",
Buckets: prometheus.ExponentialBuckets(100, 10, 8),
NativeHistogramBucketFactor: 1.1,
@@ -61,17 +62,11 @@ func NewSegmentMetrics(r prometheus.Registerer) *SegmentMetrics {
Buckets: prometheus.ExponentialBuckets(1, 2, 10),
NativeHistogramBucketFactor: 1.1,
}),
- }
-}
-
-type Metrics struct {
- SegmentMetrics *SegmentMetrics
- ManagerMetrics *ManagerMetrics
-}
-
-func NewMetrics(r prometheus.Registerer) *Metrics {
- return &Metrics{
- ManagerMetrics: NewManagerMetrics(r),
- SegmentMetrics: NewSegmentMetrics(r),
+ writeSize: promauto.With(r).NewHistogram(prometheus.HistogramOpts{
+ Name: "loki_ingester_rf1_segment_write_size_bytes",
+ Help: "The segment size as written to disk (compressed).",
+ Buckets: prometheus.ExponentialBuckets(100, 10, 8),
+ NativeHistogramBucketFactor: 1.1,
+ }),
}
}
diff --git a/pkg/storage/wal/segment.go b/pkg/storage/wal/segment.go
index 1e823d532bec7..5922d38fe7395 100644
--- a/pkg/storage/wal/segment.go
+++ b/pkg/storage/wal/segment.go
@@ -47,7 +47,6 @@ type streamID struct {
}
type SegmentWriter struct {
- metrics *SegmentMetrics
streams map[streamID]*streamSegment
buf1 encoding.Encbuf
outputSize atomic.Int64
@@ -65,6 +64,45 @@ type SegmentWriter struct {
lastAppend time.Time
}
+// SegmentStats contains the stats for a SegmentWriter.
+type SegmentStats struct {
+ // Age is the time between the first append and the flush.
+ Age time.Duration
+ // Idle is the time between the last append and the flush.
+ Idle time.Duration
+ Streams int
+ Tenants int
+ Size int64
+ WriteSize int64
+}
+
+// GetSegmentStats returns the stats for a SegmentWriter. The age of a segment
+// is calculated from t. WriteSize is zero if GetSegmentStats is called before
+// SegmentWriter.WriteTo.
+func GetSegmentStats(w *SegmentWriter, t time.Time) SegmentStats {
+ tenants := make(map[string]struct{}, 64)
+ for _, s := range w.streams {
+ tenants[s.tenantID] = struct{}{}
+ }
+ return SegmentStats{
+ Age: t.Sub(w.firstAppend),
+ Idle: t.Sub(w.lastAppend),
+ Streams: len(w.streams),
+ Tenants: len(tenants),
+ Size: w.inputSize.Load(),
+ WriteSize: w.outputSize.Load(),
+ }
+}
+
+// ReportSegmentStats reports the stats as metrics.
+func ReportSegmentStats(s SegmentStats, m *SegmentMetrics) {
+ m.age.Observe(s.Age.Seconds())
+ m.streams.Observe(float64(s.Streams))
+ m.tenants.Observe(float64(s.Tenants))
+ m.size.Observe(float64(s.Size))
+ m.writeSize.Observe(float64(s.WriteSize))
+}
+
type streamSegment struct {
lbls labels.Labels
entries []*logproto.Entry
@@ -87,13 +125,12 @@ func (s *streamSegment) WriteTo(w io.Writer) (n int64, err error) {
}
// NewWalSegmentWriter creates a new WalSegmentWriter.
-func NewWalSegmentWriter(m *SegmentMetrics) (*SegmentWriter, error) {
+func NewWalSegmentWriter() (*SegmentWriter, error) {
idxWriter, err := index.NewWriter()
if err != nil {
return nil, err
}
return &SegmentWriter{
- metrics: m,
streams: make(map[streamID]*streamSegment, 64),
buf1: encoding.EncWith(make([]byte, 0, 4)),
idxWriter: idxWriter,
@@ -159,19 +196,6 @@ func (b *SegmentWriter) Append(tenantID, labelsString string, lbls labels.Labels
}
}
-// ReportMetrics for the writer. If called before WriteTo then the output size
-// histogram will observe 0.
-func (b *SegmentWriter) ReportMetrics() {
- b.metrics.streams.Observe(float64(len(b.streams)))
- tenants := make(map[string]struct{}, 64)
- for _, s := range b.streams {
- tenants[s.tenantID] = struct{}{}
- }
- b.metrics.tenants.Observe(float64(len(tenants)))
- b.metrics.inputSizeBytes.Observe(float64(b.inputSize.Load()))
- b.metrics.outputSizeBytes.Observe(float64(b.outputSize.Load()))
-}
-
func (b *SegmentWriter) Meta(id string) *metastorepb.BlockMeta {
var globalMinT, globalMaxT int64
diff --git a/pkg/storage/wal/segment_test.go b/pkg/storage/wal/segment_test.go
index 2227a191f8866..90755adcfcc3f 100644
--- a/pkg/storage/wal/segment_test.go
+++ b/pkg/storage/wal/segment_test.go
@@ -105,7 +105,7 @@ func TestWalSegmentWriter_Append(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
// Create a new WalSegmentWriter
- w, err := NewWalSegmentWriter(NewSegmentMetrics(nil))
+ w, err := NewWalSegmentWriter()
require.NoError(t, err)
// Append the entries
for _, batch := range tt.batches {
@@ -132,7 +132,7 @@ func TestWalSegmentWriter_Append(t *testing.T) {
}
func TestMultiTenantWrite(t *testing.T) {
- w, err := NewWalSegmentWriter(NewSegmentMetrics(nil))
+ w, err := NewWalSegmentWriter()
require.NoError(t, err)
dst := bytes.NewBuffer(nil)
@@ -202,7 +202,7 @@ func TestCompression(t *testing.T) {
}
func testCompression(t *testing.T, maxInputSize int64) {
- w, err := NewWalSegmentWriter(NewSegmentMetrics(nil))
+ w, err := NewWalSegmentWriter()
require.NoError(t, err)
dst := bytes.NewBuffer(nil)
files := testdata.Files()
@@ -259,7 +259,7 @@ func testCompression(t *testing.T, maxInputSize int64) {
}
func TestReset(t *testing.T) {
- w, err := NewWalSegmentWriter(NewSegmentMetrics(nil))
+ w, err := NewWalSegmentWriter()
require.NoError(t, err)
dst := bytes.NewBuffer(nil)
@@ -290,7 +290,7 @@ func TestReset(t *testing.T) {
}
func Test_Meta(t *testing.T) {
- w, err := NewWalSegmentWriter(NewSegmentMetrics(nil))
+ w, err := NewWalSegmentWriter()
buff := bytes.NewBuffer(nil)
require.NoError(t, err)
@@ -381,7 +381,7 @@ func BenchmarkWrites(b *testing.B) {
dst := bytes.NewBuffer(make([]byte, 0, inputSize))
- writer, err := NewWalSegmentWriter(NewSegmentMetrics(nil))
+ writer, err := NewWalSegmentWriter()
require.NoError(b, err)
for _, d := range data {
|
feat
|
add loki_ingester_rf1_segment_age_seconds metric (#13653)
|
9fe31c14ee68845397337e64ef0d8cc8ae6ebe9b
|
2024-04-05 13:13:44
|
Salva Corts
|
fix: Update deprecated angular graph panel in dashboards mixin (#12430)
| false
|
diff --git a/production/loki-mixin-compiled-ssd/dashboards/loki-chunks.json b/production/loki-mixin-compiled-ssd/dashboards/loki-chunks.json
index bec1997c20d47..fe9c354f4f93c 100644
--- a/production/loki-mixin-compiled-ssd/dashboards/loki-chunks.json
+++ b/production/loki-mixin-compiled-ssd/dashboards/loki-chunks.json
@@ -27,156 +27,98 @@
"height": "250px",
"panels": [
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 1,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 1,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "sum(loki_ingester_memory_chunks{cluster=\"$cluster\", job=~\"$namespace/(loki|enterprise-logs)-write\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "series",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Series",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 2,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 2,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "sum(loki_ingester_memory_chunks{cluster=\"$cluster\", job=~\"$namespace/(loki|enterprise-logs)-write\"}) / sum(loki_ingester_memory_streams{cluster=\"$cluster\", job=~\"$namespace/(loki|enterprise-logs)-write\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "chunks",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Chunks per series",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
}
],
"repeat": null,
@@ -191,81 +133,67 @@
"height": "250px",
"panels": [
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 3,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "percentunit"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 3,
"links": [ ],
"nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "histogram_quantile(0.99, sum(rate(loki_ingester_chunk_utilization_bucket{cluster=\"$cluster\", job=~\"$namespace/(loki|enterprise-logs)-write\"}[$__rate_interval])) by (le)) * 1",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "99th Percentile",
- "refId": "A",
- "step": 10
+ "refId": "A"
},
{
"expr": "histogram_quantile(0.50, sum(rate(loki_ingester_chunk_utilization_bucket{cluster=\"$cluster\", job=~\"$namespace/(loki|enterprise-logs)-write\"}[$__rate_interval])) by (le)) * 1",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "50th Percentile",
- "refId": "B",
- "step": 10
+ "refId": "B"
},
{
"expr": "sum(rate(loki_ingester_chunk_utilization_sum{cluster=\"$cluster\", job=~\"$namespace/(loki|enterprise-logs)-write\"}[$__rate_interval])) * 1 / sum(rate(loki_ingester_chunk_utilization_count{cluster=\"$cluster\", job=~\"$namespace/(loki|enterprise-logs)-write\"}[$__rate_interval]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "Average",
- "refId": "C",
- "step": 10
+ "refId": "C"
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Utilization",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
+ "type": "timeseries",
"yaxes": [
{
- "format": "percentunit",
+ "format": "ms",
"label": null,
"logBase": 1,
"max": null,
@@ -283,78 +211,64 @@
]
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 4,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 4,
"links": [ ],
"nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "histogram_quantile(0.99, sum(rate(loki_ingester_chunk_age_seconds_bucket{cluster=\"$cluster\", job=~\"$namespace/(loki|enterprise-logs)-write\"}[$__rate_interval])) by (le)) * 1e3",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "99th Percentile",
- "refId": "A",
- "step": 10
+ "refId": "A"
},
{
"expr": "histogram_quantile(0.50, sum(rate(loki_ingester_chunk_age_seconds_bucket{cluster=\"$cluster\", job=~\"$namespace/(loki|enterprise-logs)-write\"}[$__rate_interval])) by (le)) * 1e3",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "50th Percentile",
- "refId": "B",
- "step": 10
+ "refId": "B"
},
{
"expr": "sum(rate(loki_ingester_chunk_age_seconds_sum{cluster=\"$cluster\", job=~\"$namespace/(loki|enterprise-logs)-write\"}[$__rate_interval])) * 1e3 / sum(rate(loki_ingester_chunk_age_seconds_count{cluster=\"$cluster\", job=~\"$namespace/(loki|enterprise-logs)-write\"}[$__rate_interval]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "Average",
- "refId": "C",
- "step": 10
+ "refId": "C"
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Age",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
+ "type": "timeseries",
"yaxes": [
{
"format": "ms",
@@ -387,81 +301,67 @@
"height": "250px",
"panels": [
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 5,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 5,
"links": [ ],
"nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "histogram_quantile(0.99, sum(rate(loki_ingester_chunk_entries_bucket{cluster=\"$cluster\", job=~\"$namespace/(loki|enterprise-logs)-write\"}[$__rate_interval])) by (le)) * 1",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "99th Percentile",
- "refId": "A",
- "step": 10
+ "refId": "A"
},
{
"expr": "histogram_quantile(0.50, sum(rate(loki_ingester_chunk_entries_bucket{cluster=\"$cluster\", job=~\"$namespace/(loki|enterprise-logs)-write\"}[$__rate_interval])) by (le)) * 1",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "50th Percentile",
- "refId": "B",
- "step": 10
+ "refId": "B"
},
{
"expr": "sum(rate(loki_ingester_chunk_entries_sum{cluster=\"$cluster\", job=~\"$namespace/(loki|enterprise-logs)-write\"}[$__rate_interval])) * 1 / sum(rate(loki_ingester_chunk_entries_count{cluster=\"$cluster\", job=~\"$namespace/(loki|enterprise-logs)-write\"}[$__rate_interval]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "Average",
- "refId": "C",
- "step": 10
+ "refId": "C"
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Log Entries Per Chunk",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
+ "type": "timeseries",
"yaxes": [
{
- "format": "short",
+ "format": "ms",
"label": null,
"logBase": 1,
"max": null,
@@ -479,80 +379,51 @@
]
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 6,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 6,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "sum(rate(loki_chunk_store_index_entries_per_chunk_sum{cluster=\"$cluster\", job=~\"$namespace/(loki|enterprise-logs)-write\"}[5m])) / sum(rate(loki_chunk_store_index_entries_per_chunk_count{cluster=\"$cluster\", job=~\"$namespace/(loki|enterprise-logs)-write\"}[5m]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "Index Entries",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Index Entries Per Chunk",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
}
],
"repeat": null,
@@ -567,80 +438,51 @@
"height": "250px",
"panels": [
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 7,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 7,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "loki_ingester_flush_queue_length{cluster=\"$cluster\", job=~\"$namespace/(loki|enterprise-logs)-write\"} or cortex_ingester_flush_queue_length{cluster=\"$cluster\", job=~\"$namespace/(loki|enterprise-logs)-write\"}",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Queue Length",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
"aliasColors": {
@@ -649,82 +491,196 @@
"3xx": "#6ED0E0",
"4xx": "#EF843C",
"5xx": "#E24D42",
+ "OK": "#7EB26D",
+ "cancel": "#A9A9A9",
"error": "#E24D42",
"success": "#7EB26D"
},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "1xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#EAB839",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "2xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "3xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#6ED0E0",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "4xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#EF843C",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "5xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E24D42",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "OK"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "cancel"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#A9A9A9",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "error"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E24D42",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "success"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
"fill": 10,
"id": 8,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
"linewidth": 0,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
"stack": true,
- "steppedLine": false,
"targets": [
{
- "expr": "sum by (status) (\n label_replace(label_replace(rate(loki_ingester_chunk_age_seconds_count{cluster=\"$cluster\", job=~\"$namespace/(loki|enterprise-logs)-write\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n \"status\", \"${1}\", \"status_code\", \"([a-z]+)\"))\n",
+ "expr": "sum by (status) (\n label_replace(label_replace(rate(loki_ingester_chunk_age_seconds_count{cluster=\"$cluster\", job=~\"$namespace/(loki|enterprise-logs)-write\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n \"status\", \"${1}\", \"status_code\", \"([a-zA-Z]+)\"))\n",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{status}}",
- "refId": "A",
- "step": 10
+ "refId": "A"
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Flush Rate",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
}
],
"repeat": null,
@@ -739,138 +695,99 @@
"height": "250px",
"panels": [
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 9,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 9,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "sum(rate(loki_ingester_chunks_flushed_total{cluster=\"$cluster\", job=~\"$namespace/(loki|enterprise-logs)-write\"}[$__rate_interval]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Chunks Flushed/Second",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 10,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 10,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
"stack": true,
- "steppedLine": false,
"targets": [
{
"expr": "sum by (reason) (rate(loki_ingester_chunks_flushed_total{cluster=\"$cluster\", job=~\"$namespace/(loki|enterprise-logs)-write\"}[$__rate_interval])) / ignoring(reason) group_left sum(rate(loki_ingester_chunks_flushed_total{cluster=\"$cluster\", job=~\"$namespace/(loki|enterprise-logs)-write\"}[$__rate_interval]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{reason}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Chunk Flush Reason",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
+ "type": "timeseries",
"yaxes": [
{
"format": "short",
@@ -1027,96 +944,63 @@
"height": "250px",
"panels": [
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 13,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 13,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 12,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "histogram_quantile(0.99, sum(rate(loki_ingester_chunk_size_bytes_bucket{cluster=\"$cluster\", job=~\"$namespace/(loki|enterprise-logs)-write\"}[1m])) by (le))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "p99",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "histogram_quantile(0.90, sum(rate(loki_ingester_chunk_size_bytes_bucket{cluster=\"$cluster\", job=~\"$namespace/(loki|enterprise-logs)-write\"}[1m])) by (le))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "p90",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "histogram_quantile(0.50, sum(rate(loki_ingester_chunk_size_bytes_bucket{cluster=\"$cluster\", job=~\"$namespace/(loki|enterprise-logs)-write\"}[1m])) by (le))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "p50",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Chunk Size Quantiles",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "bytes",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
}
],
"repeat": null,
@@ -1131,96 +1015,63 @@
"height": "250px",
"panels": [
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 14,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 14,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 12,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "histogram_quantile(0.5, sum(rate(loki_ingester_chunk_bounds_hours_bucket{cluster=\"$cluster\", job=~\"$namespace/(loki|enterprise-logs)-write\"}[5m])) by (le))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "p50",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "histogram_quantile(0.99, sum(rate(loki_ingester_chunk_bounds_hours_bucket{cluster=\"$cluster\", job=~\"$namespace/(loki|enterprise-logs)-write\"}[5m])) by (le))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "p99",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "sum(rate(loki_ingester_chunk_bounds_hours_sum{cluster=\"$cluster\", job=~\"$namespace/(loki|enterprise-logs)-write\"}[5m])) / sum(rate(loki_ingester_chunk_bounds_hours_count{cluster=\"$cluster\", job=~\"$namespace/(loki|enterprise-logs)-write\"}[5m]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "avg",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Chunk Duration hours (end-start)",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
}
],
"repeat": null,
@@ -1244,7 +1095,7 @@
"value": "default"
},
"hide": 0,
- "label": "Data Source",
+ "label": "Data source",
"name": "datasource",
"options": [ ],
"query": "prometheus",
diff --git a/production/loki-mixin-compiled-ssd/dashboards/loki-deletion.json b/production/loki-mixin-compiled-ssd/dashboards/loki-deletion.json
index d66419fc10907..d7748b960885b 100644
--- a/production/loki-mixin-compiled-ssd/dashboards/loki-deletion.json
+++ b/production/loki-mixin-compiled-ssd/dashboards/loki-deletion.json
@@ -62,7 +62,6 @@
"expr": "sum(loki_compactor_pending_delete_requests_count{cluster=~\"$cluster\", namespace=~\"$namespace\"})",
"format": "time_series",
"instant": true,
- "intervalFactor": 2,
"refId": "A"
}
],
@@ -138,7 +137,6 @@
"expr": "max(loki_compactor_oldest_pending_delete_request_age_seconds{cluster=~\"$cluster\", namespace=~\"$namespace\"})",
"format": "time_series",
"instant": true,
- "intervalFactor": 2,
"refId": "A"
}
],
@@ -191,232 +189,145 @@
"height": "250px",
"panels": [
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 3,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 3,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "(loki_compactor_delete_requests_received_total{cluster=~\"$cluster\", namespace=~\"$namespace\"} or on() vector(0)) - on () (loki_compactor_delete_requests_processed_total{cluster=~\"$cluster\", namespace=~\"$namespace\"} or on () vector(0))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "in progress",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "# of Delete Requests (received - processed) ",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 4,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 4,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "sum(increase(loki_compactor_delete_requests_received_total{cluster=~\"$cluster\", namespace=~\"$namespace\"}[1d]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "received",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Delete Requests Received / Day",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 5,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 5,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "sum(increase(loki_compactor_delete_requests_processed_total{cluster=~\"$cluster\", namespace=~\"$namespace\"}[1d]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "processed",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Delete Requests Processed / Day",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
}
],
"repeat": null,
@@ -431,232 +342,145 @@
"height": "250px",
"panels": [
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 6,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 6,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"compactor\"}",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Compactor CPU usage",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 7,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 7,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "go_memstats_heap_inuse_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"compactor\"} / 1024 / 1024 ",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": " {{pod}} ",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Compactor memory usage (MiB)",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 8,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 8,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "loki_boltdb_shipper_compact_tables_operation_duration_seconds{cluster=~\"$cluster\", namespace=~\"$namespace\"}",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Compaction run duration (seconds)",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
}
],
"repeat": null,
@@ -671,156 +495,98 @@
"height": "250px",
"panels": [
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 9,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 9,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "sum(increase(loki_compactor_load_pending_requests_attempts_total{status=\"fail\", cluster=~\"$cluster\", namespace=~\"$namespace\"}[1h]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "failures",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Failures in Loading Delete Requests / Hour",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 10,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 10,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "sum(rate(loki_compactor_deleted_lines{cluster=~\"$cluster\",job=~\"$namespace/(loki|enterprise-logs)-read\"}[$__rate_interval])) by (user)",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{user}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Lines Deleted / Sec",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
}
],
"repeat": null,
@@ -882,7 +648,7 @@
"value": "default"
},
"hide": 0,
- "label": "Data Source",
+ "label": "Data source",
"name": "datasource",
"options": [ ],
"query": "prometheus",
diff --git a/production/loki-mixin-compiled-ssd/dashboards/loki-logs.json b/production/loki-mixin-compiled-ssd/dashboards/loki-logs.json
index d1a2ebaae5a70..90691632b6c29 100644
--- a/production/loki-mixin-compiled-ssd/dashboards/loki-logs.json
+++ b/production/loki-mixin-compiled-ssd/dashboards/loki-logs.json
@@ -861,7 +861,7 @@
"value": "default"
},
"hide": 0,
- "label": "Data Source",
+ "label": "Data source",
"name": "datasource",
"options": [ ],
"query": "prometheus",
diff --git a/production/loki-mixin-compiled-ssd/dashboards/loki-mixin-recording-rules.json b/production/loki-mixin-compiled-ssd/dashboards/loki-mixin-recording-rules.json
index 3bb931f5193e9..1234065bb6f70 100644
--- a/production/loki-mixin-compiled-ssd/dashboards/loki-mixin-recording-rules.json
+++ b/production/loki-mixin-compiled-ssd/dashboards/loki-mixin-recording-rules.json
@@ -600,7 +600,7 @@
"value": "default"
},
"hide": 0,
- "label": "Data Source",
+ "label": "Data source",
"name": "datasource",
"options": [ ],
"query": "prometheus",
diff --git a/production/loki-mixin-compiled-ssd/dashboards/loki-operational.json b/production/loki-mixin-compiled-ssd/dashboards/loki-operational.json
index 71e950931e0e8..5610b088dceb0 100644
--- a/production/loki-mixin-compiled-ssd/dashboards/loki-operational.json
+++ b/production/loki-mixin-compiled-ssd/dashboards/loki-operational.json
@@ -6070,7 +6070,7 @@
"value": "default"
},
"hide": 0,
- "label": "Data Source",
+ "label": "Data source",
"name": "datasource",
"options": [ ],
"query": "prometheus",
diff --git a/production/loki-mixin-compiled-ssd/dashboards/loki-reads-resources.json b/production/loki-mixin-compiled-ssd/dashboards/loki-reads-resources.json
index e54223c76aa13..1d7fe6febe96d 100644
--- a/production/loki-mixin-compiled-ssd/dashboards/loki-reads-resources.json
+++ b/production/loki-mixin-compiled-ssd/dashboards/loki-reads-resources.json
@@ -27,514 +27,397 @@
"collapsed": false,
"panels": [
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "request"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#FFC000",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "limit"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E02F44",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ }
+ ]
+ },
"gridPos": { },
"id": 1,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [
- {
- "alias": "request",
- "color": "#FFC000",
- "fill": 0
+ "options": {
+ "legend": {
+ "showLegend": true
},
- {
- "alias": "limit",
- "color": "#E02F44",
- "fill": 0
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
}
- ],
- "spaceLength": 10,
- "span": 6,
- "stack": false,
- "steppedLine": false,
+ },
"targets": [
{
"expr": "sum by(pod) (rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"loki\", pod=~\"(loki|enterprise-logs)-read.*\"}[$__rate_interval]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"loki\", pod=~\"(loki|enterprise-logs)-read.*\", resource=\"cpu\"} > 0)",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "request",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "min(container_spec_cpu_quota{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"loki\", pod=~\"(loki|enterprise-logs)-read.*\"} / container_spec_cpu_period{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"loki\", pod=~\"(loki|enterprise-logs)-read.*\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "limit",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "CPU",
"tooltip": {
"sort": 2
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "request"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#FFC000",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "limit"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E02F44",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ }
+ ]
+ },
"gridPos": { },
"id": 2,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [
- {
- "alias": "request",
- "color": "#FFC000",
- "fill": 0
+ "options": {
+ "legend": {
+ "showLegend": true
},
- {
- "alias": "limit",
- "color": "#E02F44",
- "fill": 0
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
}
- ],
- "spaceLength": 10,
- "span": 6,
- "stack": false,
- "steppedLine": false,
+ },
"targets": [
{
"expr": "max by(pod) (container_memory_working_set_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"loki\", pod=~\"(loki|enterprise-logs)-read.*\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"loki\", pod=~\"(loki|enterprise-logs)-read.*\", resource=\"memory\"} > 0)",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "request",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "min(container_spec_memory_limit_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"loki\", pod=~\"(loki|enterprise-logs)-read.*\"} > 0)",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "limit",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Memory (workingset)",
"tooltip": {
"sort": 2
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "bytes",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": [ ]
+ },
"gridPos": { },
"id": 3,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
- "span": 6,
- "stack": false,
- "steppedLine": false,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"targets": [
{
"expr": "sum by(pod) (go_memstats_heap_inuse_bytes{cluster=~\"$cluster\", job=~\"($namespace)/(loki|enterprise-logs)-read\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Memory (go heap inuse)",
"tooltip": {
"sort": 2
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "bytes",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 10,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "Bps"
+ },
+ "overrides": [ ]
+ },
"gridPos": { },
"id": 4,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 0,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
- "span": 6,
- "stack": true,
- "steppedLine": false,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"targets": [
{
"expr": "sum by(instance, pod, device) (rate(node_disk_written_bytes_total[$__rate_interval])) + ignoring(pod) group_right() (label_replace(count by(instance, pod, device) (container_fs_writes_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"loki\", pod=~\"(loki|enterprise-logs)-read.*\", device!~\".*sda.*\"}), \"device\", \"$1\", \"device\", \"/dev/(.*)\") * 0)\n",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}} - {{device}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Disk Writes",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "Bps",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 10,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "Bps"
+ },
+ "overrides": [ ]
+ },
"gridPos": { },
"id": 5,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 0,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
- "span": 6,
- "stack": true,
- "steppedLine": false,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"targets": [
{
"expr": "sum by(instance, pod, device) (rate(node_disk_read_bytes_total[$__rate_interval])) + ignoring(pod) group_right() (label_replace(count by(instance, pod, device) (container_fs_writes_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"loki\", pod=~\"(loki|enterprise-logs)-read.*\", device!~\".*sda.*\"}), \"device\", \"$1\", \"device\", \"/dev/(.*)\") * 0)\n",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}} - {{device}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Disk Reads",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "Bps",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "percentunit"
+ },
+ "overrides": [ ]
+ },
"gridPos": { },
"id": 6,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
- "span": 6,
- "stack": false,
- "steppedLine": false,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"targets": [
{
"expr": "max by(persistentvolumeclaim) (kubelet_volume_stats_used_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\"} / kubelet_volume_stats_capacity_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\"}) and count by(persistentvolumeclaim) (kube_persistentvolumeclaim_labels{cluster=~\"$cluster\", namespace=~\"$namespace\",label_name=~\"(loki|enterprise-logs)-read.*\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{persistentvolumeclaim}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Disk Space Utilization",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "percentunit",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
}
],
"repeat": null,
@@ -550,280 +433,256 @@
"height": "250px",
"panels": [
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 7,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "request"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#FFC000",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "limit"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E02F44",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ }
+ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 7,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [
- {
- "alias": "request",
- "color": "#FFC000",
- "fill": 0
+ "options": {
+ "legend": {
+ "showLegend": true
},
- {
- "alias": "limit",
- "color": "#E02F44",
- "fill": 0
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
}
- ],
- "spaceLength": 10,
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "sum by(pod) (rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"loki\", pod=~\"(loki|enterprise-logs)-write.*\"}[$__rate_interval]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"loki\", pod=~\"(loki|enterprise-logs)-write.*\", resource=\"cpu\"} > 0)",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "request",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "min(container_spec_cpu_quota{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"loki\", pod=~\"(loki|enterprise-logs)-write.*\"} / container_spec_cpu_period{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"loki\", pod=~\"(loki|enterprise-logs)-write.*\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "limit",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "CPU",
"tooltip": {
"sort": 2
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 8,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "request"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#FFC000",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "limit"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E02F44",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ }
+ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 8,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [
- {
- "alias": "request",
- "color": "#FFC000",
- "fill": 0
+ "options": {
+ "legend": {
+ "showLegend": true
},
- {
- "alias": "limit",
- "color": "#E02F44",
- "fill": 0
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
}
- ],
- "spaceLength": 10,
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "max by(pod) (container_memory_working_set_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"loki\", pod=~\"(loki|enterprise-logs)-write.*\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"loki\", pod=~\"(loki|enterprise-logs)-write.*\", resource=\"memory\"} > 0)",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "request",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "min(container_spec_memory_limit_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"loki\", pod=~\"(loki|enterprise-logs)-write.*\"} > 0)",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "limit",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Memory (workingset)",
"tooltip": {
"sort": 2
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "bytes",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 9,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 9,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "sum by(pod) (go_memstats_heap_inuse_bytes{cluster=~\"$cluster\", job=~\"($namespace)/(loki|enterprise-logs)-write\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Memory (go heap inuse)",
"tooltip": {
"sort": 2
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "bytes",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
}
],
"repeat": null,
@@ -847,7 +706,7 @@
"value": "default"
},
"hide": 0,
- "label": "Data Source",
+ "label": "Data source",
"name": "datasource",
"options": [ ],
"query": "prometheus",
diff --git a/production/loki-mixin-compiled-ssd/dashboards/loki-reads.json b/production/loki-mixin-compiled-ssd/dashboards/loki-reads.json
index 79c0c82ba0175..334059b99f969 100644
--- a/production/loki-mixin-compiled-ssd/dashboards/loki-reads.json
+++ b/production/loki-mixin-compiled-ssd/dashboards/loki-reads.json
@@ -33,113 +33,234 @@
"3xx": "#6ED0E0",
"4xx": "#EF843C",
"5xx": "#E24D42",
+ "OK": "#7EB26D",
+ "cancel": "#A9A9A9",
"error": "#E24D42",
"success": "#7EB26D"
},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "1xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#EAB839",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "2xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "3xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#6ED0E0",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "4xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#EF843C",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "5xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E24D42",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "OK"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "cancel"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#A9A9A9",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "error"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E24D42",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "success"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
"fill": 10,
"id": 1,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
"linewidth": 0,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
"stack": true,
- "steppedLine": false,
"targets": [
{
- "expr": "sum by (status) (\n label_replace(label_replace(rate(loki_request_duration_seconds_count{cluster=~\"$cluster\",job=~\"($namespace)/(loki|enterprise-logs)-read\", route=~\"loki_api_v1_series|api_prom_series|api_prom_query|api_prom_label|api_prom_label_name_values|loki_api_v1_query|loki_api_v1_query_range|loki_api_v1_labels|loki_api_v1_label_name_values\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n \"status\", \"${1}\", \"status_code\", \"([a-z]+)\"))\n",
+ "expr": "sum by (status) (\n label_replace(label_replace(rate(loki_request_duration_seconds_count{cluster=~\"$cluster\",job=~\"($namespace)/(loki|enterprise-logs)-read\", route=~\"loki_api_v1_series|api_prom_series|api_prom_query|api_prom_label|api_prom_label_name_values|loki_api_v1_query|loki_api_v1_query_range|loki_api_v1_labels|loki_api_v1_label_name_values\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n \"status\", \"${1}\", \"status_code\", \"([a-zA-Z]+)\"))\n",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{status}}",
- "refId": "A",
- "step": 10
+ "refId": "A"
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "QPS",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 2,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "ms"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 2,
"links": [ ],
"nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "histogram_quantile(0.99, sum by (le,route) (cluster_job_route:loki_request_duration_seconds_bucket:sum_rate{cluster=~\"$cluster\", job=~\"($namespace)/(loki|enterprise-logs)-read\", route=~\"loki_api_v1_series|api_prom_series|api_prom_query|api_prom_label|api_prom_label_name_values|loki_api_v1_query|loki_api_v1_query_range|loki_api_v1_labels|loki_api_v1_label_name_values\"})) * 1e3",
@@ -166,23 +287,8 @@
"step": 10
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Latency",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
+ "type": "timeseries",
"yaxes": [
{
"format": "ms",
@@ -203,35 +309,42 @@
]
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 3,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "ms"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 3,
"links": [ ],
"nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "histogram_quantile(0.99, sum(rate(loki_request_duration_seconds_bucket{cluster=~\"$cluster\", job=~\"($namespace)/(loki|enterprise-logs)-read\", route=~\"loki_api_v1_series|api_prom_series|api_prom_query|api_prom_label|api_prom_label_name_values|loki_api_v1_query|loki_api_v1_query_range|loki_api_v1_labels|loki_api_v1_label_name_values\"}[$__rate_interval])) by (le,pod)) * 1e3",
@@ -243,41 +356,8 @@
"step": 10
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Per Pod Latency (p99)",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "ms",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
}
],
"repeat": null,
@@ -298,156 +378,256 @@
"3xx": "#6ED0E0",
"4xx": "#EF843C",
"5xx": "#E24D42",
+ "OK": "#7EB26D",
+ "cancel": "#A9A9A9",
"error": "#E24D42",
"success": "#7EB26D"
},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "1xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#EAB839",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "2xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "3xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#6ED0E0",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "4xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#EF843C",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "5xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E24D42",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "OK"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "cancel"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#A9A9A9",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "error"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E24D42",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "success"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
"fill": 10,
"id": 4,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
"linewidth": 0,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
"stack": true,
- "steppedLine": false,
"targets": [
{
- "expr": "sum by (status) (\n label_replace(label_replace(rate(loki_boltdb_shipper_request_duration_seconds_count{cluster=~\"$cluster\",job=~\"($namespace)/(loki|enterprise-logs)-read\", operation=\"Shipper.Query\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n \"status\", \"${1}\", \"status_code\", \"([a-z]+)\"))\n",
+ "expr": "sum by (status) (\n label_replace(label_replace(rate(loki_boltdb_shipper_request_duration_seconds_count{cluster=~\"$cluster\",job=~\"($namespace)/(loki|enterprise-logs)-read\", operation=\"Shipper.Query\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n \"status\", \"${1}\", \"status_code\", \"([a-zA-Z]+)\"))\n",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{status}}",
- "refId": "A",
- "step": 10
+ "refId": "A"
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "QPS",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 5,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "ms"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 5,
"links": [ ],
"nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "histogram_quantile(0.99, sum(rate(loki_boltdb_shipper_request_duration_seconds_bucket{cluster=~\"$cluster\",job=~\"($namespace)/(loki|enterprise-logs)-read\", operation=\"Shipper.Query\"}[$__rate_interval])) by (le)) * 1e3",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "99th Percentile",
- "refId": "A",
- "step": 10
+ "refId": "A"
},
{
"expr": "histogram_quantile(0.50, sum(rate(loki_boltdb_shipper_request_duration_seconds_bucket{cluster=~\"$cluster\",job=~\"($namespace)/(loki|enterprise-logs)-read\", operation=\"Shipper.Query\"}[$__rate_interval])) by (le)) * 1e3",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "50th Percentile",
- "refId": "B",
- "step": 10
+ "refId": "B"
},
{
"expr": "sum(rate(loki_boltdb_shipper_request_duration_seconds_sum{cluster=~\"$cluster\",job=~\"($namespace)/(loki|enterprise-logs)-read\", operation=\"Shipper.Query\"}[$__rate_interval])) * 1e3 / sum(rate(loki_boltdb_shipper_request_duration_seconds_count{cluster=~\"$cluster\",job=~\"($namespace)/(loki|enterprise-logs)-read\", operation=\"Shipper.Query\"}[$__rate_interval]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "Average",
- "refId": "C",
- "step": 10
+ "refId": "C"
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Latency",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
+ "type": "timeseries",
"yaxes": [
{
"format": "ms",
@@ -468,35 +648,42 @@
]
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 6,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "ms"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 6,
"links": [ ],
"nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "histogram_quantile(0.99, sum(rate(loki_boltdb_shipper_request_duration_seconds_bucket{cluster=~\"$cluster\",job=~\"($namespace)/(loki|enterprise-logs)-read\", operation=\"Shipper.Query\"}[$__rate_interval])) by (le,pod)) * 1e3",
@@ -508,41 +695,8 @@
"step": 10
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Per Pod Latency (p99)",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "ms",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
}
],
"repeat": null,
@@ -566,7 +720,7 @@
"value": "default"
},
"hide": 0,
- "label": "Data Source",
+ "label": "Data source",
"name": "datasource",
"options": [ ],
"query": "prometheus",
diff --git a/production/loki-mixin-compiled-ssd/dashboards/loki-retention.json b/production/loki-mixin-compiled-ssd/dashboards/loki-retention.json
index 95bc7b6e0f83b..1e3edc736160a 100644
--- a/production/loki-mixin-compiled-ssd/dashboards/loki-retention.json
+++ b/production/loki-mixin-compiled-ssd/dashboards/loki-retention.json
@@ -27,280 +27,256 @@
"height": "250px",
"panels": [
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 1,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "request"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#FFC000",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "limit"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E02F44",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ }
+ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 1,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [
- {
- "alias": "request",
- "color": "#FFC000",
- "fill": 0
+ "options": {
+ "legend": {
+ "showLegend": true
},
- {
- "alias": "limit",
- "color": "#E02F44",
- "fill": 0
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
}
- ],
- "spaceLength": 10,
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "sum by(pod) (rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"loki\", pod=~\"(loki|enterprise-logs)-read.*\"}[$__rate_interval]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"loki\", pod=~\"(loki|enterprise-logs)-read.*\", resource=\"cpu\"} > 0)",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "request",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "min(container_spec_cpu_quota{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"loki\", pod=~\"(loki|enterprise-logs)-read.*\"} / container_spec_cpu_period{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"loki\", pod=~\"(loki|enterprise-logs)-read.*\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "limit",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "CPU",
"tooltip": {
"sort": 2
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 2,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "request"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#FFC000",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "limit"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E02F44",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ }
+ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 2,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [
- {
- "alias": "request",
- "color": "#FFC000",
- "fill": 0
+ "options": {
+ "legend": {
+ "showLegend": true
},
- {
- "alias": "limit",
- "color": "#E02F44",
- "fill": 0
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
}
- ],
- "spaceLength": 10,
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "max by(pod) (container_memory_working_set_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"loki\", pod=~\"(loki|enterprise-logs)-read.*\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"loki\", pod=~\"(loki|enterprise-logs)-read.*\", resource=\"memory\"} > 0)",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "request",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "min(container_spec_memory_limit_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"loki\", pod=~\"(loki|enterprise-logs)-read.*\"} > 0)",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "limit",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Memory (workingset)",
"tooltip": {
"sort": 2
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "bytes",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 3,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 3,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "sum by(pod) (go_memstats_heap_inuse_bytes{cluster=~\"$cluster\", job=~\"($namespace)/(loki|enterprise-logs)-read\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Memory (go heap inuse)",
"tooltip": {
"sort": 2
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "bytes",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
}
],
"repeat": null,
@@ -423,80 +399,51 @@
]
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 5,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "s"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 5,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "loki_boltdb_shipper_compact_tables_operation_duration_seconds{cluster=~\"$cluster\", namespace=~\"$namespace\"}",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "duration",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Compact Tables Operations Duration",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "s",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
}
],
"repeat": null,
@@ -511,156 +458,98 @@
"height": "250px",
"panels": [
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 6,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 6,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "sum(increase(loki_compactor_skipped_compacting_locked_table_total{cluster=~\"$cluster\", namespace=~\"$namespace\"}[$__range]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{table_name}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Number of times Tables were skipped during Compaction",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 7,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 7,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "sum by (status)(rate(loki_boltdb_shipper_compact_tables_operation_total{cluster=~\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{success}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Compact Tables Operations Per Status",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
}
],
"repeat": null,
@@ -783,156 +672,98 @@
]
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 9,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "s"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 9,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "loki_compactor_apply_retention_operation_duration_seconds{cluster=~\"$cluster\", namespace=~\"$namespace\"}",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "duration",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Mark Operations Duration",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "s",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 10,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 10,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "sum by (status)(rate(loki_compactor_apply_retention_operation_total{cluster=~\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{success}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Mark Operations Per Status",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
}
],
"repeat": null,
@@ -947,232 +778,145 @@
"height": "250px",
"panels": [
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 10,
- "id": 11,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 0,
+ "id": 11,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
- "stack": true,
- "steppedLine": false,
"targets": [
{
"expr": "count by(action)(loki_boltdb_shipper_retention_marker_table_processed_total{cluster=~\"$cluster\", namespace=~\"$namespace\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{action}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Processed Tables Per Action",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 10,
- "id": 12,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 0,
+ "id": 12,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
- "stack": true,
- "steppedLine": false,
"targets": [
{
"expr": "count by(table,action)(loki_boltdb_shipper_retention_marker_table_processed_total{cluster=~\"$cluster\", namespace=~\"$namespace\" , action=~\"modified|deleted\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{table}}-{{action}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Modified Tables",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 10,
- "id": 13,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 0,
+ "id": 13,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
- "stack": true,
- "steppedLine": false,
"targets": [
{
"expr": "sum by (table)(rate(loki_boltdb_shipper_retention_marker_count_total{cluster=~\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) >0",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{table}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Marks Creation Rate Per Table",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
}
],
"repeat": null,
@@ -1187,154 +931,113 @@
"height": "250px",
"panels": [
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
+ },
"format": "short",
"id": 14,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "sum (increase(loki_boltdb_shipper_retention_marker_count_total{cluster=~\"$cluster\", namespace=~\"$namespace\"}[24h]))",
"format": "time_series",
"instant": true,
- "intervalFactor": 2,
"refId": "A"
}
],
"thresholds": "70,80",
- "timeFrom": null,
- "timeShift": null,
"title": "Marked Chunks (24h)",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "singlestat",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "singlestat"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 15,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 15,
"links": [ ],
"nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "histogram_quantile(0.99, sum(rate(loki_boltdb_shipper_retention_marker_table_processed_duration_seconds_bucket{cluster=~\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (le)) * 1e3",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "99th Percentile",
- "refId": "A",
- "step": 10
+ "refId": "A"
},
{
"expr": "histogram_quantile(0.50, sum(rate(loki_boltdb_shipper_retention_marker_table_processed_duration_seconds_bucket{cluster=~\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (le)) * 1e3",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "50th Percentile",
- "refId": "B",
- "step": 10
+ "refId": "B"
},
{
"expr": "sum(rate(loki_boltdb_shipper_retention_marker_table_processed_duration_seconds_sum{cluster=~\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) * 1e3 / sum(rate(loki_boltdb_shipper_retention_marker_table_processed_duration_seconds_count{cluster=~\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "Average",
- "refId": "C",
- "step": 10
+ "refId": "C"
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Mark Table Latency",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
+ "type": "timeseries",
"yaxes": [
{
"format": "ms",
@@ -1367,154 +1070,113 @@
"height": "250px",
"panels": [
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
+ },
"format": "short",
"id": 16,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "sum (increase(loki_boltdb_shipper_retention_sweeper_chunk_deleted_duration_seconds_count{cluster=~\"$cluster\", namespace=~\"$namespace\"}[24h]))",
"format": "time_series",
- "instant": true,
- "intervalFactor": 2,
- "refId": "A"
- }
- ],
- "thresholds": "70,80",
- "timeFrom": null,
- "timeShift": null,
- "title": "Delete Chunks (24h)",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "singlestat",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
- },
- {
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "id": 17,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "instant": true,
+ "refId": "A"
+ }
+ ],
+ "thresholds": "70,80",
+ "title": "Delete Chunks (24h)",
+ "type": "singlestat"
+ },
+ {
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 17,
"links": [ ],
"nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "histogram_quantile(0.99, sum(rate(loki_boltdb_shipper_retention_sweeper_chunk_deleted_duration_seconds_bucket{cluster=~\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (le)) * 1e3",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "99th Percentile",
- "refId": "A",
- "step": 10
+ "refId": "A"
},
{
"expr": "histogram_quantile(0.50, sum(rate(loki_boltdb_shipper_retention_sweeper_chunk_deleted_duration_seconds_bucket{cluster=~\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (le)) * 1e3",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "50th Percentile",
- "refId": "B",
- "step": 10
+ "refId": "B"
},
{
"expr": "sum(rate(loki_boltdb_shipper_retention_sweeper_chunk_deleted_duration_seconds_sum{cluster=~\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) * 1e3 / sum(rate(loki_boltdb_shipper_retention_sweeper_chunk_deleted_duration_seconds_count{cluster=~\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "Average",
- "refId": "C",
- "step": 10
+ "refId": "C"
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Delete Latency",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
+ "type": "timeseries",
"yaxes": [
{
"format": "ms",
@@ -1547,232 +1209,145 @@
"height": "250px",
"panels": [
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 18,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "s"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 18,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "time() - (loki_boltdb_shipper_retention_sweeper_marker_file_processing_current_time{cluster=~\"$cluster\", namespace=~\"$namespace\"} > 0)",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "lag",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Sweeper Lag",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "s",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 19,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 19,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "sum(loki_boltdb_shipper_retention_sweeper_marker_files_current{cluster=~\"$cluster\", namespace=~\"$namespace\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "count",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Marks Files to Process",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 20,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 20,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "sum by (status)(rate(loki_boltdb_shipper_retention_sweeper_chunk_deleted_duration_seconds_count{cluster=~\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{status}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Delete Rate Per Status",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
}
],
"repeat": null,
@@ -1821,7 +1396,7 @@
"value": "default"
},
"hide": 0,
- "label": "Data Source",
+ "label": "Data source",
"name": "datasource",
"options": [ ],
"query": "prometheus",
diff --git a/production/loki-mixin-compiled-ssd/dashboards/loki-writes-resources.json b/production/loki-mixin-compiled-ssd/dashboards/loki-writes-resources.json
index 2a5b26fa5bd6f..47da95f78fc2b 100644
--- a/production/loki-mixin-compiled-ssd/dashboards/loki-writes-resources.json
+++ b/production/loki-mixin-compiled-ssd/dashboards/loki-writes-resources.json
@@ -27,589 +27,447 @@
"collapsed": false,
"panels": [
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
+ },
"gridPos": { },
"id": 1,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
- "span": 6,
- "stack": false,
- "steppedLine": false,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"targets": [
{
"expr": "sum by(pod) (loki_ingester_memory_streams{cluster=~\"$cluster\", job=~\"($namespace)/(loki|enterprise-logs)-write\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "In-memory streams",
"tooltip": {
"sort": 2
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "request"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#FFC000",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "limit"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E02F44",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ }
+ ]
+ },
"gridPos": { },
"id": 2,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [
- {
- "alias": "request",
- "color": "#FFC000",
- "fill": 0
+ "options": {
+ "legend": {
+ "showLegend": true
},
- {
- "alias": "limit",
- "color": "#E02F44",
- "fill": 0
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
}
- ],
- "spaceLength": 10,
- "span": 6,
- "stack": false,
- "steppedLine": false,
+ },
"targets": [
{
"expr": "sum by(pod) (rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"loki\", pod=~\"(loki|enterprise-logs)-write.*\"}[$__rate_interval]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"loki\", pod=~\"(loki|enterprise-logs)-write.*\", resource=\"cpu\"} > 0)",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "request",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "min(container_spec_cpu_quota{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"loki\", pod=~\"(loki|enterprise-logs)-write.*\"} / container_spec_cpu_period{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"loki\", pod=~\"(loki|enterprise-logs)-write.*\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "limit",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "CPU",
"tooltip": {
"sort": 2
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "request"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#FFC000",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "limit"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E02F44",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ }
+ ]
+ },
"gridPos": { },
"id": 3,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [
- {
- "alias": "request",
- "color": "#FFC000",
- "fill": 0
+ "options": {
+ "legend": {
+ "showLegend": true
},
- {
- "alias": "limit",
- "color": "#E02F44",
- "fill": 0
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
}
- ],
- "spaceLength": 10,
- "span": 6,
- "stack": false,
- "steppedLine": false,
+ },
"targets": [
{
"expr": "max by(pod) (container_memory_working_set_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"loki\", pod=~\"(loki|enterprise-logs)-write.*\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"loki\", pod=~\"(loki|enterprise-logs)-write.*\", resource=\"memory\"} > 0)",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "request",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "min(container_spec_memory_limit_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"loki\", pod=~\"(loki|enterprise-logs)-write.*\"} > 0)",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "limit",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Memory (workingset)",
"tooltip": {
"sort": 2
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "bytes",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": [ ]
+ },
"gridPos": { },
"id": 4,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
- "span": 6,
- "stack": false,
- "steppedLine": false,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"targets": [
{
"expr": "sum by(pod) (go_memstats_heap_inuse_bytes{cluster=~\"$cluster\", job=~\"($namespace)/(loki|enterprise-logs)-write\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Memory (go heap inuse)",
"tooltip": {
"sort": 2
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "bytes",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 10,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "Bps"
+ },
+ "overrides": [ ]
+ },
"gridPos": { },
"id": 5,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 0,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
- "span": 6,
- "stack": true,
- "steppedLine": false,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"targets": [
{
"expr": "sum by(instance, pod, device) (rate(node_disk_written_bytes_total[$__rate_interval])) + ignoring(pod) group_right() (label_replace(count by(instance, pod, device) (container_fs_writes_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"loki\", pod=~\"(loki|enterprise-logs)-write.*\", device!~\".*sda.*\"}), \"device\", \"$1\", \"device\", \"/dev/(.*)\") * 0)\n",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}} - {{device}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Disk Writes",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "Bps",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 10,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "Bps"
+ },
+ "overrides": [ ]
+ },
"gridPos": { },
"id": 6,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 0,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
- "span": 6,
- "stack": true,
- "steppedLine": false,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"targets": [
{
"expr": "sum by(instance, pod, device) (rate(node_disk_read_bytes_total[$__rate_interval])) + ignoring(pod) group_right() (label_replace(count by(instance, pod, device) (container_fs_writes_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"loki\", pod=~\"(loki|enterprise-logs)-write.*\", device!~\".*sda.*\"}), \"device\", \"$1\", \"device\", \"/dev/(.*)\") * 0)\n",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}} - {{device}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Disk Reads",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "Bps",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "percentunit"
+ },
+ "overrides": [ ]
+ },
"gridPos": { },
"id": 7,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
- "span": 6,
- "stack": false,
- "steppedLine": false,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"targets": [
{
"expr": "max by(persistentvolumeclaim) (kubelet_volume_stats_used_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\"} / kubelet_volume_stats_capacity_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\"}) and count by(persistentvolumeclaim) (kube_persistentvolumeclaim_labels{cluster=~\"$cluster\", namespace=~\"$namespace\",label_name=~\"(loki|enterprise-logs)-write.*\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{persistentvolumeclaim}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Disk Space Utilization",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "percentunit",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
}
],
"repeat": null,
@@ -634,7 +492,7 @@
"value": "default"
},
"hide": 0,
- "label": "Data Source",
+ "label": "Data source",
"name": "datasource",
"options": [ ],
"query": "prometheus",
diff --git a/production/loki-mixin-compiled-ssd/dashboards/loki-writes.json b/production/loki-mixin-compiled-ssd/dashboards/loki-writes.json
index 317d8a06bb67c..d64cb15c10129 100644
--- a/production/loki-mixin-compiled-ssd/dashboards/loki-writes.json
+++ b/production/loki-mixin-compiled-ssd/dashboards/loki-writes.json
@@ -33,113 +33,234 @@
"3xx": "#6ED0E0",
"4xx": "#EF843C",
"5xx": "#E24D42",
+ "OK": "#7EB26D",
+ "cancel": "#A9A9A9",
"error": "#E24D42",
"success": "#7EB26D"
},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "1xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#EAB839",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "2xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "3xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#6ED0E0",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "4xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#EF843C",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "5xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E24D42",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "OK"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "cancel"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#A9A9A9",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "error"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E24D42",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "success"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
"fill": 10,
"id": 1,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
"linewidth": 0,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
"stack": true,
- "steppedLine": false,
"targets": [
{
- "expr": "sum by (status) (\n label_replace(label_replace(rate(loki_request_duration_seconds_count{cluster=~\"$cluster\",job=~\"($namespace)/(loki|enterprise-logs)-write\", route=~\"api_prom_push|loki_api_v1_push|/httpgrpc.HTTP/Handle\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n \"status\", \"${1}\", \"status_code\", \"([a-z]+)\"))\n",
+ "expr": "sum by (status) (\n label_replace(label_replace(rate(loki_request_duration_seconds_count{cluster=~\"$cluster\",job=~\"($namespace)/(loki|enterprise-logs)-write\", route=~\"api_prom_push|loki_api_v1_push|/httpgrpc.HTTP/Handle\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n \"status\", \"${1}\", \"status_code\", \"([a-zA-Z]+)\"))\n",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{status}}",
- "refId": "A",
- "step": 10
+ "refId": "A"
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "QPS",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 2,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "ms"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 2,
"links": [ ],
"nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "histogram_quantile(0.99, sum by (le) (cluster_job_route:loki_request_duration_seconds_bucket:sum_rate{cluster=~\"$cluster\", job=~\"($namespace)/(loki|enterprise-logs)-write\", route=~\"api_prom_push|loki_api_v1_push|/httpgrpc.HTTP/Handle\"})) * 1e3",
@@ -166,23 +287,8 @@
"step": 10
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Latency",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
+ "type": "timeseries",
"yaxes": [
{
"format": "ms",
@@ -215,138 +321,99 @@
"height": "250px",
"panels": [
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 3,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 3,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "sum (rate(loki_distributor_structured_metadata_bytes_received_total{cluster=~\"$cluster\",job=~\"($namespace)/(loki|enterprise-logs)-write\",}[$__rate_interval])) / sum(rate(loki_distributor_bytes_received_total{cluster=~\"$cluster\",job=~\"($namespace)/(loki|enterprise-logs)-write\",}[$__rate_interval]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "bytes",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Per Total Received Bytes",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 4,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 4,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
"stack": true,
- "steppedLine": false,
"targets": [
{
"expr": "sum by (tenant) (rate(loki_distributor_structured_metadata_bytes_received_total{cluster=~\"$cluster\",job=~\"($namespace)/(loki|enterprise-logs)-write\",}[$__rate_interval])) / ignoring(tenant) group_left sum(rate(loki_distributor_structured_metadata_bytes_received_total{cluster=~\"$cluster\",job=~\"($namespace)/(loki|enterprise-logs)-write\",}[$__rate_interval]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{tenant}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Per Tenant",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
+ "type": "timeseries",
"yaxes": [
{
"format": "short",
@@ -385,156 +452,256 @@
"3xx": "#6ED0E0",
"4xx": "#EF843C",
"5xx": "#E24D42",
+ "OK": "#7EB26D",
+ "cancel": "#A9A9A9",
"error": "#E24D42",
"success": "#7EB26D"
},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "1xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#EAB839",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "2xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "3xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#6ED0E0",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "4xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#EF843C",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "5xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E24D42",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "OK"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "cancel"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#A9A9A9",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "error"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E24D42",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "success"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
"fill": 10,
"id": 5,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
"linewidth": 0,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
"stack": true,
- "steppedLine": false,
"targets": [
{
- "expr": "sum by (status) (\n label_replace(label_replace(rate(loki_boltdb_shipper_request_duration_seconds_count{cluster=~\"$cluster\",job=~\"($namespace)/(loki|enterprise-logs)-write\", operation=\"WRITE\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n \"status\", \"${1}\", \"status_code\", \"([a-z]+)\"))\n",
+ "expr": "sum by (status) (\n label_replace(label_replace(rate(loki_boltdb_shipper_request_duration_seconds_count{cluster=~\"$cluster\",job=~\"($namespace)/(loki|enterprise-logs)-write\", operation=\"WRITE\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n \"status\", \"${1}\", \"status_code\", \"([a-zA-Z]+)\"))\n",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{status}}",
- "refId": "A",
- "step": 10
+ "refId": "A"
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "QPS",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 6,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "ms"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 6,
"links": [ ],
"nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "histogram_quantile(0.99, sum(rate(loki_boltdb_shipper_request_duration_seconds_bucket{cluster=~\"$cluster\",job=~\"($namespace)/(loki|enterprise-logs)-write\", operation=\"WRITE\"}[$__rate_interval])) by (le)) * 1e3",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "99th Percentile",
- "refId": "A",
- "step": 10
+ "refId": "A"
},
{
"expr": "histogram_quantile(0.50, sum(rate(loki_boltdb_shipper_request_duration_seconds_bucket{cluster=~\"$cluster\",job=~\"($namespace)/(loki|enterprise-logs)-write\", operation=\"WRITE\"}[$__rate_interval])) by (le)) * 1e3",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "50th Percentile",
- "refId": "B",
- "step": 10
+ "refId": "B"
},
{
"expr": "sum(rate(loki_boltdb_shipper_request_duration_seconds_sum{cluster=~\"$cluster\",job=~\"($namespace)/(loki|enterprise-logs)-write\", operation=\"WRITE\"}[$__rate_interval])) * 1e3 / sum(rate(loki_boltdb_shipper_request_duration_seconds_count{cluster=~\"$cluster\",job=~\"($namespace)/(loki|enterprise-logs)-write\", operation=\"WRITE\"}[$__rate_interval]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "Average",
- "refId": "C",
- "step": 10
+ "refId": "C"
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Latency",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
+ "type": "timeseries",
"yaxes": [
{
"format": "ms",
@@ -576,7 +743,7 @@
"value": "default"
},
"hide": 0,
- "label": "Data Source",
+ "label": "Data source",
"name": "datasource",
"options": [ ],
"query": "prometheus",
diff --git a/production/loki-mixin-compiled/dashboards/loki-chunks.json b/production/loki-mixin-compiled/dashboards/loki-chunks.json
index f84c6a1f0751c..b1304ffaf7538 100644
--- a/production/loki-mixin-compiled/dashboards/loki-chunks.json
+++ b/production/loki-mixin-compiled/dashboards/loki-chunks.json
@@ -27,156 +27,98 @@
"height": "250px",
"panels": [
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 1,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 1,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "sum(loki_ingester_memory_chunks{cluster=\"$cluster\", job=~\"$namespace/ingester.*\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "series",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Series",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 2,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 2,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "sum(loki_ingester_memory_chunks{cluster=\"$cluster\", job=~\"$namespace/ingester.*\"}) / sum(loki_ingester_memory_streams{cluster=\"$cluster\", job=~\"$namespace/ingester.*\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "chunks",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Chunks per series",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
}
],
"repeat": null,
@@ -191,81 +133,67 @@
"height": "250px",
"panels": [
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 3,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "percentunit"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 3,
"links": [ ],
"nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "histogram_quantile(0.99, sum(rate(loki_ingester_chunk_utilization_bucket{cluster=\"$cluster\", job=~\"$namespace/ingester.*\"}[$__rate_interval])) by (le)) * 1",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "99th Percentile",
- "refId": "A",
- "step": 10
+ "refId": "A"
},
{
"expr": "histogram_quantile(0.50, sum(rate(loki_ingester_chunk_utilization_bucket{cluster=\"$cluster\", job=~\"$namespace/ingester.*\"}[$__rate_interval])) by (le)) * 1",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "50th Percentile",
- "refId": "B",
- "step": 10
+ "refId": "B"
},
{
"expr": "sum(rate(loki_ingester_chunk_utilization_sum{cluster=\"$cluster\", job=~\"$namespace/ingester.*\"}[$__rate_interval])) * 1 / sum(rate(loki_ingester_chunk_utilization_count{cluster=\"$cluster\", job=~\"$namespace/ingester.*\"}[$__rate_interval]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "Average",
- "refId": "C",
- "step": 10
+ "refId": "C"
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Utilization",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
+ "type": "timeseries",
"yaxes": [
{
- "format": "percentunit",
+ "format": "ms",
"label": null,
"logBase": 1,
"max": null,
@@ -283,78 +211,64 @@
]
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 4,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 4,
"links": [ ],
"nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "histogram_quantile(0.99, sum(rate(loki_ingester_chunk_age_seconds_bucket{cluster=\"$cluster\", job=~\"$namespace/ingester.*\"}[$__rate_interval])) by (le)) * 1e3",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "99th Percentile",
- "refId": "A",
- "step": 10
+ "refId": "A"
},
{
"expr": "histogram_quantile(0.50, sum(rate(loki_ingester_chunk_age_seconds_bucket{cluster=\"$cluster\", job=~\"$namespace/ingester.*\"}[$__rate_interval])) by (le)) * 1e3",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "50th Percentile",
- "refId": "B",
- "step": 10
+ "refId": "B"
},
{
"expr": "sum(rate(loki_ingester_chunk_age_seconds_sum{cluster=\"$cluster\", job=~\"$namespace/ingester.*\"}[$__rate_interval])) * 1e3 / sum(rate(loki_ingester_chunk_age_seconds_count{cluster=\"$cluster\", job=~\"$namespace/ingester.*\"}[$__rate_interval]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "Average",
- "refId": "C",
- "step": 10
+ "refId": "C"
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Age",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
+ "type": "timeseries",
"yaxes": [
{
"format": "ms",
@@ -387,81 +301,67 @@
"height": "250px",
"panels": [
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 5,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 5,
"links": [ ],
"nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "histogram_quantile(0.99, sum(rate(loki_ingester_chunk_entries_bucket{cluster=\"$cluster\", job=~\"$namespace/ingester.*\"}[$__rate_interval])) by (le)) * 1",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "99th Percentile",
- "refId": "A",
- "step": 10
+ "refId": "A"
},
{
"expr": "histogram_quantile(0.50, sum(rate(loki_ingester_chunk_entries_bucket{cluster=\"$cluster\", job=~\"$namespace/ingester.*\"}[$__rate_interval])) by (le)) * 1",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "50th Percentile",
- "refId": "B",
- "step": 10
+ "refId": "B"
},
{
"expr": "sum(rate(loki_ingester_chunk_entries_sum{cluster=\"$cluster\", job=~\"$namespace/ingester.*\"}[$__rate_interval])) * 1 / sum(rate(loki_ingester_chunk_entries_count{cluster=\"$cluster\", job=~\"$namespace/ingester.*\"}[$__rate_interval]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "Average",
- "refId": "C",
- "step": 10
+ "refId": "C"
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Log Entries Per Chunk",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
+ "type": "timeseries",
"yaxes": [
{
- "format": "short",
+ "format": "ms",
"label": null,
"logBase": 1,
"max": null,
@@ -479,80 +379,51 @@
]
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 6,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 6,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "sum(rate(loki_chunk_store_index_entries_per_chunk_sum{cluster=\"$cluster\", job=~\"$namespace/ingester.*\"}[5m])) / sum(rate(loki_chunk_store_index_entries_per_chunk_count{cluster=\"$cluster\", job=~\"$namespace/ingester.*\"}[5m]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "Index Entries",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Index Entries Per Chunk",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
}
],
"repeat": null,
@@ -567,80 +438,51 @@
"height": "250px",
"panels": [
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 7,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 7,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "loki_ingester_flush_queue_length{cluster=\"$cluster\", job=~\"$namespace/ingester.*\"} or cortex_ingester_flush_queue_length{cluster=\"$cluster\", job=~\"$namespace/ingester.*\"}",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Queue Length",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
"aliasColors": {
@@ -649,82 +491,196 @@
"3xx": "#6ED0E0",
"4xx": "#EF843C",
"5xx": "#E24D42",
+ "OK": "#7EB26D",
+ "cancel": "#A9A9A9",
"error": "#E24D42",
"success": "#7EB26D"
},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "1xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#EAB839",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "2xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "3xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#6ED0E0",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "4xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#EF843C",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "5xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E24D42",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "OK"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "cancel"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#A9A9A9",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "error"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E24D42",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "success"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
"fill": 10,
"id": 8,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
"linewidth": 0,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
"stack": true,
- "steppedLine": false,
"targets": [
{
- "expr": "sum by (status) (\n label_replace(label_replace(rate(loki_ingester_chunk_age_seconds_count{cluster=\"$cluster\", job=~\"$namespace/ingester.*\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n \"status\", \"${1}\", \"status_code\", \"([a-z]+)\"))\n",
+ "expr": "sum by (status) (\n label_replace(label_replace(rate(loki_ingester_chunk_age_seconds_count{cluster=\"$cluster\", job=~\"$namespace/ingester.*\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n \"status\", \"${1}\", \"status_code\", \"([a-zA-Z]+)\"))\n",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{status}}",
- "refId": "A",
- "step": 10
+ "refId": "A"
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Flush Rate",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
}
],
"repeat": null,
@@ -739,138 +695,99 @@
"height": "250px",
"panels": [
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 9,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 9,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "sum(rate(loki_ingester_chunks_flushed_total{cluster=\"$cluster\", job=~\"$namespace/ingester.*\"}[$__rate_interval]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Chunks Flushed/Second",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 10,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 10,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
"stack": true,
- "steppedLine": false,
"targets": [
{
"expr": "sum by (reason) (rate(loki_ingester_chunks_flushed_total{cluster=\"$cluster\", job=~\"$namespace/ingester.*\"}[$__rate_interval])) / ignoring(reason) group_left sum(rate(loki_ingester_chunks_flushed_total{cluster=\"$cluster\", job=~\"$namespace/ingester.*\"}[$__rate_interval]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{reason}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Chunk Flush Reason",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
+ "type": "timeseries",
"yaxes": [
{
"format": "short",
@@ -1027,96 +944,63 @@
"height": "250px",
"panels": [
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 13,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 13,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 12,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "histogram_quantile(0.99, sum(rate(loki_ingester_chunk_size_bytes_bucket{cluster=\"$cluster\", job=~\"$namespace/ingester.*\"}[1m])) by (le))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "p99",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "histogram_quantile(0.90, sum(rate(loki_ingester_chunk_size_bytes_bucket{cluster=\"$cluster\", job=~\"$namespace/ingester.*\"}[1m])) by (le))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "p90",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "histogram_quantile(0.50, sum(rate(loki_ingester_chunk_size_bytes_bucket{cluster=\"$cluster\", job=~\"$namespace/ingester.*\"}[1m])) by (le))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "p50",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Chunk Size Quantiles",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "bytes",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
}
],
"repeat": null,
@@ -1131,96 +1015,63 @@
"height": "250px",
"panels": [
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 14,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 14,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 12,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "histogram_quantile(0.5, sum(rate(loki_ingester_chunk_bounds_hours_bucket{cluster=\"$cluster\", job=~\"$namespace/ingester.*\"}[5m])) by (le))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "p50",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "histogram_quantile(0.99, sum(rate(loki_ingester_chunk_bounds_hours_bucket{cluster=\"$cluster\", job=~\"$namespace/ingester.*\"}[5m])) by (le))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "p99",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "sum(rate(loki_ingester_chunk_bounds_hours_sum{cluster=\"$cluster\", job=~\"$namespace/ingester.*\"}[5m])) / sum(rate(loki_ingester_chunk_bounds_hours_count{cluster=\"$cluster\", job=~\"$namespace/ingester.*\"}[5m]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "avg",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Chunk Duration hours (end-start)",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
}
],
"repeat": null,
@@ -1244,7 +1095,7 @@
"value": "default"
},
"hide": 0,
- "label": "Data Source",
+ "label": "Data source",
"name": "datasource",
"options": [ ],
"query": "prometheus",
diff --git a/production/loki-mixin-compiled/dashboards/loki-deletion.json b/production/loki-mixin-compiled/dashboards/loki-deletion.json
index c37176a9dc69c..939f37e481a82 100644
--- a/production/loki-mixin-compiled/dashboards/loki-deletion.json
+++ b/production/loki-mixin-compiled/dashboards/loki-deletion.json
@@ -62,7 +62,6 @@
"expr": "sum(loki_compactor_pending_delete_requests_count{cluster=~\"$cluster\", namespace=~\"$namespace\"})",
"format": "time_series",
"instant": true,
- "intervalFactor": 2,
"refId": "A"
}
],
@@ -138,7 +137,6 @@
"expr": "max(loki_compactor_oldest_pending_delete_request_age_seconds{cluster=~\"$cluster\", namespace=~\"$namespace\"})",
"format": "time_series",
"instant": true,
- "intervalFactor": 2,
"refId": "A"
}
],
@@ -191,232 +189,145 @@
"height": "250px",
"panels": [
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 3,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 3,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "(loki_compactor_delete_requests_received_total{cluster=~\"$cluster\", namespace=~\"$namespace\"} or on() vector(0)) - on () (loki_compactor_delete_requests_processed_total{cluster=~\"$cluster\", namespace=~\"$namespace\"} or on () vector(0))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "in progress",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "# of Delete Requests (received - processed) ",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 4,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 4,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "sum(increase(loki_compactor_delete_requests_received_total{cluster=~\"$cluster\", namespace=~\"$namespace\"}[1d]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "received",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Delete Requests Received / Day",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 5,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 5,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "sum(increase(loki_compactor_delete_requests_processed_total{cluster=~\"$cluster\", namespace=~\"$namespace\"}[1d]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "processed",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Delete Requests Processed / Day",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
}
],
"repeat": null,
@@ -431,232 +342,145 @@
"height": "250px",
"panels": [
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 6,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 6,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"compactor\"}",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Compactor CPU usage",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 7,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 7,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "go_memstats_heap_inuse_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"compactor\"} / 1024 / 1024 ",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": " {{pod}} ",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Compactor memory usage (MiB)",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 8,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 8,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "loki_boltdb_shipper_compact_tables_operation_duration_seconds{cluster=~\"$cluster\", namespace=~\"$namespace\"}",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Compaction run duration (seconds)",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
}
],
"repeat": null,
@@ -671,156 +495,98 @@
"height": "250px",
"panels": [
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 9,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 9,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "sum(increase(loki_compactor_load_pending_requests_attempts_total{status=\"fail\", cluster=~\"$cluster\", namespace=~\"$namespace\"}[1h]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "failures",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Failures in Loading Delete Requests / Hour",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 10,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 10,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "sum(rate(loki_compactor_deleted_lines{cluster=~\"$cluster\",job=~\"$namespace/compactor\"}[$__rate_interval])) by (user)",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{user}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Lines Deleted / Sec",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
}
],
"repeat": null,
@@ -882,7 +648,7 @@
"value": "default"
},
"hide": 0,
- "label": "Data Source",
+ "label": "Data source",
"name": "datasource",
"options": [ ],
"query": "prometheus",
diff --git a/production/loki-mixin-compiled/dashboards/loki-logs.json b/production/loki-mixin-compiled/dashboards/loki-logs.json
index d1a2ebaae5a70..90691632b6c29 100644
--- a/production/loki-mixin-compiled/dashboards/loki-logs.json
+++ b/production/loki-mixin-compiled/dashboards/loki-logs.json
@@ -861,7 +861,7 @@
"value": "default"
},
"hide": 0,
- "label": "Data Source",
+ "label": "Data source",
"name": "datasource",
"options": [ ],
"query": "prometheus",
diff --git a/production/loki-mixin-compiled/dashboards/loki-mixin-recording-rules.json b/production/loki-mixin-compiled/dashboards/loki-mixin-recording-rules.json
index 3bb931f5193e9..1234065bb6f70 100644
--- a/production/loki-mixin-compiled/dashboards/loki-mixin-recording-rules.json
+++ b/production/loki-mixin-compiled/dashboards/loki-mixin-recording-rules.json
@@ -600,7 +600,7 @@
"value": "default"
},
"hide": 0,
- "label": "Data Source",
+ "label": "Data source",
"name": "datasource",
"options": [ ],
"query": "prometheus",
diff --git a/production/loki-mixin-compiled/dashboards/loki-operational.json b/production/loki-mixin-compiled/dashboards/loki-operational.json
index de4735b4bae6d..133dbc27b51b5 100644
--- a/production/loki-mixin-compiled/dashboards/loki-operational.json
+++ b/production/loki-mixin-compiled/dashboards/loki-operational.json
@@ -6572,7 +6572,7 @@
"value": "default"
},
"hide": 0,
- "label": "Data Source",
+ "label": "Data source",
"name": "datasource",
"options": [ ],
"query": "prometheus",
diff --git a/production/loki-mixin-compiled/dashboards/loki-reads-resources.json b/production/loki-mixin-compiled/dashboards/loki-reads-resources.json
index 9f1dc904125fa..7141e4033a307 100644
--- a/production/loki-mixin-compiled/dashboards/loki-reads-resources.json
+++ b/production/loki-mixin-compiled/dashboards/loki-reads-resources.json
@@ -27,280 +27,256 @@
"height": "250px",
"panels": [
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "request"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#FFC000",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "limit"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E02F44",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ }
+ ]
+ },
"id": 1,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [
- {
- "alias": "request",
- "color": "#FFC000",
- "fill": 0
+ "options": {
+ "legend": {
+ "showLegend": true
},
- {
- "alias": "limit",
- "color": "#E02F44",
- "fill": 0
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
}
- ],
- "spaceLength": 10,
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "sum by(pod) (rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"query-frontend\"}[$__rate_interval]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"query-frontend\", resource=\"cpu\"} > 0)",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "request",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "min(container_spec_cpu_quota{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"query-frontend\"} / container_spec_cpu_period{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"query-frontend\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "limit",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "CPU",
"tooltip": {
"sort": 2
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "request"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#FFC000",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "limit"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E02F44",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ }
+ ]
+ },
"id": 2,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [
- {
- "alias": "request",
- "color": "#FFC000",
- "fill": 0
+ "options": {
+ "legend": {
+ "showLegend": true
},
- {
- "alias": "limit",
- "color": "#E02F44",
- "fill": 0
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
}
- ],
- "spaceLength": 10,
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "max by(pod) (container_memory_working_set_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"query-frontend\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"query-frontend\", resource=\"memory\"} > 0)",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "request",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "min(container_spec_memory_limit_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"query-frontend\"} > 0)",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "limit",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Memory (workingset)",
"tooltip": {
"sort": 2
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "bytes",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": [ ]
+ },
"id": 3,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "sum by(pod) (go_memstats_heap_inuse_bytes{cluster=~\"$cluster\", job=~\"($namespace)/query-frontend\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Memory (go heap inuse)",
"tooltip": {
"sort": 2
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "bytes",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
}
],
"repeat": null,
@@ -315,280 +291,256 @@
"height": "250px",
"panels": [
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "request"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#FFC000",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "limit"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E02F44",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ }
+ ]
+ },
"id": 4,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [
- {
- "alias": "request",
- "color": "#FFC000",
- "fill": 0
+ "options": {
+ "legend": {
+ "showLegend": true
},
- {
- "alias": "limit",
- "color": "#E02F44",
- "fill": 0
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
}
- ],
- "spaceLength": 10,
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "sum by(pod) (rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"query-scheduler\"}[$__rate_interval]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"query-scheduler\", resource=\"cpu\"} > 0)",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "request",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "min(container_spec_cpu_quota{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"query-scheduler\"} / container_spec_cpu_period{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"query-scheduler\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "limit",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "CPU",
"tooltip": {
"sort": 2
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "request"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#FFC000",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "limit"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E02F44",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ }
+ ]
+ },
"id": 5,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [
- {
- "alias": "request",
- "color": "#FFC000",
- "fill": 0
+ "options": {
+ "legend": {
+ "showLegend": true
},
- {
- "alias": "limit",
- "color": "#E02F44",
- "fill": 0
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
}
- ],
- "spaceLength": 10,
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "max by(pod) (container_memory_working_set_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"query-scheduler\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"query-scheduler\", resource=\"memory\"} > 0)",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "request",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "min(container_spec_memory_limit_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"query-scheduler\"} > 0)",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "limit",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Memory (workingset)",
"tooltip": {
"sort": 2
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "bytes",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": [ ]
+ },
"id": 6,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "sum by(pod) (go_memstats_heap_inuse_bytes{cluster=~\"$cluster\", job=~\"($namespace)/query-scheduler\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Memory (go heap inuse)",
"tooltip": {
"sort": 2
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "bytes",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
}
],
"repeat": null,
@@ -603,514 +555,397 @@
"collapsed": false,
"panels": [
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "request"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#FFC000",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "limit"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E02F44",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ }
+ ]
+ },
"gridPos": { },
"id": 7,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [
- {
- "alias": "request",
- "color": "#FFC000",
- "fill": 0
+ "options": {
+ "legend": {
+ "showLegend": true
},
- {
- "alias": "limit",
- "color": "#E02F44",
- "fill": 0
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
}
- ],
- "spaceLength": 10,
- "span": 6,
- "stack": false,
- "steppedLine": false,
+ },
"targets": [
{
"expr": "sum by(pod) (rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"querier\"}[$__rate_interval]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"querier\", resource=\"cpu\"} > 0)",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "request",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "min(container_spec_cpu_quota{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"querier\"} / container_spec_cpu_period{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"querier\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "limit",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "CPU",
"tooltip": {
"sort": 2
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "request"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#FFC000",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "limit"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E02F44",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ }
+ ]
+ },
"gridPos": { },
"id": 8,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [
- {
- "alias": "request",
- "color": "#FFC000",
- "fill": 0
+ "options": {
+ "legend": {
+ "showLegend": true
},
- {
- "alias": "limit",
- "color": "#E02F44",
- "fill": 0
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
}
- ],
- "spaceLength": 10,
- "span": 6,
- "stack": false,
- "steppedLine": false,
+ },
"targets": [
{
"expr": "max by(pod) (container_memory_working_set_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"querier\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"querier\", resource=\"memory\"} > 0)",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "request",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "min(container_spec_memory_limit_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"querier\"} > 0)",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "limit",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Memory (workingset)",
"tooltip": {
"sort": 2
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "bytes",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": [ ]
+ },
"gridPos": { },
"id": 9,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
- "span": 6,
- "stack": false,
- "steppedLine": false,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"targets": [
{
"expr": "sum by(pod) (go_memstats_heap_inuse_bytes{cluster=~\"$cluster\", job=~\"($namespace)/querier\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Memory (go heap inuse)",
"tooltip": {
"sort": 2
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "bytes",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 10,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "Bps"
+ },
+ "overrides": [ ]
+ },
"gridPos": { },
"id": 10,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 0,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
- "span": 6,
- "stack": true,
- "steppedLine": false,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"targets": [
{
"expr": "sum by(instance, pod, device) (rate(node_disk_written_bytes_total[$__rate_interval])) + ignoring(pod) group_right() (label_replace(count by(instance, pod, device) (container_fs_writes_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"querier\", device!~\".*sda.*\"}), \"device\", \"$1\", \"device\", \"/dev/(.*)\") * 0)\n",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}} - {{device}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Disk Writes",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "Bps",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 10,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "Bps"
+ },
+ "overrides": [ ]
+ },
"gridPos": { },
"id": 11,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 0,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
- "span": 6,
- "stack": true,
- "steppedLine": false,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"targets": [
{
"expr": "sum by(instance, pod, device) (rate(node_disk_read_bytes_total[$__rate_interval])) + ignoring(pod) group_right() (label_replace(count by(instance, pod, device) (container_fs_writes_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"querier\", device!~\".*sda.*\"}), \"device\", \"$1\", \"device\", \"/dev/(.*)\") * 0)\n",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}} - {{device}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Disk Reads",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "Bps",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "percentunit"
+ },
+ "overrides": [ ]
+ },
"gridPos": { },
"id": 12,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
- "span": 6,
- "stack": false,
- "steppedLine": false,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"targets": [
{
"expr": "max by(persistentvolumeclaim) (kubelet_volume_stats_used_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\"} / kubelet_volume_stats_capacity_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\"}) and count by(persistentvolumeclaim) (kube_persistentvolumeclaim_labels{cluster=~\"$cluster\", namespace=~\"$namespace\",label_name=~\"querier.*\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{persistentvolumeclaim}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Disk Space Utilization",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "percentunit",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
}
],
"repeat": null,
@@ -1126,514 +961,397 @@
"collapsed": false,
"panels": [
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "request"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#FFC000",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "limit"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E02F44",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ }
+ ]
+ },
"gridPos": { },
"id": 13,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [
- {
- "alias": "request",
- "color": "#FFC000",
- "fill": 0
+ "options": {
+ "legend": {
+ "showLegend": true
},
- {
- "alias": "limit",
- "color": "#E02F44",
- "fill": 0
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
}
- ],
- "spaceLength": 10,
- "span": 6,
- "stack": false,
- "steppedLine": false,
+ },
"targets": [
{
"expr": "sum by(pod) (rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"index-gateway\"}[$__rate_interval]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"index-gateway\", resource=\"cpu\"} > 0)",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "request",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "min(container_spec_cpu_quota{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"index-gateway\"} / container_spec_cpu_period{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"index-gateway\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "limit",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "CPU",
"tooltip": {
"sort": 2
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "request"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#FFC000",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "limit"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E02F44",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ }
+ ]
+ },
"gridPos": { },
"id": 14,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [
- {
- "alias": "request",
- "color": "#FFC000",
- "fill": 0
+ "options": {
+ "legend": {
+ "showLegend": true
},
- {
- "alias": "limit",
- "color": "#E02F44",
- "fill": 0
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
}
- ],
- "spaceLength": 10,
- "span": 6,
- "stack": false,
- "steppedLine": false,
+ },
"targets": [
{
"expr": "max by(pod) (container_memory_working_set_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"index-gateway\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"index-gateway\", resource=\"memory\"} > 0)",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "request",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "min(container_spec_memory_limit_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"index-gateway\"} > 0)",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "limit",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Memory (workingset)",
"tooltip": {
"sort": 2
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "bytes",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": [ ]
+ },
"gridPos": { },
"id": 15,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
- "span": 6,
- "stack": false,
- "steppedLine": false,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"targets": [
{
"expr": "sum by(pod) (go_memstats_heap_inuse_bytes{cluster=~\"$cluster\", job=~\"($namespace)/index-gateway\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Memory (go heap inuse)",
"tooltip": {
"sort": 2
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "bytes",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 10,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "Bps"
+ },
+ "overrides": [ ]
+ },
"gridPos": { },
"id": 16,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 0,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
- "span": 6,
- "stack": true,
- "steppedLine": false,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"targets": [
{
"expr": "sum by(instance, pod, device) (rate(node_disk_written_bytes_total[$__rate_interval])) + ignoring(pod) group_right() (label_replace(count by(instance, pod, device) (container_fs_writes_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"index-gateway\", device!~\".*sda.*\"}), \"device\", \"$1\", \"device\", \"/dev/(.*)\") * 0)\n",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}} - {{device}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Disk Writes",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "Bps",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 10,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "Bps"
+ },
+ "overrides": [ ]
+ },
"gridPos": { },
"id": 17,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 0,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
- "span": 6,
- "stack": true,
- "steppedLine": false,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"targets": [
{
"expr": "sum by(instance, pod, device) (rate(node_disk_read_bytes_total[$__rate_interval])) + ignoring(pod) group_right() (label_replace(count by(instance, pod, device) (container_fs_writes_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"index-gateway\", device!~\".*sda.*\"}), \"device\", \"$1\", \"device\", \"/dev/(.*)\") * 0)\n",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}} - {{device}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Disk Reads",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "Bps",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "percentunit"
+ },
+ "overrides": [ ]
+ },
"gridPos": { },
"id": 18,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
- "span": 6,
- "stack": false,
- "steppedLine": false,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"targets": [
{
"expr": "max by(persistentvolumeclaim) (kubelet_volume_stats_used_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\"} / kubelet_volume_stats_capacity_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\"}) and count by(persistentvolumeclaim) (kube_persistentvolumeclaim_labels{cluster=~\"$cluster\", namespace=~\"$namespace\",label_name=~\"index-gateway.*\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{persistentvolumeclaim}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Disk Space Utilization",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "percentunit",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
}
],
"repeat": null,
@@ -1649,280 +1367,256 @@
"height": "250px",
"panels": [
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "request"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#FFC000",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "limit"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E02F44",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ }
+ ]
+ },
"id": 19,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [
- {
- "alias": "request",
- "color": "#FFC000",
- "fill": 0
+ "options": {
+ "legend": {
+ "showLegend": true
},
- {
- "alias": "limit",
- "color": "#E02F44",
- "fill": 0
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
}
- ],
- "spaceLength": 10,
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "sum by(pod) (rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"ingester\"}[$__rate_interval]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"ingester\", resource=\"cpu\"} > 0)",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "request",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "min(container_spec_cpu_quota{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"ingester\"} / container_spec_cpu_period{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"ingester\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "limit",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "CPU",
"tooltip": {
"sort": 2
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "request"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#FFC000",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "limit"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E02F44",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ }
+ ]
+ },
"id": 20,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [
- {
- "alias": "request",
- "color": "#FFC000",
- "fill": 0
+ "options": {
+ "legend": {
+ "showLegend": true
},
- {
- "alias": "limit",
- "color": "#E02F44",
- "fill": 0
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
}
- ],
- "spaceLength": 10,
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "max by(pod) (container_memory_working_set_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"ingester\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"ingester\", resource=\"memory\"} > 0)",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "request",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "min(container_spec_memory_limit_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"ingester\"} > 0)",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "limit",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Memory (workingset)",
"tooltip": {
"sort": 2
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "bytes",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": [ ]
+ },
"id": 21,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "sum by(pod) (go_memstats_heap_inuse_bytes{cluster=~\"$cluster\", job=~\"($namespace)/ingester.+\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Memory (go heap inuse)",
"tooltip": {
"sort": 2
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "bytes",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
}
],
"repeat": null,
@@ -1937,360 +1631,303 @@
"collapsed": false,
"panels": [
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
+ },
"gridPos": { },
"id": 22,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
- "span": 6,
- "stack": false,
- "steppedLine": false,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"targets": [
{
"expr": "sum by(pod) (loki_prometheus_rule_group_rules{cluster=~\"$cluster\", job=~\"($namespace)/ruler\"}) or sum by(pod) (cortex_prometheus_rule_group_rules{cluster=~\"$cluster\", job=~\"($namespace)/ruler\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Rules",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "request"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#FFC000",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "limit"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E02F44",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ }
+ ]
+ },
"gridPos": { },
"id": 23,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [
- {
- "alias": "request",
- "color": "#FFC000",
- "fill": 0
+ "options": {
+ "legend": {
+ "showLegend": true
},
- {
- "alias": "limit",
- "color": "#E02F44",
- "fill": 0
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
}
- ],
- "spaceLength": 10,
- "span": 6,
- "stack": false,
- "steppedLine": false,
+ },
"targets": [
{
"expr": "sum by(pod) (rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"ruler\"}[$__rate_interval]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"ruler\", resource=\"cpu\"} > 0)",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "request",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "min(container_spec_cpu_quota{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"ruler\"} / container_spec_cpu_period{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"ruler\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "limit",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "CPU",
"tooltip": {
"sort": 2
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "request"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#FFC000",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "limit"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E02F44",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ }
+ ]
+ },
"gridPos": { },
"id": 24,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [
- {
- "alias": "request",
- "color": "#FFC000",
- "fill": 0
+ "options": {
+ "legend": {
+ "showLegend": true
},
- {
- "alias": "limit",
- "color": "#E02F44",
- "fill": 0
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
}
- ],
- "spaceLength": 10,
- "span": 6,
- "stack": false,
- "steppedLine": false,
+ },
"targets": [
{
"expr": "max by(pod) (container_memory_working_set_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"ruler\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"ruler\", resource=\"memory\"} > 0)",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "request",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "min(container_spec_memory_limit_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"ruler\"} > 0)",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "limit",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Memory (workingset)",
"tooltip": {
"sort": 2
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "bytes",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": [ ]
+ },
"gridPos": { },
"id": 25,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
- "span": 6,
- "stack": false,
- "steppedLine": false,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"targets": [
{
"expr": "sum by(pod) (go_memstats_heap_inuse_bytes{cluster=~\"$cluster\", job=~\"($namespace)/ruler\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Memory (go heap inuse)",
"tooltip": {
"sort": 2
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "bytes",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
}
],
"repeat": null,
@@ -2315,7 +1952,7 @@
"value": "default"
},
"hide": 0,
- "label": "Data Source",
+ "label": "Data source",
"name": "datasource",
"options": [ ],
"query": "prometheus",
diff --git a/production/loki-mixin-compiled/dashboards/loki-reads.json b/production/loki-mixin-compiled/dashboards/loki-reads.json
index f8d38ba9d484d..c28b57b76cc71 100644
--- a/production/loki-mixin-compiled/dashboards/loki-reads.json
+++ b/production/loki-mixin-compiled/dashboards/loki-reads.json
@@ -33,113 +33,234 @@
"3xx": "#6ED0E0",
"4xx": "#EF843C",
"5xx": "#E24D42",
+ "OK": "#7EB26D",
+ "cancel": "#A9A9A9",
"error": "#E24D42",
"success": "#7EB26D"
},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "1xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#EAB839",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "2xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "3xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#6ED0E0",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "4xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#EF843C",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "5xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E24D42",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "OK"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "cancel"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#A9A9A9",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "error"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E24D42",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "success"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
"fill": 10,
"id": 1,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
"linewidth": 0,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
"stack": true,
- "steppedLine": false,
"targets": [
{
- "expr": "sum by (status) (\n label_replace(label_replace(rate(loki_request_duration_seconds_count{cluster=~\"$cluster\",job=~\"($namespace)/query-frontend\", route=~\"loki_api_v1_series|api_prom_series|api_prom_query|api_prom_label|api_prom_label_name_values|loki_api_v1_query|loki_api_v1_query_range|loki_api_v1_labels|loki_api_v1_label_name_values\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n \"status\", \"${1}\", \"status_code\", \"([a-z]+)\"))\n",
+ "expr": "sum by (status) (\n label_replace(label_replace(rate(loki_request_duration_seconds_count{cluster=~\"$cluster\",job=~\"($namespace)/query-frontend\", route=~\"loki_api_v1_series|api_prom_series|api_prom_query|api_prom_label|api_prom_label_name_values|loki_api_v1_query|loki_api_v1_query_range|loki_api_v1_labels|loki_api_v1_label_name_values\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n \"status\", \"${1}\", \"status_code\", \"([a-zA-Z]+)\"))\n",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{status}}",
- "refId": "A",
- "step": 10
+ "refId": "A"
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "QPS",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 2,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "ms"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 2,
"links": [ ],
"nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "histogram_quantile(0.99, sum by (le,route) (cluster_job_route:loki_request_duration_seconds_bucket:sum_rate{cluster=~\"$cluster\", job=~\"($namespace)/query-frontend\", route=~\"loki_api_v1_series|api_prom_series|api_prom_query|api_prom_label|api_prom_label_name_values|loki_api_v1_query|loki_api_v1_query_range|loki_api_v1_labels|loki_api_v1_label_name_values\"})) * 1e3",
@@ -166,23 +287,8 @@
"step": 10
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Latency",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
+ "type": "timeseries",
"yaxes": [
{
"format": "ms",
@@ -203,35 +309,42 @@
]
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 3,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "ms"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 3,
"links": [ ],
"nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "histogram_quantile(0.99, sum(rate(loki_request_duration_seconds_bucket{cluster=~\"$cluster\", job=~\"($namespace)/query-frontend\", route=~\"loki_api_v1_series|api_prom_series|api_prom_query|api_prom_label|api_prom_label_name_values|loki_api_v1_query|loki_api_v1_query_range|loki_api_v1_labels|loki_api_v1_label_name_values\"}[$__rate_interval])) by (le,pod)) * 1e3",
@@ -243,41 +356,8 @@
"step": 10
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Per Pod Latency (p99)",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "ms",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
}
],
"repeat": null,
@@ -298,113 +378,234 @@
"3xx": "#6ED0E0",
"4xx": "#EF843C",
"5xx": "#E24D42",
+ "OK": "#7EB26D",
+ "cancel": "#A9A9A9",
"error": "#E24D42",
"success": "#7EB26D"
},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "1xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#EAB839",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "2xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "3xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#6ED0E0",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "4xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#EF843C",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "5xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E24D42",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "OK"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "cancel"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#A9A9A9",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "error"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E24D42",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "success"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
"fill": 10,
"id": 4,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
"linewidth": 0,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
"stack": true,
- "steppedLine": false,
"targets": [
{
- "expr": "sum by (status) (\n label_replace(label_replace(rate(loki_request_duration_seconds_count{cluster=~\"$cluster\",job=~\"($namespace)/querier\", route=~\"loki_api_v1_series|api_prom_series|api_prom_query|api_prom_label|api_prom_label_name_values|loki_api_v1_query|loki_api_v1_query_range|loki_api_v1_labels|loki_api_v1_label_name_values\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n \"status\", \"${1}\", \"status_code\", \"([a-z]+)\"))\n",
+ "expr": "sum by (status) (\n label_replace(label_replace(rate(loki_request_duration_seconds_count{cluster=~\"$cluster\",job=~\"($namespace)/querier\", route=~\"loki_api_v1_series|api_prom_series|api_prom_query|api_prom_label|api_prom_label_name_values|loki_api_v1_query|loki_api_v1_query_range|loki_api_v1_labels|loki_api_v1_label_name_values\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n \"status\", \"${1}\", \"status_code\", \"([a-zA-Z]+)\"))\n",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{status}}",
- "refId": "A",
- "step": 10
+ "refId": "A"
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "QPS",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 5,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "ms"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 5,
"links": [ ],
"nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "histogram_quantile(0.99, sum by (le,route) (cluster_job_route:loki_request_duration_seconds_bucket:sum_rate{cluster=~\"$cluster\", job=~\"($namespace)/querier\", route=~\"loki_api_v1_series|api_prom_series|api_prom_query|api_prom_label|api_prom_label_name_values|loki_api_v1_query|loki_api_v1_query_range|loki_api_v1_labels|loki_api_v1_label_name_values\"})) * 1e3",
@@ -431,23 +632,8 @@
"step": 10
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Latency",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
+ "type": "timeseries",
"yaxes": [
{
"format": "ms",
@@ -468,35 +654,42 @@
]
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 6,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "ms"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 6,
"links": [ ],
"nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "histogram_quantile(0.99, sum(rate(loki_request_duration_seconds_bucket{cluster=~\"$cluster\", job=~\"($namespace)/querier\", route=~\"loki_api_v1_series|api_prom_series|api_prom_query|api_prom_label|api_prom_label_name_values|loki_api_v1_query|loki_api_v1_query_range|loki_api_v1_labels|loki_api_v1_label_name_values\"}[$__rate_interval])) by (le,pod)) * 1e3",
@@ -508,41 +701,8 @@
"step": 10
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Per Pod Latency (p99)",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "ms",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
}
],
"repeat": null,
@@ -563,113 +723,234 @@
"3xx": "#6ED0E0",
"4xx": "#EF843C",
"5xx": "#E24D42",
+ "OK": "#7EB26D",
+ "cancel": "#A9A9A9",
"error": "#E24D42",
"success": "#7EB26D"
},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "1xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#EAB839",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "2xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "3xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#6ED0E0",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "4xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#EF843C",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "5xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E24D42",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "OK"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "cancel"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#A9A9A9",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "error"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E24D42",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "success"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
"fill": 10,
"id": 7,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
"linewidth": 0,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
"stack": true,
- "steppedLine": false,
"targets": [
{
- "expr": "sum by (status) (\n label_replace(label_replace(rate(loki_request_duration_seconds_count{cluster=~\"$cluster\",job=~\"($namespace)/ingester\", route=~\"/logproto.Querier/Query|/logproto.Querier/Label|/logproto.Querier/Series|/logproto.Querier/QuerySample|/logproto.Querier/GetChunkIDs\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n \"status\", \"${1}\", \"status_code\", \"([a-z]+)\"))\n",
+ "expr": "sum by (status) (\n label_replace(label_replace(rate(loki_request_duration_seconds_count{cluster=~\"$cluster\",job=~\"($namespace)/ingester\", route=~\"/logproto.Querier/Query|/logproto.Querier/Label|/logproto.Querier/Series|/logproto.Querier/QuerySample|/logproto.Querier/GetChunkIDs\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n \"status\", \"${1}\", \"status_code\", \"([a-zA-Z]+)\"))\n",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{status}}",
- "refId": "A",
- "step": 10
+ "refId": "A"
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "QPS",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 8,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "ms"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 8,
"links": [ ],
"nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "histogram_quantile(0.99, sum by (le,route) (cluster_job_route:loki_request_duration_seconds_bucket:sum_rate{cluster=~\"$cluster\", job=~\"($namespace)/ingester\", route=~\"/logproto.Querier/Query|/logproto.Querier/Label|/logproto.Querier/Series|/logproto.Querier/QuerySample|/logproto.Querier/GetChunkIDs\"})) * 1e3",
@@ -696,23 +977,8 @@
"step": 10
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Latency",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
+ "type": "timeseries",
"yaxes": [
{
"format": "ms",
@@ -733,35 +999,42 @@
]
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 9,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "ms"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 9,
"links": [ ],
"nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "histogram_quantile(0.99, sum(rate(loki_request_duration_seconds_bucket{cluster=~\"$cluster\", job=~\"($namespace)/ingester\", route=~\"/logproto.Querier/Query|/logproto.Querier/Label|/logproto.Querier/Series|/logproto.Querier/QuerySample|/logproto.Querier/GetChunkIDs\"}[$__rate_interval])) by (le,pod)) * 1e3",
@@ -773,41 +1046,8 @@
"step": 10
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Per Pod Latency (p99)",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "ms",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
}
],
"repeat": null,
@@ -828,113 +1068,234 @@
"3xx": "#6ED0E0",
"4xx": "#EF843C",
"5xx": "#E24D42",
+ "OK": "#7EB26D",
+ "cancel": "#A9A9A9",
"error": "#E24D42",
"success": "#7EB26D"
},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "1xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#EAB839",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "2xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "3xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#6ED0E0",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "4xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#EF843C",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "5xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E24D42",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "OK"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "cancel"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#A9A9A9",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "error"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E24D42",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "success"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
"fill": 10,
"id": 10,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
"linewidth": 0,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
"stack": true,
- "steppedLine": false,
"targets": [
{
- "expr": "sum by (status) (\n label_replace(label_replace(rate(loki_request_duration_seconds_count{cluster=~\"$cluster\",job=~\"($namespace)/ingester-zone.*\", route=~\"/logproto.Querier/Query|/logproto.Querier/Label|/logproto.Querier/Series|/logproto.Querier/QuerySample|/logproto.Querier/GetChunkIDs\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n \"status\", \"${1}\", \"status_code\", \"([a-z]+)\"))\n",
+ "expr": "sum by (status) (\n label_replace(label_replace(rate(loki_request_duration_seconds_count{cluster=~\"$cluster\",job=~\"($namespace)/ingester-zone.*\", route=~\"/logproto.Querier/Query|/logproto.Querier/Label|/logproto.Querier/Series|/logproto.Querier/QuerySample|/logproto.Querier/GetChunkIDs\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n \"status\", \"${1}\", \"status_code\", \"([a-zA-Z]+)\"))\n",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{status}}",
- "refId": "A",
- "step": 10
+ "refId": "A"
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "QPS",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 11,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "ms"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 11,
"links": [ ],
"nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "histogram_quantile(0.99, sum by (le,route) (cluster_job_route:loki_request_duration_seconds_bucket:sum_rate{cluster=~\"$cluster\", job=~\"($namespace)/ingester-zone.*\", route=~\"/logproto.Querier/Query|/logproto.Querier/Label|/logproto.Querier/Series|/logproto.Querier/QuerySample|/logproto.Querier/GetChunkIDs\"})) * 1e3",
@@ -961,23 +1322,8 @@
"step": 10
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Latency",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
+ "type": "timeseries",
"yaxes": [
{
"format": "ms",
@@ -998,35 +1344,42 @@
]
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 12,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "ms"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 12,
"links": [ ],
"nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "histogram_quantile(0.99, sum(rate(loki_request_duration_seconds_bucket{cluster=~\"$cluster\", job=~\"($namespace)/ingester-zone.*\", route=~\"/logproto.Querier/Query|/logproto.Querier/Label|/logproto.Querier/Series|/logproto.Querier/QuerySample|/logproto.Querier/GetChunkIDs\"}[$__rate_interval])) by (le,pod)) * 1e3",
@@ -1038,41 +1391,8 @@
"step": 10
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Per Pod Latency (p99)",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "ms",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
}
],
"repeat": null,
@@ -1093,156 +1413,256 @@
"3xx": "#6ED0E0",
"4xx": "#EF843C",
"5xx": "#E24D42",
+ "OK": "#7EB26D",
+ "cancel": "#A9A9A9",
"error": "#E24D42",
"success": "#7EB26D"
},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "1xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#EAB839",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "2xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "3xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#6ED0E0",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "4xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#EF843C",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "5xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E24D42",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "OK"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "cancel"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#A9A9A9",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "error"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E24D42",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "success"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
"fill": 10,
"id": 13,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
"linewidth": 0,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
"stack": true,
- "steppedLine": false,
"targets": [
{
- "expr": "sum by (status) (\n label_replace(label_replace(rate(loki_index_request_duration_seconds_count{cluster=~\"$cluster\",job=~\"($namespace)/querier\", operation!=\"index_chunk\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n \"status\", \"${1}\", \"status_code\", \"([a-z]+)\"))\n",
+ "expr": "sum by (status) (\n label_replace(label_replace(rate(loki_index_request_duration_seconds_count{cluster=~\"$cluster\",job=~\"($namespace)/querier\", operation!=\"index_chunk\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n \"status\", \"${1}\", \"status_code\", \"([a-zA-Z]+)\"))\n",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{status}}",
- "refId": "A",
- "step": 10
+ "refId": "A"
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "QPS",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 14,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "ms"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 14,
"links": [ ],
"nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "histogram_quantile(0.99, sum(rate(loki_index_request_duration_seconds_bucket{cluster=~\"$cluster\",job=~\"($namespace)/querier\", operation!=\"index_chunk\"}[$__rate_interval])) by (le)) * 1e3",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "99th Percentile",
- "refId": "A",
- "step": 10
+ "refId": "A"
},
{
"expr": "histogram_quantile(0.50, sum(rate(loki_index_request_duration_seconds_bucket{cluster=~\"$cluster\",job=~\"($namespace)/querier\", operation!=\"index_chunk\"}[$__rate_interval])) by (le)) * 1e3",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "50th Percentile",
- "refId": "B",
- "step": 10
+ "refId": "B"
},
{
"expr": "sum(rate(loki_index_request_duration_seconds_sum{cluster=~\"$cluster\",job=~\"($namespace)/querier\", operation!=\"index_chunk\"}[$__rate_interval])) * 1e3 / sum(rate(loki_index_request_duration_seconds_count{cluster=~\"$cluster\",job=~\"($namespace)/querier\", operation!=\"index_chunk\"}[$__rate_interval]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "Average",
- "refId": "C",
- "step": 10
+ "refId": "C"
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Latency",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
+ "type": "timeseries",
"yaxes": [
{
"format": "ms",
@@ -1263,35 +1683,42 @@
]
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 15,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "ms"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 15,
"links": [ ],
"nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "histogram_quantile(0.99, sum(rate(loki_index_request_duration_seconds_bucket{cluster=~\"$cluster\",job=~\"($namespace)/querier\", operation!=\"index_chunk\"}[$__rate_interval])) by (le,pod)) * 1e3",
@@ -1303,41 +1730,8 @@
"step": 10
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Per Pod Latency (p99)",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "ms",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
}
],
"repeat": null,
@@ -1358,156 +1752,256 @@
"3xx": "#6ED0E0",
"4xx": "#EF843C",
"5xx": "#E24D42",
+ "OK": "#7EB26D",
+ "cancel": "#A9A9A9",
"error": "#E24D42",
"success": "#7EB26D"
},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "1xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#EAB839",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "2xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "3xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#6ED0E0",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "4xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#EF843C",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "5xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E24D42",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "OK"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "cancel"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#A9A9A9",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "error"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E24D42",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "success"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
"fill": 10,
"id": 16,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
"linewidth": 0,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
"stack": true,
- "steppedLine": false,
"targets": [
{
- "expr": "sum by (status) (\n label_replace(label_replace(rate(loki_boltdb_shipper_request_duration_seconds_count{cluster=~\"$cluster\",job=~\"($namespace)/(querier|index-gateway)\", operation=\"Shipper.Query\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n \"status\", \"${1}\", \"status_code\", \"([a-z]+)\"))\n",
+ "expr": "sum by (status) (\n label_replace(label_replace(rate(loki_boltdb_shipper_request_duration_seconds_count{cluster=~\"$cluster\",job=~\"($namespace)/(querier|index-gateway)\", operation=\"Shipper.Query\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n \"status\", \"${1}\", \"status_code\", \"([a-zA-Z]+)\"))\n",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{status}}",
- "refId": "A",
- "step": 10
+ "refId": "A"
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "QPS",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 17,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "ms"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 17,
"links": [ ],
"nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "histogram_quantile(0.99, sum(rate(loki_boltdb_shipper_request_duration_seconds_bucket{cluster=~\"$cluster\",job=~\"($namespace)/(querier|index-gateway)\", operation=\"Shipper.Query\"}[$__rate_interval])) by (le)) * 1e3",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "99th Percentile",
- "refId": "A",
- "step": 10
+ "refId": "A"
},
{
"expr": "histogram_quantile(0.50, sum(rate(loki_boltdb_shipper_request_duration_seconds_bucket{cluster=~\"$cluster\",job=~\"($namespace)/(querier|index-gateway)\", operation=\"Shipper.Query\"}[$__rate_interval])) by (le)) * 1e3",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "50th Percentile",
- "refId": "B",
- "step": 10
+ "refId": "B"
},
{
"expr": "sum(rate(loki_boltdb_shipper_request_duration_seconds_sum{cluster=~\"$cluster\",job=~\"($namespace)/(querier|index-gateway)\", operation=\"Shipper.Query\"}[$__rate_interval])) * 1e3 / sum(rate(loki_boltdb_shipper_request_duration_seconds_count{cluster=~\"$cluster\",job=~\"($namespace)/(querier|index-gateway)\", operation=\"Shipper.Query\"}[$__rate_interval]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "Average",
- "refId": "C",
- "step": 10
+ "refId": "C"
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Latency",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
+ "type": "timeseries",
"yaxes": [
{
"format": "ms",
@@ -1528,35 +2022,42 @@
]
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 18,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "ms"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 18,
"links": [ ],
"nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "histogram_quantile(0.99, sum(rate(loki_boltdb_shipper_request_duration_seconds_bucket{cluster=~\"$cluster\",job=~\"($namespace)/(querier|index-gateway)\", operation=\"Shipper.Query\"}[$__rate_interval])) by (le,pod)) * 1e3",
@@ -1568,41 +2069,8 @@
"step": 10
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Per Pod Latency (p99)",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "ms",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
}
],
"repeat": null,
@@ -1626,7 +2094,7 @@
"value": "default"
},
"hide": 0,
- "label": "Data Source",
+ "label": "Data source",
"name": "datasource",
"options": [ ],
"query": "prometheus",
diff --git a/production/loki-mixin-compiled/dashboards/loki-retention.json b/production/loki-mixin-compiled/dashboards/loki-retention.json
index a266d15734208..70c5171d9c391 100644
--- a/production/loki-mixin-compiled/dashboards/loki-retention.json
+++ b/production/loki-mixin-compiled/dashboards/loki-retention.json
@@ -27,280 +27,256 @@
"height": "250px",
"panels": [
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 1,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "request"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#FFC000",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "limit"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E02F44",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ }
+ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 1,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [
- {
- "alias": "request",
- "color": "#FFC000",
- "fill": 0
+ "options": {
+ "legend": {
+ "showLegend": true
},
- {
- "alias": "limit",
- "color": "#E02F44",
- "fill": 0
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
}
- ],
- "spaceLength": 10,
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "sum by(pod) (rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"compactor\"}[$__rate_interval]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"compactor\", resource=\"cpu\"} > 0)",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "request",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "min(container_spec_cpu_quota{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"compactor\"} / container_spec_cpu_period{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"compactor\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "limit",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "CPU",
"tooltip": {
"sort": 2
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 2,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "request"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#FFC000",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "limit"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E02F44",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ }
+ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 2,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [
- {
- "alias": "request",
- "color": "#FFC000",
- "fill": 0
+ "options": {
+ "legend": {
+ "showLegend": true
},
- {
- "alias": "limit",
- "color": "#E02F44",
- "fill": 0
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
}
- ],
- "spaceLength": 10,
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "max by(pod) (container_memory_working_set_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"compactor\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"compactor\", resource=\"memory\"} > 0)",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "request",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "min(container_spec_memory_limit_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"compactor\"} > 0)",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "limit",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Memory (workingset)",
"tooltip": {
"sort": 2
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "bytes",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 3,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 3,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "sum by(pod) (go_memstats_heap_inuse_bytes{cluster=~\"$cluster\", job=~\"($namespace)/compactor\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Memory (go heap inuse)",
"tooltip": {
"sort": 2
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "bytes",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
}
],
"repeat": null,
@@ -423,80 +399,51 @@
]
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 5,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "s"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 5,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "loki_boltdb_shipper_compact_tables_operation_duration_seconds{cluster=~\"$cluster\", namespace=~\"$namespace\"}",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "duration",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Compact Tables Operations Duration",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "s",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
}
],
"repeat": null,
@@ -511,156 +458,98 @@
"height": "250px",
"panels": [
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 6,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 6,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "sum(increase(loki_compactor_skipped_compacting_locked_table_total{cluster=~\"$cluster\", namespace=~\"$namespace\"}[$__range]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{table_name}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Number of times Tables were skipped during Compaction",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 7,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 7,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "sum by (status)(rate(loki_boltdb_shipper_compact_tables_operation_total{cluster=~\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{success}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Compact Tables Operations Per Status",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
}
],
"repeat": null,
@@ -783,156 +672,98 @@
]
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 9,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "s"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 9,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "loki_compactor_apply_retention_operation_duration_seconds{cluster=~\"$cluster\", namespace=~\"$namespace\"}",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "duration",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Mark Operations Duration",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "s",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 10,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 10,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "sum by (status)(rate(loki_compactor_apply_retention_operation_total{cluster=~\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{success}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Mark Operations Per Status",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
}
],
"repeat": null,
@@ -947,232 +778,145 @@
"height": "250px",
"panels": [
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 10,
- "id": 11,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 0,
+ "id": 11,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
- "stack": true,
- "steppedLine": false,
"targets": [
{
"expr": "count by(action)(loki_boltdb_shipper_retention_marker_table_processed_total{cluster=~\"$cluster\", namespace=~\"$namespace\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{action}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Processed Tables Per Action",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 10,
- "id": 12,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 0,
+ "id": 12,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
- "stack": true,
- "steppedLine": false,
"targets": [
{
"expr": "count by(table,action)(loki_boltdb_shipper_retention_marker_table_processed_total{cluster=~\"$cluster\", namespace=~\"$namespace\" , action=~\"modified|deleted\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{table}}-{{action}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Modified Tables",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 10,
- "id": 13,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 0,
+ "id": 13,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
- "stack": true,
- "steppedLine": false,
"targets": [
{
"expr": "sum by (table)(rate(loki_boltdb_shipper_retention_marker_count_total{cluster=~\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) >0",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{table}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Marks Creation Rate Per Table",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
}
],
"repeat": null,
@@ -1187,154 +931,113 @@
"height": "250px",
"panels": [
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
+ },
"format": "short",
"id": 14,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "sum (increase(loki_boltdb_shipper_retention_marker_count_total{cluster=~\"$cluster\", namespace=~\"$namespace\"}[24h]))",
"format": "time_series",
"instant": true,
- "intervalFactor": 2,
"refId": "A"
}
],
"thresholds": "70,80",
- "timeFrom": null,
- "timeShift": null,
"title": "Marked Chunks (24h)",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "singlestat",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "singlestat"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 15,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 15,
"links": [ ],
"nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "histogram_quantile(0.99, sum(rate(loki_boltdb_shipper_retention_marker_table_processed_duration_seconds_bucket{cluster=~\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (le)) * 1e3",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "99th Percentile",
- "refId": "A",
- "step": 10
+ "refId": "A"
},
{
"expr": "histogram_quantile(0.50, sum(rate(loki_boltdb_shipper_retention_marker_table_processed_duration_seconds_bucket{cluster=~\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (le)) * 1e3",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "50th Percentile",
- "refId": "B",
- "step": 10
+ "refId": "B"
},
{
"expr": "sum(rate(loki_boltdb_shipper_retention_marker_table_processed_duration_seconds_sum{cluster=~\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) * 1e3 / sum(rate(loki_boltdb_shipper_retention_marker_table_processed_duration_seconds_count{cluster=~\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "Average",
- "refId": "C",
- "step": 10
+ "refId": "C"
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Mark Table Latency",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
+ "type": "timeseries",
"yaxes": [
{
"format": "ms",
@@ -1367,154 +1070,113 @@
"height": "250px",
"panels": [
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
+ },
"format": "short",
"id": 16,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "sum (increase(loki_boltdb_shipper_retention_sweeper_chunk_deleted_duration_seconds_count{cluster=~\"$cluster\", namespace=~\"$namespace\"}[24h]))",
"format": "time_series",
- "instant": true,
- "intervalFactor": 2,
- "refId": "A"
- }
- ],
- "thresholds": "70,80",
- "timeFrom": null,
- "timeShift": null,
- "title": "Delete Chunks (24h)",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "singlestat",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
- },
- {
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "$datasource",
- "fill": 1,
- "id": 17,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "instant": true,
+ "refId": "A"
+ }
+ ],
+ "thresholds": "70,80",
+ "title": "Delete Chunks (24h)",
+ "type": "singlestat"
+ },
+ {
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 17,
"links": [ ],
"nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "histogram_quantile(0.99, sum(rate(loki_boltdb_shipper_retention_sweeper_chunk_deleted_duration_seconds_bucket{cluster=~\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (le)) * 1e3",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "99th Percentile",
- "refId": "A",
- "step": 10
+ "refId": "A"
},
{
"expr": "histogram_quantile(0.50, sum(rate(loki_boltdb_shipper_retention_sweeper_chunk_deleted_duration_seconds_bucket{cluster=~\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (le)) * 1e3",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "50th Percentile",
- "refId": "B",
- "step": 10
+ "refId": "B"
},
{
"expr": "sum(rate(loki_boltdb_shipper_retention_sweeper_chunk_deleted_duration_seconds_sum{cluster=~\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) * 1e3 / sum(rate(loki_boltdb_shipper_retention_sweeper_chunk_deleted_duration_seconds_count{cluster=~\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "Average",
- "refId": "C",
- "step": 10
+ "refId": "C"
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Delete Latency",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
+ "type": "timeseries",
"yaxes": [
{
"format": "ms",
@@ -1547,232 +1209,145 @@
"height": "250px",
"panels": [
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 18,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "s"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 18,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "time() - (loki_boltdb_shipper_retention_sweeper_marker_file_processing_current_time{cluster=~\"$cluster\", namespace=~\"$namespace\"} > 0)",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "lag",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Sweeper Lag",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "s",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 19,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 19,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "sum(loki_boltdb_shipper_retention_sweeper_marker_files_current{cluster=~\"$cluster\", namespace=~\"$namespace\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "count",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Marks Files to Process",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 20,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 20,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "sum by (status)(rate(loki_boltdb_shipper_retention_sweeper_chunk_deleted_duration_seconds_count{cluster=~\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{status}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Delete Rate Per Status",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
}
],
"repeat": null,
@@ -1821,7 +1396,7 @@
"value": "default"
},
"hide": 0,
- "label": "Data Source",
+ "label": "Data source",
"name": "datasource",
"options": [ ],
"query": "prometheus",
diff --git a/production/loki-mixin-compiled/dashboards/loki-writes-resources.json b/production/loki-mixin-compiled/dashboards/loki-writes-resources.json
index be48a1fe484cc..40e21484ee549 100644
--- a/production/loki-mixin-compiled/dashboards/loki-writes-resources.json
+++ b/production/loki-mixin-compiled/dashboards/loki-writes-resources.json
@@ -27,280 +27,256 @@
"height": "250px",
"panels": [
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 1,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "request"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#FFC000",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "limit"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E02F44",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ }
+ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 1,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [
- {
- "alias": "request",
- "color": "#FFC000",
- "fill": 0
+ "options": {
+ "legend": {
+ "showLegend": true
},
- {
- "alias": "limit",
- "color": "#E02F44",
- "fill": 0
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
}
- ],
- "spaceLength": 10,
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "sum by(pod) (rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"distributor\"}[$__rate_interval]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"distributor\", resource=\"cpu\"} > 0)",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "request",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "min(container_spec_cpu_quota{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"distributor\"} / container_spec_cpu_period{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"distributor\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "limit",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "CPU",
"tooltip": {
"sort": 2
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 2,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "request"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#FFC000",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "limit"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E02F44",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ }
+ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 2,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [
- {
- "alias": "request",
- "color": "#FFC000",
- "fill": 0
+ "options": {
+ "legend": {
+ "showLegend": true
},
- {
- "alias": "limit",
- "color": "#E02F44",
- "fill": 0
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
}
- ],
- "spaceLength": 10,
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "max by(pod) (container_memory_working_set_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"distributor\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"distributor\", resource=\"memory\"} > 0)",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "request",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "min(container_spec_memory_limit_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=~\"distributor\"} > 0)",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "limit",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Memory (workingset)",
"tooltip": {
"sort": 2
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "bytes",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 3,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 3,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 4,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "sum by(pod) (go_memstats_heap_inuse_bytes{cluster=~\"$cluster\", job=~\"($namespace)/distributor\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Memory (go heap inuse)",
"tooltip": {
"sort": 2
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "bytes",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
}
],
"repeat": null,
@@ -315,589 +291,447 @@
"collapsed": false,
"panels": [
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
+ },
"gridPos": { },
"id": 4,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
- "span": 6,
- "stack": false,
- "steppedLine": false,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"targets": [
{
"expr": "sum by(pod) (loki_ingester_memory_streams{cluster=~\"$cluster\", job=~\"($namespace)/ingester.*\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "In-memory streams",
"tooltip": {
"sort": 2
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "request"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#FFC000",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "limit"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E02F44",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ }
+ ]
+ },
"gridPos": { },
"id": 5,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [
- {
- "alias": "request",
- "color": "#FFC000",
- "fill": 0
+ "options": {
+ "legend": {
+ "showLegend": true
},
- {
- "alias": "limit",
- "color": "#E02F44",
- "fill": 0
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
}
- ],
- "spaceLength": 10,
- "span": 6,
- "stack": false,
- "steppedLine": false,
+ },
"targets": [
{
"expr": "sum by(pod) (rate(container_cpu_usage_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"ingester\"}[$__rate_interval]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"ingester\", resource=\"cpu\"} > 0)",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "request",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "min(container_spec_cpu_quota{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"ingester\"} / container_spec_cpu_period{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"ingester\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "limit",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "CPU",
"tooltip": {
"sort": 2
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "request"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#FFC000",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "limit"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E02F44",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.fillOpacity",
+ "value": 0
+ }
+ ]
+ }
+ ]
+ },
"gridPos": { },
"id": 6,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [
- {
- "alias": "request",
- "color": "#FFC000",
- "fill": 0
+ "options": {
+ "legend": {
+ "showLegend": true
},
- {
- "alias": "limit",
- "color": "#E02F44",
- "fill": 0
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
}
- ],
- "spaceLength": 10,
- "span": 6,
- "stack": false,
- "steppedLine": false,
+ },
"targets": [
{
"expr": "max by(pod) (container_memory_working_set_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"ingester\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "min(kube_pod_container_resource_requests{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"ingester\", resource=\"memory\"} > 0)",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "request",
- "legendLink": null,
- "step": 10
+ "legendLink": null
},
{
"expr": "min(container_spec_memory_limit_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"ingester\"} > 0)",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "limit",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Memory (workingset)",
"tooltip": {
"sort": 2
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "bytes",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": [ ]
+ },
"gridPos": { },
"id": 7,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
- "span": 6,
- "stack": false,
- "steppedLine": false,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"targets": [
{
"expr": "sum by(pod) (go_memstats_heap_inuse_bytes{cluster=~\"$cluster\", job=~\"($namespace)/ingester.*\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Memory (go heap inuse)",
"tooltip": {
"sort": 2
},
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "bytes",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 10,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "Bps"
+ },
+ "overrides": [ ]
+ },
"gridPos": { },
"id": 8,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 0,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
- "span": 6,
- "stack": true,
- "steppedLine": false,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"targets": [
{
"expr": "sum by(instance, pod, device) (rate(node_disk_written_bytes_total[$__rate_interval])) + ignoring(pod) group_right() (label_replace(count by(instance, pod, device) (container_fs_writes_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"ingester\", device!~\".*sda.*\"}), \"device\", \"$1\", \"device\", \"/dev/(.*)\") * 0)\n",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}} - {{device}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Disk Writes",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "Bps",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 10,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "Bps"
+ },
+ "overrides": [ ]
+ },
"gridPos": { },
"id": 9,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 0,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
- "span": 6,
- "stack": true,
- "steppedLine": false,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"targets": [
{
"expr": "sum by(instance, pod, device) (rate(node_disk_read_bytes_total[$__rate_interval])) + ignoring(pod) group_right() (label_replace(count by(instance, pod, device) (container_fs_writes_bytes_total{cluster=~\"$cluster\", namespace=~\"$namespace\", container=\"ingester\", device!~\".*sda.*\"}), \"device\", \"$1\", \"device\", \"/dev/(.*)\") * 0)\n",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{pod}} - {{device}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Disk Reads",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "Bps",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "percentunit"
+ },
+ "overrides": [ ]
+ },
"gridPos": { },
"id": 10,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
- "span": 6,
- "stack": false,
- "steppedLine": false,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"targets": [
{
"expr": "max by(persistentvolumeclaim) (kubelet_volume_stats_used_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\"} / kubelet_volume_stats_capacity_bytes{cluster=~\"$cluster\", namespace=~\"$namespace\"}) and count by(persistentvolumeclaim) (kube_persistentvolumeclaim_labels{cluster=~\"$cluster\", namespace=~\"$namespace\",label_name=~\"ingester.*.*\"})",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{persistentvolumeclaim}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Disk Space Utilization",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "percentunit",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
}
],
"repeat": null,
@@ -922,7 +756,7 @@
"value": "default"
},
"hide": 0,
- "label": "Data Source",
+ "label": "Data source",
"name": "datasource",
"options": [ ],
"query": "prometheus",
diff --git a/production/loki-mixin-compiled/dashboards/loki-writes.json b/production/loki-mixin-compiled/dashboards/loki-writes.json
index 44528b02c5531..be1b3fde7cb7d 100644
--- a/production/loki-mixin-compiled/dashboards/loki-writes.json
+++ b/production/loki-mixin-compiled/dashboards/loki-writes.json
@@ -33,113 +33,234 @@
"3xx": "#6ED0E0",
"4xx": "#EF843C",
"5xx": "#E24D42",
+ "OK": "#7EB26D",
+ "cancel": "#A9A9A9",
"error": "#E24D42",
"success": "#7EB26D"
},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "1xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#EAB839",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "2xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "3xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#6ED0E0",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "4xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#EF843C",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "5xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E24D42",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "OK"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "cancel"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#A9A9A9",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "error"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E24D42",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "success"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
"fill": 10,
"id": 1,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
"linewidth": 0,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
"stack": true,
- "steppedLine": false,
"targets": [
{
- "expr": "sum by (status) (\n label_replace(label_replace(rate(loki_request_duration_seconds_count{cluster=~\"$cluster\",job=~\"($namespace)/distributor\", route=~\"api_prom_push|loki_api_v1_push|/httpgrpc.HTTP/Handle\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n \"status\", \"${1}\", \"status_code\", \"([a-z]+)\"))\n",
+ "expr": "sum by (status) (\n label_replace(label_replace(rate(loki_request_duration_seconds_count{cluster=~\"$cluster\",job=~\"($namespace)/distributor\", route=~\"api_prom_push|loki_api_v1_push|/httpgrpc.HTTP/Handle\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n \"status\", \"${1}\", \"status_code\", \"([a-zA-Z]+)\"))\n",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{status}}",
- "refId": "A",
- "step": 10
+ "refId": "A"
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "QPS",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 2,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "ms"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 2,
"links": [ ],
"nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "histogram_quantile(0.99, sum by (le) (cluster_job_route:loki_request_duration_seconds_bucket:sum_rate{cluster=~\"$cluster\", job=~\"($namespace)/distributor\", route=~\"api_prom_push|loki_api_v1_push|/httpgrpc.HTTP/Handle\"})) * 1e3",
@@ -166,23 +287,8 @@
"step": 10
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Latency",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
+ "type": "timeseries",
"yaxes": [
{
"format": "ms",
@@ -215,138 +321,99 @@
"height": "250px",
"panels": [
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 3,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 3,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "sum (rate(loki_distributor_structured_metadata_bytes_received_total{cluster=~\"$cluster\",job=~\"($namespace)/distributor\",}[$__rate_interval])) / sum(rate(loki_distributor_bytes_received_total{cluster=~\"$cluster\",job=~\"($namespace)/distributor\",}[$__rate_interval]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "bytes",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Per Total Received Bytes",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 4,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 4,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
"stack": true,
- "steppedLine": false,
"targets": [
{
"expr": "sum by (tenant) (rate(loki_distributor_structured_metadata_bytes_received_total{cluster=~\"$cluster\",job=~\"($namespace)/distributor\",}[$__rate_interval])) / ignoring(tenant) group_left sum(rate(loki_distributor_structured_metadata_bytes_received_total{cluster=~\"$cluster\",job=~\"($namespace)/distributor\",}[$__rate_interval]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{tenant}}",
- "legendLink": null,
- "step": 10
+ "legendLink": null
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Per Tenant",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
+ "type": "timeseries",
"yaxes": [
{
"format": "short",
@@ -385,113 +452,234 @@
"3xx": "#6ED0E0",
"4xx": "#EF843C",
"5xx": "#E24D42",
+ "OK": "#7EB26D",
+ "cancel": "#A9A9A9",
"error": "#E24D42",
"success": "#7EB26D"
},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "1xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#EAB839",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "2xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "3xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#6ED0E0",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "4xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#EF843C",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "5xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E24D42",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "OK"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "cancel"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#A9A9A9",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "error"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E24D42",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "success"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
"fill": 10,
"id": 5,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
"linewidth": 0,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
"stack": true,
- "steppedLine": false,
"targets": [
{
- "expr": "sum by (status) (\n label_replace(label_replace(rate(loki_request_duration_seconds_count{cluster=~\"$cluster\",job=~\"($namespace)/ingester-zone.*\", route=\"/logproto.Pusher/Push\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n \"status\", \"${1}\", \"status_code\", \"([a-z]+)\"))\n",
+ "expr": "sum by (status) (\n label_replace(label_replace(rate(loki_request_duration_seconds_count{cluster=~\"$cluster\",job=~\"($namespace)/ingester-zone.*\", route=\"/logproto.Pusher/Push\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n \"status\", \"${1}\", \"status_code\", \"([a-zA-Z]+)\"))\n",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{status}}",
- "refId": "A",
- "step": 10
+ "refId": "A"
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "QPS",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 6,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "ms"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 6,
"links": [ ],
"nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "histogram_quantile(0.99, sum by (le) (cluster_job_route:loki_request_duration_seconds_bucket:sum_rate{cluster=~\"$cluster\", job=~\"($namespace)/ingester-zone.*\", route=\"/logproto.Pusher/Push\"})) * 1e3",
@@ -518,23 +706,8 @@
"step": 10
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Latency",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
+ "type": "timeseries",
"yaxes": [
{
"format": "ms",
@@ -573,113 +746,234 @@
"3xx": "#6ED0E0",
"4xx": "#EF843C",
"5xx": "#E24D42",
+ "OK": "#7EB26D",
+ "cancel": "#A9A9A9",
"error": "#E24D42",
"success": "#7EB26D"
},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "1xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#EAB839",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "2xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "3xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#6ED0E0",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "4xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#EF843C",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "5xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E24D42",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "OK"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "cancel"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#A9A9A9",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "error"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E24D42",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "success"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
"fill": 10,
"id": 7,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
"linewidth": 0,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
"stack": true,
- "steppedLine": false,
"targets": [
{
- "expr": "sum by (status) (\n label_replace(label_replace(rate(loki_request_duration_seconds_count{cluster=~\"$cluster\",job=~\"($namespace)/ingester\", route=\"/logproto.Pusher/Push\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n \"status\", \"${1}\", \"status_code\", \"([a-z]+)\"))\n",
+ "expr": "sum by (status) (\n label_replace(label_replace(rate(loki_request_duration_seconds_count{cluster=~\"$cluster\",job=~\"($namespace)/ingester\", route=\"/logproto.Pusher/Push\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n \"status\", \"${1}\", \"status_code\", \"([a-zA-Z]+)\"))\n",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{status}}",
- "refId": "A",
- "step": 10
+ "refId": "A"
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "QPS",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 8,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "ms"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 8,
"links": [ ],
"nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "histogram_quantile(0.99, sum by (le) (cluster_job_route:loki_request_duration_seconds_bucket:sum_rate{cluster=~\"$cluster\", job=~\"($namespace)/ingester\", route=\"/logproto.Pusher/Push\"})) * 1e3",
@@ -706,23 +1000,8 @@
"step": 10
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Latency",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
+ "type": "timeseries",
"yaxes": [
{
"format": "ms",
@@ -761,156 +1040,256 @@
"3xx": "#6ED0E0",
"4xx": "#EF843C",
"5xx": "#E24D42",
+ "OK": "#7EB26D",
+ "cancel": "#A9A9A9",
"error": "#E24D42",
"success": "#7EB26D"
},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "1xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#EAB839",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "2xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "3xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#6ED0E0",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "4xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#EF843C",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "5xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E24D42",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "OK"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "cancel"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#A9A9A9",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "error"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E24D42",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "success"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
"fill": 10,
"id": 9,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
"linewidth": 0,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
"stack": true,
- "steppedLine": false,
"targets": [
{
- "expr": "sum by (status) (\n label_replace(label_replace(rate(loki_index_request_duration_seconds_count{cluster=~\"$cluster\",job=~\"($namespace)/ingester.*\", operation=\"index_chunk\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n \"status\", \"${1}\", \"status_code\", \"([a-z]+)\"))\n",
+ "expr": "sum by (status) (\n label_replace(label_replace(rate(loki_index_request_duration_seconds_count{cluster=~\"$cluster\",job=~\"($namespace)/ingester.*\", operation=\"index_chunk\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n \"status\", \"${1}\", \"status_code\", \"([a-zA-Z]+)\"))\n",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{status}}",
- "refId": "A",
- "step": 10
+ "refId": "A"
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "QPS",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 10,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "ms"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 10,
"links": [ ],
"nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "histogram_quantile(0.99, sum(rate(loki_index_request_duration_seconds_bucket{cluster=~\"$cluster\",job=~\"($namespace)/ingester.*\", operation=\"index_chunk\"}[$__rate_interval])) by (le)) * 1e3",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "99th Percentile",
- "refId": "A",
- "step": 10
+ "refId": "A"
},
{
"expr": "histogram_quantile(0.50, sum(rate(loki_index_request_duration_seconds_bucket{cluster=~\"$cluster\",job=~\"($namespace)/ingester.*\", operation=\"index_chunk\"}[$__rate_interval])) by (le)) * 1e3",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "50th Percentile",
- "refId": "B",
- "step": 10
+ "refId": "B"
},
{
"expr": "sum(rate(loki_index_request_duration_seconds_sum{cluster=~\"$cluster\",job=~\"($namespace)/ingester.*\", operation=\"index_chunk\"}[$__rate_interval])) * 1e3 / sum(rate(loki_index_request_duration_seconds_count{cluster=~\"$cluster\",job=~\"($namespace)/ingester.*\", operation=\"index_chunk\"}[$__rate_interval]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "Average",
- "refId": "C",
- "step": 10
+ "refId": "C"
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Latency",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
+ "type": "timeseries",
"yaxes": [
{
"format": "ms",
@@ -949,156 +1328,256 @@
"3xx": "#6ED0E0",
"4xx": "#EF843C",
"5xx": "#E24D42",
+ "OK": "#7EB26D",
+ "cancel": "#A9A9A9",
"error": "#E24D42",
"success": "#7EB26D"
},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 100,
+ "lineWidth": 0,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "normal"
+ }
+ },
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "short"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "1xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#EAB839",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "2xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "3xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#6ED0E0",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "4xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#EF843C",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "5xx"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E24D42",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "OK"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "cancel"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#A9A9A9",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "error"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#E24D42",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "success"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "#7EB26D",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
"fill": 10,
"id": 11,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
"linewidth": 0,
"links": [ ],
- "nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
"stack": true,
- "steppedLine": false,
"targets": [
{
- "expr": "sum by (status) (\n label_replace(label_replace(rate(loki_boltdb_shipper_request_duration_seconds_count{cluster=~\"$cluster\",job=~\"($namespace)/ingester\", operation=\"WRITE\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n \"status\", \"${1}\", \"status_code\", \"([a-z]+)\"))\n",
+ "expr": "sum by (status) (\n label_replace(label_replace(rate(loki_boltdb_shipper_request_duration_seconds_count{cluster=~\"$cluster\",job=~\"($namespace)/ingester\", operation=\"WRITE\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n \"status\", \"${1}\", \"status_code\", \"([a-zA-Z]+)\"))\n",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "{{status}}",
- "refId": "A",
- "step": 10
+ "refId": "A"
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "QPS",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": 0,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": false
- }
- ]
+ "type": "timeseries"
},
{
- "aliasColors": { },
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": "$datasource",
- "fill": 1,
- "id": 12,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "lineWidth": 1,
+ "pointSize": 5,
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ }
+ },
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [ ]
+ },
+ "unit": "ms"
+ },
+ "overrides": [ ]
},
- "lines": true,
- "linewidth": 1,
+ "id": 12,
"links": [ ],
"nullPointMode": "null as zero",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [ ],
- "spaceLength": 10,
+ "options": {
+ "legend": {
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
"span": 6,
- "stack": false,
- "steppedLine": false,
"targets": [
{
"expr": "histogram_quantile(0.99, sum(rate(loki_boltdb_shipper_request_duration_seconds_bucket{cluster=~\"$cluster\",job=~\"($namespace)/ingester\", operation=\"WRITE\"}[$__rate_interval])) by (le)) * 1e3",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "99th Percentile",
- "refId": "A",
- "step": 10
+ "refId": "A"
},
{
"expr": "histogram_quantile(0.50, sum(rate(loki_boltdb_shipper_request_duration_seconds_bucket{cluster=~\"$cluster\",job=~\"($namespace)/ingester\", operation=\"WRITE\"}[$__rate_interval])) by (le)) * 1e3",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "50th Percentile",
- "refId": "B",
- "step": 10
+ "refId": "B"
},
{
"expr": "sum(rate(loki_boltdb_shipper_request_duration_seconds_sum{cluster=~\"$cluster\",job=~\"($namespace)/ingester\", operation=\"WRITE\"}[$__rate_interval])) * 1e3 / sum(rate(loki_boltdb_shipper_request_duration_seconds_count{cluster=~\"$cluster\",job=~\"($namespace)/ingester\", operation=\"WRITE\"}[$__rate_interval]))",
"format": "time_series",
- "intervalFactor": 2,
"legendFormat": "Average",
- "refId": "C",
- "step": 10
+ "refId": "C"
}
],
- "thresholds": [ ],
- "timeFrom": null,
- "timeShift": null,
"title": "Latency",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [ ]
- },
+ "type": "timeseries",
"yaxes": [
{
"format": "ms",
@@ -1140,7 +1619,7 @@
"value": "default"
},
"hide": 0,
- "label": "Data Source",
+ "label": "Data source",
"name": "datasource",
"options": [ ],
"query": "prometheus",
diff --git a/production/loki-mixin/dashboards/dashboard-utils.libsonnet b/production/loki-mixin/dashboards/dashboard-utils.libsonnet
index 2d04c6cef8317..099d6810d2e1d 100644
--- a/production/loki-mixin/dashboards/dashboard-utils.libsonnet
+++ b/production/loki-mixin/dashboards/dashboard-utils.libsonnet
@@ -151,32 +151,41 @@ local utils = import 'mixin-utils/utils.libsonnet';
datasource: '$datasource',
},
CPUUsagePanel(title, matcher)::
- $.panel(title) +
+ $.newQueryPanel(title) +
$.queryPanel([
'sum by(pod) (rate(container_cpu_usage_seconds_total{%s, %s}[$__rate_interval]))' % [$.namespaceMatcher(), matcher],
'min(kube_pod_container_resource_requests{%s, %s, resource="cpu"} > 0)' % [$.namespaceMatcher(), matcher],
'min(container_spec_cpu_quota{%s, %s} / container_spec_cpu_period{%s, %s})' % [$.namespaceMatcher(), matcher, $.namespaceMatcher(), matcher],
], ['{{pod}}', 'request', 'limit']) +
{
- seriesOverrides: [
- {
- alias: 'request',
- color: '#FFC000',
- fill: 0,
- },
- {
- alias: 'limit',
- color: '#E02F44',
- fill: 0,
- },
- ],
tooltip: { sort: 2 }, // Sort descending.
+ } + {
+ fieldConfig+: {
+ overrides+: [
+ $.colorOverride('request', '#FFC000') + {
+ properties+: [
+ {
+ id: 'custom.fillOpacity',
+ value: 0,
+ },
+ ],
+ },
+ $.colorOverride('limit', '#E02F44') + {
+ properties+: [
+ {
+ id: 'custom.fillOpacity',
+ value: 0,
+ },
+ ],
+ },
+ ],
+ },
},
containerCPUUsagePanel(title, containerName)::
self.CPUUsagePanel(title, 'container=~"%s"' % containerName),
memoryWorkingSetPanel(title, matcher)::
- $.panel(title) +
+ $.newQueryPanel(title, 'bytes') +
$.queryPanel([
// We use "max" instead of "sum" otherwise during a rolling update of a statefulset we will end up
// summing the memory of the old pod (whose metric will be stale for 5m) to the new pod.
@@ -185,32 +194,39 @@ local utils = import 'mixin-utils/utils.libsonnet';
'min(container_spec_memory_limit_bytes{%s, %s} > 0)' % [$.namespaceMatcher(), matcher],
], ['{{pod}}', 'request', 'limit']) +
{
- seriesOverrides: [
- {
- alias: 'request',
- color: '#FFC000',
- fill: 0,
- },
- {
- alias: 'limit',
- color: '#E02F44',
- fill: 0,
- },
- ],
- yaxes: $.yaxes('bytes'),
tooltip: { sort: 2 }, // Sort descending.
+ } + {
+ fieldConfig+: {
+ overrides+: [
+ $.colorOverride('request', '#FFC000') + {
+ properties+: [
+ {
+ id: 'custom.fillOpacity',
+ value: 0,
+ },
+ ],
+ },
+ $.colorOverride('limit', '#E02F44') + {
+ properties+: [
+ {
+ id: 'custom.fillOpacity',
+ value: 0,
+ },
+ ],
+ },
+ ],
+ },
},
containerMemoryWorkingSetPanel(title, containerName)::
self.memoryWorkingSetPanel(title, 'container=~"%s"' % containerName),
goHeapInUsePanel(title, jobName)::
- $.panel(title) +
+ $.newQueryPanel(title, 'bytes') +
$.queryPanel(
'sum by(%s) (go_memstats_heap_inuse_bytes{%s})' % [$._config.per_instance_label, $.jobMatcher(jobName)],
'{{%s}}' % $._config.per_instance_label
) +
{
- yaxes: $.yaxes('bytes'),
tooltip: { sort: 2 }, // Sort descending.
},
@@ -221,6 +237,69 @@ local utils = import 'mixin-utils/utils.libsonnet';
filterNodeDiskContainer(containerName)::
self.filterNodeDisk('container="%s"' % containerName),
+ newQueryPanel(title, unit='short')::
+ super.timeseriesPanel(title) + {
+ fieldConfig+: {
+ defaults+: {
+ custom+: {
+ fillOpacity: 10,
+ },
+ unit: unit,
+ },
+ },
+ },
+
+ withStacking:: {
+ fieldConfig+: {
+ defaults+: {
+ custom+: {
+ fillOpacity: 100,
+ lineWidth: 0,
+ stacking: {
+ mode: 'normal',
+ group: 'A',
+ },
+ },
+ },
+ },
+ },
+
+ colorOverride(name, color):: {
+ matcher: {
+ id: 'byName',
+ options: name,
+ },
+ properties: [
+ {
+ id: 'color',
+ value: {
+ mode: 'fixed',
+ fixedColor: color,
+ },
+ },
+ ],
+ },
+
+ newQpsPanel(selector, statusLabelName='status_code')::
+ super.qpsPanel(selector, statusLabelName) + $.withStacking + {
+ fieldConfig+: {
+ defaults+: {
+ min: 0,
+ },
+ overrides: [
+ $.colorOverride('1xx', '#EAB839'),
+ $.colorOverride('2xx', '#7EB26D'),
+ $.colorOverride('3xx', '#6ED0E0'),
+ $.colorOverride('4xx', '#EF843C'),
+ $.colorOverride('5xx', '#E24D42'),
+ $.colorOverride('OK', '#7EB26D'),
+ $.colorOverride('cancel', '#A9A9A9'),
+ $.colorOverride('error', '#E24D42'),
+ $.colorOverride('success', '#7EB26D'),
+ ],
+ },
+ },
+
newStatPanel(queries, legends='', unit='percentunit', decimals=1, thresholds=[], instant=false, novalue='')::
super.queryPanel(queries, legends) + {
type: 'stat',
@@ -247,7 +326,6 @@ local utils = import 'mixin-utils/utils.libsonnet';
},
containerDiskSpaceUtilizationPanel(title, containerName)::
- $.panel(title) +
- $.queryPanel('max by(persistentvolumeclaim) (kubelet_volume_stats_used_bytes{%s} / kubelet_volume_stats_capacity_bytes{%s}) and count by(persistentvolumeclaim) (kube_persistentvolumeclaim_labels{%s,%s})' % [$.namespaceMatcher(), $.namespaceMatcher(), $.namespaceMatcher(), $.containerLabelMatcher(containerName)], '{{persistentvolumeclaim}}') +
- { yaxes: $.yaxes('percentunit') },
+ $.newQueryPanel(title, 'percentunit') +
+ $.queryPanel('max by(persistentvolumeclaim) (kubelet_volume_stats_used_bytes{%s} / kubelet_volume_stats_capacity_bytes{%s}) and count by(persistentvolumeclaim) (kube_persistentvolumeclaim_labels{%s,%s})' % [$.namespaceMatcher(), $.namespaceMatcher(), $.namespaceMatcher(), $.containerLabelMatcher(containerName)], '{{persistentvolumeclaim}}'),
}
diff --git a/production/loki-mixin/dashboards/loki-chunks.libsonnet b/production/loki-mixin/dashboards/loki-chunks.libsonnet
index 99a1fa06fe8c7..87dfff7a4f064 100644
--- a/production/loki-mixin/dashboards/loki-chunks.libsonnet
+++ b/production/loki-mixin/dashboards/loki-chunks.libsonnet
@@ -15,11 +15,11 @@ local utils = import 'mixin-utils/utils.libsonnet';
.addRow(
$.row('Active Series / Chunks')
.addPanel(
- $.panel('Series') +
+ $.newQueryPanel('Series') +
$.queryPanel('sum(loki_ingester_memory_chunks{%s})' % dashboards['loki-chunks.json'].labelsSelector, 'series'),
)
.addPanel(
- $.panel('Chunks per series') +
+ $.newQueryPanel('Chunks per series') +
$.queryPanel(
'sum(loki_ingester_memory_chunks{%s}) / sum(loki_ingester_memory_streams{%s})' % [
dashboards['loki-chunks.json'].labelsSelector,
@@ -32,24 +32,22 @@ local utils = import 'mixin-utils/utils.libsonnet';
.addRow(
$.row('Flush Stats')
.addPanel(
- $.panel('Utilization') +
- $.latencyPanel('loki_ingester_chunk_utilization', '{%s}' % dashboards['loki-chunks.json'].labelsSelector, multiplier='1') +
- { yaxes: $.yaxes('percentunit') },
+ $.newQueryPanel('Utilization', 'percentunit') +
+ $.latencyPanel('loki_ingester_chunk_utilization', '{%s}' % dashboards['loki-chunks.json'].labelsSelector, multiplier='1'),
)
.addPanel(
- $.panel('Age') +
+ $.newQueryPanel('Age') +
$.latencyPanel('loki_ingester_chunk_age_seconds', '{%s}' % dashboards['loki-chunks.json'].labelsSelector),
),
)
.addRow(
$.row('Flush Stats')
.addPanel(
- $.panel('Log Entries Per Chunk') +
- $.latencyPanel('loki_ingester_chunk_entries', '{%s}' % dashboards['loki-chunks.json'].labelsSelector, multiplier='1') +
- { yaxes: $.yaxes('short') },
+ $.newQueryPanel('Log Entries Per Chunk', 'short') +
+ $.latencyPanel('loki_ingester_chunk_entries', '{%s}' % dashboards['loki-chunks.json'].labelsSelector, multiplier='1'),
)
.addPanel(
- $.panel('Index Entries Per Chunk') +
+ $.newQueryPanel('Index Entries Per Chunk') +
$.queryPanel(
'sum(rate(loki_chunk_store_index_entries_per_chunk_sum{%s}[5m])) / sum(rate(loki_chunk_store_index_entries_per_chunk_count{%s}[5m]))' % [
dashboards['loki-chunks.json'].labelsSelector,
@@ -62,22 +60,22 @@ local utils = import 'mixin-utils/utils.libsonnet';
.addRow(
$.row('Flush Stats')
.addPanel(
- $.panel('Queue Length') +
+ $.newQueryPanel('Queue Length') +
$.queryPanel('loki_ingester_flush_queue_length{%(label)s} or cortex_ingester_flush_queue_length{%(label)s}' % { label: dashboards['loki-chunks.json'].labelsSelector }, '{{pod}}'),
)
.addPanel(
- $.panel('Flush Rate') +
- $.qpsPanel('loki_ingester_chunk_age_seconds_count{%s}' % dashboards['loki-chunks.json'].labelsSelector,),
+ $.newQueryPanel('Flush Rate') +
+ $.newQpsPanel('loki_ingester_chunk_age_seconds_count{%s}' % dashboards['loki-chunks.json'].labelsSelector,),
),
)
.addRow(
$.row('Flush Stats')
.addPanel(
- $.panel('Chunks Flushed/Second') +
+ $.newQueryPanel('Chunks Flushed/Second') +
$.queryPanel('sum(rate(loki_ingester_chunks_flushed_total{%s}[$__rate_interval]))' % dashboards['loki-chunks.json'].labelsSelector, '{{pod}}'),
)
.addPanel(
- $.panel('Chunk Flush Reason') +
+ $.newQueryPanel('Chunk Flush Reason') +
$.queryPanel('sum by (reason) (rate(loki_ingester_chunks_flushed_total{%s}[$__rate_interval])) / ignoring(reason) group_left sum(rate(loki_ingester_chunks_flushed_total{%s}[$__rate_interval]))' % [dashboards['loki-chunks.json'].labelsSelector, dashboards['loki-chunks.json'].labelsSelector], '{{reason}}') + {
stack: true,
yaxes: [
@@ -138,7 +136,7 @@ local utils = import 'mixin-utils/utils.libsonnet';
.addRow(
$.row('Utilization')
.addPanel(
- $.panel('Chunk Size Quantiles') +
+ $.newQueryPanel('Chunk Size Quantiles', 'bytes') +
$.queryPanel(
[
'histogram_quantile(0.99, sum(rate(loki_ingester_chunk_size_bytes_bucket{%s}[1m])) by (le))' % dashboards['loki-chunks.json'].labelsSelector,
@@ -150,15 +148,13 @@ local utils = import 'mixin-utils/utils.libsonnet';
'p90',
'p50',
],
- ) + {
- yaxes: $.yaxes('bytes'),
- },
+ ),
)
)
.addRow(
$.row('Duration')
.addPanel(
- $.panel('Chunk Duration hours (end-start)') +
+ $.newQueryPanel('Chunk Duration hours (end-start)') +
$.queryPanel(
[
'histogram_quantile(0.5, sum(rate(loki_ingester_chunk_bounds_hours_bucket{%s}[5m])) by (le))' % dashboards['loki-chunks.json'].labelsSelector,
diff --git a/production/loki-mixin/dashboards/loki-deletion.libsonnet b/production/loki-mixin/dashboards/loki-deletion.libsonnet
index 58a46dd76dc11..5fdbcc769a588 100644
--- a/production/loki-mixin/dashboards/loki-deletion.libsonnet
+++ b/production/loki-mixin/dashboards/loki-deletion.libsonnet
@@ -28,39 +28,39 @@ local utils = import 'mixin-utils/utils.libsonnet';
.addRow(
g.row('Churn')
.addPanel(
- g.panel('# of Delete Requests (received - processed) ') +
+ $.newQueryPanel('# of Delete Requests (received - processed) ') +
g.queryPanel('(loki_compactor_delete_requests_received_total{%s} or on() vector(0)) - on () (loki_compactor_delete_requests_processed_total{%s} or on () vector(0))' % [$.namespaceMatcher(), $.namespaceMatcher()], 'in progress'),
)
.addPanel(
- g.panel('Delete Requests Received / Day') +
+ $.newQueryPanel('Delete Requests Received / Day') +
g.queryPanel('sum(increase(loki_compactor_delete_requests_received_total{%s}[1d]))' % $.namespaceMatcher(), 'received'),
)
.addPanel(
- g.panel('Delete Requests Processed / Day') +
+ $.newQueryPanel('Delete Requests Processed / Day') +
g.queryPanel('sum(increase(loki_compactor_delete_requests_processed_total{%s}[1d]))' % $.namespaceMatcher(), 'processed'),
)
).addRow(
g.row('Compactor')
.addPanel(
- g.panel('Compactor CPU usage') +
+ $.newQueryPanel('Compactor CPU usage') +
g.queryPanel('node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{%s, container="compactor"}' % $.namespaceMatcher(), '{{pod}}'),
)
.addPanel(
- g.panel('Compactor memory usage (MiB)') +
+ $.newQueryPanel('Compactor memory usage (MiB)') +
g.queryPanel('go_memstats_heap_inuse_bytes{%s, container="compactor"} / 1024 / 1024 ' % $.namespaceMatcher(), ' {{pod}} '),
)
.addPanel(
- g.panel('Compaction run duration (seconds)') +
+ $.newQueryPanel('Compaction run duration (seconds)') +
g.queryPanel('loki_boltdb_shipper_compact_tables_operation_duration_seconds{%s}' % $.namespaceMatcher(), '{{pod}}'),
)
).addRow(
g.row('Deletion metrics')
.addPanel(
- g.panel('Failures in Loading Delete Requests / Hour') +
+ $.newQueryPanel('Failures in Loading Delete Requests / Hour') +
g.queryPanel('sum(increase(loki_compactor_load_pending_requests_attempts_total{status="fail", %s}[1h]))' % $.namespaceMatcher(), 'failures'),
)
.addPanel(
- g.panel('Lines Deleted / Sec') +
+ $.newQueryPanel('Lines Deleted / Sec') +
g.queryPanel('sum(rate(loki_compactor_deleted_lines{' + $._config.per_cluster_label + '=~"$cluster",job=~"$namespace/%s"}[$__rate_interval])) by (user)' % compactor_matcher, '{{user}}'),
)
).addRow(
diff --git a/production/loki-mixin/dashboards/loki-reads-resources.libsonnet b/production/loki-mixin/dashboards/loki-reads-resources.libsonnet
index 3d17903cf83c0..83c1f0eafb779 100644
--- a/production/loki-mixin/dashboards/loki-reads-resources.libsonnet
+++ b/production/loki-mixin/dashboards/loki-reads-resources.libsonnet
@@ -67,22 +67,20 @@ local utils = import 'mixin-utils/utils.libsonnet';
$.goHeapInUsePanel('Memory (go heap inuse)', 'querier'),
)
.addPanel(
- $.panel('Disk Writes') +
+ $.newQueryPanel('Disk Writes', 'Bps') +
$.queryPanel(
'sum by(%s, %s, device) (rate(node_disk_written_bytes_total[$__rate_interval])) + %s' % [$._config.per_node_label, $._config.per_instance_label, $.filterNodeDiskContainer('querier')],
'{{%s}} - {{device}}' % $._config.per_instance_label
) +
- $.stack +
- { yaxes: $.yaxes('Bps') },
+ $.withStacking,
)
.addPanel(
- $.panel('Disk Reads') +
+ $.newQueryPanel('Disk Reads', 'Bps') +
$.queryPanel(
'sum by(%s, %s, device) (rate(node_disk_read_bytes_total[$__rate_interval])) + %s' % [$._config.per_node_label, $._config.per_instance_label, $.filterNodeDiskContainer('querier')],
'{{%s}} - {{device}}' % $._config.per_instance_label
) +
- $.stack +
- { yaxes: $.yaxes('Bps') },
+ $.withStacking,
)
.addPanel(
$.containerDiskSpaceUtilizationPanel('Disk Space Utilization', 'querier'),
@@ -100,22 +98,20 @@ local utils = import 'mixin-utils/utils.libsonnet';
$.goHeapInUsePanel('Memory (go heap inuse)', index_gateway_job_matcher),
)
.addPanel(
- $.panel('Disk Writes') +
+ $.newQueryPanel('Disk Writes', 'Bps') +
$.queryPanel(
'sum by(%s, %s, device) (rate(node_disk_written_bytes_total[$__rate_interval])) + %s' % [$._config.per_node_label, $._config.per_instance_label, $.filterNodeDisk(index_gateway_pod_matcher)],
'{{%s}} - {{device}}' % $._config.per_instance_label
) +
- $.stack +
- { yaxes: $.yaxes('Bps') },
+ $.withStacking,
)
.addPanel(
- $.panel('Disk Reads') +
+ $.newQueryPanel('Disk Reads', 'Bps') +
$.queryPanel(
'sum by(%s, %s, device) (rate(node_disk_read_bytes_total[$__rate_interval])) + %s' % [$._config.per_node_label, $._config.per_instance_label, $.filterNodeDisk(index_gateway_pod_matcher)],
'{{%s}} - {{device}}' % $._config.per_instance_label
) +
- $.stack +
- { yaxes: $.yaxes('Bps') },
+ $.withStacking,
)
.addPanel(
$.containerDiskSpaceUtilizationPanel('Disk Space Utilization', index_gateway_job_matcher),
@@ -137,7 +133,7 @@ local utils = import 'mixin-utils/utils.libsonnet';
!$._config.ssd.enabled,
grafana.row.new('Ruler')
.addPanel(
- $.panel('Rules') +
+ $.newQueryPanel('Rules') +
$.queryPanel(
'sum by(%(label)s) (loki_prometheus_rule_group_rules{%(matcher)s}) or sum by(%(label)s) (cortex_prometheus_rule_group_rules{%(matcher)s})' % { label: $._config.per_instance_label, matcher: $.jobMatcher('ruler') },
'{{%s}}' % $._config.per_instance_label
diff --git a/production/loki-mixin/dashboards/loki-reads.libsonnet b/production/loki-mixin/dashboards/loki-reads.libsonnet
index 823b03126ccc5..3da4e200e1abc 100644
--- a/production/loki-mixin/dashboards/loki-reads.libsonnet
+++ b/production/loki-mixin/dashboards/loki-reads.libsonnet
@@ -21,11 +21,10 @@ local utils = import 'mixin-utils/utils.libsonnet';
legendFormat: '__auto',
},
],
- yaxes: $.yaxes('ms'),
},
local p99LatencyByPod(metric, selectorStr) =
- $.panel('Per Pod Latency (p99)') +
+ $.newQueryPanel('Per Pod Latency (p99)', 'ms') +
latencyPanelWithExtraGrouping(metric, selectorStr, '1e3', 'pod'),
'loki-reads.json': {
@@ -69,11 +68,11 @@ local utils = import 'mixin-utils/utils.libsonnet';
$._config.internal_components,
$.row('Frontend (cortex_gw)')
.addPanel(
- $.panel('QPS') +
- $.qpsPanel('loki_request_duration_seconds_count{%s route=~"%s"}' % [dashboards['loki-reads.json'].cortexGwSelector, http_routes])
+ $.newQueryPanel('QPS') +
+ $.newQpsPanel('loki_request_duration_seconds_count{%s route=~"%s"}' % [dashboards['loki-reads.json'].cortexGwSelector, http_routes])
)
.addPanel(
- $.panel('Latency') +
+ $.newQueryPanel('Latency', 'ms') +
utils.latencyRecordingRulePanel(
'loki_request_duration_seconds',
dashboards['loki-reads.json'].clusterMatchers + dashboards['loki-reads.json'].matchers.cortexgateway + [utils.selector.re('route', http_routes)],
@@ -92,11 +91,11 @@ local utils = import 'mixin-utils/utils.libsonnet';
.addRow(
$.row(if $._config.ssd.enabled then 'Read Path' else 'Frontend (query-frontend)')
.addPanel(
- $.panel('QPS') +
- $.qpsPanel('loki_request_duration_seconds_count{%s route=~"%s"}' % [dashboards['loki-reads.json'].queryFrontendSelector, http_routes])
+ $.newQueryPanel('QPS') +
+ $.newQpsPanel('loki_request_duration_seconds_count{%s route=~"%s"}' % [dashboards['loki-reads.json'].queryFrontendSelector, http_routes])
)
.addPanel(
- $.panel('Latency') +
+ $.newQueryPanel('Latency', 'ms') +
utils.latencyRecordingRulePanel(
'loki_request_duration_seconds',
dashboards['loki-reads.json'].clusterMatchers + dashboards['loki-reads.json'].matchers.queryFrontend + [utils.selector.re('route', http_routes)],
@@ -118,11 +117,11 @@ local utils = import 'mixin-utils/utils.libsonnet';
!$._config.ssd.enabled,
$.row('Querier')
.addPanel(
- $.panel('QPS') +
- $.qpsPanel('loki_request_duration_seconds_count{%s route=~"%s"}' % [dashboards['loki-reads.json'].querierSelector, http_routes])
+ $.newQueryPanel('QPS') +
+ $.newQpsPanel('loki_request_duration_seconds_count{%s route=~"%s"}' % [dashboards['loki-reads.json'].querierSelector, http_routes])
)
.addPanel(
- $.panel('Latency') +
+ $.newQueryPanel('Latency', 'ms') +
utils.latencyRecordingRulePanel(
'loki_request_duration_seconds',
dashboards['loki-reads.json'].clusterMatchers + dashboards['loki-reads.json'].matchers.querier + [utils.selector.re('route', http_routes)],
@@ -144,11 +143,11 @@ local utils = import 'mixin-utils/utils.libsonnet';
!$._config.ssd.enabled,
$.row('Ingester')
.addPanel(
- $.panel('QPS') +
- $.qpsPanel('loki_request_duration_seconds_count{%s route=~"%s"}' % [dashboards['loki-reads.json'].ingesterSelector, grpc_routes])
+ $.newQueryPanel('QPS') +
+ $.newQpsPanel('loki_request_duration_seconds_count{%s route=~"%s"}' % [dashboards['loki-reads.json'].ingesterSelector, grpc_routes])
)
.addPanel(
- $.panel('Latency') +
+ $.newQueryPanel('Latency', 'ms') +
utils.latencyRecordingRulePanel(
'loki_request_duration_seconds',
dashboards['loki-reads.json'].clusterMatchers + dashboards['loki-reads.json'].matchers.ingester + [utils.selector.re('route', grpc_routes)],
@@ -171,11 +170,11 @@ local utils = import 'mixin-utils/utils.libsonnet';
!$._config.ssd.enabled,
$.row('Ingester - Zone Aware')
.addPanel(
- $.panel('QPS') +
- $.qpsPanel('loki_request_duration_seconds_count{%s route=~"%s"}' % [dashboards['loki-reads.json'].ingesterZoneSelector, grpc_routes])
+ $.newQueryPanel('QPS') +
+ $.newQpsPanel('loki_request_duration_seconds_count{%s route=~"%s"}' % [dashboards['loki-reads.json'].ingesterZoneSelector, grpc_routes])
)
.addPanel(
- $.panel('Latency') +
+ $.newQueryPanel('Latency', 'ms') +
utils.latencyRecordingRulePanel(
'loki_request_duration_seconds',
dashboards['loki-reads.json'].clusterMatchers + dashboards['loki-reads.json'].matchers.ingesterZoneAware + [utils.selector.re('route', grpc_routes)],
@@ -197,11 +196,11 @@ local utils = import 'mixin-utils/utils.libsonnet';
!$._config.ssd.enabled,
$.row('Index')
.addPanel(
- $.panel('QPS') +
- $.qpsPanel('loki_index_request_duration_seconds_count{%s operation!="index_chunk"}' % dashboards['loki-reads.json'].querierSelector)
+ $.newQueryPanel('QPS') +
+ $.newQpsPanel('loki_index_request_duration_seconds_count{%s operation!="index_chunk"}' % dashboards['loki-reads.json'].querierSelector)
)
.addPanel(
- $.panel('Latency') +
+ $.newQueryPanel('Latency', 'ms') +
$.latencyPanel('loki_index_request_duration_seconds', '{%s operation!="index_chunk"}' % dashboards['loki-reads.json'].querierSelector)
)
.addPanel(
@@ -215,11 +214,11 @@ local utils = import 'mixin-utils/utils.libsonnet';
showBigTable,
$.row('BigTable')
.addPanel(
- $.panel('QPS') +
- $.qpsPanel('loki_bigtable_request_duration_seconds_count{%s operation="/google.bigtable.v2.Bigtable/ReadRows"}' % dashboards['loki-reads.json'].querierSelector)
+ $.newQueryPanel('QPS') +
+ $.newQpsPanel('loki_bigtable_request_duration_seconds_count{%s operation="/google.bigtable.v2.Bigtable/ReadRows"}' % dashboards['loki-reads.json'].querierSelector)
)
.addPanel(
- $.panel('Latency') +
+ $.newQueryPanel('Latency', 'ms') +
utils.latencyRecordingRulePanel(
'loki_bigtable_request_duration_seconds',
dashboards['loki-reads.json'].clusterMatchers + dashboards['loki-reads.json'].matchers.querier + [utils.selector.eq('operation', '/google.bigtable.v2.Bigtable/ReadRows')]
@@ -229,11 +228,11 @@ local utils = import 'mixin-utils/utils.libsonnet';
.addRow(
$.row('BoltDB Shipper')
.addPanel(
- $.panel('QPS') +
- $.qpsPanel('loki_boltdb_shipper_request_duration_seconds_count{%s operation="Shipper.Query"}' % dashboards['loki-reads.json'].querierOrIndexGatewaySelector)
+ $.newQueryPanel('QPS') +
+ $.newQpsPanel('loki_boltdb_shipper_request_duration_seconds_count{%s operation="Shipper.Query"}' % dashboards['loki-reads.json'].querierOrIndexGatewaySelector)
)
.addPanel(
- $.panel('Latency') +
+ $.newQueryPanel('Latency', 'ms') +
$.latencyPanel('loki_boltdb_shipper_request_duration_seconds', '{%s operation="Shipper.Query"}' % dashboards['loki-reads.json'].querierOrIndexGatewaySelector)
)
.addPanel(
diff --git a/production/loki-mixin/dashboards/loki-retention.libsonnet b/production/loki-mixin/dashboards/loki-retention.libsonnet
index a5aa45a13d756..9896a5246881b 100644
--- a/production/loki-mixin/dashboards/loki-retention.libsonnet
+++ b/production/loki-mixin/dashboards/loki-retention.libsonnet
@@ -30,19 +30,18 @@ local utils = import 'mixin-utils/utils.libsonnet';
$.fromNowPanel('Last Compact Tables Operation Success', 'loki_boltdb_shipper_compact_tables_operation_last_successful_run_timestamp_seconds')
)
.addPanel(
- $.panel('Compact Tables Operations Duration') +
- $.queryPanel(['loki_boltdb_shipper_compact_tables_operation_duration_seconds{%s}' % $.namespaceMatcher()], ['duration']) +
- { yaxes: $.yaxes('s') },
+ $.newQueryPanel('Compact Tables Operations Duration', 's') +
+ $.queryPanel(['loki_boltdb_shipper_compact_tables_operation_duration_seconds{%s}' % $.namespaceMatcher()], ['duration']),
)
)
.addRow(
$.row('')
.addPanel(
- $.panel('Number of times Tables were skipped during Compaction') +
+ $.newQueryPanel('Number of times Tables were skipped during Compaction') +
$.queryPanel(['sum(increase(loki_compactor_skipped_compacting_locked_table_total{%s}[$__range]))' % $.namespaceMatcher()], ['{{table_name}}']),
)
.addPanel(
- $.panel('Compact Tables Operations Per Status') +
+ $.newQueryPanel('Compact Tables Operations Per Status') +
$.queryPanel(['sum by (status)(rate(loki_boltdb_shipper_compact_tables_operation_total{%s}[$__rate_interval]))' % $.namespaceMatcher()], ['{{success}}']),
)
)
@@ -52,66 +51,66 @@ local utils = import 'mixin-utils/utils.libsonnet';
$.fromNowPanel('Last Mark Operation Success', 'loki_compactor_apply_retention_last_successful_run_timestamp_seconds')
)
.addPanel(
- $.panel('Mark Operations Duration') +
- $.queryPanel(['loki_compactor_apply_retention_operation_duration_seconds{%s}' % $.namespaceMatcher()], ['duration']) +
- { yaxes: $.yaxes('s') },
+ $.newQueryPanel('Mark Operations Duration', 's') +
+ $.queryPanel(['loki_compactor_apply_retention_operation_duration_seconds{%s}' % $.namespaceMatcher()], ['duration']),
)
.addPanel(
- $.panel('Mark Operations Per Status') +
+ $.newQueryPanel('Mark Operations Per Status') +
$.queryPanel(['sum by (status)(rate(loki_compactor_apply_retention_operation_total{%s}[$__rate_interval]))' % $.namespaceMatcher()], ['{{success}}']),
)
)
.addRow(
$.row('Per Table Marker')
.addPanel(
- $.panel('Processed Tables Per Action') +
- $.queryPanel(['count by(action)(loki_boltdb_shipper_retention_marker_table_processed_total{%s})' % $.namespaceMatcher()], ['{{action}}']) + $.stack,
+ $.newQueryPanel('Processed Tables Per Action') +
+ $.queryPanel(['count by(action)(loki_boltdb_shipper_retention_marker_table_processed_total{%s})' % $.namespaceMatcher()], ['{{action}}']) +
+ $.withStacking,
)
.addPanel(
- $.panel('Modified Tables') +
- $.queryPanel(['count by(table,action)(loki_boltdb_shipper_retention_marker_table_processed_total{%s , action=~"modified|deleted"})' % $.namespaceMatcher()], ['{{table}}-{{action}}']) + $.stack,
+ $.newQueryPanel('Modified Tables') +
+ $.queryPanel(['count by(table,action)(loki_boltdb_shipper_retention_marker_table_processed_total{%s , action=~"modified|deleted"})' % $.namespaceMatcher()], ['{{table}}-{{action}}']) +
+ $.withStacking,
)
.addPanel(
- $.panel('Marks Creation Rate Per Table') +
- $.queryPanel(['sum by (table)(rate(loki_boltdb_shipper_retention_marker_count_total{%s}[$__rate_interval])) >0' % $.namespaceMatcher()], ['{{table}}']) + $.stack,
+ $.newQueryPanel('Marks Creation Rate Per Table') +
+ $.queryPanel(['sum by (table)(rate(loki_boltdb_shipper_retention_marker_count_total{%s}[$__rate_interval])) >0' % $.namespaceMatcher()], ['{{table}}']) +
+ $.withStacking,
)
)
.addRow(
$.row('')
.addPanel(
- $.panel('Marked Chunks (24h)') +
+ $.newQueryPanel('Marked Chunks (24h)') +
$.statPanel('sum (increase(loki_boltdb_shipper_retention_marker_count_total{%s}[24h]))' % $.namespaceMatcher(), 'short')
)
.addPanel(
- $.panel('Mark Table Latency') +
+ $.newQueryPanel('Mark Table Latency') +
$.latencyPanel('loki_boltdb_shipper_retention_marker_table_processed_duration_seconds', '{%s}' % $.namespaceMatcher())
)
)
.addRow(
$.row('Sweeper')
.addPanel(
- $.panel('Delete Chunks (24h)') +
+ $.newQueryPanel('Delete Chunks (24h)') +
$.statPanel('sum (increase(loki_boltdb_shipper_retention_sweeper_chunk_deleted_duration_seconds_count{%s}[24h]))' % $.namespaceMatcher(), 'short')
)
.addPanel(
- $.panel('Delete Latency') +
+ $.newQueryPanel('Delete Latency') +
$.latencyPanel('loki_boltdb_shipper_retention_sweeper_chunk_deleted_duration_seconds', '{%s}' % $.namespaceMatcher())
)
)
.addRow(
$.row('')
.addPanel(
- $.panel('Sweeper Lag') +
- $.queryPanel(['time() - (loki_boltdb_shipper_retention_sweeper_marker_file_processing_current_time{%s} > 0)' % $.namespaceMatcher()], ['lag']) + {
- yaxes: $.yaxes({ format: 's', min: null }),
- },
+ $.newQueryPanel('Sweeper Lag', 's') +
+ $.queryPanel(['time() - (loki_boltdb_shipper_retention_sweeper_marker_file_processing_current_time{%s} > 0)' % $.namespaceMatcher()], ['lag']),
)
.addPanel(
- $.panel('Marks Files to Process') +
+ $.newQueryPanel('Marks Files to Process') +
$.queryPanel(['sum(loki_boltdb_shipper_retention_sweeper_marker_files_current{%s})' % $.namespaceMatcher()], ['count']),
)
.addPanel(
- $.panel('Delete Rate Per Status') +
+ $.newQueryPanel('Delete Rate Per Status') +
$.queryPanel(['sum by (status)(rate(loki_boltdb_shipper_retention_sweeper_chunk_deleted_duration_seconds_count{%s}[$__rate_interval]))' % $.namespaceMatcher()], ['{{status}}']),
)
)
diff --git a/production/loki-mixin/dashboards/loki-writes-resources.libsonnet b/production/loki-mixin/dashboards/loki-writes-resources.libsonnet
index bffbe5b59dace..f25aeb4b546b4 100644
--- a/production/loki-mixin/dashboards/loki-writes-resources.libsonnet
+++ b/production/loki-mixin/dashboards/loki-writes-resources.libsonnet
@@ -41,7 +41,7 @@ local utils = import 'mixin-utils/utils.libsonnet';
.addRow(
grafana.row.new(if $._config.ssd.enabled then 'Write path' else 'Ingester')
.addPanel(
- $.panel('In-memory streams') +
+ $.newQueryPanel('In-memory streams') +
$.queryPanel(
'sum by(%s) (loki_ingester_memory_streams{%s})' % [$._config.per_instance_label, $.jobMatcher(ingester_job_matcher)],
'{{%s}}' % $._config.per_instance_label
@@ -60,22 +60,20 @@ local utils = import 'mixin-utils/utils.libsonnet';
$.goHeapInUsePanel('Memory (go heap inuse)', ingester_job_matcher),
)
.addPanel(
- $.panel('Disk Writes') +
+ $.newQueryPanel('Disk Writes', 'Bps') +
$.queryPanel(
'sum by(%s, %s, device) (rate(node_disk_written_bytes_total[$__rate_interval])) + %s' % [$._config.per_node_label, $._config.per_instance_label, $.filterNodeDisk(ingester_pod_matcher)],
'{{%s}} - {{device}}' % $._config.per_instance_label
) +
- $.stack +
- { yaxes: $.yaxes('Bps') },
+ $.withStacking,
)
.addPanel(
- $.panel('Disk Reads') +
+ $.newQueryPanel('Disk Reads', 'Bps') +
$.queryPanel(
'sum by(%s, %s, device) (rate(node_disk_read_bytes_total[$__rate_interval])) + %s' % [$._config.per_node_label, $._config.per_instance_label, $.filterNodeDisk(ingester_pod_matcher)],
'{{%s}} - {{device}}' % $._config.per_instance_label
) +
- $.stack +
- { yaxes: $.yaxes('Bps') },
+ $.withStacking,
)
.addPanel(
$.containerDiskSpaceUtilizationPanel('Disk Space Utilization', ingester_job_matcher),
diff --git a/production/loki-mixin/dashboards/loki-writes.libsonnet b/production/loki-mixin/dashboards/loki-writes.libsonnet
index 878a1ee7d7872..bedb9ca108256 100644
--- a/production/loki-mixin/dashboards/loki-writes.libsonnet
+++ b/production/loki-mixin/dashboards/loki-writes.libsonnet
@@ -44,11 +44,11 @@ local utils = import 'mixin-utils/utils.libsonnet';
$._config.internal_components,
$.row('Frontend (cortex_gw)')
.addPanel(
- $.panel('QPS') +
- $.qpsPanel('loki_request_duration_seconds_count{%s route=~"api_prom_push|loki_api_v1_push"}' % dashboards['loki-writes.json'].cortexGwSelector)
+ $.newQueryPanel('QPS') +
+ $.newQpsPanel('loki_request_duration_seconds_count{%s route=~"api_prom_push|loki_api_v1_push"}' % dashboards['loki-writes.json'].cortexGwSelector)
)
.addPanel(
- $.panel('Latency') +
+ $.newQueryPanel('Latency', 'ms') +
utils.latencyRecordingRulePanel(
'loki_request_duration_seconds',
dashboards['loki-writes.json'].clusterMatchers + dashboards['loki-writes.json'].matchers.cortexgateway + [utils.selector.re('route', 'api_prom_push|loki_api_v1_push')],
@@ -58,11 +58,11 @@ local utils = import 'mixin-utils/utils.libsonnet';
.addRow(
$.row(if $._config.ssd.enabled then 'Write Path' else 'Distributor')
.addPanel(
- $.panel('QPS') +
- $.qpsPanel('loki_request_duration_seconds_count{%s, route=~"api_prom_push|loki_api_v1_push|/httpgrpc.HTTP/Handle"}' % std.rstripChars(dashboards['loki-writes.json'].distributorSelector, ','))
+ $.newQueryPanel('QPS') +
+ $.newQpsPanel('loki_request_duration_seconds_count{%s, route=~"api_prom_push|loki_api_v1_push|/httpgrpc.HTTP/Handle"}' % std.rstripChars(dashboards['loki-writes.json'].distributorSelector, ','))
)
.addPanel(
- $.panel('Latency') +
+ $.newQueryPanel('Latency', 'ms') +
utils.latencyRecordingRulePanel(
'loki_request_duration_seconds',
dashboards['loki-writes.json'].clusterMatchers + dashboards['loki-writes.json'].matchers.distributor + [utils.selector.re('route', 'api_prom_push|loki_api_v1_push|/httpgrpc.HTTP/Handle')],
@@ -73,11 +73,11 @@ local utils = import 'mixin-utils/utils.libsonnet';
$._config.tsdb,
$.row(if $._config.ssd.enabled then 'Write Path' else 'Distributor - Structured Metadata')
.addPanel(
- $.panel('Per Total Received Bytes') +
+ $.newQueryPanel('Per Total Received Bytes') +
$.queryPanel('sum (rate(loki_distributor_structured_metadata_bytes_received_total{%s}[$__rate_interval])) / sum(rate(loki_distributor_bytes_received_total{%s}[$__rate_interval]))' % [dashboards['loki-writes.json'].distributorSelector, dashboards['loki-writes.json'].distributorSelector], 'bytes')
)
.addPanel(
- $.panel('Per Tenant') +
+ $.newQueryPanel('Per Tenant') +
$.queryPanel('sum by (tenant) (rate(loki_distributor_structured_metadata_bytes_received_total{%s}[$__rate_interval])) / ignoring(tenant) group_left sum(rate(loki_distributor_structured_metadata_bytes_received_total{%s}[$__rate_interval]))' % [dashboards['loki-writes.json'].distributorSelector, dashboards['loki-writes.json'].distributorSelector], '{{tenant}}') + {
stack: true,
yaxes: [
@@ -91,11 +91,11 @@ local utils = import 'mixin-utils/utils.libsonnet';
!$._config.ssd.enabled,
$.row('Ingester - Zone Aware')
.addPanel(
- $.panel('QPS') +
- $.qpsPanel('loki_request_duration_seconds_count{%s route="/logproto.Pusher/Push"}' % dashboards['loki-writes.json'].ingesterZoneSelector)
+ $.newQueryPanel('QPS') +
+ $.newQpsPanel('loki_request_duration_seconds_count{%s route="/logproto.Pusher/Push"}' % dashboards['loki-writes.json'].ingesterZoneSelector)
)
.addPanel(
- $.panel('Latency') +
+ $.newQueryPanel('Latency', 'ms') +
utils.latencyRecordingRulePanel(
'loki_request_duration_seconds',
dashboards['loki-writes.json'].clusterMatchers + dashboards['loki-writes.json'].matchers.ingester_zone + [utils.selector.eq('route', '/logproto.Pusher/Push')],
@@ -106,12 +106,12 @@ local utils = import 'mixin-utils/utils.libsonnet';
!$._config.ssd.enabled,
$.row('Ingester')
.addPanel(
- $.panel('QPS') +
- $.qpsPanel('loki_request_duration_seconds_count{%s route="/logproto.Pusher/Push"}' % dashboards['loki-writes.json'].ingesterSelector) +
- $.qpsPanel('loki_request_duration_seconds_count{%s route="/logproto.Pusher/Push"}' % dashboards['loki-writes.json'].ingesterSelector)
+ $.newQueryPanel('QPS') +
+ $.newQpsPanel('loki_request_duration_seconds_count{%s route="/logproto.Pusher/Push"}' % dashboards['loki-writes.json'].ingesterSelector) +
+ $.newQpsPanel('loki_request_duration_seconds_count{%s route="/logproto.Pusher/Push"}' % dashboards['loki-writes.json'].ingesterSelector)
)
.addPanel(
- $.panel('Latency') +
+ $.newQueryPanel('Latency', 'ms') +
utils.latencyRecordingRulePanel(
'loki_request_duration_seconds',
dashboards['loki-writes.json'].clusterMatchers + dashboards['loki-writes.json'].matchers.ingester + [utils.selector.eq('route', '/logproto.Pusher/Push')],
@@ -122,11 +122,11 @@ local utils = import 'mixin-utils/utils.libsonnet';
!$._config.ssd.enabled,
$.row('Index')
.addPanel(
- $.panel('QPS') +
- $.qpsPanel('loki_index_request_duration_seconds_count{%s operation="index_chunk"}' % dashboards['loki-writes.json'].anyIngester)
+ $.newQueryPanel('QPS') +
+ $.newQpsPanel('loki_index_request_duration_seconds_count{%s operation="index_chunk"}' % dashboards['loki-writes.json'].anyIngester)
)
.addPanel(
- $.panel('Latency') +
+ $.newQueryPanel('Latency', 'ms') +
$.latencyPanel('loki_index_request_duration_seconds', '{%s operation="index_chunk"}' % dashboards['loki-writes.json'].anyIngester)
)
)
@@ -134,11 +134,11 @@ local utils = import 'mixin-utils/utils.libsonnet';
showBigTable,
$.row('BigTable')
.addPanel(
- $.panel('QPS') +
- $.qpsPanel('loki_bigtable_request_duration_seconds_count{%s operation="/google.bigtable.v2.Bigtable/MutateRows"}' % dashboards['loki-writes.json'].ingesterSelector)
+ $.newQueryPanel('QPS') +
+ $.newQpsPanel('loki_bigtable_request_duration_seconds_count{%s operation="/google.bigtable.v2.Bigtable/MutateRows"}' % dashboards['loki-writes.json'].ingesterSelector)
)
.addPanel(
- $.panel('Latency') +
+ $.newQueryPanel('Latency', 'ms') +
utils.latencyRecordingRulePanel(
'loki_bigtable_request_duration_seconds',
dashboards['loki-writes.json'].clusterMatchers + dashboards['loki-writes.json'].clusterMatchers + dashboards['loki-writes.json'].matchers.ingester + [utils.selector.eq('operation', '/google.bigtable.v2.Bigtable/MutateRows')]
@@ -148,11 +148,11 @@ local utils = import 'mixin-utils/utils.libsonnet';
.addRow(
$.row('BoltDB Shipper')
.addPanel(
- $.panel('QPS') +
- $.qpsPanel('loki_boltdb_shipper_request_duration_seconds_count{%s operation="WRITE"}' % dashboards['loki-writes.json'].ingesterSelector)
+ $.newQueryPanel('QPS') +
+ $.newQpsPanel('loki_boltdb_shipper_request_duration_seconds_count{%s operation="WRITE"}' % dashboards['loki-writes.json'].ingesterSelector)
)
.addPanel(
- $.panel('Latency') +
+ $.newQueryPanel('Latency', 'ms') +
$.latencyPanel('loki_boltdb_shipper_request_duration_seconds', '{%s operation="WRITE"}' % dashboards['loki-writes.json'].ingesterSelector)
)
),
diff --git a/production/loki-mixin/jsonnetfile.lock.json b/production/loki-mixin/jsonnetfile.lock.json
index 4dd7a006b6b00..f895125a2aa3c 100644
--- a/production/loki-mixin/jsonnetfile.lock.json
+++ b/production/loki-mixin/jsonnetfile.lock.json
@@ -18,8 +18,8 @@
"subdir": "grafana-builder"
}
},
- "version": "3f71e00a64810075b5d5f969cc6d0e419cbdebc4",
- "sum": "TieGrr7GyKjURk1+wXHFpdoCiwNaIVfZvyc5mbI9OM0="
+ "version": "f95501009c9b29bed87fe9d57c1a6e72e210f137",
+ "sum": "+z5VY+bPBNqXcmNAV8xbJcbsRA+pro1R3IM7aIY8OlU="
},
{
"source": {
|
fix
|
Update deprecated angular graph panel in dashboards mixin (#12430)
|
8096748f1f205e766deab9438c4b2bc587facfc5
|
2024-04-22 22:53:50
|
Dzmitry Panamarenka
|
fix: Add missing OTLP endpoint to nginx config (#12709)
| false
|
diff --git a/production/helm/loki/CHANGELOG.md b/production/helm/loki/CHANGELOG.md
index d109756fdc742..0b9df02241efb 100644
--- a/production/helm/loki/CHANGELOG.md
+++ b/production/helm/loki/CHANGELOG.md
@@ -13,6 +13,10 @@ Entries should include a reference to the pull request that introduced the chang
[//]: # (<AUTOMATED_UPDATES_LOCATOR> : do not remove this line. This locator is used by the CI pipeline to automatically create a changelog entry for each new Loki release. Add other chart versions and respective changelog entries bellow this line.)
+## 6.3.4
+
+- [BUGFIX] Add missing OTLP endpoint to nginx config
+
## 6.3.3
- [ENHANCEMENT] make the singlebinary set 0 the replicas number of backend, write,read.
diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml
index 176e87af7d8b6..08d0fe98332c1 100644
--- a/production/helm/loki/Chart.yaml
+++ b/production/helm/loki/Chart.yaml
@@ -3,7 +3,7 @@ name: loki
description: Helm chart for Grafana Loki in simple, scalable mode
type: application
appVersion: 3.0.0
-version: 6.3.3
+version: 6.3.4
home: https://grafana.github.io/helm-charts
sources:
- https://github.com/grafana/loki
diff --git a/production/helm/loki/README.md b/production/helm/loki/README.md
index b9ce79499a610..26fb34fd39f74 100644
--- a/production/helm/loki/README.md
+++ b/production/helm/loki/README.md
@@ -1,6 +1,6 @@
# loki
-  
+  
Helm chart for Grafana Loki in simple, scalable mode
diff --git a/production/helm/loki/templates/_helpers.tpl b/production/helm/loki/templates/_helpers.tpl
index 2a1014d9174b4..5a93337f02e56 100644
--- a/production/helm/loki/templates/_helpers.tpl
+++ b/production/helm/loki/templates/_helpers.tpl
@@ -801,6 +801,9 @@ http {
location = /distributor/ring {
proxy_pass {{ $distributorUrl }}$request_uri;
}
+ location = /otlp/v1/logs {
+ proxy_pass {{ $distributorUrl }}$request_uri;
+ }
# Ingester
location = /flush {
|
fix
|
Add missing OTLP endpoint to nginx config (#12709)
|
32a9a3f45d2017e51c77432ca979f7deeb9794f6
|
2024-02-22 13:31:17
|
Christian Haudum
|
chore(blooms): Improve how block directory is extracted (#12030)
| false
|
diff --git a/pkg/storage/stores/shipper/bloomshipper/cache_test.go b/pkg/storage/stores/shipper/bloomshipper/cache_test.go
index c85f0382bafdd..dc078ab702c0c 100644
--- a/pkg/storage/stores/shipper/bloomshipper/cache_test.go
+++ b/pkg/storage/stores/shipper/bloomshipper/cache_test.go
@@ -30,9 +30,6 @@ func TestBlockDirectory_Cleanup(t *testing.T) {
tc := tc
t.Run(name, func(t *testing.T) {
extractedBlockDirectory := t.TempDir()
- blockFilePath, _, _, _ := createBlockArchive(t)
- err := extractArchive(blockFilePath, extractedBlockDirectory)
- require.NoError(t, err)
require.DirExists(t, extractedBlockDirectory)
blockDir := BlockDirectory{
@@ -61,20 +58,10 @@ func TestBlockDirectory_Cleanup(t *testing.T) {
}
func Test_ClosableBlockQuerier(t *testing.T) {
- blockFilePath, _, _, _ := createBlockArchive(t)
- extractedBlockDirectory := t.TempDir()
- err := extractArchive(blockFilePath, extractedBlockDirectory)
- require.NoError(t, err)
-
- blockDir := BlockDirectory{
- Path: extractedBlockDirectory,
- removeDirectoryTimeout: 100 * time.Millisecond,
- refCount: atomic.NewInt32(0),
- }
+ blockDir := NewBlockDirectory(BlockRef{}, t.TempDir(), log.NewNopLogger())
querier := blockDir.BlockQuerier()
require.Equal(t, int32(1), blockDir.refCount.Load())
require.NoError(t, querier.Close())
require.Equal(t, int32(0), blockDir.refCount.Load())
-
}
diff --git a/pkg/storage/stores/shipper/bloomshipper/client.go b/pkg/storage/stores/shipper/bloomshipper/client.go
index 240f2b5166588..6ee47d78d578b 100644
--- a/pkg/storage/stores/shipper/bloomshipper/client.go
+++ b/pkg/storage/stores/shipper/bloomshipper/client.go
@@ -7,6 +7,7 @@ import (
"fmt"
"hash"
"io"
+ "strings"
"github.com/go-kit/log"
"github.com/grafana/dskit/concurrency"
@@ -15,6 +16,7 @@ import (
v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
"github.com/grafana/loki/pkg/storage/chunk/client"
+ "github.com/grafana/loki/pkg/storage/chunk/client/util"
"github.com/grafana/loki/pkg/storage/config"
"github.com/grafana/loki/pkg/storage/stores/shipper/indexshipper/tsdb"
"github.com/grafana/loki/pkg/util/encoding"
@@ -264,18 +266,28 @@ func (b *BloomClient) DeleteMetas(ctx context.Context, refs []MetaRef) error {
return err
}
-// GetBlock downloads the blocks from objectStorage and returns the downloaded block
+// GetBlock downloads the blocks from objectStorage and returns the directory
+// in which the block data resides
func (b *BloomClient) GetBlock(ctx context.Context, ref BlockRef) (BlockDirectory, error) {
key := b.Block(ref).Addr()
- readCloser, _, err := b.client.GetObject(ctx, key)
+
+ rc, _, err := b.client.GetObject(ctx, key)
if err != nil {
return BlockDirectory{}, fmt.Errorf("failed to get block from storage: %w", err)
}
+ defer rc.Close()
path := b.fsResolver.Block(ref).LocalPath()
- err = extractBlock(readCloser, path, b.logger)
+ // the block directory should not contain the .tar.gz extension
+ path = strings.TrimSuffix(path, ".tar.gz")
+ err = util.EnsureDirectory(path)
+ if err != nil {
+ return BlockDirectory{}, fmt.Errorf("failed to create block directory: %w", err)
+ }
+
+ err = v1.UnTarGz(path, rc)
if err != nil {
- return BlockDirectory{}, fmt.Errorf("failed to extract block into directory : %w", err)
+ return BlockDirectory{}, fmt.Errorf("failed to extract block: %w", err)
}
return NewBlockDirectory(ref, path, b.logger), nil
diff --git a/pkg/storage/stores/shipper/bloomshipper/compress_utils.go b/pkg/storage/stores/shipper/bloomshipper/compress_utils.go
index 332c900fe29d5..57025113cea71 100644
--- a/pkg/storage/stores/shipper/bloomshipper/compress_utils.go
+++ b/pkg/storage/stores/shipper/bloomshipper/compress_utils.go
@@ -1,14 +1,10 @@
package bloomshipper
import (
- "fmt"
- "io"
"os"
- "path/filepath"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
- "github.com/google/uuid"
v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
)
@@ -31,49 +27,3 @@ func CompressBloomBlock(ref BlockRef, archivePath, localDst string, logger log.L
return blockToUpload, nil
}
-
-func writeDataToTempFile(workingDirectoryPath string, data io.ReadCloser) (string, error) {
- defer data.Close()
- archivePath := filepath.Join(workingDirectoryPath, uuid.New().String())
-
- archiveFile, err := os.Create(archivePath)
- if err != nil {
- return "", fmt.Errorf("error creating empty file to store the archiver: %w", err)
- }
- defer archiveFile.Close()
- _, err = io.Copy(archiveFile, data)
- if err != nil {
- return "", fmt.Errorf("error writing data to archive file: %w", err)
- }
- return archivePath, nil
-}
-
-func extractArchive(archivePath string, workingDirectoryPath string) error {
- file, err := os.Open(archivePath)
- if err != nil {
- return fmt.Errorf("error opening archive file %s: %w", archivePath, err)
- }
- return v1.UnTarGz(workingDirectoryPath, file)
-}
-
-func extractBlock(data io.ReadCloser, blockDir string, logger log.Logger) error {
- err := os.MkdirAll(blockDir, os.ModePerm)
- if err != nil {
- return fmt.Errorf("can not create directory to extract the block: %w", err)
- }
- archivePath, err := writeDataToTempFile(blockDir, data)
- if err != nil {
- return fmt.Errorf("error writing data to temp file: %w", err)
- }
- defer func() {
- err = os.Remove(archivePath)
- if err != nil {
- level.Error(logger).Log("msg", "error removing temp archive file", "err", err)
- }
- }()
- err = extractArchive(archivePath, blockDir)
- if err != nil {
- return fmt.Errorf("error extracting archive: %w", err)
- }
- return nil
-}
diff --git a/pkg/storage/stores/shipper/bloomshipper/compress_utils_test.go b/pkg/storage/stores/shipper/bloomshipper/compress_utils_test.go
index 4fddf8e9c3485..11a6afb21af48 100644
--- a/pkg/storage/stores/shipper/bloomshipper/compress_utils_test.go
+++ b/pkg/storage/stores/shipper/bloomshipper/compress_utils_test.go
@@ -13,28 +13,6 @@ import (
v1 "github.com/grafana/loki/pkg/storage/bloom/v1"
)
-func Test_blockDownloader_extractBlock(t *testing.T) {
- blockFilePath, _, bloomFileContent, seriesFileContent := createBlockArchive(t)
- blockFile, err := os.OpenFile(blockFilePath, os.O_RDONLY, 0700)
- require.NoError(t, err)
-
- workingDir := t.TempDir()
-
- err = extractBlock(blockFile, workingDir, nil)
- require.NoError(t, err)
-
- require.FileExists(t, filepath.Join(workingDir, v1.BloomFileName))
- require.FileExists(t, filepath.Join(workingDir, v1.SeriesFileName))
-
- actualBloomFileContent, err := os.ReadFile(filepath.Join(workingDir, v1.BloomFileName))
- require.NoError(t, err)
- require.Equal(t, bloomFileContent, string(actualBloomFileContent))
-
- actualSeriesFileContent, err := os.ReadFile(filepath.Join(workingDir, v1.SeriesFileName))
- require.NoError(t, err)
- require.Equal(t, seriesFileContent, string(actualSeriesFileContent))
-}
-
func directoryDoesNotExist(path string) bool {
_, err := os.Lstat(path)
return err != nil
@@ -42,7 +20,7 @@ func directoryDoesNotExist(path string) bool {
const testArchiveFileName = "test-block-archive"
-func createBlockArchive(t *testing.T) (string, string, string, string) {
+func createBlockArchive(t *testing.T) (string, io.Reader, string, string) {
dir := t.TempDir()
mockBlockDir := filepath.Join(dir, "mock-block-dir")
err := os.MkdirAll(mockBlockDir, 0777)
@@ -65,5 +43,7 @@ func createBlockArchive(t *testing.T) (string, string, string, string) {
err = v1.TarGz(file, v1.NewDirectoryBlockReader(mockBlockDir))
require.NoError(t, err)
- return blockFilePath, mockBlockDir, bloomFileContent, seriesFileContent
+ _, _ = file.Seek(0, 0)
+
+ return blockFilePath, file, bloomFileContent, seriesFileContent
}
diff --git a/pkg/storage/stores/shipper/bloomshipper/fetcher.go b/pkg/storage/stores/shipper/bloomshipper/fetcher.go
index 61dac17b21d87..366b37ec96dd4 100644
--- a/pkg/storage/stores/shipper/bloomshipper/fetcher.go
+++ b/pkg/storage/stores/shipper/bloomshipper/fetcher.go
@@ -5,6 +5,7 @@ import (
"encoding/json"
"os"
"path/filepath"
+ "strings"
"sync"
"github.com/go-kit/log"
@@ -222,6 +223,9 @@ func (f *Fetcher) loadBlocksFromFS(_ context.Context, refs []BlockRef) ([]BlockD
for _, ref := range refs {
path := f.localFSResolver.Block(ref).LocalPath()
+ // the block directory does not contain the .tar.gz extension
+ // since it is stripped when the archive is extracted into a folder
+ path = strings.TrimSuffix(path, ".tar.gz")
if ok, clean := f.isBlockDir(path); ok {
blockDirs = append(blockDirs, NewBlockDirectory(ref, path, f.logger))
} else {
diff --git a/pkg/storage/stores/shipper/bloomshipper/fetcher_test.go b/pkg/storage/stores/shipper/bloomshipper/fetcher_test.go
index 962bebb9956fd..cb89f6ef6b454 100644
--- a/pkg/storage/stores/shipper/bloomshipper/fetcher_test.go
+++ b/pkg/storage/stores/shipper/bloomshipper/fetcher_test.go
@@ -6,6 +6,7 @@ import (
"fmt"
"os"
"path/filepath"
+ "strings"
"testing"
"time"
@@ -152,9 +153,9 @@ func TestFetcher_LoadBlocksFromFS(t *testing.T) {
{Ref: Ref{TenantID: "tenant", TableName: "12345", Bounds: v1.NewBounds(0x2000, 0x2fff)}},
}
dirs := []string{
- resolver.Block(refs[0]).LocalPath(),
- resolver.Block(refs[1]).LocalPath(),
- resolver.Block(refs[2]).LocalPath(),
+ strings.TrimSuffix(resolver.Block(refs[0]).LocalPath(), ".tar.gz"),
+ strings.TrimSuffix(resolver.Block(refs[1]).LocalPath(), ".tar.gz"),
+ strings.TrimSuffix(resolver.Block(refs[2]).LocalPath(), ".tar.gz"),
}
createBlockDir(t, dirs[1])
|
chore
|
Improve how block directory is extracted (#12030)
|